Index: head/sys/dev/mn/if_mn.c =================================================================== --- head/sys/dev/mn/if_mn.c (revision 297861) +++ head/sys/dev/mn/if_mn.c (revision 297862) @@ -1,1431 +1,1431 @@ /*- * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * wrote this file. As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. Poul-Henning Kamp * ---------------------------------------------------------------------------- */ /* * Driver for Siemens reference design card "Easy321-R1". * * This card contains a FALC54 E1/T1 framer and a MUNICH32X 32-channel HDLC * controller. * * The driver supports E1 mode with up to 31 channels. We send CRC4 but don't * check it coming in. * * The FALC54 and MUNICH32X have far too many registers and weird modes for * comfort, so I have not bothered typing it all into a "fooreg.h" file, * you will (badly!) need the documentation anyway if you want to mess with * this gadget. */ #include __FBSDID("$FreeBSD$"); /* * Stuff to describe the MUNIC32X and FALC54 chips. */ #define M32_CHAN 32 /* We have 32 channels */ #define M32_TS 32 /* We have 32 timeslots */ #define NG_MN_NODE_TYPE "mn" #include #include #include #include #include #include #include #include #include #include "pci_if.h" #include #include #include #include #include #include #include static int mn_maxlatency = 1000; SYSCTL_INT(_debug, OID_AUTO, mn_maxlatency, CTLFLAG_RW, &mn_maxlatency, 0, "The number of milliseconds a packet is allowed to spend in the output queue. " "If the output queue is longer than this number of milliseconds when the packet " "arrives for output, the packet will be dropped." 
); #ifndef NMN /* Most machines don't support more than 4 busmaster PCI slots, if even that many */ #define NMN 4 #endif /* From: PEB 20321 data sheet, p187, table 22 */ struct m32xreg { u_int32_t conf, cmd, stat, imask; u_int32_t fill10, piqba, piql, fill1c; u_int32_t mode1, mode2, ccba, txpoll; u_int32_t tiqba, tiql, riqba, riql; u_int32_t lconf, lccba, fill48, ltran; u_int32_t ltiqba, ltiql, lriqba, lriql; u_int32_t lreg0, lreg1, lreg2, lreg3; u_int32_t lreg4, lreg5, lre6, lstat; u_int32_t gpdir, gpdata, gpod, fill8c; u_int32_t ssccon, sscbr, ssctb, sscrb; u_int32_t ssccse, sscim, fillab, fillac; u_int32_t iomcon1, iomcon2, iomstat, fillbc; u_int32_t iomcit0, iomcit1, iomcir0, iomcir1; u_int32_t iomtmo, iomrmo, filld8, filldc; u_int32_t mbcmd, mbdata1, mbdata2, mbdata3; u_int32_t mbdata4, mbdata5, mbdata6, mbdata7; }; /* From: PEB 2254 data sheet, p80, table 10 */ struct f54wreg { u_int16_t xfifo; u_int8_t cmdr, mode, rah1, rah2, ral1, ral2; u_int8_t ipc, ccr1, ccr3, pre, rtr1, rtr2, rtr3, rtr4; u_int8_t ttr1, ttr2, ttr3, ttr4, imr0, imr1, imr2, imr3; u_int8_t imr4, fill19, fmr0, fmr1, fmr2, loop, xsw, xsp; u_int8_t xc0, xc1, rc0, rc1, xpm0, xpm1, xpm2, tswm; u_int8_t test1, idle, xsa4, xsa5, xsa6, xsa7, xsa8, fmr3; u_int8_t icb1, icb2, icb3, icb4, lim0, lim1, pcd, pcr; u_int8_t lim2, fill39[7]; u_int8_t fill40[8]; u_int8_t fill48[8]; u_int8_t fill50[8]; u_int8_t fill58[8]; u_int8_t dec, fill61, test2, fill63[5]; u_int8_t fill68[8]; u_int8_t xs[16]; }; /* From: PEB 2254 data sheet, p117, table 10 */ struct f54rreg { u_int16_t rfifo; u_int8_t fill2, mode, rah1, rah2, ral1, ral2; u_int8_t ipc, ccr1, ccr3, pre, rtr1, rtr2, rtr3, rtr4; u_int8_t ttr1, ttr2, ttr3, ttr4, imr0, imr1, imr2, imr3; u_int8_t imr4, fill19, fmr0, fmr1, fmr2, loop, xsw, xsp; u_int8_t xc0, xc1, rc0, rc1, xpm0, xpm1, xpm2, tswm; u_int8_t test, idle, xsa4, xsa5, xsa6, xsa7, xsa8, fmr13; u_int8_t icb1, icb2, icb3, icb4, lim0, lim1, pcd, pcr; u_int8_t lim2, fill39[7]; u_int8_t fill40[8]; u_int8_t 
fill48[4], frs0, frs1, rsw, rsp; u_int16_t fec, cvc, cec1, ebc; u_int16_t cec2, cec3; u_int8_t rsa4, rsa5, rsa6, rsa7; u_int8_t rsa8, rsa6s, tsr0, tsr1, sis, rsis; u_int16_t rbc; u_int8_t isr0, isr1, isr2, isr3, fill6c, fill6d, gis, vstr; u_int8_t rs[16]; }; /* Transmit & receive descriptors */ struct trxd { u_int32_t flags; vm_offset_t next; vm_offset_t data; u_int32_t status; /* only used for receive */ struct mbuf *m; /* software use only */ struct trxd *vnext; /* software use only */ }; /* Channel specification */ struct cspec { u_int32_t flags; vm_offset_t rdesc; vm_offset_t tdesc; u_int32_t itbs; }; struct m32_mem { vm_offset_t csa; u_int32_t ccb; u_int32_t reserve1[2]; u_int32_t ts[M32_TS]; struct cspec cs[M32_CHAN]; vm_offset_t crxd[M32_CHAN]; vm_offset_t ctxd[M32_CHAN]; }; struct mn_softc; struct sockaddr; struct rtentry; static int mn_probe(device_t self); static int mn_attach(device_t self); static void mn_create_channel(struct mn_softc *sc, int chan); static int mn_reset(struct mn_softc *sc); static struct trxd * mn_alloc_desc(void); static void mn_free_desc(struct trxd *dp); static void mn_intr(void *xsc); static u_int32_t mn_parse_ts(const char *s, int *nbit); #ifdef notyet static void m32_dump(struct mn_softc *sc); static void f54_dump(struct mn_softc *sc); static void mn_fmt_ts(char *p, u_int32_t ts); #endif /* notyet */ static void f54_init(struct mn_softc *sc); static ng_constructor_t ngmn_constructor; static ng_rcvmsg_t ngmn_rcvmsg; static ng_shutdown_t ngmn_shutdown; static ng_newhook_t ngmn_newhook; static ng_connect_t ngmn_connect; static ng_rcvdata_t ngmn_rcvdata; static ng_disconnect_t ngmn_disconnect; static struct ng_type mntypestruct = { .version = NG_ABI_VERSION, .name = NG_MN_NODE_TYPE, .constructor = ngmn_constructor, .rcvmsg = ngmn_rcvmsg, .shutdown = ngmn_shutdown, .newhook = ngmn_newhook, .connect = ngmn_connect, .rcvdata = ngmn_rcvdata, .disconnect = ngmn_disconnect, }; static MALLOC_DEFINE(M_MN, "mn", "Mx driver related"); #define 
NIQB 64 struct schan { enum {DOWN, UP} state; struct mn_softc *sc; int chan; u_int32_t ts; char name[8]; struct trxd *r1, *rl; struct trxd *x1, *xl; hook_p hook; time_t last_recv; time_t last_rxerr; time_t last_xmit; u_long rx_error; u_long short_error; u_long crc_error; u_long dribble_error; u_long long_error; u_long abort_error; u_long overflow_error; int last_error; int prev_error; u_long tx_pending; u_long tx_limit; }; enum framing {WHOKNOWS, E1, E1U, T1, T1U}; struct mn_softc { int unit; device_t dev; struct resource *irq; void *intrhand; enum framing framing; int nhooks; void *m0v, *m1v; vm_offset_t m0p, m1p; struct m32xreg *m32x; struct f54wreg *f54w; struct f54rreg *f54r; struct m32_mem m32_mem; u_int32_t tiqb[NIQB]; u_int32_t riqb[NIQB]; u_int32_t piqb[NIQB]; u_int32_t ltiqb[NIQB]; u_int32_t lriqb[NIQB]; char name[8]; u_int32_t falc_irq, falc_state, framer_state; struct schan *ch[M32_CHAN]; char nodename[NG_NODESIZ]; node_p node; u_long cnt_fec; u_long cnt_cvc; u_long cnt_cec1; u_long cnt_ebc; u_long cnt_cec2; u_long cnt_cec3; u_long cnt_rbc; }; static int ngmn_constructor(node_p node) { return (EINVAL); } static int ngmn_shutdown(node_p nodep) { return (EINVAL); } static void ngmn_config(node_p node, char *set, char *ret) { struct mn_softc *sc; enum framing wframing; sc = NG_NODE_PRIVATE(node); if (set != NULL) { if (!strncmp(set, "line ", 5)) { wframing = sc->framing; if (!strcmp(set, "line e1")) { wframing = E1; } else if (!strcmp(set, "line e1u")) { wframing = E1U; } else { strcat(ret, "ENOGROK\n"); return; } if (wframing == sc->framing) return; if (sc->nhooks > 0) { sprintf(ret, "Cannot change line when %d hooks open\n", sc->nhooks); return; } sc->framing = wframing; #if 1 f54_init(sc); #else mn_reset(sc); #endif } else { printf("%s CONFIG SET [%s]\n", sc->nodename, set); strcat(ret, "ENOGROK\n"); return; } } } static int ngmn_rcvmsg(node_p node, item_p item, hook_p lasthook) { struct mn_softc *sc; struct ng_mesg *resp = NULL; struct schan *sch; char 
*s, *r; int pos, i; struct ng_mesg *msg; NGI_GET_MSG(item, msg); sc = NG_NODE_PRIVATE(node); if (msg->header.typecookie != NGM_GENERIC_COOKIE) { NG_FREE_ITEM(item); NG_FREE_MSG(msg); return (EINVAL); } if (msg->header.cmd != NGM_TEXT_CONFIG && msg->header.cmd != NGM_TEXT_STATUS) { NG_FREE_ITEM(item); NG_FREE_MSG(msg); return (EINVAL); } NG_MKRESPONSE(resp, msg, sizeof(struct ng_mesg) + NG_TEXTRESPONSE, M_NOWAIT); if (resp == NULL) { NG_FREE_ITEM(item); NG_FREE_MSG(msg); return (ENOMEM); } if (msg->header.arglen) s = (char *)msg->data; else s = NULL; r = (char *)resp->data; *r = '\0'; if (msg->header.cmd == NGM_TEXT_CONFIG) { ngmn_config(node, s, r); resp->header.arglen = strlen(r) + 1; NG_RESPOND_MSG(i, node, item, resp); NG_FREE_MSG(msg); return (0); } pos = 0; pos += sprintf(pos + r,"Framer status %b;\n", sc->framer_state, "\20" "\40LOS\37AIS\36LFA\35RRA" "\34AUXP\33NMF\32LMFA\31frs0.0" "\30frs1.7\27TS16RA\26TS16LOS\25TS16AIS" "\24TS16LFA\23frs1.2\22XLS\21XLO" "\20RS1\17rsw.6\16RRA\15RY0" "\14RY1\13RY2\12RY3\11RY4" "\10SI1\7SI2\6rsp.5\5rsp.4" "\4rsp.3\3RSIF\2RS13\1RS15"); pos += sprintf(pos + r," Framing errors: %lu", sc->cnt_fec); pos += sprintf(pos + r," Code Violations: %lu\n", sc->cnt_cvc); pos += sprintf(pos + r," Falc State %b;\n", sc->falc_state, "\20" "\40LOS\37AIS\36LFA\35RRA" "\34AUXP\33NMF\32LMFA\31frs0.0" "\30frs1.7\27TS16RA\26TS16LOS\25TS16AIS" "\24TS16LFA\23frs1.2\22XLS\21XLO" "\20RS1\17rsw.6\16RRA\15RY0" "\14RY1\13RY2\12RY3\11RY4" "\10SI1\7SI2\6rsp.5\5rsp.4" "\4rsp.3\3RSIF\2RS13\1RS15"); pos += sprintf(pos + r, " Falc IRQ %b\n", sc->falc_irq, "\20" "\40RME\37RFS\36T8MS\35RMB\34CASC\33CRC4\32SA6SC\31RPF" "\30b27\27RDO\26ALLS\25XDU\24XMB\23b22\22XLSC\21XPR" "\20FAR\17LFA\16MFAR\15T400MS\14AIS\13LOS\12RAR\11RA" "\10ES\7SEC\6LMFA16\5AIS16\4RA16\3API\2SLN\1SLP"); for (i = 0; i < M32_CHAN; i++) { if (!sc->ch[i]) continue; sch = sc->ch[i]; pos += sprintf(r + pos, " Chan %d <%s> ", i, NG_HOOK_NAME(sch->hook)); pos += sprintf(r + pos, " Last Rx: "); if 
(sch->last_recv) pos += sprintf(r + pos, "%lu s", (unsigned long)(time_second - sch->last_recv)); else pos += sprintf(r + pos, "never"); pos += sprintf(r + pos, ", last RxErr: "); if (sch->last_rxerr) pos += sprintf(r + pos, "%lu s", (unsigned long)(time_second - sch->last_rxerr)); else pos += sprintf(r + pos, "never"); pos += sprintf(r + pos, ", last Tx: "); if (sch->last_xmit) pos += sprintf(r + pos, "%lu s\n", (unsigned long)(time_second - sch->last_xmit)); else pos += sprintf(r + pos, "never\n"); pos += sprintf(r + pos, " RX error(s) %lu", sch->rx_error); pos += sprintf(r + pos, " Short: %lu", sch->short_error); pos += sprintf(r + pos, " CRC: %lu", sch->crc_error); pos += sprintf(r + pos, " Mod8: %lu", sch->dribble_error); pos += sprintf(r + pos, " Long: %lu", sch->long_error); pos += sprintf(r + pos, " Abort: %lu", sch->abort_error); pos += sprintf(r + pos, " Overflow: %lu\n", sch->overflow_error); pos += sprintf(r + pos, " Last error: %b Prev error: %b\n", sch->last_error, "\20\7SHORT\5CRC\4MOD8\3LONG\2ABORT\1OVERRUN", sch->prev_error, "\20\7SHORT\5CRC\4MOD8\3LONG\2ABORT\1OVERRUN"); pos += sprintf(r + pos, " Xmit bytes pending %ld\n", sch->tx_pending); } resp->header.arglen = pos + 1; /* Take care of synchronous response, if any */ NG_RESPOND_MSG(i, node, item, resp); NG_FREE_MSG(msg); return (0); } static int ngmn_newhook(node_p node, hook_p hook, const char *name) { u_int32_t ts, chan; struct mn_softc *sc; int nbit; sc = NG_NODE_PRIVATE(node); if (name[0] != 't' || name[1] != 's') return (EINVAL); ts = mn_parse_ts(name + 2, &nbit); printf("%d bits %x\n", nbit, ts); if (sc->framing == E1 && (ts & 1)) return (EINVAL); if (sc->framing == E1U && nbit != 32) return (EINVAL); if (ts == 0) return (EINVAL); if (sc->framing == E1) chan = ffs(ts) - 1; else chan = 1; if (!sc->ch[chan]) mn_create_channel(sc, chan); else if (sc->ch[chan]->state == UP) return (EBUSY); sc->ch[chan]->ts = ts; sc->ch[chan]->hook = hook; sc->ch[chan]->tx_limit = nbit * 8; 
NG_HOOK_SET_PRIVATE(hook, sc->ch[chan]); sc->nhooks++; return(0); } static struct trxd *mn_desc_free; static struct trxd * mn_alloc_desc(void) { struct trxd *dp; dp = mn_desc_free; if (dp) mn_desc_free = dp->vnext; else dp = (struct trxd *)malloc(sizeof *dp, M_MN, M_NOWAIT); return (dp); } static void mn_free_desc(struct trxd *dp) { dp->vnext = mn_desc_free; mn_desc_free = dp; } static u_int32_t mn_parse_ts(const char *s, int *nbit) { unsigned r; int i, j; char *p; r = 0; j = -1; *nbit = 0; while(*s) { i = strtol(s, &p, 0); if (i < 0 || i > 31) return (0); while (j != -1 && j < i) { r |= 1 << j++; (*nbit)++; } j = -1; r |= 1 << i; (*nbit)++; if (*p == ',') { s = p + 1; continue; } else if (*p == '-') { j = i + 1; s = p + 1; continue; } else if (!*p) { break; } else { return (0); } } return (r); } #ifdef notyet static void mn_fmt_ts(char *p, u_int32_t ts) { char *s; int j; s = ""; ts &= 0xffffffff; for (j = 0; j < 32; j++) { if (!(ts & (1 << j))) continue; sprintf(p, "%s%d", s, j); p += strlen(p); s = ","; if (!(ts & (1 << (j+1)))) continue; for (; j < 32; j++) if (!(ts & (1 << (j+1)))) break; sprintf(p, "-%d", j); p += strlen(p); s = ","; } } #endif /* notyet */ /* * OUTPUT */ static int ngmn_rcvdata(hook_p hook, item_p item) { struct mbuf *m2; struct trxd *dp, *dp2; struct schan *sch; struct mn_softc *sc; int chan, pitch, len; struct mbuf *m; sch = NG_HOOK_PRIVATE(hook); sc = sch->sc; chan = sch->chan; if (sch->state != UP) { NG_FREE_ITEM(item); return (0); } NGI_GET_M(item, m); if (sch->tx_pending + m->m_pkthdr.len > sch->tx_limit * mn_maxlatency) { NG_FREE_M(m); NG_FREE_ITEM(item); return (0); } NG_FREE_ITEM(item); pitch = 0; m2 = m; dp2 = sc->ch[chan]->xl; len = m->m_pkthdr.len; while (len) { dp = mn_alloc_desc(); if (!dp) { pitch++; m_freem(m); sc->ch[chan]->xl = dp2; dp = dp2->vnext; while (dp) { dp2 = dp->vnext; mn_free_desc(dp); dp = dp2; } - sc->ch[chan]->xl->vnext = 0; + sc->ch[chan]->xl->vnext = NULL; break; } dp->data = vtophys(m2->m_data); dp->flags = 
m2->m_len << 16; dp->flags += 1; len -= m2->m_len; dp->next = vtophys(dp); - dp->vnext = 0; + dp->vnext = NULL; sc->ch[chan]->xl->next = vtophys(dp); sc->ch[chan]->xl->vnext = dp; sc->ch[chan]->xl = dp; if (!len) { dp->m = m; dp->flags |= 0xc0000000; dp2->flags &= ~0x40000000; } else { - dp->m = 0; + dp->m = NULL; m2 = m2->m_next; } } if (pitch) printf("%s%d: Short on mem, pitched %d packets\n", sc->name, chan, pitch); else { #if 0 printf("%d = %d + %d (%p)\n", sch->tx_pending + m->m_pkthdr.len, sch->tx_pending , m->m_pkthdr.len, m); #endif sch->tx_pending += m->m_pkthdr.len; sc->m32x->txpoll &= ~(1 << chan); } return (0); } /* * OPEN */ static int ngmn_connect(hook_p hook) { int i, nts, chan; struct trxd *dp, *dp2; struct mbuf *m; struct mn_softc *sc; struct schan *sch; u_int32_t u; sch = NG_HOOK_PRIVATE(hook); chan = sch->chan; sc = sch->sc; if (sch->state == UP) return (0); sch->state = UP; /* Count and configure the timeslots for this channel */ for (nts = i = 0; i < 32; i++) if (sch->ts & (1 << i)) { sc->m32_mem.ts[i] = 0x00ff00ff | (chan << 24) | (chan << 8); nts++; } /* Init the receiver & xmitter to HDLC */ sc->m32_mem.cs[chan].flags = 0x80e90006; /* Allocate two buffers per timeslot */ if (nts == 32) sc->m32_mem.cs[chan].itbs = 63; else sc->m32_mem.cs[chan].itbs = nts * 2; /* Setup a transmit chain with one descriptor */ /* XXX: we actually send a 1 byte packet */ dp = mn_alloc_desc(); MGETHDR(m, M_WAITOK, MT_DATA); m->m_pkthdr.len = 0; dp->m = m; dp->flags = 0xc0000000 + (1 << 16); dp->next = vtophys(dp); - dp->vnext = 0; + dp->vnext = NULL; dp->data = vtophys(sc->name); sc->m32_mem.cs[chan].tdesc = vtophys(dp); sc->ch[chan]->x1 = dp; sc->ch[chan]->xl = dp; /* Setup a receive chain with 5 + NTS descriptors */ dp = mn_alloc_desc(); m = NULL; MGETHDR(m, M_WAITOK, MT_DATA); MCLGET(m, M_WAITOK); dp->m = m; dp->data = vtophys(m->m_data); dp->flags = 0x40000000; dp->flags += 1600 << 16; dp->next = vtophys(dp); - dp->vnext = 0; + dp->vnext = NULL; 
sc->ch[chan]->rl = dp; for (i = 0; i < (nts + 10); i++) { dp2 = dp; dp = mn_alloc_desc(); m = NULL; MGETHDR(m, M_WAITOK, MT_DATA); MCLGET(m, M_WAITOK); dp->m = m; dp->data = vtophys(m->m_data); dp->flags = 0x00000000; dp->flags += 1600 << 16; dp->next = vtophys(dp2); dp->vnext = dp2; } sc->m32_mem.cs[chan].rdesc = vtophys(dp); sc->ch[chan]->r1 = dp; /* Initialize this channel */ sc->m32_mem.ccb = 0x00008000 + (chan << 8); sc->m32x->cmd = 0x1; DELAY(1000); u = sc->m32x->stat; if (!(u & 1)) printf("%s: init chan %d stat %08x\n", sc->name, chan, u); sc->m32x->stat = 1; /* probably not at splnet, force outward queueing */ NG_HOOK_FORCE_QUEUE(NG_HOOK_PEER(hook)); return (0); } /* * CLOSE */ static int ngmn_disconnect(hook_p hook) { int chan, i; struct mn_softc *sc; struct schan *sch; struct trxd *dp, *dp2; u_int32_t u; sch = NG_HOOK_PRIVATE(hook); chan = sch->chan; sc = sch->sc; if (sch->state == DOWN) return (0); sch->state = DOWN; /* Set receiver & transmitter off */ sc->m32_mem.cs[chan].flags = 0x80920006; sc->m32_mem.cs[chan].itbs = 0; /* free the timeslots */ for (i = 0; i < 32; i++) if (sc->ch[chan]->ts & (1 << i)) sc->m32_mem.ts[i] = 0x20002000; /* Initialize this channel */ sc->m32_mem.ccb = 0x00008000 + (chan << 8); sc->m32x->cmd = 0x1; DELAY(30); u = sc->m32x->stat; if (!(u & 1)) printf("%s: zap chan %d stat %08x\n", sc->name, chan, u); sc->m32x->stat = 1; /* Free all receive descriptors and mbufs */ for (dp = sc->ch[chan]->r1; dp ; dp = dp2) { if (dp->m) m_freem(dp->m); sc->ch[chan]->r1 = dp2 = dp->vnext; mn_free_desc(dp); } /* Free all transmit descriptors and mbufs */ for (dp = sc->ch[chan]->x1; dp ; dp = dp2) { if (dp->m) { sc->ch[chan]->tx_pending -= dp->m->m_pkthdr.len; m_freem(dp->m); } sc->ch[chan]->x1 = dp2 = dp->vnext; mn_free_desc(dp); } sc->nhooks--; return(0); } /* * Create a new channel. 
*/ static void mn_create_channel(struct mn_softc *sc, int chan) { struct schan *sch; sch = sc->ch[chan] = (struct schan *)malloc(sizeof *sc->ch[chan], M_MN, M_WAITOK | M_ZERO); sch->sc = sc; sch->state = DOWN; sch->chan = chan; sprintf(sch->name, "%s%d", sc->name, chan); return; } #ifdef notyet /* * Dump Munich32x state */ static void m32_dump(struct mn_softc *sc) { u_int32_t *tp4; int i, j; printf("mn%d: MUNICH32X dump\n", sc->unit); tp4 = (u_int32_t *)sc->m0v; for(j = 0; j < 64; j += 8) { printf("%02x", j * sizeof *tp4); for(i = 0; i < 8; i++) printf(" %08x", tp4[i+j]); printf("\n"); } for(j = 0; j < M32_CHAN; j++) { if (!sc->ch[j]) continue; printf("CH%d: state %d ts %08x", j, sc->ch[j]->state, sc->ch[j]->ts); printf(" %08x %08x %08x %08x %08x %08x\n", sc->m32_mem.cs[j].flags, sc->m32_mem.cs[j].rdesc, sc->m32_mem.cs[j].tdesc, sc->m32_mem.cs[j].itbs, sc->m32_mem.crxd[j], sc->m32_mem.ctxd[j] ); } } /* * Dump Falch54 state */ static void f54_dump(struct mn_softc *sc) { u_int8_t *tp1; int i, j; printf("%s: FALC54 dump\n", sc->name); tp1 = (u_int8_t *)sc->m1v; for(j = 0; j < 128; j += 16) { printf("%s: %02x |", sc->name, j * sizeof *tp1); for(i = 0; i < 16; i++) printf(" %02x", tp1[i+j]); printf("\n"); } } #endif /* notyet */ /* * Init Munich32x */ static void m32_init(struct mn_softc *sc) { sc->m32x->conf = 0x00000000; sc->m32x->mode1 = 0x81048000 + 1600; /* XXX: temp */ #if 1 sc->m32x->mode2 = 0x00000081; sc->m32x->txpoll = 0xffffffff; #elif 1 sc->m32x->mode2 = 0x00000081; sc->m32x->txpoll = 0xffffffff; #else sc->m32x->mode2 = 0x00000101; #endif sc->m32x->lconf = 0x6060009B; sc->m32x->imask = 0x00000000; } /* * Init the Falc54 */ static void f54_init(struct mn_softc *sc) { sc->f54w->ipc = 0x07; sc->f54w->xpm0 = 0xbd; sc->f54w->xpm1 = 0x03; sc->f54w->xpm2 = 0x00; sc->f54w->imr0 = 0x18; /* RMB, CASC */ sc->f54w->imr1 = 0x08; /* XMB */ sc->f54w->imr2 = 0x00; sc->f54w->imr3 = 0x38; /* LMFA16, AIS16, RA16 */ sc->f54w->imr4 = 0x00; sc->f54w->fmr0 = 0xf0; /* X: HDB3, R: 
HDB3 */ sc->f54w->fmr1 = 0x0e; /* Send CRC4, 2Mbit, ECM */ if (sc->framing == E1) sc->f54w->fmr2 = 0x03; /* Auto Rem-Alarm, Auto resync */ else if (sc->framing == E1U) sc->f54w->fmr2 = 0x33; /* dais, rtm, Auto Rem-Alarm, Auto resync */ sc->f54w->lim1 = 0xb0; /* XCLK=8kHz, .62V threshold */ sc->f54w->pcd = 0x0a; sc->f54w->pcr = 0x15; sc->f54w->xsw = 0x9f; /* fmr4 */ if (sc->framing == E1) sc->f54w->xsp = 0x1c; /* fmr5 */ else if (sc->framing == E1U) sc->f54w->xsp = 0x3c; /* tt0, fmr5 */ sc->f54w->xc0 = 0x07; sc->f54w->xc1 = 0x3d; sc->f54w->rc0 = 0x05; sc->f54w->rc1 = 0x00; sc->f54w->cmdr = 0x51; } static int mn_reset(struct mn_softc *sc) { u_int32_t u; int i; sc->m32x->ccba = vtophys(&sc->m32_mem.csa); sc->m32_mem.csa = vtophys(&sc->m32_mem.ccb); bzero(sc->tiqb, sizeof sc->tiqb); sc->m32x->tiqba = vtophys(&sc->tiqb); sc->m32x->tiql = NIQB / 16 - 1; bzero(sc->riqb, sizeof sc->riqb); sc->m32x->riqba = vtophys(&sc->riqb); sc->m32x->riql = NIQB / 16 - 1; bzero(sc->ltiqb, sizeof sc->ltiqb); sc->m32x->ltiqba = vtophys(&sc->ltiqb); sc->m32x->ltiql = NIQB / 16 - 1; bzero(sc->lriqb, sizeof sc->lriqb); sc->m32x->lriqba = vtophys(&sc->lriqb); sc->m32x->lriql = NIQB / 16 - 1; bzero(sc->piqb, sizeof sc->piqb); sc->m32x->piqba = vtophys(&sc->piqb); sc->m32x->piql = NIQB / 16 - 1; m32_init(sc); f54_init(sc); u = sc->m32x->stat; sc->m32x->stat = u; sc->m32_mem.ccb = 0x4; sc->m32x->cmd = 0x1; DELAY(1000); u = sc->m32x->stat; sc->m32x->stat = u; /* set all timeslots to known state */ for (i = 0; i < 32; i++) sc->m32_mem.ts[i] = 0x20002000; if (!(u & 1)) { printf( "mn%d: WARNING: Controller failed the PCI bus-master test.\n" "mn%d: WARNING: Use a PCI slot which can support bus-master cards.\n", sc->unit, sc->unit); return (0); } return (1); } /* * FALC54 interrupt handling */ static void f54_intr(struct mn_softc *sc) { unsigned g, u, s; g = sc->f54r->gis; u = sc->f54r->isr0 << 24; u |= sc->f54r->isr1 << 16; u |= sc->f54r->isr2 << 8; u |= sc->f54r->isr3; sc->falc_irq = u; /* don't chat 
about the 1 sec heart beat */ if (u & ~0x40) { #if 0 printf("%s*: FALC54 IRQ GIS:%02x %b\n", sc->name, g, u, "\20" "\40RME\37RFS\36T8MS\35RMB\34CASC\33CRC4\32SA6SC\31RPF" "\30b27\27RDO\26ALLS\25XDU\24XMB\23b22\22XLSC\21XPR" "\20FAR\17LFA\16MFAR\15T400MS\14AIS\13LOS\12RAR\11RA" "\10ES\7SEC\6LMFA16\5AIS16\4RA16\3API\2SLN\1SLP"); #endif s = sc->f54r->frs0 << 24; s |= sc->f54r->frs1 << 16; s |= sc->f54r->rsw << 8; s |= sc->f54r->rsp; sc->falc_state = s; s &= ~0x01844038; /* undefined or static bits */ s &= ~0x00009fc7; /* bits we don't care about */ s &= ~0x00780000; /* XXX: TS16 related */ s &= ~0x06000000; /* XXX: Multiframe related */ #if 0 printf("%s*: FALC54 Status %b\n", sc->name, s, "\20" "\40LOS\37AIS\36LFA\35RRA\34AUXP\33NMF\32LMFA\31frs0.0" "\30frs1.7\27TS16RA\26TS16LOS\25TS16AIS\24TS16LFA\23frs1.2\22XLS\21XLO" "\20RS1\17rsw.6\16RRA\15RY0\14RY1\13RY2\12RY3\11RY4" "\10SI1\7SI2\6rsp.5\5rsp.4\4rsp.3\3RSIF\2RS13\1RS15"); #endif if (s != sc->framer_state) { #if 0 for (i = 0; i < M32_CHAN; i++) { if (!sc->ch[i]) continue; sp = &sc->ch[i]->ifsppp; if (!(SP2IFP(sp)->if_flags & IFF_UP)) continue; if (s) timeout((timeout_t *)sp->pp_down, sp, 1 * hz); else timeout((timeout_t *)sp->pp_up, sp, 1 * hz); } #endif sc->framer_state = s; } } /* Once per second check error counters */ /* XXX: not clear if this is actually ok */ if (!(u & 0x40)) return; sc->cnt_fec += sc->f54r->fec; sc->cnt_cvc += sc->f54r->cvc; sc->cnt_cec1 += sc->f54r->cec1; sc->cnt_ebc += sc->f54r->ebc; sc->cnt_cec2 += sc->f54r->cec2; sc->cnt_cec3 += sc->f54r->cec3; sc->cnt_rbc += sc->f54r->rbc; } /* * Transmit interrupt for one channel */ static void mn_tx_intr(struct mn_softc *sc, u_int32_t vector) { u_int32_t chan; struct trxd *dp; struct mbuf *m; chan = vector & 0x1f; if (!sc->ch[chan]) return; if (sc->ch[chan]->state != UP) { printf("%s: tx_intr when not UP\n", sc->name); return; } for (;;) { dp = sc->ch[chan]->x1; if (vtophys(dp) == sc->m32_mem.ctxd[chan]) return; m = dp->m; if (m) { #if 0 printf("%d = 
%d - %d (%p)\n", sc->ch[chan]->tx_pending - m->m_pkthdr.len, sc->ch[chan]->tx_pending , m->m_pkthdr.len, m); #endif sc->ch[chan]->tx_pending -= m->m_pkthdr.len; m_freem(m); } sc->ch[chan]->last_xmit = time_second; sc->ch[chan]->x1 = dp->vnext; mn_free_desc(dp); } } /* * Receive interrupt for one channel */ static void mn_rx_intr(struct mn_softc *sc, u_int32_t vector) { u_int32_t chan, err; struct trxd *dp; struct mbuf *m; struct schan *sch; chan = vector & 0x1f; if (!sc->ch[chan]) return; sch = sc->ch[chan]; if (sch->state != UP) { printf("%s: rx_intr when not UP\n", sc->name); return; } vector &= ~0x1f; if (vector == 0x30000b00) sch->rx_error++; for (;;) { dp = sch->r1; if (vtophys(dp) == sc->m32_mem.crxd[chan]) return; m = dp->m; - dp->m = 0; + dp->m = NULL; m->m_pkthdr.len = m->m_len = (dp->status >> 16) & 0x1fff; err = (dp->status >> 8) & 0xff; if (!err) { int error; NG_SEND_DATA_ONLY(error, sch->hook, m); sch->last_recv = time_second; /* we could be down by now... */ if (sch->state != UP) return; } else if (err & 0x40) { sch->short_error++; } else if (err & 0x10) { sch->crc_error++; } else if (err & 0x08) { sch->dribble_error++; } else if (err & 0x04) { sch->long_error++; } else if (err & 0x02) { sch->abort_error++; } else if (err & 0x01) { sch->overflow_error++; } if (err) { sch->last_rxerr = time_second; sch->prev_error = sch->last_error; sch->last_error = err; } sc->ch[chan]->r1 = dp->vnext; /* Replenish desc + mbuf supplies */ if (!m) { MGETHDR(m, M_NOWAIT, MT_DATA); if (m == NULL) { mn_free_desc(dp); return; /* ENOBUFS */ } if (!(MCLGET(m, M_NOWAIT))) { mn_free_desc(dp); m_freem(m); return; /* ENOBUFS */ } } dp->m = m; dp->data = vtophys(m->m_data); dp->flags = 0x40000000; dp->flags += 1600 << 16; dp->next = vtophys(dp); - dp->vnext = 0; + dp->vnext = NULL; sc->ch[chan]->rl->next = vtophys(dp); sc->ch[chan]->rl->vnext = dp; sc->ch[chan]->rl->flags &= ~0x40000000; sc->ch[chan]->rl = dp; } } /* * Interupt handler */ static void mn_intr(void *xsc) { struct 
mn_softc *sc; u_int32_t stat, lstat, u; int i, j; sc = xsc; stat = sc->m32x->stat; lstat = sc->m32x->lstat; #if 0 if (!stat && !(lstat & 2)) return; #endif if (stat & ~0xc200) { printf("%s: I stat=%08x lstat=%08x\n", sc->name, stat, lstat); } if ((stat & 0x200) || (lstat & 2)) f54_intr(sc); for (j = i = 0; i < 64; i ++) { u = sc->riqb[i]; if (u) { sc->riqb[i] = 0; mn_rx_intr(sc, u); if ((u & ~0x1f) == 0x30000800 || (u & ~0x1f) == 0x30000b00) continue; u &= ~0x30000400; /* bits we don't care about */ if ((u & ~0x1f) == 0x00000900) continue; if (!(u & ~0x1f)) continue; if (!j) printf("%s*: RIQB:", sc->name); printf(" [%d]=%08x", i, u); j++; } } if (j) printf("\n"); for (j = i = 0; i < 64; i ++) { u = sc->tiqb[i]; if (u) { sc->tiqb[i] = 0; mn_tx_intr(sc, u); if ((u & ~0x1f) == 0x20000800) continue; u &= ~0x20000000; /* bits we don't care about */ if (!u) continue; if (!j) printf("%s*: TIQB:", sc->name); printf(" [%d]=%08x", i, u); j++; } } if (j) printf("\n"); sc->m32x->stat = stat; } /* * PCI initialization stuff */ static int mn_probe (device_t self) { u_int id = pci_get_devid(self); if (sizeof (struct m32xreg) != 256) { printf("MN: sizeof(struct m32xreg) = %zd, should have been 256\n", sizeof (struct m32xreg)); return (ENXIO); } if (sizeof (struct f54rreg) != 128) { printf("MN: sizeof(struct f54rreg) = %zd, should have been 128\n", sizeof (struct f54rreg)); return (ENXIO); } if (sizeof (struct f54wreg) != 128) { printf("MN: sizeof(struct f54wreg) = %zd, should have been 128\n", sizeof (struct f54wreg)); return (ENXIO); } if (id != 0x2101110a) return (ENXIO); device_set_desc_copy(self, "Munich32X E1/T1 HDLC Controller"); return (BUS_PROBE_DEFAULT); } static int mn_attach (device_t self) { struct mn_softc *sc; u_int32_t u; u_int32_t ver; static int once; int rid, error; struct resource *res; if (!once) { if (ng_newtype(&mntypestruct)) printf("ng_newtype failed\n"); once++; } sc = (struct mn_softc *)malloc(sizeof *sc, M_MN, M_WAITOK | M_ZERO); device_set_softc(self, 
sc); sc->dev = self; sc->unit = device_get_unit(self); sc->framing = E1; sprintf(sc->name, "mn%d", sc->unit); rid = PCIR_BAR(0); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) { device_printf(self, "Could not map memory\n"); free(sc, M_MN); return ENXIO; } sc->m0v = rman_get_virtual(res); sc->m0p = rman_get_start(res); rid = PCIR_BAR(1); res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (res == NULL) { device_printf(self, "Could not map memory\n"); free(sc, M_MN); return ENXIO; } sc->m1v = rman_get_virtual(res); sc->m1p = rman_get_start(res); /* Allocate interrupt */ rid = 0; sc->irq = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq == NULL) { printf("couldn't map interrupt\n"); free(sc, M_MN); return(ENXIO); } error = bus_setup_intr(self, sc->irq, INTR_TYPE_NET, NULL, mn_intr, sc, &sc->intrhand); if (error) { printf("couldn't set up irq\n"); free(sc, M_MN); return(ENXIO); } u = pci_read_config(self, PCIR_COMMAND, 2); printf("%x\n", u); pci_write_config(self, PCIR_COMMAND, u | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN, 2); #if 0 pci_write_config(self, PCIR_COMMAND, 0x02800046, 4); #endif u = pci_read_config(self, PCIR_COMMAND, 1); printf("%x\n", u); ver = pci_get_revid(self); sc->m32x = (struct m32xreg *) sc->m0v; sc->f54w = (struct f54wreg *) sc->m1v; sc->f54r = (struct f54rreg *) sc->m1v; /* We must reset before poking at FALC54 registers */ u = mn_reset(sc); if (!u) return (0); printf("mn%d: Munich32X", sc->unit); switch (ver) { case 0x13: printf(" Rev 2.2"); break; default: printf(" Rev 0x%x\n", ver); } printf(", Falc54"); switch (sc->f54r->vstr) { case 0: printf(" Rev < 1.3\n"); break; case 1: printf(" Rev 1.3\n"); break; case 2: printf(" Rev 1.4\n"); break; case 0x10: printf("-LH Rev 1.1\n"); break; case 0x13: printf("-LH Rev 1.3\n"); break; default: printf(" Rev 0x%x\n", sc->f54r->vstr); } if (ng_make_node_common(&mntypestruct, &sc->node) != 0) { 
printf("ng_make_node_common failed\n"); return (0); } NG_NODE_SET_PRIVATE(sc->node, sc); sprintf(sc->nodename, "%s%d", NG_MN_NODE_TYPE, sc->unit); if (ng_name_node(sc->node, sc->nodename)) { NG_NODE_UNREF(sc->node); return (0); } return (0); } static device_method_t mn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mn_probe), DEVMETHOD(device_attach, mn_attach), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD_END }; static driver_t mn_driver = { "mn", mn_methods, 0 }; static devclass_t mn_devclass; DRIVER_MODULE(mn, pci, mn_driver, mn_devclass, 0, 0); Index: head/sys/dev/mpt/mpt_raid.c =================================================================== --- head/sys/dev/mpt/mpt_raid.c (revision 297861) +++ head/sys/dev/mpt/mpt_raid.c (revision 297862) @@ -1,1844 +1,1844 @@ /*- * Routines for handling the integrated RAID features LSI MPT Fusion adapters. * * Copyright (c) 2005, WHEEL Sp. z o.o. * Copyright (c) 2005 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Some Breakage and Bug Fixing added later. * Copyright (c) 2006, by Matthew Jacob * All Rights Reserved * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged. */ #include __FBSDID("$FreeBSD$"); #include #include #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! 
*/ #include "dev/mpt/mpilib/mpi_raid.h" #include #include #include #include #include #include #include #include #include struct mpt_raid_action_result { union { MPI_RAID_VOL_INDICATOR indicator_struct; uint32_t new_settings; uint8_t phys_disk_num; } action_data; uint16_t action_status; }; #define REQ_TO_RAID_ACTION_RESULT(req) ((struct mpt_raid_action_result *) \ (((MSG_RAID_ACTION_REQUEST *)(req->req_vbuf)) + 1)) #define REQ_IOCSTATUS(req) ((req)->IOCStatus & MPI_IOCSTATUS_MASK) static mpt_probe_handler_t mpt_raid_probe; static mpt_attach_handler_t mpt_raid_attach; static mpt_enable_handler_t mpt_raid_enable; static mpt_event_handler_t mpt_raid_event; static mpt_shutdown_handler_t mpt_raid_shutdown; static mpt_reset_handler_t mpt_raid_ioc_reset; static mpt_detach_handler_t mpt_raid_detach; static struct mpt_personality mpt_raid_personality = { .name = "mpt_raid", .probe = mpt_raid_probe, .attach = mpt_raid_attach, .enable = mpt_raid_enable, .event = mpt_raid_event, .reset = mpt_raid_ioc_reset, .shutdown = mpt_raid_shutdown, .detach = mpt_raid_detach, }; DECLARE_MPT_PERSONALITY(mpt_raid, SI_ORDER_THIRD); MPT_PERSONALITY_DEPEND(mpt_raid, mpt_cam, 1, 1, 1); static mpt_reply_handler_t mpt_raid_reply_handler; static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req, MSG_DEFAULT_REPLY *reply_frame); static int mpt_spawn_raid_thread(struct mpt_softc *mpt); static void mpt_terminate_raid_thread(struct mpt_softc *mpt); static void mpt_raid_thread(void *arg); static timeout_t mpt_raid_timer; #if 0 static void mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol, int enable); #endif static void mpt_verify_mwce(struct mpt_softc *, struct mpt_raid_volume *); static void mpt_adjust_queue_depth(struct mpt_softc *, struct mpt_raid_volume *, struct cam_path *); static void mpt_raid_sysctl_attach(struct mpt_softc *); static const char *mpt_vol_type(struct mpt_raid_volume *vol); static const char *mpt_vol_state(struct mpt_raid_volume *vol); 
static const char *mpt_disk_state(struct mpt_raid_disk *disk);
static void mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol,
    const char *fmt, ...);
static void mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
    const char *fmt, ...);
static int mpt_issue_raid_req(struct mpt_softc *mpt,
    struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req,
    u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len,
    int write, int wait);

static int mpt_refresh_raid_data(struct mpt_softc *mpt);
static void mpt_schedule_raid_refresh(struct mpt_softc *mpt);

/* Reply-handler id assigned by mpt_register_handler() during attach. */
static uint32_t raid_handler_id = MPT_HANDLER_ID_NONE;

/*
 * Map a volume's MPI volume-type code to a human-readable RAID level name.
 */
static const char *
mpt_vol_type(struct mpt_raid_volume *vol)
{
	switch (vol->config_page->VolumeType) {
	case MPI_RAID_VOL_TYPE_IS:
		return ("RAID-0");
	case MPI_RAID_VOL_TYPE_IME:
		return ("RAID-1E");
	case MPI_RAID_VOL_TYPE_IM:
		return ("RAID-1");
	default:
		return ("Unknown");
	}
}

/*
 * Map a volume's MPI volume-state code to a human-readable string.
 */
static const char *
mpt_vol_state(struct mpt_raid_volume *vol)
{
	switch (vol->config_page->VolumeStatus.State) {
	case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
		return ("Optimal");
	case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
		return ("Degraded");
	case MPI_RAIDVOL0_STATUS_STATE_FAILED:
		return ("Failed");
	default:
		return ("Unknown");
	}
}

/*
 * Map a physical disk's MPI state code to a human-readable string.
 */
static const char *
mpt_disk_state(struct mpt_raid_disk *disk)
{
	switch (disk->config_page.PhysDiskStatus.State) {
	case MPI_PHYSDISK0_STATUS_ONLINE:
		return ("Online");
	case MPI_PHYSDISK0_STATUS_MISSING:
		return ("Missing");
	case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE:
		return ("Incompatible");
	case MPI_PHYSDISK0_STATUS_FAILED:
		return ("Failed");
	case MPI_PHYSDISK0_STATUS_INITIALIZING:
		return ("Initializing");
	case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED:
		return ("Offline Requested");
	case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED:
		return ("Failed per Host Request");
	case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE:
		return ("Offline");
	default:
		return ("Unknown");
	}
}

/*
 * printf-style console output prefixed with the volume's identity
 * ("<dev>:vol<n>(<dev>:<bus>:<id>): ").
 */
static void
mpt_vol_prt(struct mpt_softc *mpt, struct mpt_raid_volume *vol, const
char *fmt, ...)
{
	va_list ap;

	printf("%s:vol%d(%s:%d:%d): ", device_get_nameunit(mpt->dev),
	    (u_int)(vol - mpt->raid_volumes), device_get_nameunit(mpt->dev),
	    vol->config_page->VolumeBus, vol->config_page->VolumeID);
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

/*
 * printf-style console output prefixed with the disk's identity.  A disk
 * that belongs to a volume is identified by volume id and member number;
 * an unassigned disk by its physical bus/target.
 */
static void
mpt_disk_prt(struct mpt_softc *mpt, struct mpt_raid_disk *disk,
    const char *fmt, ...)
{
	va_list ap;

	if (disk->volume != NULL) {
		printf("(%s:vol%d:%d): ", device_get_nameunit(mpt->dev),
		    disk->volume->config_page->VolumeID, disk->member_number);
	} else {
		printf("(%s:%d:%d): ", device_get_nameunit(mpt->dev),
		    disk->config_page.PhysDiskBus,
		    disk->config_page.PhysDiskID);
	}
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

/*
 * CAM async callback.  On AC_FOUND_DEVICE for a target that matches one of
 * our active RAID volumes, re-apply the configured queue depth to the new
 * path.  All other codes are ignored.
 */
static void
mpt_raid_async(void *callback_arg, u_int32_t code, struct cam_path *path,
    void *arg)
{
	struct mpt_softc *mpt;

	mpt = (struct mpt_softc*)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		struct mpt_raid_volume *mpt_vol;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL) {
			break;
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG, "Callback for %d\n",
		    cgd->ccb_h.target_id);
		RAID_VOL_FOREACH(mpt, mpt_vol) {
			if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0)
				continue;
			if (mpt_vol->config_page->VolumeID
			 == cgd->ccb_h.target_id) {
				mpt_adjust_queue_depth(mpt, mpt_vol, path);
				break;
			}
		}
	}
	/* FALLTHROUGH */
	default:
		break;
	}
}

/*
 * Personality probe: claim the controller only if it reports integrated
 * RAID capability (a non-empty IOC Page 2).
 */
static int
mpt_raid_probe(struct mpt_softc *mpt)
{

	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (ENODEV);
	}
	return (0);
}

/*
 * Personality attach: spawn the RAID monitoring thread, register our reply
 * handler, and hook CAM async events so new devices get the configured
 * queue depth.  On any failure everything is torn down via
 * mpt_raid_detach().
 */
static int
mpt_raid_attach(struct mpt_softc *mpt)
{
	struct ccb_setasync csa;
	mpt_handler_t	handler;
	int		error;

	mpt_callout_init(mpt, &mpt->raid_timer);

	error = mpt_spawn_raid_thread(mpt);
	if (error != 0) {
		mpt_prt(mpt, "Unable to spawn RAID thread!\n");
		goto cleanup;
	}

	MPT_LOCK(mpt);
	handler.reply_handler = mpt_raid_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
	    &raid_handler_id);
	if (error != 0) {
		/* NOTE(review): "haandler" typo is in the original message. */
		mpt_prt(mpt, "Unable to register RAID haandler!\n");
		goto cleanup;
	}

	xpt_setup_ccb(&csa.ccb_h,
mpt->path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE; csa.callback = mpt_raid_async; csa.callback_arg = mpt; xpt_action((union ccb *)&csa); if (csa.ccb_h.status != CAM_REQ_CMP) { mpt_prt(mpt, "mpt_raid_attach: Unable to register " "CAM async handler.\n"); } MPT_UNLOCK(mpt); mpt_raid_sysctl_attach(mpt); return (0); cleanup: MPT_UNLOCK(mpt); mpt_raid_detach(mpt); return (error); } static int mpt_raid_enable(struct mpt_softc *mpt) { return (0); } static void mpt_raid_detach(struct mpt_softc *mpt) { struct ccb_setasync csa; mpt_handler_t handler; mpt_callout_drain(mpt, &mpt->raid_timer); MPT_LOCK(mpt); mpt_terminate_raid_thread(mpt); handler.reply_handler = mpt_raid_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, raid_handler_id); xpt_setup_ccb(&csa.ccb_h, mpt->path, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = mpt_raid_async; csa.callback_arg = mpt; xpt_action((union ccb *)&csa); MPT_UNLOCK(mpt); } static void mpt_raid_ioc_reset(struct mpt_softc *mpt, int type) { /* Nothing to do yet. 
*/ } static const char *raid_event_txt[] = { "Volume Created", "Volume Deleted", "Volume Settings Changed", "Volume Status Changed", "Volume Physical Disk Membership Changed", "Physical Disk Created", "Physical Disk Deleted", "Physical Disk Settings Changed", "Physical Disk Status Changed", "Domain Validation Required", "SMART Data Received", "Replace Action Started", }; static int mpt_raid_event(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) { EVENT_DATA_RAID *raid_event; struct mpt_raid_volume *mpt_vol; struct mpt_raid_disk *mpt_disk; CONFIG_PAGE_RAID_VOL_0 *vol_pg; int i; int print_event; if (msg->Event != MPI_EVENT_INTEGRATED_RAID) { return (0); } raid_event = (EVENT_DATA_RAID *)&msg->Data; mpt_vol = NULL; vol_pg = NULL; if (mpt->raid_volumes != NULL && mpt->ioc_page2 != NULL) { for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { mpt_vol = &mpt->raid_volumes[i]; vol_pg = mpt_vol->config_page; if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) continue; if (vol_pg->VolumeID == raid_event->VolumeID && vol_pg->VolumeBus == raid_event->VolumeBus) break; } if (i >= mpt->ioc_page2->MaxVolumes) { mpt_vol = NULL; vol_pg = NULL; } } mpt_disk = NULL; if (raid_event->PhysDiskNum != 0xFF && mpt->raid_disks != NULL) { mpt_disk = mpt->raid_disks + raid_event->PhysDiskNum; if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) { mpt_disk = NULL; } } print_event = 1; switch(raid_event->ReasonCode) { case MPI_EVENT_RAID_RC_VOLUME_CREATED: case MPI_EVENT_RAID_RC_VOLUME_DELETED: break; case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED: if (mpt_vol != NULL) { if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) { mpt_vol->flags &= ~MPT_RVF_UP2DATE; } else { /* * Coalesce status messages into one * per background run of our RAID thread. * This removes "spurious" status messages * from our output. 
*/ print_event = 0; } } break; case MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED: case MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED: mpt->raid_rescan++; if (mpt_vol != NULL) { mpt_vol->flags &= ~(MPT_RVF_UP2DATE|MPT_RVF_ANNOUNCED); } break; case MPI_EVENT_RAID_RC_PHYSDISK_CREATED: case MPI_EVENT_RAID_RC_PHYSDISK_DELETED: mpt->raid_rescan++; break; case MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED: case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED: mpt->raid_rescan++; if (mpt_disk != NULL) { mpt_disk->flags &= ~MPT_RDF_UP2DATE; } break; case MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED: mpt->raid_rescan++; break; case MPI_EVENT_RAID_RC_SMART_DATA: case MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED: break; } if (print_event) { if (mpt_disk != NULL) { mpt_disk_prt(mpt, mpt_disk, ""); } else if (mpt_vol != NULL) { mpt_vol_prt(mpt, mpt_vol, ""); } else { mpt_prt(mpt, "Volume(%d:%d", raid_event->VolumeBus, raid_event->VolumeID); if (raid_event->PhysDiskNum != 0xFF) mpt_prtc(mpt, ":%d): ", raid_event->PhysDiskNum); else mpt_prtc(mpt, "): "); } if (raid_event->ReasonCode >= NUM_ELEMENTS(raid_event_txt)) mpt_prtc(mpt, "Unhandled RaidEvent %#x\n", raid_event->ReasonCode); else mpt_prtc(mpt, "%s\n", raid_event_txt[raid_event->ReasonCode]); } if (raid_event->ReasonCode == MPI_EVENT_RAID_RC_SMART_DATA) { /* XXX Use CAM's print sense for this... 
		 */
		if (mpt_disk != NULL)
			mpt_disk_prt(mpt, mpt_disk, "");
		else
			mpt_prt(mpt, "Volume(%d:%d:%d: ",
			    raid_event->VolumeBus, raid_event->VolumeID,
			    raid_event->PhysDiskNum);
		mpt_prtc(mpt, "ASC 0x%x, ASCQ 0x%x)\n", raid_event->ASC,
		    raid_event->ASCQ);
	}

	/* Kick the RAID thread so it refreshes the configuration pages. */
	mpt_raid_wakeup(mpt);
	return (1);
}

/*
 * Shutdown hook.  When member write caching is configured to be on only
 * during rebuilds, force it off on every volume before the system goes
 * down so no dirty member caches are lost.
 */
static void
mpt_raid_shutdown(struct mpt_softc *mpt)
{
	struct mpt_raid_volume *mpt_vol;

	if (mpt->raid_mwce_setting != MPT_RAID_MWCE_REBUILD_ONLY) {
		return;
	}

	mpt->raid_mwce_setting = MPT_RAID_MWCE_OFF;
	RAID_VOL_FOREACH(mpt, mpt_vol) {
		mpt_verify_mwce(mpt, mpt_vol);
	}
}

/*
 * Reply handler for RAID action requests.  Marks the request done and
 * either wakes a sleeping waiter (which then owns the request) or frees
 * the request when the frame handler says it may be released.
 */
static int
mpt_raid_reply_handler(struct mpt_softc *mpt, request_t *req,
    uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{
	int free_req;

	if (req == NULL)
		return (TRUE);

	free_req = TRUE;
	if (reply_frame != NULL)
		free_req = mpt_raid_reply_frame_handler(mpt, req, reply_frame);
#ifdef NOTYET
	else if (req->ccb != NULL) {
		/* Complete Quiesce CCB with error... */
	}
#endif

	req->state &= ~REQ_STATE_QUEUED;
	req->state |= REQ_STATE_DONE;
	TAILQ_REMOVE(&mpt->request_pending_list, req, links);

	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
		/* A waiter frees the request itself after waking. */
		wakeup(req);
	} else if (free_req) {
		mpt_free_request(mpt, req);
	}

	return (TRUE);
}

/*
 * Parse additional completion information in the reply
 * frame for RAID I/O requests.
*/ static int mpt_raid_reply_frame_handler(struct mpt_softc *mpt, request_t *req, MSG_DEFAULT_REPLY *reply_frame) { MSG_RAID_ACTION_REPLY *reply; struct mpt_raid_action_result *action_result; MSG_RAID_ACTION_REQUEST *rap; reply = (MSG_RAID_ACTION_REPLY *)reply_frame; req->IOCStatus = le16toh(reply->IOCStatus); rap = (MSG_RAID_ACTION_REQUEST *)req->req_vbuf; switch (rap->Action) { case MPI_RAID_ACTION_QUIESCE_PHYS_IO: mpt_prt(mpt, "QUIESCE PHYSIO DONE\n"); break; case MPI_RAID_ACTION_ENABLE_PHYS_IO: mpt_prt(mpt, "ENABLY PHYSIO DONE\n"); break; default: break; } action_result = REQ_TO_RAID_ACTION_RESULT(req); memcpy(&action_result->action_data, &reply->ActionData, sizeof(action_result->action_data)); action_result->action_status = le16toh(reply->ActionStatus); return (TRUE); } /* * Utiltity routine to perform a RAID action command; */ static int mpt_issue_raid_req(struct mpt_softc *mpt, struct mpt_raid_volume *vol, struct mpt_raid_disk *disk, request_t *req, u_int Action, uint32_t ActionDataWord, bus_addr_t addr, bus_size_t len, int write, int wait) { MSG_RAID_ACTION_REQUEST *rap; SGE_SIMPLE32 *se; rap = req->req_vbuf; memset(rap, 0, sizeof *rap); rap->Action = Action; rap->ActionDataWord = htole32(ActionDataWord); rap->Function = MPI_FUNCTION_RAID_ACTION; rap->VolumeID = vol->config_page->VolumeID; rap->VolumeBus = vol->config_page->VolumeBus; - if (disk != 0) + if (disk != NULL) rap->PhysDiskNum = disk->config_page.PhysDiskNum; else rap->PhysDiskNum = 0xFF; se = (SGE_SIMPLE32 *)&rap->ActionDataSGE; se->Address = htole32(addr); MPI_pSGE_SET_LENGTH(se, len); MPI_pSGE_SET_FLAGS(se, (MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_END_OF_LIST | (write ? 
MPI_SGE_FLAGS_HOST_TO_IOC : MPI_SGE_FLAGS_IOC_TO_HOST))); se->FlagsLength = htole32(se->FlagsLength); rap->MsgContext = htole32(req->index | raid_handler_id); mpt_check_doorbell(mpt); mpt_send_cmd(mpt, req); if (wait) { return (mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, /*sleep_ok*/FALSE, /*time_ms*/2000)); } else { return (0); } } /*************************** RAID Status Monitoring ***************************/ static int mpt_spawn_raid_thread(struct mpt_softc *mpt) { int error; /* * Freeze out any CAM transactions until our thread * is able to run at least once. We need to update * our RAID pages before acception I/O or we may * reject I/O to an ID we later determine is for a * hidden physdisk. */ MPT_LOCK(mpt); xpt_freeze_simq(mpt->phydisk_sim, 1); MPT_UNLOCK(mpt); error = kproc_create(mpt_raid_thread, mpt, &mpt->raid_thread, /*flags*/0, /*altstack*/0, "mpt_raid%d", mpt->unit); if (error != 0) { MPT_LOCK(mpt); xpt_release_simq(mpt->phydisk_sim, /*run_queue*/FALSE); MPT_UNLOCK(mpt); } return (error); } static void mpt_terminate_raid_thread(struct mpt_softc *mpt) { if (mpt->raid_thread == NULL) { return; } mpt->shutdwn_raid = 1; wakeup(&mpt->raid_volumes); /* * Sleep on a slightly different location * for this interlock just for added safety. */ mpt_sleep(mpt, &mpt->raid_thread, PUSER, "thtrm", 0); } static void mpt_raid_thread(void *arg) { struct mpt_softc *mpt; int firstrun; mpt = (struct mpt_softc *)arg; firstrun = 1; MPT_LOCK(mpt); while (mpt->shutdwn_raid == 0) { if (mpt->raid_wakeup == 0) { mpt_sleep(mpt, &mpt->raid_volumes, PUSER, "idle", 0); continue; } mpt->raid_wakeup = 0; if (mpt_refresh_raid_data(mpt)) { mpt_schedule_raid_refresh(mpt); /* XX NOT QUITE RIGHT */ continue; } /* * Now that we have our first snapshot of RAID data, * allow CAM to access our physical disk bus. 
*/ if (firstrun) { firstrun = 0; xpt_release_simq(mpt->phydisk_sim, TRUE); } if (mpt->raid_rescan != 0) { union ccb *ccb; int error; mpt->raid_rescan = 0; MPT_UNLOCK(mpt); ccb = xpt_alloc_ccb(); MPT_LOCK(mpt); error = xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(mpt->phydisk_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (error != CAM_REQ_CMP) { xpt_free_ccb(ccb); mpt_prt(mpt, "Unable to rescan RAID Bus!\n"); } else { xpt_rescan(ccb); } } } mpt->raid_thread = NULL; wakeup(&mpt->raid_thread); MPT_UNLOCK(mpt); kproc_exit(0); } #if 0 static void mpt_raid_quiesce_timeout(void *arg) { /* Complete the CCB with error */ /* COWWWW */ } static timeout_t mpt_raid_quiesce_timeout; cam_status mpt_raid_quiesce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk, request_t *req) { union ccb *ccb; ccb = req->ccb; if ((mpt_disk->flags & MPT_RDF_QUIESCED) != 0) return (CAM_REQ_CMP); if ((mpt_disk->flags & MPT_RDF_QUIESCING) == 0) { int rv; mpt_disk->flags |= MPT_RDF_QUIESCING; xpt_freeze_devq(ccb->ccb_h.path, 1); rv = mpt_issue_raid_req(mpt, mpt_disk->volume, mpt_disk, req, MPI_RAID_ACTION_QUIESCE_PHYS_IO, /*ActionData*/0, /*addr*/0, /*len*/0, /*write*/FALSE, /*wait*/FALSE); if (rv != 0) return (CAM_REQ_CMP_ERR); mpt_req_timeout(req, mpt_raid_quiesce_timeout, ccb, 5 * hz); #if 0 if (rv == ETIMEDOUT) { mpt_disk_prt(mpt, mpt_disk, "mpt_raid_quiesce_disk: " "Quiece Timed-out\n"); xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0); return (CAM_REQ_CMP_ERR); } ar = REQ_TO_RAID_ACTION_RESULT(req); if (rv != 0 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) { mpt_disk_prt(mpt, mpt_disk, "Quiece Failed" "%d:%x:%x\n", rv, req->IOCStatus, ar->action_status); xpt_release_devq(ccb->ccb_h.path, 1, /*run*/0); return (CAM_REQ_CMP_ERR); } #endif return (CAM_REQ_INPROG); } return (CAM_REQUEUE_REQ); } #endif /* XXX Ignores that there may be multiple busses/IOCs involved. 
 */
cam_status
mpt_map_physdisk(struct mpt_softc *mpt, union ccb *ccb, target_id_t *tgt)
{
	struct mpt_raid_disk *mpt_disk;

	mpt_disk = mpt->raid_disks + ccb->ccb_h.target_id;
	/*
	 * The bounds check must come first in the conjunction: mpt_disk is
	 * only dereferenced after target_id is known to be in range.
	 */
	if (ccb->ccb_h.target_id < mpt->raid_max_disks
	 && (mpt_disk->flags & MPT_RDF_ACTIVE) != 0) {
		*tgt = mpt_disk->config_page.PhysDiskID;
		return (0);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG1, "mpt_map_physdisk(%d) - Not Active\n",
	    ccb->ccb_h.target_id);
	return (-1);
}

/* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * Return non-zero if the pass-thru target id belongs to an active RAID
 * member disk.
 */
int
mpt_is_raid_member(struct mpt_softc *mpt, target_id_t tgt)
{
	struct mpt_raid_disk *mpt_disk;
	int i;

	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0)
		return (0);
	for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) {
		mpt_disk = &mpt->raid_disks[i];
		if ((mpt_disk->flags & MPT_RDF_ACTIVE) != 0 &&
		    mpt_disk->config_page.PhysDiskID == tgt)
			return (1);
	}
	return (0);
}

/* XXX Ignores that there may be multiple busses/IOCs involved. */
/*
 * Return non-zero if the target id is one of the IOC's active RAID
 * volumes.
 */
int
mpt_is_raid_volume(struct mpt_softc *mpt, target_id_t tgt)
{
	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol;
	CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol;

	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}
	ioc_vol = mpt->ioc_page2->RaidVolume;
	ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes;
	for (;ioc_vol != ioc_last_vol; ioc_vol++) {
		if (ioc_vol->VolumeID == tgt) {
			return (1);
		}
	}
	return (0);
}

#if 0
static void
mpt_enable_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol,
	       int enable)
{
	request_t *req;
	struct mpt_raid_action_result *ar;
	CONFIG_PAGE_RAID_VOL_0 *vol_pg;
	int enabled;
	int rv;

	vol_pg = mpt_vol->config_page;
	enabled = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_ENABLED;

	/*
	 * If the setting matches the configuration,
	 * there is nothing to do.
*/ if ((enabled && enable) || (!enabled && !enable)) return; req = mpt_get_request(mpt, /*sleep_ok*/TRUE); if (req == NULL) { mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: Get request failed!\n"); return; } rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req, enable ? MPI_RAID_ACTION_ENABLE_VOLUME : MPI_RAID_ACTION_DISABLE_VOLUME, /*data*/0, /*addr*/0, /*len*/0, /*write*/FALSE, /*wait*/TRUE); if (rv == ETIMEDOUT) { mpt_vol_prt(mpt, mpt_vol, "mpt_enable_vol: " "%s Volume Timed-out\n", enable ? "Enable" : "Disable"); return; } ar = REQ_TO_RAID_ACTION_RESULT(req); if (rv != 0 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) { mpt_vol_prt(mpt, mpt_vol, "%s Volume Failed: %d:%x:%x\n", enable ? "Enable" : "Disable", rv, req->IOCStatus, ar->action_status); } mpt_free_request(mpt, req); } #endif static void mpt_verify_mwce(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol) { request_t *req; struct mpt_raid_action_result *ar; CONFIG_PAGE_RAID_VOL_0 *vol_pg; uint32_t data; int rv; int resyncing; int mwce; vol_pg = mpt_vol->config_page; resyncing = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS; mwce = vol_pg->VolumeSettings.Settings & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE; /* * If the setting matches the configuration, * there is nothing to do. */ switch (mpt->raid_mwce_setting) { case MPT_RAID_MWCE_REBUILD_ONLY: if ((resyncing && mwce) || (!resyncing && !mwce)) { return; } mpt_vol->flags ^= MPT_RVF_WCE_CHANGED; if ((mpt_vol->flags & MPT_RVF_WCE_CHANGED) == 0) { /* * Wait one more status update to see if * resyncing gets enabled. It gets disabled * temporarilly when WCE is changed. 
*/ return; } break; case MPT_RAID_MWCE_ON: if (mwce) return; break; case MPT_RAID_MWCE_OFF: if (!mwce) return; break; case MPT_RAID_MWCE_NC: return; } req = mpt_get_request(mpt, /*sleep_ok*/TRUE); if (req == NULL) { mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: Get request failed!\n"); return; } vol_pg->VolumeSettings.Settings ^= MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE; memcpy(&data, &vol_pg->VolumeSettings, sizeof(data)); vol_pg->VolumeSettings.Settings ^= MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE; rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req, MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS, data, /*addr*/0, /*len*/0, /*write*/FALSE, /*wait*/TRUE); if (rv == ETIMEDOUT) { mpt_vol_prt(mpt, mpt_vol, "mpt_verify_mwce: " "Write Cache Enable Timed-out\n"); return; } ar = REQ_TO_RAID_ACTION_RESULT(req); if (rv != 0 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) { mpt_vol_prt(mpt, mpt_vol, "Write Cache Enable Failed: " "%d:%x:%x\n", rv, req->IOCStatus, ar->action_status); } else { vol_pg->VolumeSettings.Settings ^= MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE; } mpt_free_request(mpt, req); } static void mpt_verify_resync_rate(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol) { request_t *req; struct mpt_raid_action_result *ar; CONFIG_PAGE_RAID_VOL_0 *vol_pg; u_int prio; int rv; vol_pg = mpt_vol->config_page; if (mpt->raid_resync_rate == MPT_RAID_RESYNC_RATE_NC) return; /* * If the current RAID resync rate does not * match our configured rate, update it. 
*/ prio = vol_pg->VolumeSettings.Settings & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC; if (vol_pg->ResyncRate != 0 && vol_pg->ResyncRate != mpt->raid_resync_rate) { req = mpt_get_request(mpt, /*sleep_ok*/TRUE); if (req == NULL) { mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: " "Get request failed!\n"); return; } rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req, MPI_RAID_ACTION_SET_RESYNC_RATE, mpt->raid_resync_rate, /*addr*/0, /*len*/0, /*write*/FALSE, /*wait*/TRUE); if (rv == ETIMEDOUT) { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: " "Resync Rate Setting Timed-out\n"); return; } ar = REQ_TO_RAID_ACTION_RESULT(req); if (rv != 0 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) { mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: " "%d:%x:%x\n", rv, req->IOCStatus, ar->action_status); } else vol_pg->ResyncRate = mpt->raid_resync_rate; mpt_free_request(mpt, req); } else if ((prio && mpt->raid_resync_rate < 128) || (!prio && mpt->raid_resync_rate >= 128)) { uint32_t data; req = mpt_get_request(mpt, /*sleep_ok*/TRUE); if (req == NULL) { mpt_vol_prt(mpt, mpt_vol, "mpt_verify_resync_rate: " "Get request failed!\n"); return; } vol_pg->VolumeSettings.Settings ^= MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC; memcpy(&data, &vol_pg->VolumeSettings, sizeof(data)); vol_pg->VolumeSettings.Settings ^= MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC; rv = mpt_issue_raid_req(mpt, mpt_vol, /*disk*/NULL, req, MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS, data, /*addr*/0, /*len*/0, /*write*/FALSE, /*wait*/TRUE); if (rv == ETIMEDOUT) { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_data: " "Resync Rate Setting Timed-out\n"); return; } ar = REQ_TO_RAID_ACTION_RESULT(req); if (rv != 0 || REQ_IOCSTATUS(req) != MPI_IOCSTATUS_SUCCESS || (ar->action_status != MPI_RAID_ACTION_ASTATUS_SUCCESS)) { mpt_vol_prt(mpt, mpt_vol, "Resync Rate Setting Failed: " "%d:%x:%x\n", rv, req->IOCStatus, ar->action_status); } else { vol_pg->VolumeSettings.Settings ^= 
MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC; } mpt_free_request(mpt, req); } } static void mpt_adjust_queue_depth(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol, struct cam_path *path) { struct ccb_relsim crs; xpt_setup_ccb(&crs.ccb_h, path, /*priority*/5); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = CAM_DEV_QFREEZE; crs.release_flags = RELSIM_ADJUST_OPENINGS; crs.openings = mpt->raid_queue_depth; xpt_action((union ccb *)&crs); if (crs.ccb_h.status != CAM_REQ_CMP) mpt_vol_prt(mpt, mpt_vol, "mpt_adjust_queue_depth failed " "with CAM status %#x\n", crs.ccb_h.status); } static void mpt_announce_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol) { CONFIG_PAGE_RAID_VOL_0 *vol_pg; u_int i; vol_pg = mpt_vol->config_page; mpt_vol_prt(mpt, mpt_vol, "Settings ("); for (i = 1; i <= 0x8000; i <<= 1) { switch (vol_pg->VolumeSettings.Settings & i) { case MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE: mpt_prtc(mpt, " Member-WCE"); break; case MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART: mpt_prtc(mpt, " Offline-On-SMART-Err"); break; case MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE: mpt_prtc(mpt, " Hot-Plug-Spares"); break; case MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC: mpt_prtc(mpt, " High-Priority-ReSync"); break; default: break; } } mpt_prtc(mpt, " )\n"); if (vol_pg->VolumeSettings.HotSparePool != 0) { mpt_vol_prt(mpt, mpt_vol, "Using Spare Pool%s", powerof2(vol_pg->VolumeSettings.HotSparePool) ? 
":" : "s:"); for (i = 0; i < 8; i++) { u_int mask; mask = 0x1 << i; if ((vol_pg->VolumeSettings.HotSparePool & mask) == 0) continue; mpt_prtc(mpt, " %d", i); } mpt_prtc(mpt, "\n"); } mpt_vol_prt(mpt, mpt_vol, "%d Members:\n", vol_pg->NumPhysDisks); for (i = 0; i < vol_pg->NumPhysDisks; i++){ struct mpt_raid_disk *mpt_disk; CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg; int pt_bus = cam_sim_bus(mpt->phydisk_sim); U8 f, s; mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum; disk_pg = &mpt_disk->config_page; mpt_prtc(mpt, " "); mpt_prtc(mpt, "(%s:%d:%d:0): ", device_get_nameunit(mpt->dev), pt_bus, disk_pg->PhysDiskID); if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) { mpt_prtc(mpt, "%s", mpt_disk->member_number == 0? "Primary" : "Secondary"); } else { mpt_prtc(mpt, "Stripe Position %d", mpt_disk->member_number); } f = disk_pg->PhysDiskStatus.Flags; s = disk_pg->PhysDiskStatus.State; if (f & MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC) { mpt_prtc(mpt, " Out of Sync"); } if (f & MPI_PHYSDISK0_STATUS_FLAG_QUIESCED) { mpt_prtc(mpt, " Quiesced"); } if (f & MPI_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME) { mpt_prtc(mpt, " Inactive"); } if (f & MPI_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS) { mpt_prtc(mpt, " Was Optimal"); } if (f & MPI_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS) { mpt_prtc(mpt, " Was Non-Optimal"); } switch (s) { case MPI_PHYSDISK0_STATUS_ONLINE: mpt_prtc(mpt, " Online"); break; case MPI_PHYSDISK0_STATUS_MISSING: mpt_prtc(mpt, " Missing"); break; case MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE: mpt_prtc(mpt, " Incompatible"); break; case MPI_PHYSDISK0_STATUS_FAILED: mpt_prtc(mpt, " Failed"); break; case MPI_PHYSDISK0_STATUS_INITIALIZING: mpt_prtc(mpt, " Initializing"); break; case MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED: mpt_prtc(mpt, " Requested Offline"); break; case MPI_PHYSDISK0_STATUS_FAILED_REQUESTED: mpt_prtc(mpt, " Requested Failed"); break; case MPI_PHYSDISK0_STATUS_OTHER_OFFLINE: default: mpt_prtc(mpt, " Offline Other (%x)", s); break; } mpt_prtc(mpt, "\n"); } } 
static void mpt_announce_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk) { CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg; int rd_bus = cam_sim_bus(mpt->sim); int pt_bus = cam_sim_bus(mpt->phydisk_sim); u_int i; disk_pg = &mpt_disk->config_page; mpt_disk_prt(mpt, mpt_disk, "Physical (%s:%d:%d:0), Pass-thru (%s:%d:%d:0)\n", device_get_nameunit(mpt->dev), rd_bus, disk_pg->PhysDiskID, device_get_nameunit(mpt->dev), pt_bus, mpt_disk - mpt->raid_disks); if (disk_pg->PhysDiskSettings.HotSparePool == 0) return; mpt_disk_prt(mpt, mpt_disk, "Member of Hot Spare Pool%s", powerof2(disk_pg->PhysDiskSettings.HotSparePool) ? ":" : "s:"); for (i = 0; i < 8; i++) { u_int mask; mask = 0x1 << i; if ((disk_pg->PhysDiskSettings.HotSparePool & mask) == 0) continue; mpt_prtc(mpt, " %d", i); } mpt_prtc(mpt, "\n"); } static void mpt_refresh_raid_disk(struct mpt_softc *mpt, struct mpt_raid_disk *mpt_disk, IOC_3_PHYS_DISK *ioc_disk) { int rv; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_PHYSDISK, /*PageNumber*/0, ioc_disk->PhysDiskNum, &mpt_disk->config_page.Header, /*sleep_ok*/TRUE, /*timeout_ms*/5000); if (rv != 0) { mpt_prt(mpt, "mpt_refresh_raid_disk: " "Failed to read RAID Disk Hdr(%d)\n", ioc_disk->PhysDiskNum); return; } rv = mpt_read_cur_cfg_page(mpt, ioc_disk->PhysDiskNum, &mpt_disk->config_page.Header, sizeof(mpt_disk->config_page), /*sleep_ok*/TRUE, /*timeout_ms*/5000); if (rv != 0) mpt_prt(mpt, "mpt_refresh_raid_disk: " "Failed to read RAID Disk Page(%d)\n", ioc_disk->PhysDiskNum); mpt2host_config_page_raid_phys_disk_0(&mpt_disk->config_page); } static void mpt_refresh_raid_vol(struct mpt_softc *mpt, struct mpt_raid_volume *mpt_vol, CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol) { CONFIG_PAGE_RAID_VOL_0 *vol_pg; struct mpt_raid_action_result *ar; request_t *req; int rv; int i; vol_pg = mpt_vol->config_page; mpt_vol->flags &= ~MPT_RVF_UP2DATE; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_RAID_VOLUME, 0, ioc_vol->VolumePageNumber, &vol_pg->Header, TRUE, 5000); if (rv != 0) 
{ mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: Failed to read RAID Vol Hdr(%d)\n", ioc_vol->VolumePageNumber); return; } rv = mpt_read_cur_cfg_page(mpt, ioc_vol->VolumePageNumber, &vol_pg->Header, mpt->raid_page0_len, TRUE, 5000); if (rv != 0) { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: Failed to read RAID Vol Page(%d)\n", ioc_vol->VolumePageNumber); return; } mpt2host_config_page_raid_vol_0(vol_pg); mpt_vol->flags |= MPT_RVF_ACTIVE; /* Update disk entry array data. */ for (i = 0; i < vol_pg->NumPhysDisks; i++) { struct mpt_raid_disk *mpt_disk; mpt_disk = mpt->raid_disks + vol_pg->PhysDisk[i].PhysDiskNum; mpt_disk->volume = mpt_vol; mpt_disk->member_number = vol_pg->PhysDisk[i].PhysDiskMap; if (vol_pg->VolumeType == MPI_RAID_VOL_TYPE_IM) { mpt_disk->member_number--; } } if ((vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0) return; req = mpt_get_request(mpt, TRUE); if (req == NULL) { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: Get request failed!\n"); return; } rv = mpt_issue_raid_req(mpt, mpt_vol, NULL, req, MPI_RAID_ACTION_INDICATOR_STRUCT, 0, 0, 0, FALSE, TRUE); if (rv == ETIMEDOUT) { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: Progress Indicator fetch timeout\n"); mpt_free_request(mpt, req); return; } ar = REQ_TO_RAID_ACTION_RESULT(req); if (rv == 0 && ar->action_status == MPI_RAID_ACTION_ASTATUS_SUCCESS && REQ_IOCSTATUS(req) == MPI_IOCSTATUS_SUCCESS) { memcpy(&mpt_vol->sync_progress, &ar->action_data.indicator_struct, sizeof(mpt_vol->sync_progress)); mpt2host_mpi_raid_vol_indicator(&mpt_vol->sync_progress); } else { mpt_vol_prt(mpt, mpt_vol, "mpt_refresh_raid_vol: Progress indicator fetch failed!\n"); } mpt_free_request(mpt, req); } /* * Update in-core information about RAID support. We update any entries * that didn't previously exists or have been marked as needing to * be updated by our event handler. Interesting changes are displayed * to the console. 
*/ static int mpt_refresh_raid_data(struct mpt_softc *mpt) { CONFIG_PAGE_IOC_2_RAID_VOL *ioc_vol; CONFIG_PAGE_IOC_2_RAID_VOL *ioc_last_vol; IOC_3_PHYS_DISK *ioc_disk; IOC_3_PHYS_DISK *ioc_last_disk; CONFIG_PAGE_RAID_VOL_0 *vol_pg; size_t len; int rv; int i; u_int nonopt_volumes; if (mpt->ioc_page2 == NULL || mpt->ioc_page3 == NULL) { return (0); } /* * Mark all items as unreferenced by the configuration. * This allows us to find, report, and discard stale * entries. */ for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) { mpt->raid_disks[i].flags &= ~MPT_RDF_REFERENCED; } for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { mpt->raid_volumes[i].flags &= ~MPT_RVF_REFERENCED; } /* * Get Physical Disk information. */ len = mpt->ioc_page3->Header.PageLength * sizeof(uint32_t); rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, &mpt->ioc_page3->Header, len, /*sleep_ok*/TRUE, /*timeout_ms*/5000); if (rv) { mpt_prt(mpt, "mpt_refresh_raid_data: Failed to read IOC Page 3\n"); return (-1); } mpt2host_config_page_ioc3(mpt->ioc_page3); ioc_disk = mpt->ioc_page3->PhysDisk; ioc_last_disk = ioc_disk + mpt->ioc_page3->NumPhysDisks; for (; ioc_disk != ioc_last_disk; ioc_disk++) { struct mpt_raid_disk *mpt_disk; mpt_disk = mpt->raid_disks + ioc_disk->PhysDiskNum; mpt_disk->flags |= MPT_RDF_REFERENCED; if ((mpt_disk->flags & (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) != (MPT_RDF_ACTIVE|MPT_RDF_UP2DATE)) { mpt_refresh_raid_disk(mpt, mpt_disk, ioc_disk); } mpt_disk->flags |= MPT_RDF_ACTIVE; mpt->raid_rescan++; } /* * Refresh volume data. 
*/ len = mpt->ioc_page2->Header.PageLength * sizeof(uint32_t); rv = mpt_read_cur_cfg_page(mpt, /*PageAddress*/0, &mpt->ioc_page2->Header, len, /*sleep_ok*/TRUE, /*timeout_ms*/5000); if (rv) { mpt_prt(mpt, "mpt_refresh_raid_data: " "Failed to read IOC Page 2\n"); return (-1); } mpt2host_config_page_ioc2(mpt->ioc_page2); ioc_vol = mpt->ioc_page2->RaidVolume; ioc_last_vol = ioc_vol + mpt->ioc_page2->NumActiveVolumes; for (;ioc_vol != ioc_last_vol; ioc_vol++) { struct mpt_raid_volume *mpt_vol; mpt_vol = mpt->raid_volumes + ioc_vol->VolumePageNumber; mpt_vol->flags |= MPT_RVF_REFERENCED; vol_pg = mpt_vol->config_page; if (vol_pg == NULL) continue; if (((mpt_vol->flags & (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE)) != (MPT_RVF_ACTIVE|MPT_RVF_UP2DATE)) || (vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) != 0) { mpt_refresh_raid_vol(mpt, mpt_vol, ioc_vol); } mpt_vol->flags |= MPT_RVF_ACTIVE; } nonopt_volumes = 0; for (i = 0; i < mpt->ioc_page2->MaxVolumes; i++) { struct mpt_raid_volume *mpt_vol; uint64_t total; uint64_t left; int m; u_int prio; mpt_vol = &mpt->raid_volumes[i]; if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) { continue; } vol_pg = mpt_vol->config_page; if ((mpt_vol->flags & (MPT_RVF_REFERENCED|MPT_RVF_ANNOUNCED)) == MPT_RVF_ANNOUNCED) { mpt_vol_prt(mpt, mpt_vol, "No longer configured\n"); mpt_vol->flags = 0; continue; } if ((mpt_vol->flags & MPT_RVF_ANNOUNCED) == 0) { mpt_announce_vol(mpt, mpt_vol); mpt_vol->flags |= MPT_RVF_ANNOUNCED; } if (vol_pg->VolumeStatus.State != MPI_RAIDVOL0_STATUS_STATE_OPTIMAL) nonopt_volumes++; if ((mpt_vol->flags & MPT_RVF_UP2DATE) != 0) continue; mpt_vol->flags |= MPT_RVF_UP2DATE; mpt_vol_prt(mpt, mpt_vol, "%s - %s\n", mpt_vol_type(mpt_vol), mpt_vol_state(mpt_vol)); mpt_verify_mwce(mpt, mpt_vol); if (vol_pg->VolumeStatus.Flags == 0) { continue; } mpt_vol_prt(mpt, mpt_vol, "Status ("); for (m = 1; m <= 0x80; m <<= 1) { switch (vol_pg->VolumeStatus.Flags & m) { case MPI_RAIDVOL0_STATUS_FLAG_ENABLED: mpt_prtc(mpt, " 
Enabled"); break; case MPI_RAIDVOL0_STATUS_FLAG_QUIESCED: mpt_prtc(mpt, " Quiesced"); break; case MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS: mpt_prtc(mpt, " Re-Syncing"); break; case MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE: mpt_prtc(mpt, " Inactive"); break; default: break; } } mpt_prtc(mpt, " )\n"); if ((vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) == 0) continue; mpt_verify_resync_rate(mpt, mpt_vol); left = MPT_U64_2_SCALAR(mpt_vol->sync_progress.BlocksRemaining); total = MPT_U64_2_SCALAR(mpt_vol->sync_progress.TotalBlocks); if (vol_pg->ResyncRate != 0) { prio = ((u_int)vol_pg->ResyncRate * 100000) / 0xFF; mpt_vol_prt(mpt, mpt_vol, "Rate %d.%d%%\n", prio / 1000, prio % 1000); } else { prio = vol_pg->VolumeSettings.Settings & MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC; mpt_vol_prt(mpt, mpt_vol, "%s Priority Re-Sync\n", prio ? "High" : "Low"); } mpt_vol_prt(mpt, mpt_vol, "%ju of %ju " "blocks remaining\n", (uintmax_t)left, (uintmax_t)total); /* Periodically report on sync progress. 
*/ mpt_schedule_raid_refresh(mpt); } for (i = 0; i < mpt->ioc_page2->MaxPhysDisks; i++) { struct mpt_raid_disk *mpt_disk; CONFIG_PAGE_RAID_PHYS_DISK_0 *disk_pg; int m; mpt_disk = &mpt->raid_disks[i]; disk_pg = &mpt_disk->config_page; if ((mpt_disk->flags & MPT_RDF_ACTIVE) == 0) continue; if ((mpt_disk->flags & (MPT_RDF_REFERENCED|MPT_RDF_ANNOUNCED)) == MPT_RDF_ANNOUNCED) { mpt_disk_prt(mpt, mpt_disk, "No longer configured\n"); mpt_disk->flags = 0; mpt->raid_rescan++; continue; } if ((mpt_disk->flags & MPT_RDF_ANNOUNCED) == 0) { mpt_announce_disk(mpt, mpt_disk); mpt_disk->flags |= MPT_RVF_ANNOUNCED; } if ((mpt_disk->flags & MPT_RDF_UP2DATE) != 0) continue; mpt_disk->flags |= MPT_RDF_UP2DATE; mpt_disk_prt(mpt, mpt_disk, "%s\n", mpt_disk_state(mpt_disk)); if (disk_pg->PhysDiskStatus.Flags == 0) continue; mpt_disk_prt(mpt, mpt_disk, "Status ("); for (m = 1; m <= 0x80; m <<= 1) { switch (disk_pg->PhysDiskStatus.Flags & m) { case MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC: mpt_prtc(mpt, " Out-Of-Sync"); break; case MPI_PHYSDISK0_STATUS_FLAG_QUIESCED: mpt_prtc(mpt, " Quiesced"); break; default: break; } } mpt_prtc(mpt, " )\n"); } mpt->raid_nonopt_volumes = nonopt_volumes; return (0); } static void mpt_raid_timer(void *arg) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)arg; MPT_LOCK_ASSERT(mpt); mpt_raid_wakeup(mpt); } static void mpt_schedule_raid_refresh(struct mpt_softc *mpt) { callout_reset(&mpt->raid_timer, MPT_RAID_SYNC_REPORT_INTERVAL, mpt_raid_timer, mpt); } void mpt_raid_free_mem(struct mpt_softc *mpt) { if (mpt->raid_volumes) { struct mpt_raid_volume *mpt_raid; int i; for (i = 0; i < mpt->raid_max_volumes; i++) { mpt_raid = &mpt->raid_volumes[i]; if (mpt_raid->config_page) { free(mpt_raid->config_page, M_DEVBUF); mpt_raid->config_page = NULL; } } free(mpt->raid_volumes, M_DEVBUF); mpt->raid_volumes = NULL; } if (mpt->raid_disks) { free(mpt->raid_disks, M_DEVBUF); mpt->raid_disks = NULL; } if (mpt->ioc_page2) { free(mpt->ioc_page2, M_DEVBUF); mpt->ioc_page2 = 
NULL; } if (mpt->ioc_page3) { free(mpt->ioc_page3, M_DEVBUF); mpt->ioc_page3 = NULL; } mpt->raid_max_volumes = 0; mpt->raid_max_disks = 0; } static int mpt_raid_set_vol_resync_rate(struct mpt_softc *mpt, u_int rate) { struct mpt_raid_volume *mpt_vol; if ((rate > MPT_RAID_RESYNC_RATE_MAX || rate < MPT_RAID_RESYNC_RATE_MIN) && rate != MPT_RAID_RESYNC_RATE_NC) return (EINVAL); MPT_LOCK(mpt); mpt->raid_resync_rate = rate; RAID_VOL_FOREACH(mpt, mpt_vol) { if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) { continue; } mpt_verify_resync_rate(mpt, mpt_vol); } MPT_UNLOCK(mpt); return (0); } static int mpt_raid_set_vol_queue_depth(struct mpt_softc *mpt, u_int vol_queue_depth) { struct mpt_raid_volume *mpt_vol; if (vol_queue_depth > 255 || vol_queue_depth < 1) return (EINVAL); MPT_LOCK(mpt); mpt->raid_queue_depth = vol_queue_depth; RAID_VOL_FOREACH(mpt, mpt_vol) { struct cam_path *path; int error; if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) continue; mpt->raid_rescan = 0; error = xpt_create_path(&path, NULL, cam_sim_path(mpt->sim), mpt_vol->config_page->VolumeID, /*lun*/0); if (error != CAM_REQ_CMP) { mpt_vol_prt(mpt, mpt_vol, "Unable to allocate path!\n"); continue; } mpt_adjust_queue_depth(mpt, mpt_vol, path); xpt_free_path(path); } MPT_UNLOCK(mpt); return (0); } static int mpt_raid_set_vol_mwce(struct mpt_softc *mpt, mpt_raid_mwce_t mwce) { struct mpt_raid_volume *mpt_vol; int force_full_resync; MPT_LOCK(mpt); if (mwce == mpt->raid_mwce_setting) { MPT_UNLOCK(mpt); return (0); } /* * Catch MWCE being left on due to a failed shutdown. Since * sysctls cannot be set by the loader, we treat the first * setting of this varible specially and force a full volume * resync if MWCE is enabled and a resync is in progress. 
*/ force_full_resync = 0; if (mpt->raid_mwce_set == 0 && mpt->raid_mwce_setting == MPT_RAID_MWCE_NC && mwce == MPT_RAID_MWCE_REBUILD_ONLY) force_full_resync = 1; mpt->raid_mwce_setting = mwce; RAID_VOL_FOREACH(mpt, mpt_vol) { CONFIG_PAGE_RAID_VOL_0 *vol_pg; int resyncing; int mwce; if ((mpt_vol->flags & MPT_RVF_ACTIVE) == 0) continue; vol_pg = mpt_vol->config_page; resyncing = vol_pg->VolumeStatus.Flags & MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS; mwce = vol_pg->VolumeSettings.Settings & MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE; if (force_full_resync && resyncing && mwce) { /* * XXX disable/enable volume should force a resync, * but we'll need to queice, drain, and restart * I/O to do that. */ mpt_vol_prt(mpt, mpt_vol, "WARNING - Unsafe shutdown " "detected. Suggest full resync.\n"); } mpt_verify_mwce(mpt, mpt_vol); } mpt->raid_mwce_set = 1; MPT_UNLOCK(mpt); return (0); } static const char *mpt_vol_mwce_strs[] = { "On", "Off", "On-During-Rebuild", "NC" }; static int mpt_raid_sysctl_vol_member_wce(SYSCTL_HANDLER_ARGS) { char inbuf[20]; struct mpt_softc *mpt; const char *str; int error; u_int size; u_int i; GIANT_REQUIRED; mpt = (struct mpt_softc *)arg1; str = mpt_vol_mwce_strs[mpt->raid_mwce_setting]; error = SYSCTL_OUT(req, str, strlen(str) + 1); if (error || !req->newptr) { return (error); } size = req->newlen - req->newidx; if (size >= sizeof(inbuf)) { return (EINVAL); } error = SYSCTL_IN(req, inbuf, size); if (error) { return (error); } inbuf[size] = '\0'; for (i = 0; i < NUM_ELEMENTS(mpt_vol_mwce_strs); i++) { if (strcmp(mpt_vol_mwce_strs[i], inbuf) == 0) { return (mpt_raid_set_vol_mwce(mpt, i)); } } return (EINVAL); } static int mpt_raid_sysctl_vol_resync_rate(SYSCTL_HANDLER_ARGS) { struct mpt_softc *mpt; u_int raid_resync_rate; int error; GIANT_REQUIRED; mpt = (struct mpt_softc *)arg1; raid_resync_rate = mpt->raid_resync_rate; error = sysctl_handle_int(oidp, &raid_resync_rate, 0, req); if (error || !req->newptr) { return error; } return 
(mpt_raid_set_vol_resync_rate(mpt, raid_resync_rate)); } static int mpt_raid_sysctl_vol_queue_depth(SYSCTL_HANDLER_ARGS) { struct mpt_softc *mpt; u_int raid_queue_depth; int error; GIANT_REQUIRED; mpt = (struct mpt_softc *)arg1; raid_queue_depth = mpt->raid_queue_depth; error = sysctl_handle_int(oidp, &raid_queue_depth, 0, req); if (error || !req->newptr) { return error; } return (mpt_raid_set_vol_queue_depth(mpt, raid_queue_depth)); } static void mpt_raid_sysctl_attach(struct mpt_softc *mpt) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(mpt->dev); struct sysctl_oid *tree = device_get_sysctl_tree(mpt->dev); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "vol_member_wce", CTLTYPE_STRING | CTLFLAG_RW, mpt, 0, mpt_raid_sysctl_vol_member_wce, "A", "volume member WCE(On,Off,On-During-Rebuild,NC)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "vol_queue_depth", CTLTYPE_INT | CTLFLAG_RW, mpt, 0, mpt_raid_sysctl_vol_queue_depth, "I", "default volume queue depth"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "vol_resync_rate", CTLTYPE_INT | CTLFLAG_RW, mpt, 0, mpt_raid_sysctl_vol_resync_rate, "I", "volume resync priority (0 == NC, 1 - 255)"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "nonoptimal_volumes", CTLFLAG_RD, &mpt->raid_nonopt_volumes, 0, "number of nonoptimal volumes"); } Index: head/sys/dev/mrsas/mrsas_ioctl.c =================================================================== --- head/sys/dev/mrsas/mrsas_ioctl.c (revision 297861) +++ head/sys/dev/mrsas/mrsas_ioctl.c (revision 297862) @@ -1,507 +1,507 @@ /* * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy * Support: freebsdraid@avagotech.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 2. Redistributions * in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. 3. Neither the name of the * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing * official policies,either expressed or implied, of the FreeBSD Project. 
 *
 * Send feedback to: Mail to: AVAGO TECHNOLOGIES, 1621
 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD
 *
 */

/* NOTE(review): #include targets were stripped from this extraction. */
#include 
__FBSDID("$FreeBSD$");

#include 
#include 

/*
 * Function prototypes
 */
int	mrsas_alloc_mfi_cmds(struct mrsas_softc *sc);
int	mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd);
void	mrsas_free_ioc_cmd(struct mrsas_softc *sc);
void	mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
void   *mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd);
static int mrsas_create_frame_pool(struct mrsas_softc *sc);
static void
mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs,
    int nsegs, int error);

extern struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc);
extern void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd);
extern int mrsas_issue_blocked_cmd(struct mrsas_softc *sc,
    struct mrsas_mfi_cmd *cmd);

/*
 * mrsas_passthru:	Handle pass-through commands
 * input:		Adapter instance soft state argument pointer
 *
 * This function is called from mrsas_ioctl() to handle pass-through and
 * ioctl commands to Firmware.  For each user SGL entry it allocates a
 * bounce buffer, copies user data in, issues the MFI frame synchronously,
 * then copies data (and optional sense) back out.  Returns 0 or an errno.
 */
int
mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd)
{
	struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg;
#ifdef COMPAT_FREEBSD32
	struct mrsas_iocpacket32 *user_ioc32 = (struct mrsas_iocpacket32 *)arg;
#endif
	union mrsas_frame *in_cmd = (union mrsas_frame *)&(user_ioc->frame.raw);
	struct mrsas_mfi_cmd *cmd = NULL;
	bus_dma_tag_t ioctl_data_tag[MAX_IOCTL_SGE];
	bus_dmamap_t ioctl_data_dmamap[MAX_IOCTL_SGE];
	void   *ioctl_data_mem[MAX_IOCTL_SGE];
	bus_addr_t ioctl_data_phys_addr[MAX_IOCTL_SGE];
	bus_dma_tag_t ioctl_sense_tag = 0;
	bus_dmamap_t ioctl_sense_dmamap = 0;
-	void   *ioctl_sense_mem = 0;
+	void   *ioctl_sense_mem = NULL;
	bus_addr_t ioctl_sense_phys_addr = 0;
	int     i, ioctl_data_size = 0, ioctl_sense_size, ret = 0;
	struct mrsas_sge32 *kern_sge32;
	unsigned long *sense_ptr;
	uint8_t *iov_base_ptrin = NULL;
	size_t  iov_len = 0;

	/*
	 * Check for NOP from MegaCli... MegaCli can issue a DCMD of 0. In
	 * this case do nothing and return 0 to it as status.
	 */
	if (in_cmd->dcmd.opcode == 0) {
		device_printf(sc->mrsas_dev, "In %s() Got a NOP\n", __func__);
		user_ioc->frame.hdr.cmd_status = MFI_STAT_OK;
		return (0);
	}
	/* Validate SGL length */
	if (user_ioc->sge_count > MAX_IOCTL_SGE) {
		device_printf(sc->mrsas_dev, "In %s() SGL is too long (%d > 8).\n",
		    __func__, user_ioc->sge_count);
		return (ENOENT);
	}
	/* Get a command */
	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev, "Failed to get a free cmd for IOCTL\n");
		return (ENOMEM);
	}
	/*
	 * User's IOCTL packet has 2 frames (maximum). Copy those two frames
	 * into our cmd's frames. cmd->frame's context will get overwritten
	 * when we copy from user's frames. So set that value alone
	 * separately
	 */
	memcpy(cmd->frame, user_ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
	cmd->frame->hdr.context = cmd->index;
	cmd->frame->hdr.pad_0 = 0;
	/* Force 32-bit SGEs and 32-bit sense address (kern_sge32 below). */
	cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
	    MFI_FRAME_SENSE64);
	/*
	 * The management interface between applications and the fw uses MFI
	 * frames. E.g, RAID configuration changes, LD property changes etc
	 * are accomplishes through different kinds of MFI frames. The driver
	 * needs to care only about substituting user buffers with kernel
	 * buffers in SGLs. The location of SGL is embedded in the struct
	 * iocpacket itself.
	 */
	kern_sge32 = (struct mrsas_sge32 *)
	    ((unsigned long)cmd->frame + user_ioc->sgl_off);

	/*
	 * For each user buffer, create a mirror buffer and copy in
	 *
	 * NOTE(review): on an early "goto out", ioctl_data_tag[]/mem[]/
	 * phys_addr[] entries past the failing index are still
	 * uninitialized, yet the cleanup loop below walks all sge_count
	 * entries — looks like a use of uninitialized stack values;
	 * verify against later revisions.
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			if (!user_ioc->sgl[i].iov_len)
				continue;
			ioctl_data_size = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			if (!user_ioc32->sgl[i].iov_len)
				continue;
			ioctl_data_size = user_ioc32->sgl[i].iov_len;
#endif
		}
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    1, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    ioctl_data_size,
		    1,
		    ioctl_data_size,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &ioctl_data_tag[i])) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl data tag\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamem_alloc(ioctl_data_tag[i], (void **)&ioctl_data_mem[i],
		    (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_data_dmamap[i])) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl data mem\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamap_load(ioctl_data_tag[i], ioctl_data_dmamap[i],
		    ioctl_data_mem[i], ioctl_data_size, mrsas_alloc_cb,
		    &ioctl_data_phys_addr[i], BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load ioctl data mem\n");
			ret = ENOMEM;
			goto out;
		}
		/* Save the physical address and length */
		kern_sge32[i].phys_addr = (u_int32_t)ioctl_data_phys_addr[i];

		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			kern_sge32[i].length = user_ioc->sgl[i].iov_len;

			iov_base_ptrin = user_ioc->sgl[i].iov_base;
			iov_len = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			kern_sge32[i].length = user_ioc32->sgl[i].iov_len;

			iov_base_ptrin = PTRIN(user_ioc32->sgl[i].iov_base);
			iov_len = user_ioc32->sgl[i].iov_len;
#endif
		}

		/* Copy in data from user space */
		ret = copyin(iov_base_ptrin, ioctl_data_mem[i], iov_len);
		if (ret) {
			device_printf(sc->mrsas_dev, "IOCTL copyin failed!\n");
			goto out;
		}
	}

	ioctl_sense_size = user_ioc->sense_len;

	/* Optional sense buffer: mirror it in DMA-able memory and patch
	 * its physical address into the frame at sense_off. */
	if (user_ioc->sense_len) {
		if (bus_dma_tag_create(sc->mrsas_parent_tag,
		    1, 0,
		    BUS_SPACE_MAXADDR_32BIT,
		    BUS_SPACE_MAXADDR,
		    NULL, NULL,
		    ioctl_sense_size,
		    1,
		    ioctl_sense_size,
		    BUS_DMA_ALLOCNOW,
		    NULL, NULL,
		    &ioctl_sense_tag)) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense tag\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamem_alloc(ioctl_sense_tag, (void **)&ioctl_sense_mem,
		    (BUS_DMA_NOWAIT | BUS_DMA_ZERO), &ioctl_sense_dmamap)) {
			device_printf(sc->mrsas_dev, "Cannot allocate ioctl sense mem\n");
			ret = ENOMEM;
			goto out;
		}
		if (bus_dmamap_load(ioctl_sense_tag, ioctl_sense_dmamap,
		    ioctl_sense_mem, ioctl_sense_size, mrsas_alloc_cb,
		    &ioctl_sense_phys_addr, BUS_DMA_NOWAIT)) {
			device_printf(sc->mrsas_dev, "Cannot load ioctl sense mem\n");
			ret = ENOMEM;
			goto out;
		}
		sense_ptr = (unsigned long *)
		    ((unsigned long)cmd->frame + user_ioc->sense_off);
		*sense_ptr = ioctl_sense_phys_addr;
	}
	/*
	 * Set the sync_cmd flag so that the ISR knows not to complete this
	 * cmd to the SCSI mid-layer
	 */
	cmd->sync_cmd = 1;
	mrsas_issue_blocked_cmd(sc, cmd);
	cmd->sync_cmd = 0;

	/*
	 * copy out the kernel buffers to user buffers
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			iov_base_ptrin = user_ioc->sgl[i].iov_base;
			iov_len = user_ioc->sgl[i].iov_len;
#ifdef COMPAT_FREEBSD32
		} else {
			iov_base_ptrin = PTRIN(user_ioc32->sgl[i].iov_base);
			iov_len = user_ioc32->sgl[i].iov_len;
#endif
		}

		ret = copyout(ioctl_data_mem[i], iov_base_ptrin, iov_len);
		if (ret) {
			device_printf(sc->mrsas_dev, "IOCTL copyout failed!\n");
			goto out;
		}
	}

	/*
	 * copy out the sense
	 */
	if (user_ioc->sense_len) {
		/*
		 * sense_buff points to the location that has the user sense
		 * buffer address
		 */
		sense_ptr = (unsigned long *)((unsigned long)user_ioc->frame.raw +
		    user_ioc->sense_off);
		ret = copyout(ioctl_sense_mem, (unsigned long *)*sense_ptr,
		    user_ioc->sense_len);
		if (ret) {
			device_printf(sc->mrsas_dev, "IOCTL sense copyout failed!\n");
			goto out;
		}
	}
	/*
	 * Return command status to user space
	 */
	memcpy(&user_ioc->frame.hdr.cmd_status, &cmd->frame->hdr.cmd_status,
	    sizeof(u_int8_t));

out:
	/*
	 * Release sense buffer
	 */
	if (user_ioc->sense_len) {
		if (ioctl_sense_phys_addr)
			bus_dmamap_unload(ioctl_sense_tag, ioctl_sense_dmamap);
		if (ioctl_sense_mem != NULL)
			bus_dmamem_free(ioctl_sense_tag, ioctl_sense_mem,
			    ioctl_sense_dmamap);
		if (ioctl_sense_tag != NULL)
			bus_dma_tag_destroy(ioctl_sense_tag);
	}
	/*
	 * Release data buffers
	 */
	for (i = 0; i < user_ioc->sge_count; i++) {
		if (ioctlCmd == MRSAS_IOC_FIRMWARE_PASS_THROUGH64) {
			if (!user_ioc->sgl[i].iov_len)
				continue;
#ifdef COMPAT_FREEBSD32
		} else {
			if (!user_ioc32->sgl[i].iov_len)
				continue;
#endif
		}
		if (ioctl_data_phys_addr[i])
			bus_dmamap_unload(ioctl_data_tag[i], ioctl_data_dmamap[i]);
		if (ioctl_data_mem[i] != NULL)
			bus_dmamem_free(ioctl_data_tag[i], ioctl_data_mem[i],
			    ioctl_data_dmamap[i]);
		if (ioctl_data_tag[i] != NULL)
			bus_dma_tag_destroy(ioctl_data_tag[i]);
	}
	/* Free command */
	mrsas_release_mfi_cmd(cmd);

	return (ret);
}

/*
 * mrsas_alloc_mfi_cmds:	Allocates the command packets
 * input:			Adapter instance soft state
 *
 * Each IOCTL or passthru command that is issued to the FW are wrapped in a
 * local data structure called mrsas_mfi_cmd. The frame embedded in this
 * mrsas_mfi is issued to FW. The array is used only to look up the
 * mrsas_mfi_cmd given the context. The free commands are maintained in a
 * linked list.
 */
int
mrsas_alloc_mfi_cmds(struct mrsas_softc *sc)
{
	int i, j;
	u_int32_t max_cmd;
	struct mrsas_mfi_cmd *cmd;

	max_cmd = MRSAS_MAX_MFI_CMDS;

	/*
	 * sc->mfi_cmd_list is an array of struct mrsas_mfi_cmd pointers.
	 * Allocate the dynamic array first and then allocate individual
	 * commands.
	 */
	sc->mfi_cmd_list = malloc(sizeof(struct mrsas_mfi_cmd *) * max_cmd,
	    M_MRSAS, M_NOWAIT);
	if (!sc->mfi_cmd_list) {
		device_printf(sc->mrsas_dev, "Cannot alloc memory for mfi_cmd cmd_list.\n");
		return (ENOMEM);
	}
	memset(sc->mfi_cmd_list, 0, sizeof(struct mrsas_mfi_cmd *) * max_cmd);
	for (i = 0; i < max_cmd; i++) {
		sc->mfi_cmd_list[i] = malloc(sizeof(struct mrsas_mfi_cmd),
		    M_MRSAS, M_NOWAIT);
		if (!sc->mfi_cmd_list[i]) {
			/* Unwind the partial allocation. */
			for (j = 0; j < i; j++)
				free(sc->mfi_cmd_list[j], M_MRSAS);
			free(sc->mfi_cmd_list, M_MRSAS);
			sc->mfi_cmd_list = NULL;
			return (ENOMEM);
		}
	}

	for (i = 0; i < max_cmd; i++) {
		cmd = sc->mfi_cmd_list[i];
		memset(cmd, 0, sizeof(struct mrsas_mfi_cmd));
		cmd->index = i;
		cmd->ccb_ptr = NULL;
		cmd->sc = sc;
		TAILQ_INSERT_TAIL(&(sc->mrsas_mfi_cmd_list_head), cmd, next);
	}

	/* create a frame pool and assign one frame to each command */
	if (mrsas_create_frame_pool(sc)) {
		device_printf(sc->mrsas_dev, "Cannot allocate DMA frame pool.\n");
		/* Free the frames */
		for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
			cmd = sc->mfi_cmd_list[i];
			mrsas_free_frame(sc, cmd);
		}
		if (sc->mficmd_frame_tag != NULL)
			bus_dma_tag_destroy(sc->mficmd_frame_tag);
		return (ENOMEM);
	}
	return (0);
}

/*
 * mrsas_create_frame_pool:	Creates DMA pool for cmd frames
 * input:			Adapter soft state
 *
 * Each command packet has an embedded DMA memory buffer that is used for
 * filling MFI frame and the SG list that immediately follows the frame. This
 * function creates those DMA memory buffers for each command packet by using
 * PCI pool facility. pad_0 is initialized to 0 to prevent corrupting value
 * of context and could cause FW crash.
 */
static int
mrsas_create_frame_pool(struct mrsas_softc *sc)
{
	int i;
	struct mrsas_mfi_cmd *cmd;

	/* One tag for all frames: fixed size, single segment, 32-bit. */
	if (bus_dma_tag_create(sc->mrsas_parent_tag,
	    1, 0,
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    MRSAS_MFI_FRAME_SIZE,
	    1,
	    MRSAS_MFI_FRAME_SIZE,
	    BUS_DMA_ALLOCNOW,
	    NULL, NULL,
	    &sc->mficmd_frame_tag)) {
		device_printf(sc->mrsas_dev, "Cannot create MFI frame tag\n");
		return (ENOMEM);
	}
	for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) {
		cmd = sc->mfi_cmd_list[i];
		cmd->frame = mrsas_alloc_frame(sc, cmd);
		/*
		 * NOTE(review): on failure, frames allocated in earlier
		 * iterations are left for the caller
		 * (mrsas_alloc_mfi_cmds) to free.
		 */
		if (cmd->frame == NULL) {
			device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
			return (ENOMEM);
		}
		/* Zero the frame; context/pad_0 must be set explicitly. */
		memset(cmd->frame, 0, MRSAS_MFI_FRAME_SIZE);
		cmd->frame->io.context = cmd->index;
		cmd->frame->io.pad_0 = 0;
	}
	return (0);
}

/*
 * mrsas_alloc_frame:	Allocates MFI Frames
 * input:		Adapter soft state
 *
 * Create bus DMA memory tag and dmamap and load memory for MFI frames.
 * Returns virtual memory pointer to allocated region.  The frame's bus
 * address is left in cmd->frame_phys_addr by the load callback.
 */
void   *
mrsas_alloc_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	u_int32_t frame_size = MRSAS_MFI_FRAME_SIZE;

	if (bus_dmamem_alloc(sc->mficmd_frame_tag, (void **)&cmd->frame_mem,
	    BUS_DMA_NOWAIT, &cmd->frame_dmamap)) {
		device_printf(sc->mrsas_dev, "Cannot alloc MFI frame memory\n");
		return (NULL);
	}
	if (bus_dmamap_load(sc->mficmd_frame_tag, cmd->frame_dmamap,
	    cmd->frame_mem, frame_size, mrsas_alloc_cb,
	    &cmd->frame_phys_addr, BUS_DMA_NOWAIT)) {
		device_printf(sc->mrsas_dev, "Cannot load IO request memory\n");
		return (NULL);
	}
	return (cmd->frame_mem);
}

/*
 * mrsas_alloc_cb:	Callback function of bus_dmamap_load()
 * input:		callback argument,
 *			machine dependent type that describes DMA segments,
 *			number of segments,
 *			error code.
 *
 * This function is for the driver to receive mapping information resultant
 * of the bus_dmamap_load(). The information is actually not being used, but
 * the address is saved anyway.
 */
static void
mrsas_alloc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	/* Single-segment tags only: record the first segment's address. */
	addr = arg;
	*addr = segs[0].ds_addr;
}

/*
 * mrsas_free_frames:	Frees memory for MFI frames
 * input:		Adapter soft state
 *
 * Deallocates MFI frames memory. Called from mrsas_free_mem() during detach
 * and error case during creation of frame pool.
 */
void
mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd)
{
	if (cmd->frame_phys_addr)
		bus_dmamap_unload(sc->mficmd_frame_tag, cmd->frame_dmamap);
	if (cmd->frame_mem != NULL)
		bus_dmamem_free(sc->mficmd_frame_tag, cmd->frame_mem,
		    cmd->frame_dmamap);
}
Index: head/sys/dev/mvs/mvs_pci.c
===================================================================
--- head/sys/dev/mvs/mvs_pci.c	(revision 297861)
+++ head/sys/dev/mvs/mvs_pci.c	(revision 297862)
@@ -1,524 +1,524 @@
/*-
 * Copyright (c) 2010 Alexander Motin
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mvs.h" /* local prototypes */ static int mvs_setup_interrupt(device_t dev); static void mvs_intr(void *data); static int mvs_suspend(device_t dev); static int mvs_resume(device_t dev); static int mvs_ctlr_setup(device_t dev); static struct { uint32_t id; uint8_t rev; const char *name; int ports; int quirks; } mvs_ids[] = { {0x504011ab, 0x00, "Marvell 88SX5040", 4, MVS_Q_GENI}, {0x504111ab, 0x00, "Marvell 88SX5041", 4, MVS_Q_GENI}, {0x508011ab, 0x00, "Marvell 88SX5080", 8, MVS_Q_GENI}, {0x508111ab, 0x00, "Marvell 88SX5081", 8, MVS_Q_GENI}, {0x604011ab, 0x00, "Marvell 88SX6040", 4, MVS_Q_GENII}, {0x604111ab, 0x00, "Marvell 88SX6041", 4, MVS_Q_GENII}, {0x604211ab, 0x00, "Marvell 88SX6042", 4, MVS_Q_GENIIE}, {0x608011ab, 0x00, "Marvell 88SX6080", 8, MVS_Q_GENII}, {0x608111ab, 0x00, "Marvell 88SX6081", 8, MVS_Q_GENII}, {0x704211ab, 0x00, "Marvell 88SX7042", 4, MVS_Q_GENIIE|MVS_Q_CT}, {0x02419005, 0x00, "Adaptec 1420SA", 4, MVS_Q_GENII}, {0x02439005, 0x00, "Adaptec 1430SA", 4, MVS_Q_GENIIE|MVS_Q_CT}, {0x00000000, 0x00, NULL, 0, 0} }; static int mvs_probe(device_t dev) { char buf[64]; int i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); for (i = 0; mvs_ids[i].id != 0; i++) { if (mvs_ids[i].id == devid && mvs_ids[i].rev <= revid) { snprintf(buf, sizeof(buf), "%s SATA 
controller", mvs_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int mvs_attach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); device_t child; int error, unit, i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); ctlr->dev = dev; i = 0; while (mvs_ids[i].id != 0 && (mvs_ids[i].id != devid || mvs_ids[i].rev > revid)) i++; ctlr->channels = mvs_ids[i].ports; ctlr->quirks = mvs_ids[i].quirks; ctlr->ccc = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "ccc", &ctlr->ccc); ctlr->cccc = 8; resource_int_value(device_get_name(dev), device_get_unit(dev), "cccc", &ctlr->cccc); if (ctlr->ccc == 0 || ctlr->cccc == 0) { ctlr->ccc = 0; ctlr->cccc = 0; } if (ctlr->ccc > 100000) ctlr->ccc = 100000; device_printf(dev, "Gen-%s, %d %sGbps ports, Port Multiplier %s%s\n", ((ctlr->quirks & MVS_Q_GENI) ? "I" : ((ctlr->quirks & MVS_Q_GENII) ? "II" : "IIe")), ctlr->channels, ((ctlr->quirks & MVS_Q_GENI) ? "1.5" : "3"), ((ctlr->quirks & MVS_Q_GENI) ? "not supported" : "supported"), ((ctlr->quirks & MVS_Q_GENIIE) ? " with FBS" : "")); mtx_init(&ctlr->mtx, "MVS controller lock", NULL, MTX_DEF); /* We should have a memory BAR(0). */ ctlr->r_rid = PCIR_BAR(0); if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return ENXIO; /* Setup our own memory management for channels. 
*/ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return (error); } pci_enable_busmaster(dev); mvs_ctlr_setup(dev); /* Setup interrupts. */ if (mvs_setup_interrupt(dev)) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return ENXIO; } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "mvsch", -1); if (child == NULL) device_printf(dev, "failed to add channel device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); } bus_generic_attach(dev); return 0; } static int mvs_detach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); /* Detach & delete all children */ device_delete_children(dev); /* Free interrupt. */ if (ctlr->irq.r_irq) { bus_teardown_intr(dev, ctlr->irq.r_irq, ctlr->irq.handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); } pci_release_msi(dev); /* Free memory. 
*/ rman_fini(&ctlr->sc_iomem); if (ctlr->r_mem) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); mtx_destroy(&ctlr->mtx); return (0); } static int mvs_ctlr_setup(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); int i, ccc = ctlr->ccc, cccc = ctlr->cccc, ccim = 0; /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_MIM, 0x00000000); /* Mask PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIM, 0x00000000); /* Clear PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIC, 0x00000000); if (ccc && bootverbose) { device_printf(dev, "CCC with %dus/%dcmd enabled\n", ctlr->ccc, ctlr->cccc); } ccc *= 150; /* Configure chip-global CCC */ if (ctlr->channels > 4 && (ctlr->quirks & MVS_Q_GENI) == 0) { ATA_OUTL(ctlr->r_mem, CHIP_ICT, cccc); ATA_OUTL(ctlr->r_mem, CHIP_ITT, ccc); ATA_OUTL(ctlr->r_mem, CHIP_ICC, ~CHIP_ICC_ALL_PORTS); if (ccc) ccim |= IC_ALL_PORTS_COAL_DONE; ccc = 0; cccc = 0; } for (i = 0; i < ctlr->channels / 4; i++) { /* Configure per-HC CCC */ ATA_OUTL(ctlr->r_mem, HC_BASE(i) + HC_ICT, cccc); ATA_OUTL(ctlr->r_mem, HC_BASE(i) + HC_ITT, ccc); if (ccc) ccim |= (IC_HC0_COAL_DONE << (i * IC_HC_SHIFT)); /* Clear HC interrupts */ ATA_OUTL(ctlr->r_mem, HC_BASE(i) + HC_IC, 0x00000000); } /* Enable chip interrupts */ ctlr->gmim = (ccim ? ccim : (IC_DONE_HC0 | IC_DONE_HC1)) | IC_ERR_HC0 | IC_ERR_HC1; ctlr->mim = ctlr->gmim | ctlr->pmim; ATA_OUTL(ctlr->r_mem, CHIP_MIM, ctlr->mim); /* Enable PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIM, 0x007fffff); return (0); } static void mvs_edma(device_t dev, device_t child, int mode) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; int bit = IC_DONE_IRQ << (unit * 2 + unit / 4) ; if (ctlr->ccc == 0) return; /* CCC is not working for non-EDMA mode. Unmask device interrupts. 
*/ mtx_lock(&ctlr->mtx); if (mode == MVS_EDMA_OFF) ctlr->pmim |= bit; else ctlr->pmim &= ~bit; ctlr->mim = ctlr->gmim | ctlr->pmim; if (!ctlr->msia) ATA_OUTL(ctlr->r_mem, CHIP_MIM, ctlr->mim); mtx_unlock(&ctlr->mtx); } static int mvs_suspend(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_MIM, 0x00000000); /* Mask PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIM, 0x00000000); return 0; } static int mvs_resume(device_t dev) { mvs_ctlr_setup(dev); return (bus_generic_resume(dev)); } static int mvs_setup_interrupt(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); int msi = 0; /* Process hints. */ resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &msi); if (msi < 0) msi = 0; else if (msi > 0) msi = min(1, pci_msi_count(dev)); /* Allocate MSI if needed/present. */ if (msi && pci_alloc_msi(dev, &msi) != 0) msi = 0; ctlr->msi = msi; /* Allocate all IRQs. */ ctlr->irq.r_irq_rid = msi ? 1 : 0; if (!(ctlr->irq.r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irq.r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return (ENXIO); } if ((bus_setup_intr(dev, ctlr->irq.r_irq, ATA_INTR_FLAGS, NULL, mvs_intr, ctlr, &ctlr->irq.handle))) { device_printf(dev, "unable to setup interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); - ctlr->irq.r_irq = 0; + ctlr->irq.r_irq = NULL; return (ENXIO); } return (0); } /* * Common case interrupt handler. */ static void mvs_intr(void *data) { struct mvs_controller *ctlr = data; struct mvs_intr_arg arg; void (*function)(void *); int p; u_int32_t ic, aic; ic = ATA_INL(ctlr->r_mem, CHIP_MIC); if (ctlr->msi) { /* We have to to mask MSI during processing. */ mtx_lock(&ctlr->mtx); ATA_OUTL(ctlr->r_mem, CHIP_MIM, 0); ctlr->msia = 1; /* Deny MIM update during processing. 
*/ mtx_unlock(&ctlr->mtx); } else if (ic == 0) return; /* Acknowledge all-ports CCC interrupt. */ if (ic & IC_ALL_PORTS_COAL_DONE) ATA_OUTL(ctlr->r_mem, CHIP_ICC, ~CHIP_ICC_ALL_PORTS); for (p = 0; p < ctlr->channels; p++) { if ((p & 3) == 0) { if (p != 0) ic >>= 1; if ((ic & IC_HC0) == 0) { p += 3; ic >>= 8; continue; } /* Acknowledge interrupts of this HC. */ aic = 0; if (ic & (IC_DONE_IRQ << 0)) aic |= HC_IC_DONE(0) | HC_IC_DEV(0); if (ic & (IC_DONE_IRQ << 2)) aic |= HC_IC_DONE(1) | HC_IC_DEV(1); if (ic & (IC_DONE_IRQ << 4)) aic |= HC_IC_DONE(2) | HC_IC_DEV(2); if (ic & (IC_DONE_IRQ << 6)) aic |= HC_IC_DONE(3) | HC_IC_DEV(3); if (ic & IC_HC0_COAL_DONE) aic |= HC_IC_COAL; ATA_OUTL(ctlr->r_mem, HC_BASE(p == 4) + HC_IC, ~aic); } /* Call per-port interrupt handler. */ arg.cause = ic & (IC_ERR_IRQ|IC_DONE_IRQ); if ((arg.cause != 0) && (function = ctlr->interrupt[p].function)) { arg.arg = ctlr->interrupt[p].argument; function(&arg); } ic >>= 2; } if (ctlr->msi) { /* Unmasking MSI triggers next interrupt, if needed. */ mtx_lock(&ctlr->mtx); ctlr->msia = 0; /* Allow MIM update. 
*/ ATA_OUTL(ctlr->r_mem, CHIP_MIM, ctlr->mim); mtx_unlock(&ctlr->mtx); } } static struct resource * mvs_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; struct resource *res = NULL; int offset = HC_BASE(unit >> 2) + PORT_BASE(unit & 0x03); rman_res_t st; switch (type) { case SYS_RES_MEMORY: st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + PORT_SIZE - 1, PORT_SIZE, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, PORT_SIZE, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irq.r_irq; break; } return (res); } static int mvs_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { switch (type) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: if (rid != ATA_IRQ_RID) return ENOENT; return (0); } return (EINVAL); } static int mvs_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); if (filter != NULL) { printf("mvs.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } static int mvs_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return 
(0); } static int mvs_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at channel %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } static int mvs_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { snprintf(buf, buflen, "channel=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static bus_dma_tag_t mvs_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } static device_method_t mvs_methods[] = { DEVMETHOD(device_probe, mvs_probe), DEVMETHOD(device_attach, mvs_attach), DEVMETHOD(device_detach, mvs_detach), DEVMETHOD(device_suspend, mvs_suspend), DEVMETHOD(device_resume, mvs_resume), DEVMETHOD(bus_print_child, mvs_print_child), DEVMETHOD(bus_alloc_resource, mvs_alloc_resource), DEVMETHOD(bus_release_resource, mvs_release_resource), DEVMETHOD(bus_setup_intr, mvs_setup_intr), DEVMETHOD(bus_teardown_intr,mvs_teardown_intr), DEVMETHOD(bus_child_location_str, mvs_child_location_str), DEVMETHOD(bus_get_dma_tag, mvs_get_dma_tag), DEVMETHOD(mvs_edma, mvs_edma), { 0, 0 } }; static driver_t mvs_driver = { "mvs", mvs_methods, sizeof(struct mvs_controller) }; DRIVER_MODULE(mvs, pci, mvs_driver, mvs_devclass, 0, 0); MODULE_VERSION(mvs, 1); MODULE_DEPEND(mvs, cam, 1, 1, 1); Index: head/sys/dev/mvs/mvs_soc.c =================================================================== --- head/sys/dev/mvs/mvs_soc.c (revision 297861) +++ head/sys/dev/mvs/mvs_soc.c (revision 297862) @@ -1,469 +1,469 @@ /*- * Copyright (c) 2010 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mvs.h" /* local prototypes */ static int mvs_setup_interrupt(device_t dev); static void mvs_intr(void *data); static int mvs_suspend(device_t dev); static int mvs_resume(device_t dev); static int mvs_ctlr_setup(device_t dev); static struct { uint32_t id; uint8_t rev; const char *name; int ports; int quirks; } mvs_ids[] = { {MV_DEV_88F5182, 0x00, "Marvell 88F5182", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_88F6281, 0x00, "Marvell 88F6281", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_88F6282, 0x00, "Marvell 88F6282", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_MV78100, 0x00, "Marvell MV78100", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_MV78100_Z0, 0x00,"Marvell MV78100", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_MV78260, 0x00, "Marvell MV78260", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {MV_DEV_MV78460, 0x00, "Marvell MV78460", 2, MVS_Q_GENIIE|MVS_Q_SOC}, {0, 0x00, NULL, 0, 0} }; static int mvs_probe(device_t dev) { char buf[64]; int i; uint32_t devid, revid; if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "mrvl,sata")) return (ENXIO); soc_id(&devid, &revid); for (i = 0; mvs_ids[i].id != 0; i++) { if (mvs_ids[i].id == devid && mvs_ids[i].rev <= revid) { snprintf(buf, sizeof(buf), "%s SATA controller", mvs_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int mvs_attach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); device_t child; int error, unit, i; uint32_t devid, revid; soc_id(&devid, &revid); ctlr->dev = dev; i = 0; while (mvs_ids[i].id != 0 && (mvs_ids[i].id != devid || mvs_ids[i].rev > revid)) i++; ctlr->channels = mvs_ids[i].ports; ctlr->quirks = mvs_ids[i].quirks; ctlr->ccc = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "ccc", &ctlr->ccc); ctlr->cccc = 8; 
resource_int_value(device_get_name(dev), device_get_unit(dev), "cccc", &ctlr->cccc); if (ctlr->ccc == 0 || ctlr->cccc == 0) { ctlr->ccc = 0; ctlr->cccc = 0; } if (ctlr->ccc > 100000) ctlr->ccc = 100000; device_printf(dev, "Gen-%s, %d %sGbps ports, Port Multiplier %s%s\n", ((ctlr->quirks & MVS_Q_GENI) ? "I" : ((ctlr->quirks & MVS_Q_GENII) ? "II" : "IIe")), ctlr->channels, ((ctlr->quirks & MVS_Q_GENI) ? "1.5" : "3"), ((ctlr->quirks & MVS_Q_GENI) ? "not supported" : "supported"), ((ctlr->quirks & MVS_Q_GENIIE) ? " with FBS" : "")); mtx_init(&ctlr->mtx, "MVS controller lock", NULL, MTX_DEF); /* We should have a memory BAR(0). */ ctlr->r_rid = 0; if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return ENXIO; if (ATA_INL(ctlr->r_mem, PORT_BASE(0) + SATA_PHYCFG_OFS) != 0) ctlr->quirks |= MVS_Q_SOC65; /* Setup our own memory management for channels. */ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return (error); } mvs_ctlr_setup(dev); /* Setup interrupts. 
*/ if (mvs_setup_interrupt(dev)) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return ENXIO; } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "mvsch", -1); if (child == NULL) device_printf(dev, "failed to add channel device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); } bus_generic_attach(dev); return 0; } static int mvs_detach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); /* Detach & delete all children */ device_delete_children(dev); /* Free interrupt. */ if (ctlr->irq.r_irq) { bus_teardown_intr(dev, ctlr->irq.r_irq, ctlr->irq.handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); } /* Free memory. */ rman_fini(&ctlr->sc_iomem); if (ctlr->r_mem) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); mtx_destroy(&ctlr->mtx); return (0); } static int mvs_ctlr_setup(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); int ccc = ctlr->ccc, cccc = ctlr->cccc, ccim = 0; /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIM, 0x00000000); /* Clear HC interrupts */ ATA_OUTL(ctlr->r_mem, HC_IC, 0x00000000); /* Clear chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIC, 0); /* Configure per-HC CCC */ if (ccc && bootverbose) { device_printf(dev, "CCC with %dus/%dcmd enabled\n", ctlr->ccc, ctlr->cccc); } ccc *= 150; ATA_OUTL(ctlr->r_mem, HC_ICT, cccc); ATA_OUTL(ctlr->r_mem, HC_ITT, ccc); if (ccc) ccim |= IC_HC0_COAL_DONE; /* Enable chip interrupts */ ctlr->gmim = ((ccc ? 
IC_HC0_COAL_DONE : (IC_DONE_HC0 & CHIP_SOC_HC0_MASK(ctlr->channels))) | (IC_ERR_HC0 & CHIP_SOC_HC0_MASK(ctlr->channels))); ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIM, ctlr->gmim | ctlr->pmim); return (0); } static void mvs_edma(device_t dev, device_t child, int mode) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; int bit = IC_DONE_IRQ << (unit * 2); if (ctlr->ccc == 0) return; /* CCC is not working for non-EDMA mode. Unmask device interrupts. */ mtx_lock(&ctlr->mtx); if (mode == MVS_EDMA_OFF) ctlr->pmim |= bit; else ctlr->pmim &= ~bit; ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIM, ctlr->gmim | ctlr->pmim); mtx_unlock(&ctlr->mtx); } static int mvs_suspend(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_SOC_MIM, 0x00000000); return 0; } static int mvs_resume(device_t dev) { mvs_ctlr_setup(dev); return (bus_generic_resume(dev)); } static int mvs_setup_interrupt(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); /* Allocate all IRQs. */ ctlr->irq.r_irq_rid = 0; if (!(ctlr->irq.r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irq.r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return (ENXIO); } if ((bus_setup_intr(dev, ctlr->irq.r_irq, ATA_INTR_FLAGS, NULL, mvs_intr, ctlr, &ctlr->irq.handle))) { device_printf(dev, "unable to setup interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); - ctlr->irq.r_irq = 0; + ctlr->irq.r_irq = NULL; return (ENXIO); } return (0); } /* * Common case interrupt handler. */ static void mvs_intr(void *data) { struct mvs_controller *ctlr = data; struct mvs_intr_arg arg; void (*function)(void *); int p, chan_num; u_int32_t ic, aic; ic = ATA_INL(ctlr->r_mem, CHIP_SOC_MIC); if ((ic & IC_HC0) == 0) return; /* Acknowledge interrupts of this HC. 
*/ aic = 0; /* Processing interrupts from each initialized channel */ for (chan_num = 0; chan_num < ctlr->channels; chan_num++) { if (ic & (IC_DONE_IRQ << (chan_num * 2))) aic |= HC_IC_DONE(chan_num) | HC_IC_DEV(chan_num); } if (ic & IC_HC0_COAL_DONE) aic |= HC_IC_COAL; ATA_OUTL(ctlr->r_mem, HC_IC, ~aic); /* Call per-port interrupt handler. */ for (p = 0; p < ctlr->channels; p++) { arg.cause = ic & (IC_ERR_IRQ|IC_DONE_IRQ); if ((arg.cause != 0) && (function = ctlr->interrupt[p].function)) { arg.arg = ctlr->interrupt[p].argument; function(&arg); } ic >>= 2; } } static struct resource * mvs_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; struct resource *res = NULL; int offset = PORT_BASE(unit & 0x03); rman_res_t st; switch (type) { case SYS_RES_MEMORY: st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + PORT_SIZE - 1, PORT_SIZE, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, PORT_SIZE, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irq.r_irq; break; } return (res); } static int mvs_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { switch (type) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: if (rid != ATA_IRQ_RID) return ENOENT; return (0); } return (EINVAL); } static int mvs_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); 
if (filter != NULL) { printf("mvs.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } static int mvs_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } static int mvs_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at channel %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } static int mvs_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { snprintf(buf, buflen, "channel=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static bus_dma_tag_t mvs_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } static device_method_t mvs_methods[] = { DEVMETHOD(device_probe, mvs_probe), DEVMETHOD(device_attach, mvs_attach), DEVMETHOD(device_detach, mvs_detach), DEVMETHOD(device_suspend, mvs_suspend), DEVMETHOD(device_resume, mvs_resume), DEVMETHOD(bus_print_child, mvs_print_child), DEVMETHOD(bus_alloc_resource, mvs_alloc_resource), DEVMETHOD(bus_release_resource, mvs_release_resource), DEVMETHOD(bus_setup_intr, mvs_setup_intr), DEVMETHOD(bus_teardown_intr,mvs_teardown_intr), DEVMETHOD(bus_child_location_str, mvs_child_location_str), DEVMETHOD(bus_get_dma_tag, mvs_get_dma_tag), DEVMETHOD(mvs_edma, mvs_edma), { 0, 0 } }; static driver_t mvs_driver = { "mvs", mvs_methods, sizeof(struct mvs_controller) }; DRIVER_MODULE(mvs, simplebus, mvs_driver, mvs_devclass, 0, 0); MODULE_VERSION(mvs, 1); MODULE_DEPEND(mvs, cam, 1, 1, 1); Index: head/sys/dev/nxge/xgehal/xgehal-device.c =================================================================== --- 
head/sys/dev/nxge/xgehal/xgehal-device.c (revision 297861) +++ head/sys/dev/nxge/xgehal/xgehal-device.c (revision 297862) @@ -1,7267 +1,7267 @@ /*- * Copyright (c) 2002-2007 Neterion, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL #define END_SIGN 0x0 #ifdef XGE_HAL_HERC_EMULATION #undef XGE_HAL_PROCESS_LINK_INT_IN_ISR #endif /* * Jenkins hash key length(in bytes) */ #define XGE_HAL_JHASH_MSG_LEN 50 /* * mix(a,b,c) used in Jenkins hash algorithm */ #define mix(a,b,c) { \ a -= b; a -= c; a ^= (c>>13); \ b -= c; b -= a; b ^= (a<<8); \ c -= a; c -= b; c ^= (b>>13); \ a -= b; a -= c; a ^= (c>>12); \ b -= c; b -= a; b ^= (a<<16); \ c -= a; c -= b; c ^= (b>>5); \ a -= b; a -= c; a ^= (c>>3); \ b -= c; b -= a; b ^= (a<<10); \ c -= a; c -= b; c ^= (b>>15); \ } /* * __hal_device_event_queued * @data: pointer to xge_hal_device_t structure * * Will be called when new event succesfully queued. */ void __hal_device_event_queued(void *data, int event_type) { xge_assert(((xge_hal_device_t*)data)->magic == XGE_HAL_MAGIC); if (g_xge_hal_driver->uld_callbacks.event_queued) { g_xge_hal_driver->uld_callbacks.event_queued(data, event_type); } } /* * __hal_pio_mem_write32_upper * * Endiann-aware implementation of xge_os_pio_mem_write32(). * Since Xframe has 64bit registers, we differintiate uppper and lower * parts. */ void __hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr) { #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN) xge_os_pio_mem_write32(pdev, regh, val, addr); #else xge_os_pio_mem_write32(pdev, regh, val, (void *)((char *)addr + 4)); #endif } /* * __hal_pio_mem_write32_upper * * Endiann-aware implementation of xge_os_pio_mem_write32(). * Since Xframe has 64bit registers, we differintiate uppper and lower * parts. 
*/ void __hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr) { #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN) xge_os_pio_mem_write32(pdev, regh, val, (void *) ((char *)addr + 4)); #else xge_os_pio_mem_write32(pdev, regh, val, addr); #endif } /* * __hal_device_register_poll * @hldev: pointer to xge_hal_device_t structure * @reg: register to poll for * @op: 0 - bit reset, 1 - bit set * @mask: mask for logical "and" condition based on %op * @max_millis: maximum time to try to poll in milliseconds * * Will poll certain register for specified amount of time. * Will poll until masked bit is not cleared. */ xge_hal_status_e __hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg, int op, u64 mask, int max_millis) { u64 val64; int i = 0; xge_hal_status_e ret = XGE_HAL_FAIL; xge_os_udelay(10); do { val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg); if (op == 0 && !(val64 & mask)) return XGE_HAL_OK; else if (op == 1 && (val64 & mask) == mask) return XGE_HAL_OK; xge_os_udelay(100); } while (++i <= 9); do { val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg); if (op == 0 && !(val64 & mask)) return XGE_HAL_OK; else if (op == 1 && (val64 & mask) == mask) return XGE_HAL_OK; xge_os_udelay(1000); } while (++i < max_millis); return ret; } /* * __hal_device_wait_quiescent * @hldev: the device * @hw_status: hw_status in case of error * * Will wait until device is quiescent for some blocks. 
*/ static xge_hal_status_e __hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; /* poll and wait first */ #ifdef XGE_HAL_HERC_EMULATION (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1, (XGE_HAL_ADAPTER_STATUS_TDMA_READY | XGE_HAL_ADAPTER_STATUS_RDMA_READY | XGE_HAL_ADAPTER_STATUS_PFC_READY | XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY | XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT | XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY | XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY | XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK), XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS); #else (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1, (XGE_HAL_ADAPTER_STATUS_TDMA_READY | XGE_HAL_ADAPTER_STATUS_RDMA_READY | XGE_HAL_ADAPTER_STATUS_PFC_READY | XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY | XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT | XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY | XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY | XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK | XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK), XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS); #endif return xge_hal_device_status(hldev, hw_status); } /** * xge_hal_device_is_slot_freeze * @devh: the device * * Returns non-zero if the slot is freezed. * The determination is made based on the adapter_status * register which will never give all FFs, unless PCI read * cannot go through. 
*/ int xge_hal_device_is_slot_freeze(xge_hal_device_h devh) { xge_hal_device_t *hldev = (xge_hal_device_t *)devh; xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u16 device_id; u64 adapter_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->adapter_status); xge_os_pci_read16(hldev->pdev,hldev->cfgh, xge_offsetof(xge_hal_pci_config_le_t, device_id), &device_id); #ifdef TX_DEBUG if (adapter_status == XGE_HAL_ALL_FOXES) { u64 dummy; dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->pcc_enable); printf(">>> Slot is frozen!\n"); brkpoint(0); } #endif return((adapter_status == XGE_HAL_ALL_FOXES) || (device_id == 0xffff)); } /* * __hal_device_led_actifity_fix * @hldev: pointer to xge_hal_device_t structure * * SXE-002: Configure link and activity LED to turn it off */ static void __hal_device_led_actifity_fix(xge_hal_device_t *hldev) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u16 subid; u64 val64; xge_os_pci_read16(hldev->pdev, hldev->cfgh, xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid); /* * In the case of Herc, there is a new register named beacon control * is added which was not present in Xena. * Beacon control register in Herc is at the same offset as * gpio control register in Xena. It means they are one and same in * the case of Xena. Also, gpio control register offset in Herc and * Xena is different. * The current register map represents Herc(It means we have * both beacon and gpio control registers in register map). * WRT transition from Xena to Herc, all the code in Xena which was * using gpio control register for LED handling would have to * use beacon control register in Herc and the rest of the code * which uses gpio control in Xena would use the same register * in Herc. * WRT LED handling(following code), In the case of Herc, beacon * control register has to be used. This is applicable for Xena also, * since it represents the gpio control register in Xena. 
*/ if ((subid & 0xFF) >= 0x07) { val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->beacon_control); val64 |= 0x0000800000000000ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->beacon_control); val64 = 0x0411040400000000ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, (void *) ((u8 *)bar0 + 0x2700)); } } /* Constants for Fixing the MacAddress problem seen mostly on * Alpha machines. */ static u64 xena_fix_mac[] = { 0x0060000000000000ULL, 0x0060600000000000ULL, 0x0040600000000000ULL, 0x0000600000000000ULL, 0x0020600000000000ULL, 0x0060600000000000ULL, 0x0020600000000000ULL, 0x0060600000000000ULL, 0x0020600000000000ULL, 0x0060600000000000ULL, 0x0020600000000000ULL, 0x0060600000000000ULL, 0x0020600000000000ULL, 0x0060600000000000ULL, 0x0020600000000000ULL, 0x0060600000000000ULL, 0x0020600000000000ULL, 0x0060600000000000ULL, 0x0020600000000000ULL, 0x0060600000000000ULL, 0x0020600000000000ULL, 0x0060600000000000ULL, 0x0020600000000000ULL, 0x0060600000000000ULL, 0x0020600000000000ULL, 0x0000600000000000ULL, 0x0040600000000000ULL, 0x0060600000000000ULL, END_SIGN }; /* * __hal_device_fix_mac * @hldev: HAL device handle. * * Fix for all "FFs" MAC address problems observed on Alpha platforms. */ static void __hal_device_xena_fix_mac(xge_hal_device_t *hldev) { int i = 0; xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; /* * In the case of Herc, there is a new register named beacon control * is added which was not present in Xena. * Beacon control register in Herc is at the same offset as * gpio control register in Xena. It means they are one and same in * the case of Xena. Also, gpio control register offset in Herc and * Xena is different. * The current register map represents Herc(It means we have * both beacon and gpio control registers in register map). 
* WRT transition from Xena to Herc, all the code in Xena which was * using gpio control register for LED handling would have to * use beacon control register in Herc and the rest of the code * which uses gpio control in Xena would use the same register * in Herc. * In the following code(xena_fix_mac), beacon control register has * to be used in the case of Xena, since it represents gpio control * register. In the case of Herc, there is no change required. */ while (xena_fix_mac[i] != END_SIGN) { xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, xena_fix_mac[i++], &bar0->beacon_control); xge_os_mdelay(1); } } /* * xge_hal_device_bcast_enable * @hldev: HAL device handle. * * Enable receiving broadcasts. * The host must first write RMAC_CFG_KEY "key" * register, and then - MAC_CFG register. */ void xge_hal_device_bcast_enable(xge_hal_device_h devh) { xge_hal_device_t *hldev = (xge_hal_device_t *)devh; xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->mac_cfg); val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64 >> 32), &bar0->mac_cfg); xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s", (unsigned long long)val64, hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled"); } /* * xge_hal_device_bcast_disable * @hldev: HAL device handle. * * Disable receiving broadcasts. * The host must first write RMAC_CFG_KEY "key" * register, and then - MAC_CFG register. 
*/ void xge_hal_device_bcast_disable(xge_hal_device_h devh) { xge_hal_device_t *hldev = (xge_hal_device_t *)devh; xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->mac_cfg); val64 &= ~(XGE_HAL_MAC_RMAC_BCAST_ENABLE); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64 >> 32), &bar0->mac_cfg); xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s", (unsigned long long)val64, hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled"); } /* * __hal_device_shared_splits_configure * @hldev: HAL device handle. * * TxDMA will stop Read request if the number of read split had exceeded * the limit set by shared_splits */ static void __hal_device_shared_splits_configure(xge_hal_device_t *hldev) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->pic_control); val64 |= XGE_HAL_PIC_CNTL_SHARED_SPLITS(hldev->config.shared_splits); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->pic_control); xge_debug_device(XGE_TRACE, "%s", "shared splits configured"); } /* * __hal_device_rmac_padding_configure * @hldev: HAL device handle. * * Configure RMAC frame padding. Depends on configuration, it * can be send to host or removed by MAC. 
*/ static void __hal_device_rmac_padding_configure(xge_hal_device_t *hldev) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key); val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->mac_cfg); val64 &= ( ~XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE ); val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE ); val64 |= XGE_HAL_MAC_CFG_TMAC_APPEND_PAD; /* * If the RTH enable bit is not set, strip the FCS */ if (!hldev->config.rth_en || !(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) { val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS; } val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_STRIP_PAD ); val64 |= XGE_HAL_MAC_RMAC_DISCARD_PFRM; __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64 >> 32), (char*)&bar0->mac_cfg); xge_os_mdelay(1); xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured", (unsigned long long)val64); } /* * __hal_device_pause_frames_configure * @hldev: HAL device handle. * * Set Pause threshold. 
* * Pause frame is generated if the amount of data outstanding * on any queue exceeded the ratio of * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256 */ static void __hal_device_pause_frames_configure(xge_hal_device_t *hldev) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; int i; u64 val64; switch (hldev->config.mac.media) { case XGE_HAL_MEDIA_SR: case XGE_HAL_MEDIA_SW: val64=0xfffbfffbfffbfffbULL; break; case XGE_HAL_MEDIA_LR: case XGE_HAL_MEDIA_LW: val64=0xffbbffbbffbbffbbULL; break; case XGE_HAL_MEDIA_ER: case XGE_HAL_MEDIA_EW: default: val64=0xffbbffbbffbbffbbULL; break; } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mc_pause_thresh_q0q3); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mc_pause_thresh_q4q7); /* Set the time value to be inserted in the pause frame generated * by Xframe */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rmac_pause_cfg); if (hldev->config.mac.rmac_pause_gen_en) val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN; else val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN); if (hldev->config.mac.rmac_pause_rcv_en) val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN; else val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN); val64 &= ~(XGE_HAL_RMAC_PAUSE_HG_PTIME(0xffff)); val64 |= XGE_HAL_RMAC_PAUSE_HG_PTIME(hldev->config.mac.rmac_pause_time); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rmac_pause_cfg); val64 = 0; for (i = 0; i<4; i++) { val64 |= (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3) <<(i*2*8)); } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mc_pause_thresh_q0q3); val64 = 0; for (i = 0; i<4; i++) { val64 |= (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7) <<(i*2*8)); } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mc_pause_thresh_q4q7); xge_debug_device(XGE_TRACE, "%s", "pause frames configured"); } /* * Herc's clock rate doubled, unless the slot is 33MHz. 
*/
unsigned int
__hal_fix_time_ival_herc(xge_hal_device_t *hldev, unsigned int time_ival)
{
	/* Xena timers run at the nominal rate: no adjustment. */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
	    return time_ival;

	xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC);

	/* Herc doubles the timer clock except in a 33MHz slot. */
	if (hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN &&
	    hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_33MHZ)
	    time_ival *= 2;

	return time_ival;
}

/*
 * __hal_device_bus_master_disable
 * @hldev: HAL device handle.
 *
 * Disable bus mastership.
 */
static void
__hal_device_bus_master_disable (xge_hal_device_t *hldev)
{
	u16 cmd;
	u16 bus_master = 4;	/* PCI command register: Bus Master enable bit */

	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
	cmd &= ~bus_master;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
}

/*
 * __hal_device_bus_master_enable
 * @hldev: HAL device handle.
 *
 * Enable bus mastership.  (Original header said "Disable" — copy/paste.)
 */
static void
__hal_device_bus_master_enable (xge_hal_device_t *hldev)
{
	u16 cmd;
	u16 bus_master = 4;	/* PCI command register: Bus Master enable bit */

	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);

	/* already enabled? do nothing */
	if (cmd & bus_master)
	    return;

	cmd |= bus_master;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	        xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
}

/*
 * __hal_device_intr_mgmt
 * @hldev: HAL device handle.
 * @mask: mask indicating which Intr block must be modified.
 * @flag: if true - enable, otherwise - disable interrupts.
 *
 * Disable or enable device interrupts. Mask is used to specify
 * which hardware blocks should produce interrupts. For details
 * please refer to Xframe User Guide.
*/
static void
__hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64 = 0, temp64 = 0;
	u64 gim, gim_saved;

	/* Snapshot the general mask; it is written back only if changed. */
	gim_saved = gim = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                              &bar0->general_int_mask);

	/* Top level interrupt classification */
	/* PIC Interrupts */
	if ((mask & (XGE_HAL_TX_PIC_INTR/* | XGE_HAL_RX_PIC_INTR*/))) {
	    /* Enable PIC Intrs in the general intr mask register */
	    val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/;
	    if (flag) {
	        gim &= ~((u64) val64);
	        temp64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                &bar0->pic_int_mask);
	        temp64 &= ~XGE_HAL_PIC_INT_TX;
#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	        if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	            temp64 &= ~XGE_HAL_PIC_INT_MISC;
	        }
#endif
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                   temp64, &bar0->pic_int_mask);
#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	        if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	            /*
	             * Unmask only Link Up interrupt
	             */
	            temp64 = xge_os_pio_mem_read64(hldev->pdev,
	                hldev->regh0, &bar0->misc_int_mask);
	            temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
	            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                      temp64, &bar0->misc_int_mask);
	            xge_debug_device(XGE_TRACE,
	                "unmask link up flag "XGE_OS_LLXFMT,
	                (unsigned long long)temp64);
	        }
#endif
	    } else { /* flag == 0 */
#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	        if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	            /*
	             * Mask both Link Up and Down interrupts
	             */
	            temp64 = xge_os_pio_mem_read64(hldev->pdev,
	                hldev->regh0, &bar0->misc_int_mask);
	            temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
	            temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
	            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                      temp64, &bar0->misc_int_mask);
	            xge_debug_device(XGE_TRACE,
	                "mask link up/down flag "XGE_OS_LLXFMT,
	                (unsigned long long)temp64);
	        }
#endif
	        /* Disable PIC Intrs in the general intr mask
	         * register */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                  XGE_HAL_ALL_INTRS_DIS,
	                  &bar0->pic_int_mask);
	        gim |= val64;
	    }
	}

	/* DMA Interrupts */
	/* Enabling/Disabling Tx DMA interrupts */
	if (mask & XGE_HAL_TX_DMA_INTR) {
	    /* Enable TxDMA Intrs in the general intr mask register */
	    val64 = XGE_HAL_TXDMA_INT_M;
	    if (flag) {
	        gim &= ~((u64) val64);
	        /* Enable all TxDMA interrupts */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            0x0, &bar0->txdma_int_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            0x0, &bar0->pfc_err_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            0x0, &bar0->tda_err_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            0x0, &bar0->pcc_err_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            0x0, &bar0->tti_err_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            0x0, &bar0->lso_err_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            0x0, &bar0->tpa_err_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            0x0, &bar0->sm_err_mask);
	    } else { /* flag == 0 */
	        /* Disable TxDMA Intrs in the general intr mask
	         * register */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->txdma_int_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->pfc_err_mask);
	        gim |= val64;
	    }
	}

	/* Enabling/Disabling Rx DMA interrupts */
	if (mask & XGE_HAL_RX_DMA_INTR) {
	    /* Enable RxDMA Intrs in the general intr mask register */
	    val64 = XGE_HAL_RXDMA_INT_M;
	    if (flag) {
	        gim &= ~((u64) val64);
	        /* All RxDMA block interrupts are disabled for now
	         * TODO */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->rxdma_int_mask);
	    } else { /* flag == 0 */
	        /* Disable RxDMA Intrs in the general intr mask
	         * register */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->rxdma_int_mask);
	        gim |= val64;
	    }
	}

	/* MAC Interrupts */
	/* Enabling/Disabling MAC interrupts */
	if (mask & (XGE_HAL_TX_MAC_INTR | XGE_HAL_RX_MAC_INTR)) {
	    val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M;
	    if (flag) {
	        gim &= ~((u64) val64);
	        /*
	         * All MAC block error inter. are disabled for now.
	         */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
	    } else { /* flag == 0 */
	        /* Disable MAC Intrs in the general intr mask
	         * register */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
	        gim |= val64;
	    }
	}

	/* XGXS Interrupts */
	if (mask & (XGE_HAL_TX_XGXS_INTR | XGE_HAL_RX_XGXS_INTR)) {
	    val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M;
	    if (flag) {
	        gim &= ~((u64) val64);
	        /* All XGXS block error interrupts are disabled for now
	         * TODO */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
	    } else { /* flag == 0 */
	        /* Disable MC Intrs in the general intr mask register */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
	        gim |= val64;
	    }
	}

	/* Memory Controller(MC) interrupts */
	if (mask & XGE_HAL_MC_INTR) {
	    val64 = XGE_HAL_MC_INT_M;
	    if (flag) {
	        gim &= ~((u64) val64);
	        /* Enable all MC blocks error interrupts */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            0x0ULL, &bar0->mc_int_mask);
	    } else { /* flag == 0 */
	        /* Disable MC Intrs in the general intr mask
	         * register */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask);
	        gim |= val64;
	    }
	}

	/* Tx traffic interrupts */
	if (mask & XGE_HAL_TX_TRAFFIC_INTR) {
	    val64 = XGE_HAL_TXTRAFFIC_INT_M;
	    if (flag) {
	        gim &= ~((u64) val64);
	        /* Enable all the Tx side interrupts */
	        /* '0' Enables all 64 TX interrupt levels. */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
	            &bar0->tx_traffic_mask);
	    } else { /* flag == 0 */
	        /* Disable Tx Traffic Intrs in the general intr mask
	         * register.
	         */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->tx_traffic_mask);
	        gim |= val64;
	    }
	}

	/* Rx traffic interrupts */
	if (mask & XGE_HAL_RX_TRAFFIC_INTR) {
	    val64 = XGE_HAL_RXTRAFFIC_INT_M;
	    if (flag) {
	        gim &= ~((u64) val64);
	        /* '0' Enables all 8 RX interrupt levels. */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
	            &bar0->rx_traffic_mask);
	    } else { /* flag == 0 */
	        /* Disable Rx Traffic Intrs in the general intr mask
	         * register.
	         */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            XGE_HAL_ALL_INTRS_DIS, &bar0->rx_traffic_mask);
	        gim |= val64;
	    }
	}

	/* Sched Timer interrupt */
	if (mask & XGE_HAL_SCHED_INTR) {
	    if (flag) {
	        temp64 = xge_os_pio_mem_read64(hldev->pdev,
	                hldev->regh0, &bar0->txpic_int_mask);
	        temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR;
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                temp64, &bar0->txpic_int_mask);

	        xge_hal_device_sched_timer(hldev,
	                hldev->config.sched_timer_us,
	                hldev->config.sched_timer_one_shot);
	    } else {
	        temp64 = xge_os_pio_mem_read64(hldev->pdev,
	                hldev->regh0, &bar0->txpic_int_mask);
	        temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR;

	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                temp64, &bar0->txpic_int_mask);

	        xge_hal_device_sched_timer(hldev,
	                XGE_HAL_SCHED_TIMER_DISABLED,
	                XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE);
	    }
	}

	/* Commit the top-level mask only if any block changed it. */
	if (gim != gim_saved) {
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim,
	        &bar0->general_int_mask);
	    xge_debug_device(XGE_TRACE, "general_int_mask updated "
	         XGE_OS_LLXFMT" => "XGE_OS_LLXFMT,
	        (unsigned long long)gim_saved, (unsigned long long)gim);
	}
}

/*
 * __hal_device_bimodal_configure
 * @hldev: HAL device handle.
 *
 * Bimodal parameters initialization.
*/ static void __hal_device_bimodal_configure(xge_hal_device_t *hldev) { int i; for (i=0; iconfig.ring.queue[i].configured) continue; rti = &hldev->config.ring.queue[i].rti; tti = &hldev->bimodal_tti[i]; tti->enabled = 1; tti->urange_a = hldev->bimodal_urange_a_en * 10; tti->urange_b = 20; tti->urange_c = 30; tti->ufc_a = hldev->bimodal_urange_a_en * 8; tti->ufc_b = 16; tti->ufc_c = 32; tti->ufc_d = 64; tti->timer_val_us = hldev->bimodal_timer_val_us; tti->timer_ac_en = 1; tti->timer_ci_en = 0; rti->urange_a = 10; rti->urange_b = 20; rti->urange_c = 30; rti->ufc_a = 1; /* <= for netpipe type of tests */ rti->ufc_b = 4; rti->ufc_c = 4; rti->ufc_d = 4; /* <= 99% of a bandwidth traffic counts here */ rti->timer_ac_en = 1; rti->timer_val_us = 5; /* for optimal bus efficiency usage */ } } /* * __hal_device_tti_apply * @hldev: HAL device handle. * * apply TTI configuration. */ static xge_hal_status_e __hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti, int num, int runtime) { u64 val64, data1 = 0, data2 = 0; xge_hal_pci_bar0_t *bar0; if (runtime) bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; else bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; if (tti->timer_val_us) { unsigned int tx_interval; if (hldev->config.pci_freq_mherz) { tx_interval = hldev->config.pci_freq_mherz * tti->timer_val_us / 64; tx_interval = __hal_fix_time_ival_herc(hldev, tx_interval); } else { tx_interval = tti->timer_val_us; } data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval); if (tti->timer_ac_en) { data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN; } if (tti->timer_ci_en) { data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN; } if (!runtime) { xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s", num, tx_interval, tti->timer_ci_en ? 
"enabled": "disabled"); } } if (tti->urange_a || tti->urange_b || tti->urange_c || tti->ufc_a || tti->ufc_b || tti->ufc_c || tti->ufc_d ) { data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) | XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) | XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c); data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) | XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) | XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) | XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d); } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1, &bar0->tti_data1_mem); (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->tti_data1_mem); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2, &bar0->tti_data2_mem); (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->tti_data2_mem); xge_os_wmb(); val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD | XGE_HAL_TTI_CMD_MEM_OFFSET(num); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->tti_command_mem); if (!runtime && __hal_device_register_poll(hldev, &bar0->tti_command_mem, 0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD, XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { /* upper layer may require to repeat */ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } if (!runtime) { xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x" XGE_OS_LLXFMT, num, (unsigned long long)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->tti_data1_mem)); } return XGE_HAL_OK; } /* * __hal_device_tti_configure * @hldev: HAL device handle. * * TTI Initialization. * Initialize Transmit Traffic Interrupt Scheme. */ static xge_hal_status_e __hal_device_tti_configure(xge_hal_device_t *hldev, int runtime) { int i; for (i=0; iconfig.fifo.queue[i].configured) continue; for (j=0; jconfig.fifo.queue[i].tti[j].enabled) continue; /* at least some TTI enabled. Record it. 
*/ hldev->tti_enabled = 1; status = __hal_device_tti_apply(hldev, &hldev->config.fifo.queue[i].tti[j], i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime); if (status != XGE_HAL_OK) return status; } } /* processing bimodal TTIs */ for (i=0; ibimodal_tti[i].enabled) continue; /* at least some bimodal TTI enabled. Record it. */ hldev->tti_enabled = 1; status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i], XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime); if (status != XGE_HAL_OK) return status; } return XGE_HAL_OK; } /* * __hal_device_rti_configure * @hldev: HAL device handle. * * RTI Initialization. * Initialize Receive Traffic Interrupt Scheme. */ xge_hal_status_e __hal_device_rti_configure(xge_hal_device_t *hldev, int runtime) { xge_hal_pci_bar0_t *bar0; u64 val64, data1 = 0, data2 = 0; int i; if (runtime) { /* * we don't want to re-configure RTI in case when * bimodal interrupts are in use. Instead reconfigure TTI * with new RTI values. */ if (hldev->config.bimodal_interrupts) { __hal_device_bimodal_configure(hldev); return __hal_device_tti_configure(hldev, 1); } bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0; } else bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; for (i=0; iconfig.ring.queue[i].rti; if (!hldev->config.ring.queue[i].configured) continue; if (rti->timer_val_us) { unsigned int rx_interval; if (hldev->config.pci_freq_mherz) { rx_interval = hldev->config.pci_freq_mherz * rti->timer_val_us / 8; rx_interval = __hal_fix_time_ival_herc(hldev, rx_interval); } else { rx_interval = rti->timer_val_us; } data1 |=XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval); if (rti->timer_ac_en) { data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN; } data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN; } if (rti->urange_a || rti->urange_b || rti->urange_c || rti->ufc_a || rti->ufc_b || rti->ufc_c || rti->ufc_d) { data1 |=XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) | XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) | XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c); data2 |= 
XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) | XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) | XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) | XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d); } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1, &bar0->rti_data1_mem); (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rti_data1_mem); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2, &bar0->rti_data2_mem); (void)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rti_data2_mem); xge_os_wmb(); val64 = XGE_HAL_RTI_CMD_MEM_WE | XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD; val64 |= XGE_HAL_RTI_CMD_MEM_OFFSET(i); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rti_command_mem); if (!runtime && __hal_device_register_poll(hldev, &bar0->rti_command_mem, 0, XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD, XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { /* upper layer may require to repeat */ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } if (!runtime) { xge_debug_device(XGE_TRACE, "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT, i, (unsigned long long)xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rti_data1_mem)); } } return XGE_HAL_OK; } /* Constants to be programmed into the Xena's registers to configure * the XAUI. 
*/
/* MDIO write sequence for Xena: reset and un-reset the PMA PLL. */
static u64 default_xena_mdio_cfg[] = {
	/* Reset PMA PLL */
	0xC001010000000000ULL, 0xC0010100000000E0ULL,
	0xC0010100008000E4ULL,
	/* Remove Reset from PMA PLL */
	0xC001010000000000ULL, 0xC0010100000000E0ULL,
	0xC0010100000000E4ULL,
	END_SIGN
};

/* Herc requires no MDIO programming. */
static u64 default_herc_mdio_cfg[] = {
	END_SIGN
};

static u64 default_xena_dtx_cfg[] = {
	0x8000051500000000ULL, 0x80000515000000E0ULL,
	0x80000515D93500E4ULL, 0x8001051500000000ULL,
	0x80010515000000E0ULL, 0x80010515001E00E4ULL,
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F21000E4ULL,
	/* Set PADLOOPBACKN */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515B20000E4ULL, 0x8003051500000000ULL,
	0x80030515000000E0ULL, 0x80030515B20000E4ULL,
	0x8004051500000000ULL, 0x80040515000000E0ULL,
	0x80040515B20000E4ULL, 0x8005051500000000ULL,
	0x80050515000000E0ULL, 0x80050515B20000E4ULL,
	SWITCH_SIGN,
	/* Remove PADLOOPBACKN */
	0x8002051500000000ULL, 0x80020515000000E0ULL,
	0x80020515F20000E4ULL, 0x8003051500000000ULL,
	0x80030515000000E0ULL, 0x80030515F20000E4ULL,
	0x8004051500000000ULL, 0x80040515000000E0ULL,
	0x80040515F20000E4ULL, 0x8005051500000000ULL,
	0x80050515000000E0ULL, 0x80050515F20000E4ULL,
	END_SIGN
};

/*
static u64 default_herc_dtx_cfg[] = {
	0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
	0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};
*/

static u64 default_herc_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,
	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,
	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,
	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
	END_SIGN
};

/*
 * __hal_serial_mem_write64 - write a 64-bit register as two 32-bit
 * halves (upper first), with write barriers between them and a 1ms
 * settle delay, as required by the XAUI configuration sequence.
 */
void
__hal_serial_mem_write64(xge_hal_device_t *hldev, u64 value, u64 *reg)
{
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
	    (u32)(value>>32), reg);
	xge_os_wmb();
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
	    (u32)value, reg);
	xge_os_wmb();
	xge_os_mdelay(1);
}

/*
 * __hal_serial_mem_read64 - 64-bit register read followed by a 1ms
 * settle delay (companion to __hal_serial_mem_write64).
 */
u64
__hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg)
{
	u64 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                      reg);
	xge_os_mdelay(1);
	return val64;
}

/*
 * __hal_device_xaui_configure
 * @hldev: HAL device handle.
 *
 * Configure XAUI Interface of Xena.
 *
 * To Configure the Xena's XAUI, one has to write a series
 * of 64 bit values into two registers in a particular
 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
 * which will be defined in the array of configuration values
 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
 * to switch writing from one register to another. We continue
 * writing these values until we encounter the 'END_SIGN' macro.
 * For example, After making a series of 21 writes into
 * dtx_control register the 'SWITCH_SIGN' appears and hence we
 * start writing into mdio_control until we encounter END_SIGN.
*/
static void
__hal_device_xaui_configure(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	int mdio_cnt = 0, dtx_cnt = 0;
	u64 *default_dtx_cfg = NULL, *default_mdio_cfg = NULL;

	/* Select the per-chip configuration tables. */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
	    default_dtx_cfg = default_xena_dtx_cfg;
	    default_mdio_cfg = default_xena_mdio_cfg;
	} else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	    default_dtx_cfg = default_herc_dtx_cfg;
	    default_mdio_cfg = default_herc_mdio_cfg;
	} else {
	    /* Unknown card: assert (default_dtx_cfg is NULL here). */
	    xge_assert(default_dtx_cfg);
	    return;
	}

	/*
	 * Interleave writes between dtx_control and mdio_control,
	 * switching on SWITCH_SIGN, until both tables hit END_SIGN.
	 */
	do {
	    dtx_cfg:
	    while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
	        if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
	            dtx_cnt++;
	            goto mdio_cfg;
	        }
	        __hal_serial_mem_write64(hldev, default_dtx_cfg[dtx_cnt],
	                       &bar0->dtx_control);
	        dtx_cnt++;
	    }
	    mdio_cfg:
	    while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
	        if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
	            mdio_cnt++;
	            goto dtx_cfg;
	        }
	        __hal_serial_mem_write64(hldev, default_mdio_cfg[mdio_cnt],
	            &bar0->mdio_control);
	        mdio_cnt++;
	    }
	} while ( !((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
	        (default_mdio_cfg[mdio_cnt] == END_SIGN)) );

	xge_debug_device(XGE_TRACE, "%s", "XAUI interface configured");
}

/*
 * __hal_device_mac_link_util_set
 * @hldev: HAL device handle.
 *
 * Set sampling rate to calculate link utilization.
 */
static void
__hal_device_mac_link_util_set(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	val64 = XGE_HAL_MAC_TX_LINK_UTIL_VAL(
	        hldev->config.mac.tmac_util_period) |
	    XGE_HAL_MAC_RX_LINK_UTIL_VAL(
	        hldev->config.mac.rmac_util_period);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	           &bar0->mac_link_util);
	xge_debug_device(XGE_TRACE, "%s",
	          "bandwidth link utilization configured");
}

/*
 * __hal_device_set_swapper
 * @hldev: HAL device handle.
 *
 * Set the Xframe's byte "swapper" in accordance with
 * endianness of the host.
*/
xge_hal_status_e
__hal_device_set_swapper(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/*
	 * from 32bit errarta:
	 *
	 * The SWAPPER_CONTROL register determines how the adapter accesses
	 * host memory as well as how it responds to read and write requests
	 * from the host system. Writes to this register should be performed
	 * carefully, since the byte swappers could reverse the order of bytes.
	 * When configuring this register keep in mind that writes to the PIF
	 * read and write swappers could reverse the order of the upper and
	 * lower 32-bit words. This means that the driver may have to write
	 * to the upper 32 bits of the SWAPPER_CONTROL twice in order to
	 * configure the entire register.
	 */

	/*
	 * The device by default set to a big endian format, so a big endian
	 * driver need not set anything.
	 */

#if defined(XGE_HAL_CUSTOM_HW_SWAPPER)

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        0xffffffffffffffffULL, &bar0->swapper_ctrl);

	val64 = XGE_HAL_CUSTOM_HW_SWAPPER;

	xge_os_wmb();
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &bar0->swapper_ctrl);

	xge_debug_device(XGE_TRACE, "using custom HW swapper 0x"XGE_OS_LLXFMT,
	        (unsigned long long)val64);

#elif !defined(XGE_OS_HOST_BIG_ENDIAN)

	/*
	 * Initially we enable all bits to make it accessible by the driver,
	 * then we selectively enable only those bits that we want to set.
	 * i.e. force swapper to swap for the first time since second write
	 * will overwrite with the final settings.
	 *
	 * Use only for little endian platforms.
	 */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	        0xffffffffffffffffULL, &bar0->swapper_ctrl);
	xge_os_wmb();
	val64 = (XGE_HAL_SWAPPER_CTRL_PIF_R_FE |
	     XGE_HAL_SWAPPER_CTRL_PIF_R_SE |
	     XGE_HAL_SWAPPER_CTRL_PIF_W_FE |
	     XGE_HAL_SWAPPER_CTRL_PIF_W_SE |
	     XGE_HAL_SWAPPER_CTRL_RTH_FE |
	     XGE_HAL_SWAPPER_CTRL_RTH_SE |
	     XGE_HAL_SWAPPER_CTRL_TXP_FE |
	     XGE_HAL_SWAPPER_CTRL_TXP_SE |
	     XGE_HAL_SWAPPER_CTRL_TXD_R_FE |
	     XGE_HAL_SWAPPER_CTRL_TXD_R_SE |
	     XGE_HAL_SWAPPER_CTRL_TXD_W_FE |
	     XGE_HAL_SWAPPER_CTRL_TXD_W_SE |
	     XGE_HAL_SWAPPER_CTRL_TXF_R_FE |
	     XGE_HAL_SWAPPER_CTRL_RXD_R_FE |
	     XGE_HAL_SWAPPER_CTRL_RXD_R_SE |
	     XGE_HAL_SWAPPER_CTRL_RXD_W_FE |
	     XGE_HAL_SWAPPER_CTRL_RXD_W_SE |
	     XGE_HAL_SWAPPER_CTRL_RXF_W_FE |
	     XGE_HAL_SWAPPER_CTRL_XMSI_FE |
	     XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE);

	/*
	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
	     val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE;
	} */
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
	         &bar0->swapper_ctrl);
	xge_os_wmb();
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
	         (u32)(val64>>32), &bar0->swapper_ctrl);
	xge_os_wmb();
	/*
	 * The upper 32 bits are deliberately written a second time: per
	 * the 32-bit errata above, the first upper-word write may land
	 * byte-swapped before the swapper settings take effect.
	 */
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
	         (u32)(val64>>32), &bar0->swapper_ctrl);
	xge_debug_device(XGE_TRACE, "%s", "using little endian set");
#endif

	/*  Verifying if endian settings are accurate by reading a feedback
	 *  register.  */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->pif_rd_swapper_fb);
	if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) {
	    xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT,
	          (unsigned long long) val64);
	    return XGE_HAL_ERR_SWAPPER_CTRL;
	}

	xge_debug_device(XGE_TRACE, "%s", "be/le swapper enabled");

	return XGE_HAL_OK;
}

/*
 * __hal_device_rts_mac_configure - Configure RTS steering based on
 * destination mac address.
 * @hldev: HAL device handle.
* */ xge_hal_status_e __hal_device_rts_mac_configure(xge_hal_device_t *hldev) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; if (!hldev->config.rts_mac_en) { return XGE_HAL_OK; } /* * Set the receive traffic steering mode from default(classic) * to enhanced. */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rts_ctrl); val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rts_ctrl); return XGE_HAL_OK; } /* * __hal_device_rts_port_configure - Configure RTS steering based on * destination or source port number. * @hldev: HAL device handle. * */ xge_hal_status_e __hal_device_rts_port_configure(xge_hal_device_t *hldev) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; int rnum; if (!hldev->config.rts_port_en) { return XGE_HAL_OK; } /* * Set the receive traffic steering mode from default(classic) * to enhanced. */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rts_ctrl); val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rts_ctrl); /* * Initiate port steering according to per-ring configuration */ for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) { int pnum; xge_hal_ring_queue_t *queue = &hldev->config.ring.queue[rnum]; if (!queue->configured || queue->rts_port_en) continue; for (pnum = 0; pnum < XGE_HAL_MAX_STEERABLE_PORTS; pnum++) { xge_hal_rts_port_t *port = &queue->rts_ports[pnum]; /* * Skip and clear empty ports */ if (!port->num) { /* * Clear CAM memory */ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL, &bar0->rts_pn_cam_data); val64 = BIT(7) | BIT(15); } else { /* * Assign new Port values according * to configuration */ val64 = vBIT(port->num,8,16) | vBIT(rnum,37,3) | BIT(63); if (port->src) val64 = BIT(47); if (!port->udp) val64 = BIT(7); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rts_pn_cam_data); val64 = BIT(7) | BIT(15) | 
vBIT(pnum,24,8); } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rts_pn_cam_ctrl); /* poll until done */ if (__hal_device_register_poll(hldev, &bar0->rts_pn_cam_ctrl, 0, XGE_HAL_RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED, XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { /* upper layer may require to repeat */ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } } } return XGE_HAL_OK; } /* * __hal_device_rts_qos_configure - Configure RTS steering based on * qos. * @hldev: HAL device handle. * */ xge_hal_status_e __hal_device_rts_qos_configure(xge_hal_device_t *hldev) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; int j, rx_ring_num; if (!hldev->config.rts_qos_en) { return XGE_HAL_OK; } /* First clear the RTS_DS_MEM_DATA */ val64 = 0; for (j = 0; j < 64; j++ ) { /* First clear the value */ val64 = XGE_HAL_RTS_DS_MEM_DATA(0); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rts_ds_mem_data); val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE | XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD | XGE_HAL_RTS_DS_MEM_CTRL_OFFSET ( j ); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rts_ds_mem_ctrl); /* poll until done */ if (__hal_device_register_poll(hldev, &bar0->rts_ds_mem_ctrl, 0, XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED, XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { /* upper layer may require to repeat */ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } } rx_ring_num = 0; for (j = 0; j < XGE_HAL_MAX_RING_NUM; j++) { if (hldev->config.ring.queue[j].configured) rx_ring_num++; } switch (rx_ring_num) { case 1: val64 = 0x0; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); xge_os_pio_mem_write64(hldev->pdev, 
hldev->regh0, val64, &bar0->rx_w_round_robin_4); break; case 2: val64 = 0x0001000100010001ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); val64 = 0x0001000100000000ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); break; case 3: val64 = 0x0001020001020001ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); val64 = 0x0200010200010200ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); val64 = 0x0102000102000102ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); val64 = 0x0001020001020001ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); val64 = 0x0200010200000000ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); break; case 4: val64 = 0x0001020300010203ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); val64 = 0x0001020300000000ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); break; case 5: val64 = 0x0001020304000102ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); val64 = 0x0304000102030400ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); val64 = 0x0102030400010203ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, 
&bar0->rx_w_round_robin_2); val64 = 0x0400010203040001ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); val64 = 0x0203040000000000ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); break; case 6: val64 = 0x0001020304050001ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); val64 = 0x0203040500010203ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); val64 = 0x0405000102030405ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); val64 = 0x0001020304050001ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); val64 = 0x0203040500000000ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); break; case 7: val64 = 0x0001020304050600ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); val64 = 0x0102030405060001ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); val64 = 0x0203040506000102ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); val64 = 0x0304050600010203ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); val64 = 0x0405060000000000ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); break; case 8: val64 = 0x0001020304050607ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3); val64 = 0x0001020300000000ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4); break; } return XGE_HAL_OK; } /* * 
xge__hal_device_rts_mac_enable * * @devh: HAL device handle. * @index: index number where the MAC addr will be stored * @macaddr: MAC address * * - Enable RTS steering for the given MAC address. This function has to be * called with lock acquired. * * NOTE: * 1. ULD has to call this function with the index value which * statisfies the following condition: * ring_num = (index % 8) * 2.ULD also needs to make sure that the index is not * occupied by any MAC address. If that index has any MAC address * it will be overwritten and HAL will not check for it. * */ xge_hal_status_e xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr) { int max_addr = XGE_HAL_MAX_MAC_ADDRESSES; xge_hal_status_e status; xge_hal_device_t *hldev = (xge_hal_device_t *)devh; if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC; if ( index >= max_addr ) return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; /* * Set the MAC address at the given location marked by index. */ status = xge_hal_device_macaddr_set(hldev, index, macaddr); if (status != XGE_HAL_OK) { xge_debug_device(XGE_ERR, "%s", "Not able to set the mac addr"); return status; } return xge_hal_device_rts_section_enable(hldev, index); } /* * xge__hal_device_rts_mac_disable * @hldev: HAL device handle. * @index: index number where to disable the MAC addr * * Disable RTS Steering based on the MAC address. * This function should be called with lock acquired. 
* */ xge_hal_status_e xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index) { xge_hal_status_e status; u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; int max_addr = XGE_HAL_MAX_MAC_ADDRESSES; xge_hal_device_t *hldev = (xge_hal_device_t *)devh; xge_debug_ll(XGE_TRACE, "the index value is %d ", index); if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC; if ( index >= max_addr ) return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES; /* * Disable MAC address @ given index location */ status = xge_hal_device_macaddr_set(hldev, index, macaddr); if (status != XGE_HAL_OK) { xge_debug_device(XGE_ERR, "%s", "Not able to set the mac addr"); return status; } return XGE_HAL_OK; } /* * __hal_device_rth_configure - Configure RTH for the device * @hldev: HAL device handle. * * Using IT (Indirection Table). */ xge_hal_status_e __hal_device_rth_it_configure(xge_hal_device_t *hldev) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; int rings[XGE_HAL_MAX_RING_NUM]={0}; int rnum; int rmax; int buckets_num; int bucket; if (!hldev->config.rth_en) { return XGE_HAL_OK; } /* * Set the receive traffic steering mode from default(classic) * to enhanced. 
*/ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rts_ctrl); val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rts_ctrl); buckets_num = (1 << hldev->config.rth_bucket_size); rmax=0; for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) { if (hldev->config.ring.queue[rnum].configured && hldev->config.ring.queue[rnum].rth_en) rings[rmax++] = rnum; } rnum = 0; /* for starters: fill in all the buckets with rings "equally" */ for (bucket = 0; bucket < buckets_num; bucket++) { if (rnum == rmax) rnum = 0; /* write data */ val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN | XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rts_rth_map_mem_data); /* execute */ val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE | XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE | XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rts_rth_map_mem_ctrl); /* poll until done */ if (__hal_device_register_poll(hldev, &bar0->rts_rth_map_mem_ctrl, 0, XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE, XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) { return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } rnum++; } val64 = XGE_HAL_RTS_RTH_EN; val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(hldev->config.rth_bucket_size); val64 |= XGE_HAL_RTS_RTH_TCP_IPV4_EN | XGE_HAL_RTS_RTH_UDP_IPV4_EN | XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV6_EN |XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN | XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN | XGE_HAL_RTS_RTH_IPV6_EX_EN; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rts_rth_cfg); xge_debug_device(XGE_TRACE, "RTH configured, bucket_size %d", hldev->config.rth_bucket_size); return XGE_HAL_OK; } /* * __hal_spdm_entry_add - Add a new entry to the SPDM table. * * Add a new entry to the SPDM table * * This function add a new entry to the SPDM table. 
* Note:
 * This function should be called while holding spdm_lock.
 *
 * See also: xge_hal_spdm_entry_add , xge_hal_spdm_entry_remove.
 */
static xge_hal_status_e
__hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip,
	    xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp,
	    u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	u64 spdm_line_arr[8];	/* staging image of one 64-byte SPDM entry */
	u8 line_no;

	/*
	 * Clear the SPDM READY bit; hardware raises it again once the
	 * entry write below has been absorbed.
	 */
	val64 = XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->rxpic_int_reg);

	xge_debug_device(XGE_TRACE,
	    "L4 SP %x:DP %x: hash %x tgt_queue %d ",
	    l4_sp, l4_dp, jhash_value, tgt_queue);

	xge_os_memzero(&spdm_line_arr, sizeof(spdm_line_arr));

	/*
	 * Construct the SPDM entry: line 0 carries the L4 ports, target
	 * queue and protocol flags; lines 1-4 carry the addresses; line 7
	 * carries the hash and the enable bit.
	 */
	spdm_line_arr[0] = vBIT(l4_sp,0,16) |
	    vBIT(l4_dp,16,32) |
	    vBIT(tgt_queue,53,3) |
	    vBIT(is_tcp,59,1) |
	    vBIT(is_ipv4,63,1);

	if (is_ipv4) {
		spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) |
		    vBIT(dst_ip->ipv4.addr,32,32);
	} else {
		/* IPv6: two 64-bit halves each for source and destination. */
		xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8);
		xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8);
		xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8);
		xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8);
	}

	spdm_line_arr[7] = vBIT(jhash_value,0,32) |
	    BIT(63); /* entry enable bit */

	/* Write the 8 staged lines into the adapter's SPDM table slot. */
	for(line_no = 0; line_no < 8; line_no++) {
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
		    spdm_line_arr[line_no],
		    (void *)((char *)hldev->spdm_mem_base +
		    (spdm_entry * 64) + (line_no * 8)));
	}

	/* Wait for the operation to be completed (SPDM READY set again). */
	if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
	    XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	/*
	 * Add this information to the local SPDM table. The purpose of
	 * maintaining a local SPDM table is to avoid a search in the
	 * adapter SPDM table for spdm entry lookup which is very costly
	 * in terms of time.
	 */
	hldev->spdm_table[spdm_entry]->in_use = 1;
	xge_os_memcpy(&hldev->spdm_table[spdm_entry]->src_ip, src_ip,
	    sizeof(xge_hal_ipaddr_t));
	xge_os_memcpy(&hldev->spdm_table[spdm_entry]->dst_ip, dst_ip,
	    sizeof(xge_hal_ipaddr_t));
	hldev->spdm_table[spdm_entry]->l4_sp = l4_sp;
	hldev->spdm_table[spdm_entry]->l4_dp = l4_dp;
	hldev->spdm_table[spdm_entry]->is_tcp = is_tcp;
	hldev->spdm_table[spdm_entry]->is_ipv4 = is_ipv4;
	hldev->spdm_table[spdm_entry]->tgt_queue = tgt_queue;
	hldev->spdm_table[spdm_entry]->jhash_value = jhash_value;
	hldev->spdm_table[spdm_entry]->spdm_entry = spdm_entry;

	return XGE_HAL_OK;
}

/*
 * __hal_device_rth_spdm_configure - Configure RTH for the device.
 * @hldev: HAL device handle.
 *
 * Programs Receive Traffic Hashing using SPDM (Socket-Pair Direct
 * Match). On first call it allocates the local shadow of the SPDM
 * table; on a soft reset it replays the shadow back into hardware.
 */
xge_hal_status_e
__hal_device_rth_spdm_configure(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
	u64 val64;
	u8 spdm_bar_num;
	u32 spdm_bar_offset;
	int spdm_table_size;
	int i;

	/* Nothing to do unless SPDM steering is enabled. */
	if (!hldev->config.rth_spdm_en) {
		return XGE_HAL_OK;
	}

	/*
	 * Retrieve the base address of the SPDM table.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev,
	    hldev->regh0, &bar0->spdm_bir_offset);

	spdm_bar_num = XGE_HAL_SPDM_PCI_BAR_NUM(val64);
	spdm_bar_offset = XGE_HAL_SPDM_PCI_BAR_OFFSET(val64);

	/*
	 * spdm_bar_num specifies the PCI BAR register used to address
	 * the memory space. spdm_bar_offset specifies the offset of the
	 * SPDM memory within that BAR's memory space.
	 */
	switch (spdm_bar_num) {
	case 0:
	{
		hldev->spdm_mem_base = (char *)bar0 +
		    (spdm_bar_offset * 8);
		break;
	}
	case 1:
	{
		char *bar1 = (char *)hldev->bar1;
		hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8);
		break;
	}
	default:
		/*
		 * NOTE(review): this assertion condition is TRUE whenever
		 * the default case is reached (bar num is neither 0 nor 1),
		 * so it never fires; the intent was probably
		 * xge_assert(!(...)) or xge_assert(0) — confirm upstream.
		 */
		xge_assert(((spdm_bar_num != 0) && (spdm_bar_num != 1)));
	}

	/*
	 * Retrieve the size of the SPDM table (number of entries).
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->spdm_structure);
	hldev->spdm_max_entries = XGE_HAL_SPDM_MAX_ENTRIES(val64);

	spdm_table_size = hldev->spdm_max_entries *
	    sizeof(xge_hal_spdm_entry_t);
	if (hldev->spdm_table == NULL) {
		void *mem;

		/*
		 * First-time setup: allocate the pointer array and the
		 * backing memory for the local copy of the SPDM table.
		 */
		if ((hldev->spdm_table = (xge_hal_spdm_entry_t **)
		    xge_os_malloc(
		    hldev->pdev,
		    (sizeof(xge_hal_spdm_entry_t *) *
		    hldev->spdm_max_entries))) == NULL) {
			return XGE_HAL_ERR_OUT_OF_MEMORY;
		}

		if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL)
		{
			/* Roll back the pointer-array allocation. */
			xge_os_free(hldev->pdev, hldev->spdm_table,
			    (sizeof(xge_hal_spdm_entry_t *) *
			    hldev->spdm_max_entries));
			return XGE_HAL_ERR_OUT_OF_MEMORY;
		}

		xge_os_memzero(mem, spdm_table_size);
		/* Point each table slot into the contiguous backing block. */
		for (i = 0; i < hldev->spdm_max_entries; i++) {
			hldev->spdm_table[i] = (xge_hal_spdm_entry_t *)
			    ((char *)mem +
			    i * sizeof(xge_hal_spdm_entry_t));
		}
		xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev);
	} else {
		/*
		 * We are here because the host driver tries to do a soft
		 * reset on the device. Since the device soft reset clears
		 * the SPDM table, copy the entries from the local SPDM
		 * table back into the actual one.
		 */
		xge_os_spin_lock(&hldev->spdm_lock);
		for (i = 0; i < hldev->spdm_max_entries; i++) {
			xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i];

			if (spdm_entry->in_use) {
				if (__hal_spdm_entry_add(hldev,
				    &spdm_entry->src_ip,
				    &spdm_entry->dst_ip,
				    spdm_entry->l4_sp,
				    spdm_entry->l4_dp,
				    spdm_entry->is_tcp,
				    spdm_entry->is_ipv4,
				    spdm_entry->tgt_queue,
				    spdm_entry->jhash_value,
				    spdm_entry->spdm_entry)
				    != XGE_HAL_OK) {
					/* Log a warning */
					xge_debug_device(XGE_ERR,
					    "SPDM table update from local"
					    " memory failed");
				}
			}
		}
		xge_os_spin_unlock(&hldev->spdm_lock);
	}

	/*
	 * Set the receive traffic steering mode from default (classic)
	 * to enhanced.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rts_ctrl);
	val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rts_ctrl);

	/*
	 * We may not need to configure rts_rth_jhash_cfg register as the
	 * default values are good enough to calculate the hash.
	 */

	/*
	 * As of now, set all the rth mask registers to zero. TODO.
	 */
	for(i = 0; i < 5; i++) {
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0,
		    &bar0->rts_rth_hash_mask[i]);
	}

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0,
	    &bar0->rts_rth_hash_mask_5);

	if (hldev->config.rth_spdm_use_l4) {
		val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		    &bar0->rts_rth_status);
	}

	val64 = XGE_HAL_RTS_RTH_EN;
	val64 |= XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV4_EN;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->rts_rth_cfg);

	return XGE_HAL_OK;
}

/*
 * __hal_device_pci_init
 * @hldev: HAL device handle.
 *
 * Initialize certain PCI/PCI-X configuration registers
 * with recommended values. Save config space for future hw resets.
*/
static void
__hal_device_pci_init(xge_hal_device_t *hldev)
{
	int i, pcisize = 0;
	u16 cmd = 0;
	u8 val;

	/*
	 * Store PCI device ID and revision for future references where in
	 * we decide Xena revision using PCI sub system ID.
	 */
	xge_os_pci_read16(hldev->pdev,hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, device_id),
	    &hldev->device_id);
	xge_os_pci_read8(hldev->pdev,hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, revision),
	    &hldev->revision);

	/* Config-space size to snapshot depends on the chip generation. */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
		pcisize = XGE_HAL_PCISIZE_HERC;
	else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
		pcisize = XGE_HAL_PCISIZE_XENA;

	/* save original PCI config space to restore it on device_terminate() */
	for (i = 0; i < pcisize; i++) {
		xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
		    (u32*)&hldev->pci_config_space_bios + i);
	}

	/* Set the PErr Response bit and SERR in PCI command register. */
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
	cmd |= 0x140;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, command), cmd);

	/* Set user specified value for the PCI Latency Timer */
	if (hldev->config.latency_timer &&
	    hldev->config.latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) {
		xge_os_pci_write8(hldev->pdev, hldev->cfgh,
		    xge_offsetof(xge_hal_pci_config_le_t,
		    latency_timer),
		    (u8)hldev->config.latency_timer);
	}
	/* Read back latency timer to reflect it into user level */
	xge_os_pci_read8(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val);
	hldev->config.latency_timer = val;

	/* Enable Data Parity Error Recovery in PCI-X command register. */
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
	cmd |= 1;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);

	/* Set MMRB count in PCI-X command register (bits 3:2). */
	if (hldev->config.mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) {
		cmd &= 0xFFF3;
		cmd |= hldev->config.mmrb_count << 2;
		xge_os_pci_write16(hldev->pdev, hldev->cfgh,
		    xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
		    cmd);
	}
	/* Read back MMRB count to reflect it into user level */
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
	    &cmd);
	cmd &= 0x000C;
	hldev->config.mmrb_count = cmd>>2;

	/* Setting Maximum outstanding splits (bits 6:4) based on system type. */
	if (hldev->config.max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) {
		xge_os_pci_read16(hldev->pdev, hldev->cfgh,
		    xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
		    &cmd);
		cmd &= 0xFF8F;
		cmd |= hldev->config.max_splits_trans << 4;
		xge_os_pci_write16(hldev->pdev, hldev->cfgh,
		    xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
		    cmd);
	}

	/* Read back max split trans to reflect it into user level */
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
	cmd &= 0x0070;
	hldev->config.max_splits_trans = cmd>>4;

	/* Forcibly disabling relaxed ordering capability of the card. */
	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
	cmd &= 0xFFFD;
	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);

	/* save PCI config space for future resets */
	for (i = 0; i < pcisize; i++) {
		xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
		    (u32*)&hldev->pci_config_space + i);
	}
}

/*
 * __hal_device_pci_info_get - Get PCI bus information such as width,
 * frequency and mode.
 * @devh: HAL device handle.
 * @pci_mode: pointer to a variable of enumerated type
 *	      xge_hal_pci_mode_e{}.
 * @bus_frequency: pointer to a variable of enumerated type
 *	      xge_hal_pci_bus_frequency_e{}.
 * @bus_width: pointer to a variable of enumerated type
 *	      xge_hal_pci_bus_width_e{}.
 *
 * Get pci mode, frequency, and PCI bus width.
*
 * Returns: one of the xge_hal_status_e{} enumerated types.
 * XGE_HAL_OK - for success.
 * XGE_HAL_ERR_INVALID_PCI_INFO - for invalid PCI information from the card.
 * XGE_HAL_ERR_BAD_DEVICE_ID - for invalid card.
 *
 * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e.
 */
static xge_hal_status_e
__hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
	xge_hal_pci_bus_frequency_e *bus_frequency,
	xge_hal_pci_bus_width_e *bus_width)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_status_e rc_status = XGE_HAL_OK;
	xge_hal_card_e card_id = xge_hal_device_check_id (devh);

#ifdef XGE_HAL_HERC_EMULATION
	/* Emulation has no real bus: hardcode 66 MHz PCI. */
	hldev->config.pci_freq_mherz = XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
	*bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
	*pci_mode = XGE_HAL_PCI_66MHZ_MODE;
#else
	if (card_id == XGE_HAL_CARD_HERC) {
		/* Herc reports bus width/mode in the pci_info register. */
		xge_hal_pci_bar0_t *bar0 =
		    (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
		u64 pci_info = xge_os_pio_mem_read64(hldev->pdev,
		    hldev->regh0, &bar0->pci_info);
		if (XGE_HAL_PCI_32_BIT & pci_info)
			*bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT;
		else
			*bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
		/* Mode field lives in the top nibble. */
		switch((pci_info & XGE_HAL_PCI_INFO)>>60) {
			case XGE_HAL_PCI_33MHZ_MODE:
				*bus_frequency =
				    XGE_HAL_PCI_BUS_FREQUENCY_33MHZ;
				*pci_mode = XGE_HAL_PCI_33MHZ_MODE;
				break;
			case XGE_HAL_PCI_66MHZ_MODE:
				*bus_frequency =
				    XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
				*pci_mode = XGE_HAL_PCI_66MHZ_MODE;
				break;
			case XGE_HAL_PCIX_M1_66MHZ_MODE:
				*bus_frequency =
				    XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
				*pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE;
				break;
			case XGE_HAL_PCIX_M1_100MHZ_MODE:
				*bus_frequency =
				    XGE_HAL_PCI_BUS_FREQUENCY_100MHZ;
				*pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE;
				break;
			case XGE_HAL_PCIX_M1_133MHZ_MODE:
				*bus_frequency =
				    XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
				*pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE;
				break;
			case XGE_HAL_PCIX_M2_66MHZ_MODE:
				/* PCI-X mode 2 doubles the effective rate. */
				*bus_frequency =
				    XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
				*pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE;
				break;
			case XGE_HAL_PCIX_M2_100MHZ_MODE:
				*bus_frequency =
				    XGE_HAL_PCI_BUS_FREQUENCY_200MHZ;
				*pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE;
				break;
			case XGE_HAL_PCIX_M2_133MHZ_MODE:
				*bus_frequency =
				    XGE_HAL_PCI_BUS_FREQUENCY_266MHZ;
				*pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE;
				break;
			case XGE_HAL_PCIX_M1_RESERVED:
			case XGE_HAL_PCIX_M1_66MHZ_NS:
			case XGE_HAL_PCIX_M1_100MHZ_NS:
			case XGE_HAL_PCIX_M1_133MHZ_NS:
			case XGE_HAL_PCIX_M2_RESERVED:
			case XGE_HAL_PCIX_533_RESERVED:
			default:
				rc_status = XGE_HAL_ERR_INVALID_PCI_INFO;
				xge_debug_device(XGE_ERR,
				    "invalid pci info "XGE_OS_LLXFMT,
				    (unsigned long long)pci_info);
				break;
		}
		if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO)
			xge_debug_device(XGE_TRACE, "PCI info: mode %d width "
			    "%d frequency %d", *pci_mode, *bus_width,
			    *bus_frequency);
		if (hldev->config.pci_freq_mherz ==
		    XGE_HAL_DEFAULT_USE_HARDCODE) {
			hldev->config.pci_freq_mherz = *bus_frequency;
		}
	}
	/* for XENA, we report PCI mode, only. PCI bus frequency, and bus width
	 * are set to unknown */
	else if (card_id == XGE_HAL_CARD_XENA) {
		u32 pcix_status;
		u8 dev_num, bus_num;
		/* initialize defaults for XENA */
		*bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
		*bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
		xge_os_pci_read32(hldev->pdev, hldev->cfgh,
		    xge_offsetof(xge_hal_pci_config_le_t, pcix_status),
		    &pcix_status);
		dev_num = (u8)((pcix_status & 0xF8) >> 3);
		bus_num = (u8)((pcix_status & 0xFF00) >> 8);
		/* Zero device+bus number means plain PCI, else PCI-X. */
		if (dev_num == 0 && bus_num == 0)
			*pci_mode = XGE_HAL_PCI_BASIC_MODE;
		else
			*pci_mode = XGE_HAL_PCIX_BASIC_MODE;
		xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode);
		if (hldev->config.pci_freq_mherz ==
		    XGE_HAL_DEFAULT_USE_HARDCODE) {
			/*
			 * There is no way to detect BUS frequency on Xena,
			 * so, in case of automatic configuration we hopelessly
			 * assume 133MHZ.
			 */
			hldev->config.pci_freq_mherz =
			    XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
		}
	} else if (card_id == XGE_HAL_CARD_TITAN) {
		/* Titan is PCIe-attached; report fixed width/frequency. */
		*bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
		*bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_250MHZ;
		if (hldev->config.pci_freq_mherz ==
		    XGE_HAL_DEFAULT_USE_HARDCODE) {
			hldev->config.pci_freq_mherz = *bus_frequency;
		}
	} else{
		rc_status = XGE_HAL_ERR_BAD_DEVICE_ID;
		xge_debug_device(XGE_ERR, "invalid device id %d", card_id);
	}
#endif

	return rc_status;
}

/*
 * __hal_device_handle_link_up_ind
 * @hldev: HAL device handle.
 *
 * Link up indication handler. The function is invoked by HAL when
 * Xframe indicates that the link is up for programmable amount of time.
 */
static int
__hal_device_handle_link_up_ind(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/*
	 * If the previous link state is not down, return.
	 */
	if (hldev->link_state == XGE_HAL_LINK_UP) {
#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
		/* Re-arm: mask link-up, unmask link-down interrupt. */
		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
			val64 = xge_os_pio_mem_read64(
			    hldev->pdev, hldev->regh0,
			    &bar0->misc_int_mask);
			val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
			val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			    val64, &bar0->misc_int_mask);
		}
#endif
		xge_debug_device(XGE_TRACE,
		    "link up indication while link is up, ignoring..");
		return 0;
	}

	/* Now re-enable it as due to noise, hardware turned it off */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->adapter_control);
	val64 |= XGE_HAL_ADAPTER_CNTL_EN;
	/*
	 * NOTE(review): this CLEARS the ECC_EN bit; the original comment
	 * said "ECC enable", which contradicts the code — confirm intent.
	 */
	val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->adapter_control);

	/* Turn on the Laser */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->adapter_control);
	val64 = val64|(XGE_HAL_ADAPTER_EOI_TX_ON |
	    XGE_HAL_ADAPTER_LED_ON);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->adapter_control);

#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
		/* Confirm the link really came up before notifying. */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->adapter_status);
		if (val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
		    XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) {
			xge_debug_device(XGE_TRACE, "%s",
			    "fail to transition link to up...");
			return 0;
		} else {
			/*
			 * Mask the Link Up interrupt and unmask the Link Down
			 * interrupt.
			 */
			val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
			    &bar0->misc_int_mask);
			val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
			val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			    &bar0->misc_int_mask);
			xge_debug_device(XGE_TRACE, "calling link up..");
			hldev->link_state = XGE_HAL_LINK_UP;

			/* notify ULD */
			if (g_xge_hal_driver->uld_callbacks.link_up) {
				g_xge_hal_driver->uld_callbacks.link_up(
				    hldev->upper_layer_info);
			}
			return 1;
		}
	}
#endif
	xge_os_mdelay(1);
	/* Debounce: require the fault bits to stay clear for a while. */
	if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
	    (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
	    XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
	    XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
		/* notify ULD */
		(void) xge_queue_produce_context(hldev->queueh,
		    XGE_HAL_EVENT_LINK_IS_UP,
		    hldev);
		/* link is up after been enabled */
		return 1;
	} else {
		xge_debug_device(XGE_TRACE, "%s",
		    "fail to transition link to up...");
		return 0;
	}
}

/*
 * __hal_device_handle_link_down_ind
 * @hldev: HAL device handle.
 *
 * Link down indication handler. The function is invoked by HAL when
 * Xframe indicates that the link is down.
 */
static int
__hal_device_handle_link_down_ind(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/*
	 * If the previous link state is not up, return.
	 */
	if (hldev->link_state == XGE_HAL_LINK_DOWN) {
#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
		/* Re-arm: mask link-down, unmask link-up interrupt. */
		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
			val64 = xge_os_pio_mem_read64(
			    hldev->pdev, hldev->regh0,
			    &bar0->misc_int_mask);
			val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
			val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
			xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			    val64, &bar0->misc_int_mask);
		}
#endif
		xge_debug_device(XGE_TRACE,
		    "link down indication while link is down, ignoring..");
		return 0;
	}
	xge_os_mdelay(1);

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->adapter_control);

	/* try to debounce the link only if the adapter is enabled. */
	if (val64 & XGE_HAL_ADAPTER_CNTL_EN) {
		if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
		    (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
		    XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
		    XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
			xge_debug_device(XGE_TRACE,
			    "link is actually up (possible noisy link?), ignoring.");
			return(0);
		}
	}
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->adapter_control);
	/* turn off LED */
	val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->adapter_control);

#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
		/*
		 * Mask the Link Down interrupt and unmask the Link up
		 * interrupt
		 */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->misc_int_mask);
		val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
		val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		    &bar0->misc_int_mask);

		/* link is down */
		xge_debug_device(XGE_TRACE, "calling link down..");
		hldev->link_state = XGE_HAL_LINK_DOWN;

		/* notify ULD */
		if (g_xge_hal_driver->uld_callbacks.link_down) {
			g_xge_hal_driver->uld_callbacks.link_down(
			    hldev->upper_layer_info);
		}
		return 1;
	}
#endif
	/* notify ULD */
	(void) xge_queue_produce_context(hldev->queueh,
	    XGE_HAL_EVENT_LINK_IS_DOWN,
	    hldev);
	/* link is down */
	return 1;
}

/*
 * __hal_device_handle_link_state_change
 * @hldev: HAL device handle.
 *
 * Link state change handler. The function is invoked by HAL when
 * Xframe indicates link state change condition. The code here makes sure to
 * 1) ignore redundant state change indications;
 * 2) execute link-up sequence, and handle the failure to bring the link up;
 * 3) generate XGE_HAL_LINK_UP/DOWN event for the subsequent handling by
 *    upper-layer driver (ULD).
 */
static int
__hal_device_handle_link_state_change(xge_hal_device_t *hldev)
{
	u64 hw_status;
	int hw_link_state;
	int retcode;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	int i = 0;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->adapter_control);

	/* If the adapter is not enabled but the hal thinks we are in the up
	 * state then transition to the down state.
	 */
	if ( !(val64 & XGE_HAL_ADAPTER_CNTL_EN) &&
	    (hldev->link_state == XGE_HAL_LINK_UP) ) {
		return(__hal_device_handle_link_down_ind(hldev));
	}

	do {
		xge_os_mdelay(1);
		(void) xge_hal_device_status(hldev, &hw_status);
		hw_link_state = (hw_status &
		    (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
		    XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ?
		    XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP;

		/* check if the current link state is still considered
		 * to be changed. This way we will make sure that this is
		 * not a noise which needs to be filtered out */
		if (hldev->link_state == hw_link_state)
			break;
	} while (i++ < hldev->config.link_valid_cnt);

	/* If the current link state is same as previous, just return */
	if (hldev->link_state == hw_link_state)
		retcode = 0;
	/* detected state change */
	else if (hw_link_state == XGE_HAL_LINK_UP)
		retcode = __hal_device_handle_link_up_ind(hldev);
	else
		retcode = __hal_device_handle_link_down_ind(hldev);
	return retcode;
}

/*
 * __hal_device_handle_serr - count, optionally dump, and queue a
 * system-error (SERR) event for ULD processing.
 */
static void
__hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value)
{
	hldev->stats.sw_dev_err_stats.serr_cnt++;
	if (hldev->config.dump_on_serr) {
#ifdef XGE_HAL_USE_MGMT_AUX
		(void) xge_hal_aux_device_dump(hldev);
#endif
	}

	(void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev,
	    1, sizeof(u64), (void *)&value);

	xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
	    (unsigned long long) value);
}

/*
 * __hal_device_handle_eccerr - optionally dump and queue an ECC-error
 * event. Only Xena needs the event; Herc recovers on its own.
 */
static void
__hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value)
{
	if (hldev->config.dump_on_eccerr) {
#ifdef XGE_HAL_USE_MGMT_AUX
		(void) xge_hal_aux_device_dump(hldev);
#endif
	}

	/* Herc smart enough to recover on its own! */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
		(void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_ECCERR,
		    hldev, 1, sizeof(u64), (void *)&value);
	}

	xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
	    (unsigned long long) value);
}

/*
 * __hal_device_handle_parityerr - optionally dump and queue a
 * parity-error event for ULD processing.
 */
static void
__hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value)
{
	if (hldev->config.dump_on_parityerr) {
#ifdef XGE_HAL_USE_MGMT_AUX
		(void) xge_hal_aux_device_dump(hldev);
#endif
	}
	(void) xge_queue_produce_context(hldev->queueh,
	    XGE_HAL_EVENT_PARITYERR, hldev);

	xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
	    (unsigned long long) value);
}

/*
 * __hal_device_handle_targetabort - queue a PCI target-abort event for
 * ULD processing.
 */
static void
__hal_device_handle_targetabort(xge_hal_device_t *hldev)
{
	(void) xge_queue_produce_context(hldev->queueh,
	    XGE_HAL_EVENT_TARGETABORT, hldev);
}

/*
 * __hal_device_hw_initialize
 * @hldev: HAL device handle.
 *
 * Initialize Xframe hardware: swapper, PCI optimizations, XGXS reset,
 * MTU/steering, interrupt schemes (TTI/RTI/RTH/SPDM/RTS), fifo and ring
 * engines, and finally wait for the adapter to go quiescent.
 */
static xge_hal_status_e
__hal_device_hw_initialize(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	xge_hal_status_e status;
	u64 val64;

	/* Set proper endian settings and verify the same by reading the PIF
	 * Feed-back register. */
	status = __hal_device_set_swapper(hldev);
	if (status != XGE_HAL_OK) {
		return status;
	}

	/* update the pci mode, frequency, and width */
	if (__hal_device_pci_info_get(hldev, &hldev->pci_mode,
	    &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){
		hldev->pci_mode = XGE_HAL_PCI_INVALID_MODE;
		hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
		hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
		/*
		 * FIXME: this cannot happen.
		 * But if it happens we cannot continue just like that
		 */
		xge_debug_device(XGE_ERR, "unable to get pci info");
	}

	if ((hldev->pci_mode == XGE_HAL_PCI_33MHZ_MODE) ||
	    (hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) ||
	    (hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) {
		/* PCI optimization: set TxReqTimeOut
		 * register (0x800+0x120) to 0x1ff or
		 * something close to this.
		 * Note: not to be used for PCI-X! */

		val64 = XGE_HAL_TXREQTO_VAL(0x1FF);
		val64 |= XGE_HAL_TXREQTO_EN;

		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		    &bar0->txreqtimeout);

		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
		    &bar0->read_retry_delay);

		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
		    &bar0->write_retry_delay);

		xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode");
	}

	if (hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_266MHZ ||
	    hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_250MHZ) {

		/* Optimizing for PCI-X 266/250 */

		val64 = XGE_HAL_TXREQTO_VAL(0x7F);
		val64 |= XGE_HAL_TXREQTO_EN;

		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		    &bar0->txreqtimeout);

		xge_debug_device(XGE_TRACE, "%s",
		    "optimizing for PCI-X 266/250 modes");
	}

	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
		    0x4000000000000ULL, &bar0->read_retry_delay);

		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
		    0x4000000000000ULL, &bar0->write_retry_delay);
	}

	/* added this to set the no of bytes used to update lso_bytes_sent
	 * returned TxD0 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->pic_control_2);
	val64 &= ~XGE_HAL_TXD_WRITE_BC(0x2);
	val64 |= XGE_HAL_TXD_WRITE_BC(0x4);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->pic_control_2);

	/* added this to clear the EOI_RESET field while leaving XGXS_RESET
	 * in reset, then a 1-second delay */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset);
	xge_os_mdelay(1000);

	/* Clear the XGXS_RESET field of the SW_RESET register in order to
	 * release the XGXS from reset. Its reset value is 0xA5; write 0x00
	 * to activate the XGXS. The core requires a minimum 500 us reset.*/
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset);
	(void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->sw_reset);
	xge_os_mdelay(1);

	/* read registers in all blocks */
	(void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->mac_int_mask);
	(void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->mc_int_mask);
	(void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->xgxs_int_mask);

	/* set default MTU and steer based on length*/
	__hal_ring_mtu_set(hldev, hldev->config.mtu+22); // Always set 22 bytes extra for steering to work

	if (hldev->config.mac.rmac_bcast_en) {
		xge_hal_device_bcast_enable(hldev);
	} else {
		xge_hal_device_bcast_disable(hldev);
	}

#ifndef XGE_HAL_HERC_EMULATION
	__hal_device_xaui_configure(hldev);
#endif
	__hal_device_mac_link_util_set(hldev);

	__hal_device_mac_link_util_set(hldev);

	/*
	 * Keep its PCI REQ# line asserted during a write
	 * transaction up to the end of the transaction
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->misc_control);

	val64 |= XGE_HAL_MISC_CONTROL_EXT_REQ_EN;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    val64, &bar0->misc_control);

	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->misc_control);

		val64 |= XGE_HAL_MISC_CONTROL_LINK_FAULT;

		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
		    val64, &bar0->misc_control);
	}

	/*
	 * bimodal interrupts is when all Rx traffic interrupts
	 * will go to TTI, so we need to adjust RTI settings and
	 * use adaptive TTI timer. We need to make sure RTI is
	 * properly configured to sane value which will not
	 * distrupt bimodal behavior.
	 */
	if (hldev->config.bimodal_interrupts) {
		int i;

		/* force polling_cnt to be "0", otherwise
		 * IRQ workload statistics will be screwed. This could
		 * be worked out in TXPIC handler later. */
		hldev->config.isr_polling_cnt = 0;
		hldev->config.sched_timer_us = 10000;

		/* disable all TTI < 56 */
		/* NOTE(review): the loop bounds below were destroyed by
		 * text extraction in this copy and are reconstructed from
		 * context — confirm against the pristine sources. */
		for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
			int j;
			if (!hldev->config.fifo.queue[i].configured)
				continue;
			for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
			    if (hldev->config.fifo.queue[i].tti[j].enabled)
				hldev->config.fifo.queue[i].tti[j].enabled = 0;
			}
		}

		/* now configure bimodal interrupts */
		__hal_device_bimodal_configure(hldev);
	}

	status = __hal_device_tti_configure(hldev, 0);
	if (status != XGE_HAL_OK)
		return status;

	status = __hal_device_rti_configure(hldev, 0);
	if (status != XGE_HAL_OK)
		return status;

	status = __hal_device_rth_it_configure(hldev);
	if (status != XGE_HAL_OK)
		return status;

	status = __hal_device_rth_spdm_configure(hldev);
	if (status != XGE_HAL_OK)
		return status;

	status = __hal_device_rts_mac_configure(hldev);
	if (status != XGE_HAL_OK) {
		xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed ");
		return status;
	}

	status = __hal_device_rts_port_configure(hldev);
	if (status != XGE_HAL_OK) {
		xge_debug_device(XGE_ERR, "__hal_device_rts_port_configure Failed ");
		return status;
	}

	status = __hal_device_rts_qos_configure(hldev);
	if (status != XGE_HAL_OK) {
		xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed ");
		return status;
	}

	__hal_device_pause_frames_configure(hldev);
	__hal_device_rmac_padding_configure(hldev);
	__hal_device_shared_splits_configure(hldev);

	/* make sure all interrupts going to be disabled at the moment */
	__hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);

	/* SXE-008 Transmit DMA arbitration issue */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
	    hldev->revision < 4) {
		xge_os_pio_mem_write64(hldev->pdev,hldev->regh0,
		    XGE_HAL_ADAPTER_PCC_ENABLE_FOUR,
		    &bar0->pcc_enable);
	}
#if 0
	// Removing temporarily as FreeBSD is seeing lower performance
	// attributable to this fix.

	/* SXE-2-010 */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
		/* Turn off the ECC error reporting for RLDRAM interface */
		if ((status = xge_hal_fix_rldram_ecc_error(hldev)) != XGE_HAL_OK)
			return status;
	}
#endif
	__hal_fifo_hw_initialize(hldev);
	__hal_ring_hw_initialize(hldev);

	if (__hal_device_wait_quiescent(hldev, &val64)) {
		return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
	}

	if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
	    XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
	    XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
		return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
	}

	xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is quiescent",
	    (unsigned long long)(ulong_t)hldev);

	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX ||
	    hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI) {
		/*
		 * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL
		 * is disabled.
		 */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->pic_control);
		val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT);
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
		    &bar0->pic_control);
	}

	hldev->hw_is_initialized = 1;
	hldev->terminating = 0;

	return XGE_HAL_OK;
}

/*
 * __hal_device_reset - Reset device only.
 * @hldev: HAL device handle.
 *
 * Reset the device, and subsequently restore
 * the previously saved PCI configuration space.
*/ #define XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT 50 static xge_hal_status_e __hal_device_reset(xge_hal_device_t *hldev) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; int i, j, swap_done, pcisize = 0; u64 val64, rawval = 0ULL; if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { if ( hldev->bar2 ) { u64 *msix_vetor_table = (u64 *)hldev->bar2; // 2 64bit words for each entry for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; i++) { hldev->msix_vector_table[i] = xge_os_pio_mem_read64(hldev->pdev, hldev->regh2, &msix_vetor_table[i]); } } } } val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->pif_rd_swapper_fb); swap_done = (val64 == XGE_HAL_IF_RD_SWAPPER_FB); if (swap_done) { __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset); } else { u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32); #if defined(XGE_OS_HOST_LITTLE_ENDIAN) || defined(XGE_OS_PIO_LITTLE_ENDIAN) /* swap it */ val = (((val & (u32)0x000000ffUL) << 24) | ((val & (u32)0x0000ff00UL) << 8) | ((val & (u32)0x00ff0000UL) >> 8) | ((val & (u32)0xff000000UL) >> 24)); #endif xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val, &bar0->sw_reset); } pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)? 
XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA; xge_os_mdelay(20); /* Wait for 20 ms after reset */ { /* Poll for no more than 1 second */ for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++) { for (j = 0; j < pcisize; j++) { xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4, *((u32*)&hldev->pci_config_space + j)); } xge_os_pci_read16(hldev->pdev,hldev->cfgh, xge_offsetof(xge_hal_pci_config_le_t, device_id), &hldev->device_id); if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN) break; xge_os_mdelay(20); } } if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_UNKNOWN) { xge_debug_device(XGE_ERR, "device reset failed"); return XGE_HAL_ERR_RESET_FAILED; } if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { int cnt = 0; rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC; pcisize = XGE_HAL_PCISIZE_HERC; xge_os_mdelay(1); do { val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->sw_reset); if (val64 != rawval) { break; } cnt++; xge_os_mdelay(1); /* Wait for 1ms before retry */ } while(cnt < 20); } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA; pcisize = XGE_HAL_PCISIZE_XENA; xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS); } /* Restore MSI-X vector table */ if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) { if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { if ( hldev->bar2 ) { /* * 94: MSIXTable 00000004 ( BIR:4 Offset:0x0 ) * 98: PBATable 00000404 ( BIR:4 Offset:0x400 ) */ u64 *msix_vetor_table = (u64 *)hldev->bar2; /* 2 64bit words for each entry */ for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2; i++) { xge_os_pio_mem_write64(hldev->pdev, hldev->regh2, hldev->msix_vector_table[i], &msix_vetor_table[i]); } } } } hldev->link_state = XGE_HAL_LINK_DOWN; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->sw_reset); if (val64 != rawval) { xge_debug_device(XGE_ERR, "device has not been reset " "got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT, (unsigned long long)val64, 
(unsigned long long)rawval); return XGE_HAL_ERR_RESET_FAILED; } hldev->hw_is_initialized = 0; return XGE_HAL_OK; } /* * __hal_device_poll - General private routine to poll the device. * @hldev: HAL device handle. * * Returns: one of the xge_hal_status_e{} enumerated types. * XGE_HAL_OK - for success. * XGE_HAL_ERR_CRITICAL - when encounters critical error. */ static xge_hal_status_e __hal_device_poll(xge_hal_device_t *hldev) { xge_hal_pci_bar0_t *bar0; u64 err_reg; bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; /* Handling SERR errors by forcing a H/W reset. */ err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->serr_source); if (err_reg & XGE_HAL_SERR_SOURCE_ANY) { __hal_device_handle_serr(hldev, "serr_source", err_reg); return XGE_HAL_ERR_CRITICAL; } err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->misc_int_reg); if (err_reg & XGE_HAL_MISC_INT_REG_DP_ERR_INT) { hldev->stats.sw_dev_err_stats.parity_err_cnt++; __hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg); return XGE_HAL_ERR_CRITICAL; } #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) #endif { /* Handling link status change error Intr */ err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->mac_rmac_err_reg); if (__hal_device_handle_link_state_change(hldev)) xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, err_reg, &bar0->mac_rmac_err_reg); } if (hldev->inject_serr != 0) { err_reg = hldev->inject_serr; hldev->inject_serr = 0; __hal_device_handle_serr(hldev, "inject_serr", err_reg); return XGE_HAL_ERR_CRITICAL; } if (hldev->inject_ecc != 0) { err_reg = hldev->inject_ecc; hldev->inject_ecc = 0; hldev->stats.sw_dev_err_stats.ecc_err_cnt++; __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg); return XGE_HAL_ERR_CRITICAL; } if (hldev->inject_bad_tcode != 0) { u8 t_code = hldev->inject_bad_tcode; xge_hal_channel_t channel; xge_hal_fifo_txd_t txd; xge_hal_ring_rxd_1_t rxd; channel.devh = hldev; if 
(hldev->inject_bad_tcode_for_chan_type == XGE_HAL_CHANNEL_TYPE_FIFO) { channel.type = XGE_HAL_CHANNEL_TYPE_FIFO; } else { channel.type = XGE_HAL_CHANNEL_TYPE_RING; } hldev->inject_bad_tcode = 0; if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO) return xge_hal_device_handle_tcode(&channel, &txd, t_code); else return xge_hal_device_handle_tcode(&channel, &rxd, t_code); } return XGE_HAL_OK; } /* * __hal_verify_pcc_idle - Verify All Enbled PCC are IDLE or not * @hldev: HAL device handle. * @adp_status: Adapter Status value * Usage: See xge_hal_device_enable{}. */ xge_hal_status_e __hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status) { if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA && hldev->revision < 4) { /* * For Xena 1,2,3 we enable only 4 PCCs Due to * SXE-008 (Transmit DMA arbitration issue) */ if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) { xge_debug_device(XGE_TRACE, "%s", "PCC is not IDLE after adapter enabled!"); return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; } } else { if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) { xge_debug_device(XGE_TRACE, "%s", "PCC is not IDLE after adapter enabled!"); return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; } } return XGE_HAL_OK; } static void __hal_update_bimodal(xge_hal_device_t *hldev, int ring_no) { int tval, d, iwl_avg, len_avg, bytes_avg, bytes_hist, d_hist; int iwl_rxcnt, iwl_txcnt, iwl_txavg, len_rxavg, iwl_rxavg, len_txavg; int iwl_cnt, i; #define _HIST_SIZE 50 /* 0.5 sec history */ #define _HIST_ADJ_TIMER 1 #define _STEP 2 static int bytes_avg_history[_HIST_SIZE] = {0}; static int d_avg_history[_HIST_SIZE] = {0}; static int history_idx = 0; static int pstep = 1; static int hist_adj_timer = 0; /* * tval - current value of this bimodal timer */ tval = hldev->bimodal_tti[ring_no].timer_val_us; /* * d - how many interrupts we were getting since last * bimodal timer tick. 
*/ d = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt - hldev->bimodal_intr_cnt; /* advance bimodal interrupt counter */ hldev->bimodal_intr_cnt = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt; /* * iwl_cnt - how many interrupts we've got since last * bimodal timer tick. */ iwl_rxcnt = (hldev->irq_workload_rxcnt[ring_no] ? hldev->irq_workload_rxcnt[ring_no] : 1); iwl_txcnt = (hldev->irq_workload_txcnt[ring_no] ? hldev->irq_workload_txcnt[ring_no] : 1); iwl_cnt = iwl_rxcnt + iwl_txcnt; iwl_cnt = iwl_cnt; /* just to remove the lint warning */ /* * we need to take hldev->config.isr_polling_cnt into account * but for some reason this line causing GCC to produce wrong * code on Solaris. As of now, if bimodal_interrupts is configured * hldev->config.isr_polling_cnt is forced to be "0". * * iwl_cnt = iwl_cnt / (hldev->config.isr_polling_cnt + 1); */ /* * iwl_avg - how many RXDs on avarage been processed since * last bimodal timer tick. This indirectly includes * CPU utilizations. */ iwl_rxavg = hldev->irq_workload_rxd[ring_no] / iwl_rxcnt; iwl_txavg = hldev->irq_workload_txd[ring_no] / iwl_txcnt; iwl_avg = iwl_rxavg + iwl_txavg; iwl_avg = iwl_avg == 0 ? 1 : iwl_avg; /* * len_avg - how many bytes on avarage been processed since * last bimodal timer tick. i.e. avarage frame size. */ len_rxavg = 1 + hldev->irq_workload_rxlen[ring_no] / (hldev->irq_workload_rxd[ring_no] ? hldev->irq_workload_rxd[ring_no] : 1); len_txavg = 1 + hldev->irq_workload_txlen[ring_no] / (hldev->irq_workload_txd[ring_no] ? 
hldev->irq_workload_txd[ring_no] : 1); len_avg = len_rxavg + len_txavg; if (len_avg < 60) len_avg = 60; /* align on low boundary */ if ((tval -_STEP) < hldev->config.bimodal_timer_lo_us) tval = hldev->config.bimodal_timer_lo_us; /* reset faster */ if (iwl_avg == 1) { tval = hldev->config.bimodal_timer_lo_us; /* reset history */ for (i = 0; i < _HIST_SIZE; i++) bytes_avg_history[i] = d_avg_history[i] = 0; history_idx = 0; pstep = 1; hist_adj_timer = 0; } /* always try to ajust timer to the best throughput value */ bytes_avg = iwl_avg * len_avg; history_idx %= _HIST_SIZE; bytes_avg_history[history_idx] = bytes_avg; d_avg_history[history_idx] = d; history_idx++; d_hist = bytes_hist = 0; for (i = 0; i < _HIST_SIZE; i++) { /* do not re-configure until history is gathered */ if (!bytes_avg_history[i]) { tval = hldev->config.bimodal_timer_lo_us; goto _end; } bytes_hist += bytes_avg_history[i]; d_hist += d_avg_history[i]; } bytes_hist /= _HIST_SIZE; d_hist /= _HIST_SIZE; // xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d", // d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg, // d_hist*bytes_hist, pstep); /* make an adaptive step */ if (d * bytes_avg < d_hist * bytes_hist && hist_adj_timer++ > _HIST_ADJ_TIMER) { pstep = !pstep; hist_adj_timer = 0; } if (pstep && (tval + _STEP) <= hldev->config.bimodal_timer_hi_us) { tval += _STEP; hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++; } else if ((tval - _STEP) >= hldev->config.bimodal_timer_lo_us) { tval -= _STEP; hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++; } /* enable TTI range A for better latencies */ hldev->bimodal_urange_a_en = 0; if (tval <= hldev->config.bimodal_timer_lo_us && iwl_avg > 2) hldev->bimodal_urange_a_en = 1; _end: /* reset workload statistics counters */ hldev->irq_workload_rxcnt[ring_no] = 0; hldev->irq_workload_rxd[ring_no] = 0; hldev->irq_workload_rxlen[ring_no] = 0; hldev->irq_workload_txcnt[ring_no] = 0; hldev->irq_workload_txd[ring_no] = 0; 
hldev->irq_workload_txlen[ring_no] = 0; /* reconfigure TTI56 + ring_no with new timer value */ hldev->bimodal_timer_val_us = tval; (void) __hal_device_rti_configure(hldev, 1); } static void __hal_update_rxufca(xge_hal_device_t *hldev, int ring_no) { int ufc, ic, i; ufc = hldev->config.ring.queue[ring_no].rti.ufc_a; ic = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt; /* urange_a adaptive coalescing */ if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) { if (ic > hldev->rxufca_intr_thres) { if (ufc < hldev->config.rxufca_hi_lim) { ufc += 1; for (i=0; iconfig.ring.queue[i].rti.ufc_a = ufc; (void) __hal_device_rti_configure(hldev, 1); hldev->stats.sw_dev_info_stats. rxufca_hi_adjust_cnt++; } hldev->rxufca_intr_thres = ic + hldev->config.rxufca_intr_thres; /* def: 30 */ } else { if (ufc > hldev->config.rxufca_lo_lim) { ufc -= 1; for (i=0; iconfig.ring.queue[i].rti.ufc_a = ufc; (void) __hal_device_rti_configure(hldev, 1); hldev->stats.sw_dev_info_stats. rxufca_lo_adjust_cnt++; } } hldev->rxufca_lbolt_time = hldev->rxufca_lbolt + hldev->config.rxufca_lbolt_period; } hldev->rxufca_lbolt++; } /* * __hal_device_handle_mc - Handle MC interrupt reason * @hldev: HAL device handle. 
* @reason: interrupt reason
 *
 * Handle a memory-controller (MC) interrupt: count single/double-bit ECC
 * and state-machine errors, and escalate uncorrectable MIRI ECC errors
 * to a critical status so the caller resets the device.
 */
xge_hal_status_e
__hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64;

	/* Nothing to do unless the MC block actually raised the interrupt. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->mc_int_status);
	if (!(val64 & XGE_HAL_MC_INT_STATUS_MC_INT))
	    return XGE_HAL_OK;

	/*
	 * Latch the error reasons and write the same value back —
	 * presumably write-1-to-clear semantics; confirm with the PRM.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->mc_err_reg);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &isrbar0->mc_err_reg);

	/*
	 * Single-bit (correctable) ECC errors.  The ITQ/RLD sources are
	 * only checked on non-Xena silicon, hence the device-id test.
	 */
	if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L ||
	    val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U ||
	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_0 ||
	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_1 ||
	    (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
	     (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_L ||
	      val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U ||
	      val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L ||
	      val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U))) {
	    hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++;
	    hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
	}

	/* Double-bit (uncorrectable) ECC errors; same Xena/non-Xena split. */
	if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L ||
	    val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_U ||
	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1 ||
	    (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
	     (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_L ||
	      val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U ||
	      val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L ||
	      val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U))) {
	    hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++;
	    hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
	}

	if (val64 & XGE_HAL_MC_ERR_REG_SM_ERR) {
	    hldev->stats.sw_dev_err_stats.sm_err_cnt++;
	}

	/* those two should result in device reset */
	if (val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1) {
	    __hal_device_handle_eccerr(hldev, "mc_err_reg", val64);
	    return XGE_HAL_ERR_CRITICAL;
	}

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_pic - Handle non-traffic PIC interrupt reason
 *
@hldev: HAL device handle.
 * @reason: interrupt reason
 *
 * Dispatch the non-traffic PIC interrupt sources (flash, MDIO, IIC,
 * misc).  Each source's reason register is read and written back to
 * acknowledge it; only the misc/link-state source gets real handling.
 */
xge_hal_status_e
__hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64;

	/* Flash interrupt: acknowledged but otherwise unhandled. */
	if (reason & XGE_HAL_PIC_INT_FLSH) {
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->flsh_int_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &isrbar0->flsh_int_reg);
	    /* FIXME: handle register */
	}

	/* MDIO interrupt: acknowledged but otherwise unhandled. */
	if (reason & XGE_HAL_PIC_INT_MDIO) {
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->mdio_int_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &isrbar0->mdio_int_reg);
	    /* FIXME: handle register */
	}

	/* IIC interrupt: acknowledged but otherwise unhandled. */
	if (reason & XGE_HAL_PIC_INT_IIC) {
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->iic_int_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	        &isrbar0->iic_int_reg);
	    /* FIXME: handle register */
	}

	if (reason & XGE_HAL_PIC_INT_MISC) {
	    val64 = xge_os_pio_mem_read64(hldev->pdev,
	        hldev->regh0, &isrbar0->misc_int_reg);
#ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
	        /*
	         * Check for Link interrupts.  If both Link Up/Down
	         * bits are set, clear both and check adapter status.
	         */
	        if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) &&
	            (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) {
	            u64 temp64;

	            xge_debug_device(XGE_TRACE,
	            "both link up and link down detected "XGE_OS_LLXFMT,
	            (unsigned long long)val64);

	            temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT |
	                XGE_HAL_MISC_INT_REG_LINK_UP_INT);
	            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	                temp64, &isrbar0->misc_int_reg);
	        }
	        else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) {
	            xge_debug_device(XGE_TRACE,
	                "link up call request, misc_int "XGE_OS_LLXFMT,
	                (unsigned long long)val64);
	            __hal_device_handle_link_up_ind(hldev);
	        }
	        else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){
	            xge_debug_device(XGE_TRACE,
	                "link down request, misc_int "XGE_OS_LLXFMT,
	                (unsigned long long)val64);
	            __hal_device_handle_link_down_ind(hldev);
	        }
	    } else
#endif
	    {
	        /* Not handling link state here: just acknowledge misc. */
	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	            val64, &isrbar0->misc_int_reg);
	    }
	}

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_txpic - Handle TxPIC interrupt reason
 * @hldev: HAL device handle.
* @reason: interrupt reason
 *
 * Handle the TxPIC interrupt: forward non-traffic PIC reasons to
 * __hal_device_handle_pic(), acknowledge the TxPIC reason register,
 * and on the scheduler-timer tick run the adaptive interrupt-coalescing
 * (rxufca) and bimodal-TTI re-calculation hooks.
 */
xge_hal_status_e
__hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_status_e status = XGE_HAL_OK;
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	volatile u64 val64;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->pic_int_status);
	/* Non-traffic PIC sources are delegated; barrier after handling. */
	if ( val64 & (XGE_HAL_PIC_INT_FLSH |
	          XGE_HAL_PIC_INT_MDIO |
	          XGE_HAL_PIC_INT_IIC |
	          XGE_HAL_PIC_INT_MISC) ) {
	    status =  __hal_device_handle_pic(hldev, val64);
	    xge_os_wmb();
	}

	if (!(val64 & XGE_HAL_PIC_INT_TX))
	    return status;

	/* Acknowledge the TxPIC reasons (read, then write back). */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->txpic_int_reg);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &isrbar0->txpic_int_reg);
	xge_os_wmb();

	if (val64 & XGE_HAL_TXPIC_INT_SCHED_INTR) {
	    int i;

	    /* Notify the upper layer's scheduler-timer callback, if any. */
	    if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL)
	        g_xge_hal_driver->uld_callbacks.sched_timer(
	            hldev, hldev->upper_layer_info);
	    /*
	     * This feature implements adaptive receive interrupt
	     * coalecing. It is disabled by default. To enable it
	     * set hldev->config.rxufca_lo_lim to be not equal to
	     * hldev->config.rxufca_hi_lim.
	     *
	     * We are using HW timer for this feature, so
	     * use needs to configure hldev->config.rxufca_lbolt_period
	     * which is essentially a time slice of timer.
	     *
	     * For those who familiar with Linux, lbolt means jiffies
	     * of this timer. I.e. timer tick.
	     */
	    if (hldev->config.rxufca_lo_lim != hldev->config.rxufca_hi_lim &&
	        hldev->config.rxufca_lo_lim != 0) {
	        for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	            if (!hldev->config.ring.queue[i].configured)
	                continue;
	            if (hldev->config.ring.queue[i].rti.urange_a)
	                __hal_update_rxufca(hldev, i);
	        }
	    }

	    /*
	     * This feature implements adaptive TTI timer re-calculation
	     * based on host utilization, number of interrupt processed,
	     * number of RXD per tick and avarage length of packets per
	     * tick.
	     */
	    if (hldev->config.bimodal_interrupts) {
	        for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
	            if (!hldev->config.ring.queue[i].configured)
	                continue;
	            if (hldev->bimodal_tti[i].enabled)
	                __hal_update_bimodal(hldev, i);
	        }
	    }
	}

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_txdma - Handle TxDMA interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 *
 * For each TxDMA sub-block (PFC, TDA, PCC, TTI, LSO, TPA, SM):
 * latch its error register, write it back to acknowledge, bump the
 * matching error counter, and reset + re-enable the whole adapter if
 * any of the fatal error bits for that sub-block are set.
 */
xge_hal_status_e
__hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64, temp64, err;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->txdma_int_status);
	if (val64 & XGE_HAL_TXDMA_PFC_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->pfc_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, err,
	        &isrbar0->pfc_err_reg);
	    hldev->stats.sw_dev_info_stats.pfc_err_cnt++;
	    temp64 = XGE_HAL_PFC_ECC_DB_ERR|XGE_HAL_PFC_SM_ERR_ALARM
	        |XGE_HAL_PFC_MISC_0_ERR|XGE_HAL_PFC_MISC_1_ERR
	        |XGE_HAL_PFC_PCIX_ERR;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_TXDMA_TDA_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->tda_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, err,
	        &isrbar0->tda_err_reg);
	    hldev->stats.sw_dev_info_stats.tda_err_cnt++;
	    temp64 = XGE_HAL_TDA_Fn_ECC_DB_ERR|XGE_HAL_TDA_SM0_ERR_ALARM
	        |XGE_HAL_TDA_SM1_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_TXDMA_PCC_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->pcc_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, err,
	        &isrbar0->pcc_err_reg);
	    hldev->stats.sw_dev_info_stats.pcc_err_cnt++;
	    temp64 = XGE_HAL_PCC_FB_ECC_DB_ERR|XGE_HAL_PCC_TXB_ECC_DB_ERR
	        |XGE_HAL_PCC_SM_ERR_ALARM|XGE_HAL_PCC_WR_ERR_ALARM
	        |XGE_HAL_PCC_N_SERR|XGE_HAL_PCC_6_COF_OV_ERR
	        |XGE_HAL_PCC_7_COF_OV_ERR|XGE_HAL_PCC_6_LSO_OV_ERR
	        |XGE_HAL_PCC_7_LSO_OV_ERR;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_TXDMA_TTI_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->tti_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, err,
	        &isrbar0->tti_err_reg);
	    hldev->stats.sw_dev_info_stats.tti_err_cnt++;
	    temp64 = XGE_HAL_TTI_SM_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_TXDMA_LSO_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->lso_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, err,
	        &isrbar0->lso_err_reg);
	    hldev->stats.sw_dev_info_stats.lso_err_cnt++;
	    temp64 = XGE_HAL_LSO6_ABORT|XGE_HAL_LSO7_ABORT
	        |XGE_HAL_LSO6_SM_ERR_ALARM|XGE_HAL_LSO7_SM_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_TXDMA_TPA_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->tpa_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, err,
	        &isrbar0->tpa_err_reg);
	    hldev->stats.sw_dev_info_stats.tpa_err_cnt++;
	    temp64 = XGE_HAL_TPA_SM_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_TXDMA_SM_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->sm_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, err,
	        &isrbar0->sm_err_reg);
	    hldev->stats.sw_dev_info_stats.sm_err_cnt++;
	    temp64 = XGE_HAL_SM_SM_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}

	return XGE_HAL_OK;

reset :
	/* Fatal TxDMA error: full device reset, then bring it back up. */
	xge_hal_device_reset(hldev);
	xge_hal_device_enable(hldev);
	xge_hal_device_intr_enable(hldev);

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_txmac - Handle TxMAC interrupt reason
 * @hldev: HAL device handle.
* @reason: interrupt reason
 *
 * Handle a TxMAC error interrupt: acknowledge the TMAC error register,
 * count the event, and reset + re-enable the adapter on Tx buffer
 * overrun or Tx state-machine errors.
 */
xge_hal_status_e
__hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64, temp64;

	/* Only act when the TMAC bit of the MAC interrupt status is set. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->mac_int_status);
	if (!(val64 & XGE_HAL_MAC_INT_STATUS_TMAC_INT))
	    return XGE_HAL_OK;

	/* Latch the TMAC error reasons and write them back to acknowledge. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->mac_tmac_err_reg);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &isrbar0->mac_tmac_err_reg);

	hldev->stats.sw_dev_info_stats.mac_tmac_err_cnt++;

	/* Overrun / state-machine errors are treated as fatal: reset chip. */
	temp64 = XGE_HAL_TMAC_TX_BUF_OVRN|XGE_HAL_TMAC_TX_SM_ERR;
	if (val64 & temp64) {
	    xge_hal_device_reset(hldev);
	    xge_hal_device_enable(hldev);
	    xge_hal_device_intr_enable(hldev);
	}

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_txxgxs - Handle TxXGXS interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 *
 * Handle a TxXGXS error interrupt: acknowledge the TXGXS error
 * register, count the event, and reset + re-enable the adapter on
 * elastic-store underflow or Tx state-machine errors.
 */
xge_hal_status_e
__hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64, temp64;

	/* Only act when the Tx side of the XGXS interrupt status is set. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->xgxs_int_status);
	if (!(val64 & XGE_HAL_XGXS_INT_STATUS_TXGXS))
	    return XGE_HAL_OK;

	/* Latch the TXGXS error reasons and write them back to acknowledge. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->xgxs_txgxs_err_reg);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &isrbar0->xgxs_txgxs_err_reg);

	hldev->stats.sw_dev_info_stats.xgxs_txgxs_err_cnt++;

	temp64 = XGE_HAL_TXGXS_ESTORE_UFLOW|XGE_HAL_TXGXS_TX_SM_ERR;
	if (val64 & temp64) {
	    xge_hal_device_reset(hldev);
	    xge_hal_device_enable(hldev);
	    xge_hal_device_intr_enable(hldev);
	}

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_rxpic - Handle RxPIC interrupt reason
 * @hldev: HAL device handle.
* @reason: interrupt reason
 *
 * RxPIC handling is not implemented yet; the reason is ignored.
 */
xge_hal_status_e
__hal_device_handle_rxpic(xge_hal_device_t *hldev, u64 reason)
{
	/* FIXME: handle register */

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_rxdma - Handle RxDMA interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 *
 * For each RxDMA sub-block (RC, RPA, RDA, RTI): latch its error
 * register, write it back to acknowledge, bump the matching error
 * counter, and reset + re-enable the whole adapter if any of that
 * sub-block's fatal error bits are set.
 */
xge_hal_status_e
__hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64, err, temp64;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->rxdma_int_status);
	if (val64 & XGE_HAL_RXDMA_RC_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->rc_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, err,
	        &isrbar0->rc_err_reg);
	    hldev->stats.sw_dev_info_stats.rc_err_cnt++;
	    temp64 = XGE_HAL_RC_PRCn_ECC_DB_ERR|XGE_HAL_RC_FTC_ECC_DB_ERR
	        |XGE_HAL_RC_PRCn_SM_ERR_ALARM
	        |XGE_HAL_RC_FTC_SM_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_RXDMA_RPA_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->rpa_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, err,
	        &isrbar0->rpa_err_reg);
	    hldev->stats.sw_dev_info_stats.rpa_err_cnt++;
	    temp64 = XGE_HAL_RPA_SM_ERR_ALARM|XGE_HAL_RPA_CREDIT_ERR;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_RXDMA_RDA_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->rda_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, err,
	        &isrbar0->rda_err_reg);
	    hldev->stats.sw_dev_info_stats.rda_err_cnt++;
	    temp64 = XGE_HAL_RDA_RXDn_ECC_DB_ERR
	        |XGE_HAL_RDA_FRM_ECC_DB_N_AERR
	        |XGE_HAL_RDA_SM1_ERR_ALARM|XGE_HAL_RDA_SM0_ERR_ALARM
	        |XGE_HAL_RDA_RXD_ECC_DB_SERR;
	    if (val64 & temp64)
	        goto reset;
	}
	if (val64 & XGE_HAL_RXDMA_RTI_INT) {
	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &isrbar0->rti_err_reg);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, err,
	        &isrbar0->rti_err_reg);
	    hldev->stats.sw_dev_info_stats.rti_err_cnt++;
	    temp64 = XGE_HAL_RTI_SM_ERR_ALARM;
	    if (val64 & temp64)
	        goto reset;
	}

	return XGE_HAL_OK;

reset :
	/* Fatal RxDMA error: full device reset, then bring it back up. */
	xge_hal_device_reset(hldev);
	xge_hal_device_enable(hldev);
	xge_hal_device_intr_enable(hldev);

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_rxmac - Handle RxMAC interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 *
 * Handle an RxMAC error interrupt: acknowledge the RMAC error register,
 * count the event, and reset + re-enable the adapter on Rx buffer
 * overrun or Rx state-machine errors.
 */
xge_hal_status_e
__hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64, temp64;

	/* Only act when the RMAC bit of the MAC interrupt status is set. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->mac_int_status);
	if (!(val64 & XGE_HAL_MAC_INT_STATUS_RMAC_INT))
	    return XGE_HAL_OK;

	/* Latch the RMAC error reasons and write them back to acknowledge. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->mac_rmac_err_reg);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &isrbar0->mac_rmac_err_reg);

	hldev->stats.sw_dev_info_stats.mac_rmac_err_cnt++;

	temp64 = XGE_HAL_RMAC_RX_BUFF_OVRN|XGE_HAL_RMAC_RX_SM_ERR;
	if (val64 & temp64) {
	    xge_hal_device_reset(hldev);
	    xge_hal_device_enable(hldev);
	    xge_hal_device_intr_enable(hldev);
	}

	return XGE_HAL_OK;
}

/*
 * __hal_device_handle_rxxgxs - Handle RxXGXS interrupt reason
 * @hldev: HAL device handle.
 * @reason: interrupt reason
 *
 * Handle an RxXGXS error interrupt: acknowledge the RXGXS error
 * register, count the event, and reset + re-enable the adapter on
 * elastic-store overflow or Rx state-machine errors.
 */
xge_hal_status_e
__hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason)
{
	xge_hal_pci_bar0_t *isrbar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
	u64 val64, temp64;

	/* Only act when the Rx side of the XGXS interrupt status is set. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->xgxs_int_status);
	if (!(val64 & XGE_HAL_XGXS_INT_STATUS_RXGXS))
	    return XGE_HAL_OK;

	/* Latch the RXGXS error reasons and write them back to acknowledge. */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &isrbar0->xgxs_rxgxs_err_reg);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &isrbar0->xgxs_rxgxs_err_reg);

	hldev->stats.sw_dev_info_stats.xgxs_rxgxs_err_cnt++;

	temp64 = XGE_HAL_RXGXS_ESTORE_OFLOW|XGE_HAL_RXGXS_RX_SM_ERR;
	if (val64 & temp64) {
	    xge_hal_device_reset(hldev);
	    xge_hal_device_enable(hldev);
	    xge_hal_device_intr_enable(hldev);
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_enable - Enable device.
 * @hldev: HAL device handle.
* * Enable the specified device: bring up the link/interface. * Returns: XGE_HAL_OK - success. * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device * to a "quiescent" state. * * See also: xge_hal_status_e{}. * * Usage: See ex_open{}. */ xge_hal_status_e xge_hal_device_enable(xge_hal_device_t *hldev) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0; u64 val64; u64 adp_status; int i, j; if (!hldev->hw_is_initialized) { xge_hal_status_e status; status = __hal_device_hw_initialize(hldev); if (status != XGE_HAL_OK) { return status; } } /* * Not needed in most cases, i.e. * when device_disable() is followed by reset - * the latter copies back PCI config space, along with * the bus mastership - see __hal_device_reset(). * However, there are/may-in-future be other cases, and * does not hurt. */ __hal_device_bus_master_enable(hldev); if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { /* * Configure the link stability period. */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->misc_control); if (hldev->config.link_stability_period != XGE_HAL_DEFAULT_USE_HARDCODE) { val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD( hldev->config.link_stability_period); } else { /* * Use the link stability period 1 ms as default */ val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD( XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD); } xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->misc_control); /* * Clearing any possible Link up/down interrupts that * could have popped up just before Enabling the card. */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->misc_int_reg); if (val64) { xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->misc_int_reg); xge_debug_device(XGE_TRACE, "%s","link state cleared"); } } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { /* * Clearing any possible Link state change interrupts that * could have popped up just before Enabling the card. 
*/ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->mac_rmac_err_reg); if (val64) { xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mac_rmac_err_reg); xge_debug_device(XGE_TRACE, "%s", "link state cleared"); } } if (__hal_device_wait_quiescent(hldev, &val64)) { return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; } /* Enabling Laser. */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->adapter_control); val64 |= XGE_HAL_ADAPTER_EOI_TX_ON; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->adapter_control); /* let link establish */ xge_os_mdelay(1); /* set link down untill poll() routine will set it up (maybe) */ hldev->link_state = XGE_HAL_LINK_DOWN; /* If link is UP (adpter is connected) then enable the adapter */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->adapter_status); if( val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) { val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->adapter_control); val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON); } else { val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->adapter_control); val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON | XGE_HAL_ADAPTER_LED_ON ); } val64 = val64 | XGE_HAL_ADAPTER_CNTL_EN; /* adapter enable */ val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */ xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64, &bar0->adapter_control); /* We spin here waiting for the Link to come up. * This is the fix for the Link being unstable after the reset. 
*/ i = 0; j = 0; do { adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->adapter_status); /* Read the adapter control register for Adapter_enable bit */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->adapter_control); if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) && (val64 & XGE_HAL_ADAPTER_CNTL_EN)) { j++; if (j >= hldev->config.link_valid_cnt) { if (xge_hal_device_status(hldev, &adp_status) == XGE_HAL_OK) { if (__hal_verify_pcc_idle(hldev, adp_status) != XGE_HAL_OK) { return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; } xge_debug_device(XGE_TRACE, "adp_status: "XGE_OS_LLXFMT ", link is up on " "adapter enable!", (unsigned long long)adp_status); val64 = xge_os_pio_mem_read64( hldev->pdev, hldev->regh0, &bar0->adapter_control); val64 = val64| (XGE_HAL_ADAPTER_EOI_TX_ON | XGE_HAL_ADAPTER_LED_ON ); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->adapter_control); xge_os_mdelay(1); val64 = xge_os_pio_mem_read64( hldev->pdev, hldev->regh0, &bar0->adapter_control); break; /* out of for loop */ } else { return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT; } } } else { j = 0; /* Reset the count */ /* Turn on the Laser */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->adapter_control); val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON; xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64, &bar0->adapter_control); xge_os_mdelay(1); /* Now re-enable it as due to noise, hardware * turned it off */ val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->adapter_control); val64 |= XGE_HAL_ADAPTER_CNTL_EN; val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);/*ECC enable*/ xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->adapter_control); } xge_os_mdelay(1); /* Sleep for 1 msec */ i++; } while (i < hldev->config.link_retry_cnt); __hal_device_led_actifity_fix(hldev); #ifndef XGE_HAL_PROCESS_LINK_INT_IN_ISR /* Here we are performing soft reset on XGXS to force 
link down. * Since link is already up, we will get link state change * poll notificatoin after adapter is enabled */ __hal_serial_mem_write64(hldev, 0x80010515001E0000ULL, &bar0->dtx_control); (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control); __hal_serial_mem_write64(hldev, 0x80010515001E00E0ULL, &bar0->dtx_control); (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control); __hal_serial_mem_write64(hldev, 0x80070515001F00E4ULL, &bar0->dtx_control); (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control); xge_os_mdelay(100); /* Sleep for 500 msec */ #else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) #endif { /* * With some switches the link state change interrupt does not * occur even though the xgxs reset is done as per SPN-006. So, * poll the adapter status register and check if the link state * is ok. */ adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->adapter_status); if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT | XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT))) { xge_debug_device(XGE_TRACE, "%s", "enable device causing link state change ind.."); (void) __hal_device_handle_link_state_change(hldev); } } if (hldev->config.stats_refresh_time_sec != XGE_HAL_STATS_REFRESH_DISABLE) __hal_stats_enable(&hldev->stats); return XGE_HAL_OK; } /** * xge_hal_device_disable - Disable Xframe adapter. * @hldev: Device handle. * * Disable this device. To gracefully reset the adapter, the host should: * * - call xge_hal_device_disable(); * * - call xge_hal_device_intr_disable(); * * - close all opened channels and clean up outstanding resources; * * - do some work (error recovery, change mtu, reset, etc); * * - call xge_hal_device_enable(); * * - open channels, replenish RxDs, etc. * * - call xge_hal_device_intr_enable(). * * Note: Disabling the device does _not_ include disabling of interrupts. 
 * After disabling the device stops receiving new frames but those frames
 * that were already in the pipe will keep coming for some few milliseconds.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
 * a "quiescent" state.
 *
 * See also: xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_disable(xge_hal_device_t *hldev)
{
	xge_hal_status_e status = XGE_HAL_OK;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	xge_debug_device(XGE_TRACE, "%s", "turn off laser, cleanup hardware");

	/* clear adapter-enable; traffic drains after this point */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->adapter_control);
	val64 = val64 & (~XGE_HAL_ADAPTER_CNTL_EN);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->adapter_control);

	/*
	 * Both quiescence checks below only degrade the return status;
	 * the disable sequence continues regardless, so the device is
	 * always stopped as far as possible.
	 */
	if (__hal_device_wait_quiescent(hldev, &val64) != XGE_HAL_OK) {
		status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
	}

	if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
	    XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
	    XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
		status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
	}

	if (hldev->config.stats_refresh_time_sec !=
	    XGE_HAL_STATS_REFRESH_DISABLE)
		__hal_stats_disable(&hldev->stats);
#ifdef XGE_DEBUG_ASSERT
	else
		xge_assert(!hldev->stats.is_enabled);
#endif

#ifndef XGE_HAL_DONT_DISABLE_BUS_MASTER_ON_STOP
	__hal_device_bus_master_disable(hldev);
#endif

	return status;
}

/**
 * xge_hal_device_reset - Reset device.
 * @hldev: HAL device handle.
 *
 * Soft-reset the device, reset the device stats except reset_cnt.
 *
 * After reset is done, will try to re-initialize HW.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized.
 * XGE_HAL_ERR_RESET_FAILED - Reset failed.
 *
 * See also: xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_reset(xge_hal_device_t *hldev)
{
	xge_hal_status_e status;

	/* snapshot the soft reset counter; restored (incremented) below
	 * because __hal_stats_soft_reset() wipes all stats */
	u32 reset_cnt = hldev->stats.sw_dev_info_stats.soft_reset_cnt;

	xge_debug_device(XGE_TRACE, "%s (%d)", "resetting the device",
	    reset_cnt);

	if (!hldev->is_initialized)
		return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED;

	/* actual "soft" reset of the adapter */
	status = __hal_device_reset(hldev);

	/* reset all stats including saved */
	__hal_stats_soft_reset(hldev, 1);

	/* increment reset counter */
	hldev->stats.sw_dev_info_stats.soft_reset_cnt = reset_cnt + 1;

	/* re-initialize rxufca_intr_thres */
	hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;

	hldev->reset_needed_after_close = 0;

	return status;
}

/**
 * xge_hal_device_status - Check whether Xframe hardware is ready for
 * operation.
 * @hldev: HAL device handle.
 * @hw_status: Xframe status register. Returned by HAL.
 *
 * Check whether Xframe hardware is ready for operation.
 * The checking includes TDMA, RDMA, PFC, PIC, MC_DRAM, and the rest
 * hardware functional blocks.
 *
 * Returns: XGE_HAL_OK if the device is ready for operation. Otherwise
 * returns XGE_HAL_FAIL. Also, fills in adapter status (in @hw_status).
 *
 * See also: xge_hal_status_e{}.
 * Usage: See ex_open{}.
 */
xge_hal_status_e
xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 tmp64;

	tmp64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->adapter_status);
	/* always report the raw status to the caller, even on failure */
	*hw_status = tmp64;

	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TDMA_READY)) {
		xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_RDMA_READY)) {
		xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PFC_READY)) {
		xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
		xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT)) {
		xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY)) {
		xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY)) {
		xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!");
		return XGE_HAL_FAIL;
	}
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK)) {
		xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!");
		return XGE_HAL_FAIL;
	}
#ifndef XGE_HAL_HERC_EMULATION
	/*
	 * Andrew: in PCI 33 mode, the P_PLL is not used, and therefore,
	 * the P_PLL_LOCK bit in the adapter_status register will
	 * not be asserted.
	 */
	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK) &&
	    xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
	    hldev->pci_mode != XGE_HAL_PCI_33MHZ_MODE) {
		xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!");
		return XGE_HAL_FAIL;
	}
#endif

	return XGE_HAL_OK;
}

/*
 * __hal_device_msi_intr_endis - enable (flag != 0) or disable the MSI
 * enable bit (bit 0) in the PCI MSI control capability register.
 */
void
__hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag)
{
	u16 msi_control_reg;

	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_control),
	    &msi_control_reg);
	if (flag)
		msi_control_reg |= 0x1;
	else
		msi_control_reg &= ~0x1;

	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
	    xge_offsetof(xge_hal_pci_config_le_t, msi_control),
	    msi_control_reg);
}

/*
 * __hal_device_msix_intr_endis - unmask (flag != 0) or mask the MSI-X
 * vector of @channel in xmsi_mask_reg (bit 63 - msix_idx).
 */
void
__hal_device_msix_intr_endis(xge_hal_device_t *hldev,
    xge_hal_channel_t *channel, int flag)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->xmsi_mask_reg);
	if (flag)
		val64 &= ~(1LL << ( 63 - channel->msix_idx ));
	else
		val64 |= (1LL << ( 63 - channel->msix_idx ));

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->xmsi_mask_reg);
}

/**
 * xge_hal_device_intr_enable - Enable Xframe interrupts.
 * @hldev: HAL device handle.
 *
 * Enable Xframe interrupts. The function is to be executed the last in
 * Xframe initialization sequence.
 *
 * See also: xge_hal_device_intr_disable()
 */
void
xge_hal_device_intr_enable(xge_hal_device_t *hldev)
{
	xge_list_t *item;
	u64 val64;

	/* PRC initialization and configuration */
	xge_list_for_each(item, &hldev->ring_channels) {
		xge_hal_channel_h channel;
		channel = xge_container_of(item, xge_hal_channel_t, item);
		__hal_ring_prc_enable(channel);
	}

	/* enable traffic only interrupts */
	if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_IRQLINE) {
		/*
		 * make sure all interrupts going to be disabled if MSI
		 * is enabled.
		 */
		__hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
	} else {
		/*
		 * Enable the Tx traffic interrupts only if the TTI feature is
		 * enabled.
		 */
		val64 = 0;
		if (hldev->tti_enabled)
			val64 = XGE_HAL_TX_TRAFFIC_INTR;

		if (!hldev->config.bimodal_interrupts)
			val64 |= XGE_HAL_RX_TRAFFIC_INTR;

		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
			val64 |= XGE_HAL_RX_TRAFFIC_INTR;

		val64 |=XGE_HAL_TX_PIC_INTR |
		    XGE_HAL_MC_INTR |
		    XGE_HAL_TX_DMA_INTR |
		    (hldev->config.sched_timer_us !=
		    XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0);
		__hal_device_intr_mgmt(hldev, val64, 1);
	}

	/*
	 * Enable MSI-X interrupts
	 */
	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {

		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
			/*
			 * To enable MSI-X, MSI also needs to be enabled,
			 * due to a bug in the herc NIC.
			 */
			__hal_device_msi_intr_endis(hldev, 1);
		}

		/* Enable the MSI-X interrupt for each configured channel */
		xge_list_for_each(item, &hldev->fifo_channels) {
			xge_hal_channel_t *channel;
			channel = xge_container_of(item,
			    xge_hal_channel_t, item);
			/* 0 vector is reserved for alarms */
			if (!channel->msix_idx)
				continue;
			__hal_device_msix_intr_endis(hldev, channel, 1);
		}

		xge_list_for_each(item, &hldev->ring_channels) {
			xge_hal_channel_t *channel;
			channel = xge_container_of(item,
			    xge_hal_channel_t, item);
			/* 0 vector is reserved for alarms */
			if (!channel->msix_idx)
				continue;
			__hal_device_msix_intr_endis(hldev, channel, 1);
		}
	}

	xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled");
}

/**
 * xge_hal_device_intr_disable - Disable Xframe interrupts.
 * @hldev: HAL device handle.
 *
 * Disable Xframe interrupts.
 *
 * See also: xge_hal_device_intr_enable()
 */
void
xge_hal_device_intr_disable(xge_hal_device_t *hldev)
{
	xge_list_t *item;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {

		if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
			/*
			 * To disable MSI-X, MSI also needs to be disabled,
			 * due to a bug in the herc NIC.
			 */
			__hal_device_msi_intr_endis(hldev, 0);
		}

		/* Disable the MSI-X interrupt for each configured channel */
		xge_list_for_each(item, &hldev->fifo_channels) {
			xge_hal_channel_t *channel;
			channel = xge_container_of(item,
			    xge_hal_channel_t, item);
			/* 0 vector is reserved for alarms */
			if (!channel->msix_idx)
				continue;
			__hal_device_msix_intr_endis(hldev, channel, 0);
		}

		/* mask all Tx traffic interrupts */
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
		    0xFFFFFFFFFFFFFFFFULL,
		    &bar0->tx_traffic_mask);

		xge_list_for_each(item, &hldev->ring_channels) {
			xge_hal_channel_t *channel;
			channel = xge_container_of(item,
			    xge_hal_channel_t, item);
			/* 0 vector is reserved for alarms */
			if (!channel->msix_idx)
				continue;
			__hal_device_msix_intr_endis(hldev, channel, 0);
		}

		/* mask all Rx traffic interrupts */
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
		    0xFFFFFFFFFFFFFFFFULL,
		    &bar0->rx_traffic_mask);
	}

	/*
	 * Disable traffic only interrupts.
	 * Tx traffic interrupts are used only if the TTI feature is
	 * enabled.
	 */
	val64 = 0;
	if (hldev->tti_enabled)
		val64 = XGE_HAL_TX_TRAFFIC_INTR;

	val64 |= XGE_HAL_RX_TRAFFIC_INTR |
	    XGE_HAL_TX_PIC_INTR |
	    XGE_HAL_MC_INTR |
	    (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ?
	    XGE_HAL_SCHED_INTR : 0);

	__hal_device_intr_mgmt(hldev, val64, 0);

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    0xFFFFFFFFFFFFFFFFULL,
	    &bar0->general_int_mask);

	/* disable all configured PRCs */
	xge_list_for_each(item, &hldev->ring_channels) {
		xge_hal_channel_h channel;
		channel = xge_container_of(item, xge_hal_channel_t, item);
		__hal_ring_prc_disable(channel);
	}

	xge_debug_device(XGE_TRACE, "%s", "interrupts are disabled");
}

/**
 * xge_hal_device_mcast_enable - Enable Xframe multicast addresses.
 * @hldev: HAL device handle.
 *
 * Enable Xframe multicast addresses.
 * Returns: XGE_HAL_OK on success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to enable mcast
 * feature within the time(timeout).
 *
 * See also: xge_hal_device_mcast_disable(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_mcast_enable(xge_hal_device_t *hldev)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0;
	int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;

	if (hldev == NULL)
		return XGE_HAL_ERR_INVALID_DEVICE;

	/* already enabled - nothing to do */
	if (hldev->mcast_refcnt)
		return XGE_HAL_OK;

	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
		mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;

	hldev->mcast_refcnt = 1;

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* Enable all Multicast addresses */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0x010203040506ULL),
	    &bar0->rmac_addr_data0_mem);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0xfeffffffffffULL),
	    &bar0->rmac_addr_data1_mem);

	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->rmac_addr_cmd_mem);

	if (__hal_device_register_poll(hldev,
	    &bar0->rmac_addr_cmd_mem, 0,
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* upper layer may require to repeat */
		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_mcast_disable - Disable Xframe multicast addresses.
 * @hldev: HAL device handle.
 *
 * Disable Xframe multicast addresses.
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to disable mcast
 * feature within the time(timeout).
 *
 * See also: xge_hal_device_mcast_enable(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_mcast_disable(xge_hal_device_t *hldev)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0;
	int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;

	if (hldev == NULL)
		return XGE_HAL_ERR_INVALID_DEVICE;

	/* already disabled - nothing to do */
	if (hldev->mcast_refcnt == 0)
		return XGE_HAL_OK;

	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
		mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;

	hldev->mcast_refcnt = 0;

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* Disable all Multicast addresses */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0xffffffffffffULL),
	    &bar0->rmac_addr_data0_mem);

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0),
	    &bar0->rmac_addr_data1_mem);

	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->rmac_addr_cmd_mem);

	if (__hal_device_register_poll(hldev,
	    &bar0->rmac_addr_cmd_mem, 0,
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* upper layer may require to repeat */
		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_promisc_enable - Enable promiscuous mode.
 * @hldev: HAL device handle.
 *
 * Enable promiscuous mode of Xframe operation.
 *
 * See also: xge_hal_device_promisc_disable().
 */
void
xge_hal_device_promisc_enable(xge_hal_device_t *hldev)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0;

	xge_assert(hldev);

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	if (!hldev->is_promisc) {
		/* Put the NIC into promiscuous mode */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->mac_cfg);
		val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;

		/* unlock mac_cfg via the RMAC config key before writing */
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
		    XGE_HAL_RMAC_CFG_KEY(0x4C0D),
		    &bar0->rmac_cfg_key);

		/* only the upper 32 bits of mac_cfg are written here */
		__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
		    (u32)(val64 >> 32),
		    &bar0->mac_cfg);

		hldev->is_promisc = 1;
		xge_debug_device(XGE_TRACE,
		    "mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled",
		    (unsigned long long)val64);
	}
}

/**
 * xge_hal_device_promisc_disable - Disable promiscuous mode.
 * @hldev: HAL device handle.
 *
 * Disable promiscuous mode of Xframe operation.
 *
 * See also: xge_hal_device_promisc_enable().
 */
void
xge_hal_device_promisc_disable(xge_hal_device_t *hldev)
{
	u64 val64;
	xge_hal_pci_bar0_t *bar0;

	xge_assert(hldev);

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	if (hldev->is_promisc) {
		/* Remove the NIC from promiscuous mode */
		val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
		    &bar0->mac_cfg);
		val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;

		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
		    XGE_HAL_RMAC_CFG_KEY(0x4C0D),
		    &bar0->rmac_cfg_key);

		__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
		    (u32)(val64 >> 32),
		    &bar0->mac_cfg);

		hldev->is_promisc = 0;
		xge_debug_device(XGE_TRACE,
		    "mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled",
		    (unsigned long long)val64);
	}
}

/**
 * xge_hal_device_macaddr_get - Get MAC addresses.
 * @hldev: HAL device handle.
 * @index: MAC address index, in the range from 0 to
 * XGE_HAL_MAX_MAC_ADDRESSES.
 * @macaddr: MAC address. Returned by HAL.
 *
 * Retrieve one of the stored MAC addresses by reading non-volatile
 * memory on the chip.
 *
 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
 * address within the time(timeout).
 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
 *
 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index,
    macaddr_t *macaddr)
{
	xge_hal_pci_bar0_t *bar0;
	u64 val64;
	int i;

	if (hldev == NULL) {
		return XGE_HAL_ERR_INVALID_DEVICE;
	}

	bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) {
		return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
	}

#ifdef XGE_HAL_HERC_EMULATION
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000010000000000,
	    &bar0->rmac_addr_data0_mem);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000000000000000,
	    &bar0->rmac_addr_data1_mem);
	val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index));
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->rmac_addr_cmd_mem);

	/* poll until done */
	__hal_device_register_poll(hldev,
	    &bar0->rmac_addr_cmd_mem, 0,
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS);
#endif

	/* issue a read-strobe for the requested slot */
	val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->rmac_addr_cmd_mem);

	if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* upper layer may require to repeat */
		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	/* unpack the 6 address bytes from the top of the 64-bit register */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->rmac_addr_data0_mem);
	for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
		(*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8)));
	}
#ifdef XGE_HAL_HERC_EMULATION
	for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
		(*macaddr)[i] = (u8)0;
	}
	(*macaddr)[1] = (u8)1;
#endif

	return XGE_HAL_OK;
}

/**
 *
xge_hal_device_macaddr_set - Set MAC address.
 * @hldev: HAL device handle.
 * @index: MAC address index, in the range from 0 to
 * XGE_HAL_MAX_MAC_ADDRESSES.
 * @macaddr: New MAC address to configure.
 *
 * Configure one of the available MAC address "slots".
 *
 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
 * address within the time(timeout).
 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
 *
 * See also: xge_hal_device_macaddr_get(), xge_hal_status_e{}.
 */
xge_hal_status_e
xge_hal_device_macaddr_set(xge_hal_device_t *hldev, int index,
    macaddr_t macaddr)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64, temp64;
	int i;

	if ( index >= XGE_HAL_MAX_MAC_ADDRESSES )
		return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;

	/* pack the 6 bytes big-endian into the low 48 bits of temp64 */
	temp64 = 0;
	for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
		temp64 |= macaddr[i];
		temp64 <<= 8;
	}
	temp64 >>= 8;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(temp64),
	    &bar0->rmac_addr_data0_mem);

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0ULL),
	    &bar0->rmac_addr_data1_mem);

	/* write-strobe for the requested slot */
	val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
	    XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    &bar0->rmac_addr_cmd_mem);

	if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
	    XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
		/* upper layer may require to repeat */
		return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_macaddr_clear - Clear MAC address.
 * @hldev: HAL device handle.
 * @index: MAC address index, in the range from 0 to
 * XGE_HAL_MAX_MAC_ADDRESSES.
 *
 * Clear one of the available MAC address "slots".
 *
 * Returns: XGE_HAL_OK - success.
* XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac * address within the time(timeout). * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index. * * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}. */ xge_hal_status_e xge_hal_device_macaddr_clear(xge_hal_device_t *hldev, int index) { xge_hal_status_e status; u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; status = xge_hal_device_macaddr_set(hldev, index, macaddr); if (status != XGE_HAL_OK) { xge_debug_device(XGE_ERR, "%s", "Not able to set the mac addr"); return status; } return XGE_HAL_OK; } /** * xge_hal_device_macaddr_find - Finds index in the rmac table. * @hldev: HAL device handle. * @wanted: Wanted MAC address. * * See also: xge_hal_device_macaddr_set(). */ int xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted) { int i; if (hldev == NULL) { return XGE_HAL_ERR_INVALID_DEVICE; } for (i=1; iconfig.mtu != new_mtu) { if (hldev->reset_needed_after_close || !hldev->mtu_first_time_set) { status = xge_hal_device_reset(hldev); if (status != XGE_HAL_OK) { xge_debug_device(XGE_TRACE, "%s", "fatal: can not reset the device"); return status; } } /* store the new MTU in device, reset will use it */ hldev->config.mtu = new_mtu; xge_debug_device(XGE_TRACE, "new MTU %d applied", new_mtu); } if (!hldev->mtu_first_time_set) hldev->mtu_first_time_set = 1; return XGE_HAL_OK; } /** * xge_hal_device_initialize - Initialize Xframe device. * @hldev: HAL device handle. * @attr: pointer to xge_hal_device_attr_t structure * @device_config: Configuration to be _applied_ to the device, * For the Xframe configuration "knobs" please * refer to xge_hal_device_config_t and Xframe * User Guide. * * Initialize Xframe device. Note that all the arguments of this public API * are 'IN', including @hldev. Upper-layer driver (ULD) cooperates with * OS to find new Xframe device, locate its PCI and memory spaces. 
* * When done, the ULD allocates sizeof(xge_hal_device_t) bytes for HAL * to enable the latter to perform Xframe hardware initialization. * * Returns: XGE_HAL_OK - success. * XGE_HAL_ERR_DRIVER_NOT_INITIALIZED - Driver is not initialized. * XGE_HAL_ERR_BAD_DEVICE_CONFIG - Device configuration params are not * valid. * XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed. * XGE_HAL_ERR_BAD_SUBSYSTEM_ID - Device subsystem id is invalid. * XGE_HAL_ERR_INVALID_MAC_ADDRESS - Device mac address in not valid. * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac * address within the time(timeout) or TTI/RTI initialization failed. * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control. * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT -Device is not queiscent. * * See also: xge_hal_device_terminate(), xge_hal_status_e{} * xge_hal_device_attr_t{}. */ xge_hal_status_e xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr, xge_hal_device_config_t *device_config) { int i; xge_hal_status_e status; xge_hal_channel_t *channel; u16 subsys_device; u16 subsys_vendor; int total_dram_size, ring_auto_dram_cfg, left_dram_size; int total_dram_size_max = 0; xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is initializing", (unsigned long long)(ulong_t)hldev); /* sanity check */ if (g_xge_hal_driver == NULL || !g_xge_hal_driver->is_initialized) { return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED; } xge_os_memzero(hldev, sizeof(xge_hal_device_t)); /* * validate a common part of Xframe-I/II configuration * (and run check_card() later, once PCI inited - see below) */ status = __hal_device_config_check_common(device_config); if (status != XGE_HAL_OK) return status; /* apply config */ xge_os_memcpy(&hldev->config, device_config, sizeof(xge_hal_device_config_t)); /* save original attr */ xge_os_memcpy(&hldev->orig_attr, attr, sizeof(xge_hal_device_attr_t)); /* initialize rxufca_intr_thres */ hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres; 
hldev->regh0 = attr->regh0; hldev->regh1 = attr->regh1; hldev->regh2 = attr->regh2; hldev->isrbar0 = hldev->bar0 = attr->bar0; hldev->bar1 = attr->bar1; hldev->bar2 = attr->bar2; hldev->pdev = attr->pdev; hldev->irqh = attr->irqh; hldev->cfgh = attr->cfgh; /* set initial bimodal timer for bimodal adaptive schema */ hldev->bimodal_timer_val_us = hldev->config.bimodal_timer_lo_us; hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh, g_xge_hal_driver->config.queue_size_initial, g_xge_hal_driver->config.queue_size_max, __hal_device_event_queued, hldev); if (hldev->queueh == NULL) return XGE_HAL_ERR_OUT_OF_MEMORY; hldev->magic = XGE_HAL_MAGIC; xge_assert(hldev->regh0); xge_assert(hldev->regh1); xge_assert(hldev->bar0); xge_assert(hldev->bar1); xge_assert(hldev->pdev); xge_assert(hldev->irqh); xge_assert(hldev->cfgh); /* initialize some PCI/PCI-X fields of this PCI device. */ __hal_device_pci_init(hldev); /* * initlialize lists to properly handling a potential * terminate request */ xge_list_init(&hldev->free_channels); xge_list_init(&hldev->fifo_channels); xge_list_init(&hldev->ring_channels); if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) { /* fixups for xena */ hldev->config.rth_en = 0; hldev->config.rth_spdm_en = 0; hldev->config.rts_mac_en = 0; total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA; status = __hal_device_config_check_xena(device_config); if (status != XGE_HAL_OK) { xge_hal_device_terminate(hldev); return status; } if (hldev->config.bimodal_interrupts == 1) { xge_hal_device_terminate(hldev); return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED; } else if (hldev->config.bimodal_interrupts == XGE_HAL_DEFAULT_USE_HARDCODE) hldev->config.bimodal_interrupts = 0; } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) { /* fixups for herc */ total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC; status = __hal_device_config_check_herc(device_config); if (status != XGE_HAL_OK) { xge_hal_device_terminate(hldev); return status; } if 
(hldev->config.bimodal_interrupts == XGE_HAL_DEFAULT_USE_HARDCODE) hldev->config.bimodal_interrupts = 1; } else { xge_debug_device(XGE_ERR, "detected unknown device_id 0x%x", hldev->device_id); xge_hal_device_terminate(hldev); return XGE_HAL_ERR_BAD_DEVICE_ID; } /* allocate and initialize FIFO types of channels according to * configuration */ for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) { if (!device_config->fifo.queue[i].configured) continue; channel = __hal_channel_allocate(hldev, i, XGE_HAL_CHANNEL_TYPE_FIFO); if (channel == NULL) { xge_debug_device(XGE_ERR, "fifo: __hal_channel_allocate failed"); xge_hal_device_terminate(hldev); return XGE_HAL_ERR_OUT_OF_MEMORY; } /* add new channel to the device */ xge_list_insert(&channel->item, &hldev->free_channels); } /* * automatic DRAM adjustment */ total_dram_size = 0; ring_auto_dram_cfg = 0; for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { if (!device_config->ring.queue[i].configured) continue; if (device_config->ring.queue[i].dram_size_mb == XGE_HAL_DEFAULT_USE_HARDCODE) { ring_auto_dram_cfg++; continue; } total_dram_size += device_config->ring.queue[i].dram_size_mb; } left_dram_size = total_dram_size_max - total_dram_size; if (left_dram_size < 0 || (ring_auto_dram_cfg && left_dram_size / ring_auto_dram_cfg == 0)) { xge_debug_device(XGE_ERR, "ring config: exceeded DRAM size %d MB", total_dram_size_max); xge_hal_device_terminate(hldev); return XGE_HAL_BADCFG_RING_QUEUE_SIZE; } /* * allocate and initialize RING types of channels according to * configuration */ for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) { if (!device_config->ring.queue[i].configured) continue; if (device_config->ring.queue[i].dram_size_mb == XGE_HAL_DEFAULT_USE_HARDCODE) { hldev->config.ring.queue[i].dram_size_mb = device_config->ring.queue[i].dram_size_mb = left_dram_size / ring_auto_dram_cfg; } channel = __hal_channel_allocate(hldev, i, XGE_HAL_CHANNEL_TYPE_RING); if (channel == NULL) { xge_debug_device(XGE_ERR, "ring: __hal_channel_allocate failed"); 
xge_hal_device_terminate(hldev); return XGE_HAL_ERR_OUT_OF_MEMORY; } /* add new channel to the device */ xge_list_insert(&channel->item, &hldev->free_channels); } /* get subsystem IDs */ xge_os_pci_read16(hldev->pdev, hldev->cfgh, xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subsys_device); xge_os_pci_read16(hldev->pdev, hldev->cfgh, xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id), &subsys_vendor); xge_debug_device(XGE_TRACE, "subsystem_id %04x:%04x", subsys_vendor, subsys_device); /* reset device initially */ (void) __hal_device_reset(hldev); /* set host endian before, to assure proper action */ status = __hal_device_set_swapper(hldev); if (status != XGE_HAL_OK) { xge_debug_device(XGE_ERR, "__hal_device_set_swapper failed"); xge_hal_device_terminate(hldev); (void) __hal_device_reset(hldev); return status; } #ifndef XGE_HAL_HERC_EMULATION if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) __hal_device_xena_fix_mac(hldev); #endif /* MAC address initialization. * For now only one mac address will be read and used. 
*/ status = xge_hal_device_macaddr_get(hldev, 0, &hldev->macaddr[0]); if (status != XGE_HAL_OK) { xge_debug_device(XGE_ERR, "xge_hal_device_macaddr_get failed"); xge_hal_device_terminate(hldev); return status; } if (hldev->macaddr[0][0] == 0xFF && hldev->macaddr[0][1] == 0xFF && hldev->macaddr[0][2] == 0xFF && hldev->macaddr[0][3] == 0xFF && hldev->macaddr[0][4] == 0xFF && hldev->macaddr[0][5] == 0xFF) { xge_debug_device(XGE_ERR, "xge_hal_device_macaddr_get returns all FFs"); xge_hal_device_terminate(hldev); return XGE_HAL_ERR_INVALID_MAC_ADDRESS; } xge_debug_device(XGE_TRACE, "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x", hldev->macaddr[0][0], hldev->macaddr[0][1], hldev->macaddr[0][2], hldev->macaddr[0][3], hldev->macaddr[0][4], hldev->macaddr[0][5]); status = __hal_stats_initialize(&hldev->stats, hldev); if (status != XGE_HAL_OK) { xge_debug_device(XGE_ERR, "__hal_stats_initialize failed"); xge_hal_device_terminate(hldev); return status; } status = __hal_device_hw_initialize(hldev); if (status != XGE_HAL_OK) { xge_debug_device(XGE_ERR, "__hal_device_hw_initialize failed"); xge_hal_device_terminate(hldev); return status; } hldev->dump_buf=(char*)xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE); if (hldev->dump_buf == NULL) { xge_debug_device(XGE_ERR, "__hal_device_hw_initialize failed"); xge_hal_device_terminate(hldev); return XGE_HAL_ERR_OUT_OF_MEMORY; } /* Xena-only: need to serialize fifo posts across all device fifos */ #if defined(XGE_HAL_TX_MULTI_POST) xge_os_spin_lock_init(&hldev->xena_post_lock, hldev->pdev); #elif defined(XGE_HAL_TX_MULTI_POST_IRQ) xge_os_spin_lock_init_irq(&hldev->xena_post_lock, hldev->irqh); #endif /* Getting VPD data */ __hal_device_get_vpd_data(hldev); hldev->is_initialized = 1; return XGE_HAL_OK; } /** * xge_hal_device_terminating - Mark the device as 'terminating'. * @devh: HAL device handle. * * Mark the device as 'terminating', going to terminate. 
 Can be used
 * to serialize termination with other running processes/contexts.
 *
 * See also: xge_hal_device_terminate().
 */
void
xge_hal_device_terminating(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
	xge_list_t *item;
	xge_hal_channel_t *channel;
#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	unsigned long flags=0;
#endif

	/*
	 * Go through each opened tx channel and acquire its reserve
	 * lock; holding the lock while setting the per-channel flag
	 * serializes this with any in-flight descriptor reservation
	 * on that channel.
	 */
	xge_list_for_each(item, &hldev->fifo_channels) {
		channel = xge_container_of(item, xge_hal_channel_t, item);
#if defined(XGE_HAL_TX_MULTI_RESERVE)
		xge_os_spin_lock(&channel->reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
		xge_os_spin_lock_irq(&channel->reserve_lock, flags);
#endif

		channel->terminating = 1;

#if defined(XGE_HAL_TX_MULTI_RESERVE)
		xge_os_spin_unlock(&channel->reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
		xge_os_spin_unlock_irq(&channel->reserve_lock, flags);
#endif
	}

	/* Finally mark the device itself as going away. */
	hldev->terminating = 1;
}

/**
 * xge_hal_device_terminate - Terminate Xframe device.
 * @hldev: HAL device handle.
 *
 * Terminate HAL device.
 *
 * See also: xge_hal_device_initialize().
 */
void
xge_hal_device_terminate(xge_hal_device_t *hldev)
{
	xge_assert(g_xge_hal_driver != NULL);
	xge_assert(hldev != NULL);
	xge_assert(hldev->magic == XGE_HAL_MAGIC);

	/* Drain any pending events, then mark the device dead so no
	 * further HAL entry points operate on it. */
	xge_queue_flush(hldev->queueh);
	hldev->terminating = 1;
	hldev->is_initialized = 0;
	hldev->in_poll = 0;
	hldev->magic = XGE_HAL_DEAD;

#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_lock_destroy(&hldev->xena_post_lock, hldev->pdev);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_lock_destroy_irq(&hldev->xena_post_lock, hldev->pdev);
#endif

	xge_debug_device(XGE_TRACE, "device "XGE_OS_LLXFMT" is terminating",
	    (unsigned long long)(ulong_t)hldev);

	/* All channels must have been closed (moved back to the free
	 * list) by the caller before termination. */
	xge_assert(xge_list_is_empty(&hldev->fifo_channels));
	xge_assert(xge_list_is_empty(&hldev->ring_channels));

	if (hldev->stats.is_initialized) {
		__hal_stats_terminate(&hldev->stats);
	}

	/* close if open and free all channels */
	while (!xge_list_is_empty(&hldev->free_channels)) {
		xge_hal_channel_t *channel = (xge_hal_channel_t*)
		    hldev->free_channels.next;

		xge_assert(!channel->is_open);
		xge_list_remove(&channel->item);
		__hal_channel_free(channel);
	}

	if (hldev->queueh) {
		xge_queue_destroy(hldev->queueh);
	}

	/* spdm_table[0] is the single backing allocation for all
	 * entries; the table itself is an array of pointers into it. */
	if (hldev->spdm_table) {
		xge_os_free(hldev->pdev,
		    hldev->spdm_table[0],
		    (sizeof(xge_hal_spdm_entry_t) *
		    hldev->spdm_max_entries));
		xge_os_free(hldev->pdev,
		    hldev->spdm_table,
		    (sizeof(xge_hal_spdm_entry_t *) *
		    hldev->spdm_max_entries));
		xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev);
		hldev->spdm_table = NULL;
	}

	if (hldev->dump_buf) {
		xge_os_free(hldev->pdev, hldev->dump_buf,
		    XGE_HAL_DUMP_BUF_SIZE);
		hldev->dump_buf = NULL;
	}

	/* Restore the PCI config space saved at probe time (size
	 * depends on the adapter generation). */
	if (hldev->device_id != 0) {
		int j, pcisize;

		pcisize = (xge_hal_device_check_id(hldev) ==
		    XGE_HAL_CARD_HERC)?
		    XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
		for (j = 0; j < pcisize; j++) {
			xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
			    *((u32*)&hldev->pci_config_space_bios + j));
		}
	}
}

/**
 * __hal_device_get_vpd_data - Getting vpd_data.
 *
 * @hldev: HAL device handle.
* * Getting product name and serial number from vpd capabilites structure * */ void __hal_device_get_vpd_data(xge_hal_device_t *hldev) { u8 * vpd_data; u8 data; int index = 0, count, fail = 0; u8 vpd_addr = XGE_HAL_CARD_XENA_VPD_ADDR; if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) vpd_addr = XGE_HAL_CARD_HERC_VPD_ADDR; xge_os_strcpy((char *) hldev->vpd_data.product_name, "10 Gigabit Ethernet Adapter"); xge_os_strcpy((char *) hldev->vpd_data.serial_num, "not available"); vpd_data = ( u8*) xge_os_malloc(hldev->pdev, XGE_HAL_VPD_BUFFER_SIZE + 16); - if ( vpd_data == 0 ) + if ( vpd_data == NULL ) return; for (index = 0; index < XGE_HAL_VPD_BUFFER_SIZE; index +=4 ) { xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 2), (u8)index); xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 2), &data); xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 3), 0); for (count = 0; count < 5; count++ ) { xge_os_mdelay(2); xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 3), &data); if (data == XGE_HAL_VPD_READ_COMPLETE) break; } if (count >= 5) { xge_os_printf("ERR, Reading VPD data failed"); fail = 1; break; } xge_os_pci_read32(hldev->pdev, hldev->cfgh,(vpd_addr + 4), (u32 *)&vpd_data[index]); } if(!fail) { /* read serial number of adapter */ for (count = 0; count < XGE_HAL_VPD_BUFFER_SIZE; count++) { if ((vpd_data[count] == 'S') && (vpd_data[count + 1] == 'N') && (vpd_data[count + 2] < XGE_HAL_VPD_LENGTH)) { memset(hldev->vpd_data.serial_num, 0, XGE_HAL_VPD_LENGTH); memcpy(hldev->vpd_data.serial_num, &vpd_data[count + 3], vpd_data[count + 2]); break; } } if (vpd_data[1] < XGE_HAL_VPD_LENGTH) { memset(hldev->vpd_data.product_name, 0, vpd_data[1]); memcpy(hldev->vpd_data.product_name, &vpd_data[3], vpd_data[1]); } } xge_os_free(hldev->pdev, vpd_data, XGE_HAL_VPD_BUFFER_SIZE + 16); } /** * xge_hal_device_handle_tcode - Handle transfer code. * @channelh: Channel handle. * @dtrh: Descriptor handle. 
 * @t_code: One of the enumerated (and documented in the Xframe user guide)
 *          "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor, see xge_hal_fifo_dtr_next_completed() and
 * xge_hal_ring_dtr_next_completed().
 * Transfer codes are enumerated in xgehal-fifo.h and xgehal-ring.h.
 *
 * Returns: one of the xge_hal_status_e{} enumerated types.
 * XGE_HAL_OK                   - for success.
 * XGE_HAL_ERR_CRITICAL         - when encounters critical error.
 */
xge_hal_status_e
xge_hal_device_handle_tcode (xge_hal_channel_h channelh,
	             xge_hal_dtr_h dtrh, u8 t_code)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;

	/* t_code is a 4-bit field; anything larger is bogus input. */
	if (t_code > 15) {
		xge_os_printf("invalid t_code %d", t_code);
		return XGE_HAL_OK;
	}

	if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
		/* per-tcode TX error counter */
	    hldev->stats.sw_dev_err_stats.txd_t_code_err_cnt[t_code]++;

#if defined(XGE_HAL_DEBUG_BAD_TCODE)
	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
	xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
	XGE_OS_LLXFMT":"XGE_OS_LLXFMT, txdp->control_1, txdp->control_2,
	txdp->buffer_pointer, txdp->host_control);
#endif

	    /* handle link "down" immediately without going through
	     * xge_hal_device_poll() routine.
	     */
	    if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) {
	        /* link is down */
	        if (hldev->link_state != XGE_HAL_LINK_DOWN) {
	            xge_hal_pci_bar0_t *bar0 =
	            (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	            u64 val64;

	            hldev->link_state = XGE_HAL_LINK_DOWN;

	            val64 = xge_os_pio_mem_read64(hldev->pdev,
	                hldev->regh0, &bar0->adapter_control);

	            /* turn off LED */
	            val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
	            xge_os_pio_mem_write64(hldev->pdev,
	                    hldev->regh0, val64,
	                    &bar0->adapter_control);

	            /* notify the upper layer driver directly */
	            g_xge_hal_driver->uld_callbacks.link_down(
	                    hldev->upper_layer_info);
	        }
	    } else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER ||
	           t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) {
	            /* target aborts are fatal for the device */
	            __hal_device_handle_targetabort(hldev);
	        return XGE_HAL_ERR_CRITICAL;
	    }
	    /* any other TX tcode: drop the packet, keep running */
	    return XGE_HAL_ERR_PKT_DROP;
	} else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
		/* per-tcode RX error counter */
	    hldev->stats.sw_dev_err_stats.rxd_t_code_err_cnt[t_code]++;

#if defined(XGE_HAL_DEBUG_BAD_TCODE)
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT
	    ":"XGE_OS_LLXFMT, rxdp->control_1,
	    rxdp->control_2, rxdp->buffer0_ptr, rxdp->host_control);
#endif

	    if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) {
	        /* unrecoverable memory error */
	        hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
	        __hal_device_handle_eccerr(hldev, "rxd_t_code",
	                       (u64)t_code);
	        return XGE_HAL_ERR_CRITICAL;
	    } else if (t_code == XGE_HAL_RXD_T_CODE_PARITY ||
	           t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) {
	        hldev->stats.sw_dev_err_stats.parity_err_cnt++;
	        __hal_device_handle_parityerr(hldev, "rxd_t_code",
	                          (u64)t_code);
	        return XGE_HAL_ERR_CRITICAL;
	    /* do not drop if detected unknown IPv6 extension */
	    } else if (t_code != XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO) {
	        return XGE_HAL_ERR_PKT_DROP;
	    }
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_link_state - Get link state.
 * @devh: HAL device handle.
 * @ls: Link state, see xge_hal_device_link_state_e{}.
 *
 * Get link state.
 * Returns: XGE_HAL_OK.
 * See also: xge_hal_device_link_state_e{}.
 */
xge_hal_status_e
xge_hal_device_link_state(xge_hal_device_h devh,
	    xge_hal_device_link_state_e *ls)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

	xge_assert(ls != NULL);
	/* Return the cached link state; no hardware access here. */
	*ls = hldev->link_state;
	return XGE_HAL_OK;
}

/**
 * xge_hal_device_sched_timer - Configure scheduled device interrupt.
 * @devh: HAL device handle.
 * @interval_us: Time interval, in microseconds.
 *            Unlike transmit and receive interrupts,
 *            the scheduled interrupt is generated independently of
 *            traffic, but purely based on time.
 * @one_shot: 1 - generate scheduled interrupt only once.
 *            0 - generate scheduled interrupt periodically at the specified
 *            @interval_us interval.
 *
 * (Re-)configure scheduled interrupt. Can be called at runtime to change
 * the setting, generate one-shot interrupts based on the resource and/or
 * traffic conditions, other purposes.
 * See also: xge_hal_device_config_t{}.
 */
void
xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us,
	    int one_shot)
{
	u64 val64;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* Convert microseconds to PCI clock ticks; Herc needs an
	 * additional frequency fixup. */
	unsigned int interval = hldev->config.pci_freq_mherz * interval_us;

	interval = __hal_fix_time_ival_herc(hldev, interval);

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	                  &bar0->scheduled_int_ctrl);
	if (interval) {
		/* NOTE(review): the mask is applied without ~ here;
		 * assumes XGE_HAL_SCHED_INT_PERIOD_MASK is defined as
		 * the "keep" mask that clears the period field -
		 * confirm against the register macro definition. */
	    val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK;
	    val64 |= XGE_HAL_SCHED_INT_PERIOD(interval);
	    if (one_shot) {
	        val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT;
	    }
	    val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
	} else {
	    val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
	}

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	             &bar0->scheduled_int_ctrl);

	xge_debug_device(XGE_TRACE, "sched_timer 0x"XGE_OS_LLXFMT": %s",
	          (unsigned long long)val64,
	          interval ? "enabled" : "disabled");
}

/**
 * xge_hal_device_check_id - Verify device ID.
 * @devh: HAL device handle.
 *
 * Verify device ID.
 * Returns: one of the xge_hal_card_e{} enumerated types.
 * See also: xge_hal_card_e{}.
*/ xge_hal_card_e xge_hal_device_check_id(xge_hal_device_h devh) { xge_hal_device_t *hldev = (xge_hal_device_t *)devh; switch (hldev->device_id) { case XGE_PCI_DEVICE_ID_XENA_1: case XGE_PCI_DEVICE_ID_XENA_2: return XGE_HAL_CARD_XENA; case XGE_PCI_DEVICE_ID_HERC_1: case XGE_PCI_DEVICE_ID_HERC_2: return XGE_HAL_CARD_HERC; case XGE_PCI_DEVICE_ID_TITAN_1: case XGE_PCI_DEVICE_ID_TITAN_2: return XGE_HAL_CARD_TITAN; default: return XGE_HAL_CARD_UNKNOWN; } } /** * xge_hal_device_pci_info_get - Get PCI bus informations such as width, * frequency, and mode from previously stored values. * @devh: HAL device handle. * @pci_mode: pointer to a variable of enumerated type * xge_hal_pci_mode_e{}. * @bus_frequency: pointer to a variable of enumerated type * xge_hal_pci_bus_frequency_e{}. * @bus_width: pointer to a variable of enumerated type * xge_hal_pci_bus_width_e{}. * * Get pci mode, frequency, and PCI bus width. * Returns: one of the xge_hal_status_e{} enumerated types. * XGE_HAL_OK - for success. * XGE_HAL_ERR_INVALID_DEVICE - for invalid device handle. * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e. */ xge_hal_status_e xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode, xge_hal_pci_bus_frequency_e *bus_frequency, xge_hal_pci_bus_width_e *bus_width) { xge_hal_status_e rc_status; xge_hal_device_t *hldev = (xge_hal_device_t *)devh; if (!hldev || !hldev->is_initialized || hldev->magic != XGE_HAL_MAGIC) { rc_status = XGE_HAL_ERR_INVALID_DEVICE; xge_debug_device(XGE_ERR, "xge_hal_device_pci_info_get error, rc %d for device %p", rc_status, hldev); return rc_status; } *pci_mode = hldev->pci_mode; *bus_frequency = hldev->bus_frequency; *bus_width = hldev->bus_width; rc_status = XGE_HAL_OK; return rc_status; } /** * xge_hal_reinitialize_hw * @hldev: private member of the device structure. 
 *
 * This function will soft reset the NIC and re-initialize all the
 * I/O registers to the values they had after its initial initialization
 * through the probe function.
 */
int
xge_hal_reinitialize_hw(xge_hal_device_t * hldev)
{
	(void) xge_hal_device_reset(hldev);

	/* If hardware re-init fails the device is unusable: tear it
	 * down and leave it in reset. Returns 1 on failure, 0 on ok. */
	if (__hal_device_hw_initialize(hldev) != XGE_HAL_OK) {
	    xge_hal_device_terminate(hldev);
	    (void) __hal_device_reset(hldev);
	    return 1;
	}
	return 0;
}

/*
 * __hal_read_spdm_entry_line
 * @hldev: pointer to xge_hal_device_t structure
 * @spdm_line: spdm line in the spdm entry to be read.
 * @spdm_entry: spdm entry of the spdm_line in the SPDM table.
 * @spdm_line_val: Contains the value stored in the spdm line.
 *
 * SPDM table contains upto a maximum of 256 spdm entries.
 * Each spdm entry contains 8 lines and each line stores 8 bytes.
 * This function reads the spdm line(addressed by @spdm_line)
 * of the spdm entry(addressed by @spdm_entry) in
 * the SPDM table.
 */
xge_hal_status_e
__hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line,
	        u16 spdm_entry, u64 *spdm_line_val)
{
	xge_hal_pci_bar0_t *bar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/* Issue a strobe command selecting the line and entry. */
	val64 = XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE |
	    XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) |
	    XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry);

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	            &bar0->rts_rth_spdm_mem_ctrl);

	/* poll until done (strobe bit self-clears on completion) */
	if (__hal_device_register_poll(hldev,
	    &bar0->rts_rth_spdm_mem_ctrl, 0,
	    XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	*spdm_line_val = xge_os_pio_mem_read64(hldev->pdev,
	        hldev->regh0, &bar0->rts_rth_spdm_mem_data);
	return XGE_HAL_OK;
}

/*
 * __hal_get_free_spdm_entry
 * @hldev: pointer to xge_hal_device_t structure
 * @spdm_entry: Contains an index to the unused spdm entry in the SPDM table.
 *
 * This function returns an index of unused spdm entry in the SPDM
 * table.
*/ static xge_hal_status_e __hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry) { xge_hal_status_e status; u64 spdm_line_val=0; /* * Search in the local SPDM table for a free slot. */ *spdm_entry = 0; for(; *spdm_entry < hldev->spdm_max_entries; (*spdm_entry)++) { if (hldev->spdm_table[*spdm_entry]->in_use) { break; } } if (*spdm_entry >= hldev->spdm_max_entries) { return XGE_HAL_ERR_SPDM_TABLE_FULL; } /* * Make sure that the corresponding spdm entry in the SPDM * table is free. * Seventh line of the spdm entry contains information about * whether the entry is free or not. */ if ((status = __hal_read_spdm_entry_line(hldev, 7, *spdm_entry, &spdm_line_val)) != XGE_HAL_OK) { return status; } /* BIT(63) in spdm_line 7 corresponds to entry_enable bit */ if ((spdm_line_val & BIT(63))) { /* * Log a warning */ xge_debug_device(XGE_ERR, "Local SPDM table is not " "consistent with the actual one for the spdm " "entry %d", *spdm_entry); return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT; } return XGE_HAL_OK; } /* * __hal_calc_jhash - Calculate Jenkins hash. * @msg: Jenkins hash algorithm key. * @length: Length of the key. * @golden_ratio: Jenkins hash golden ratio. * @init_value: Jenkins hash initial value. * * This function implements the Jenkins based algorithm used for the * calculation of the RTH hash. * Returns: Jenkins hash value. 
* */ static u32 __hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value) { register u32 a,b,c,len; /* * Set up the internal state */ len = length; a = b = golden_ratio; /* the golden ratio; an arbitrary value */ c = init_value; /* the previous hash value */ /* handle most of the key */ while (len >= 12) { a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16) + ((u32)msg[3]<<24)); b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16) + ((u32)msg[7]<<24)); c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16) + ((u32)msg[11]<<24)); mix(a,b,c); msg += 12; len -= 12; } /* handle the last 11 bytes */ c += length; switch(len) /* all the case statements fall through */ { case 11: c+= ((u32)msg[10]<<24); break; case 10: c+= ((u32)msg[9]<<16); break; case 9 : c+= ((u32)msg[8]<<8); break; /* the first byte of c is reserved for the length */ case 8 : b+= ((u32)msg[7]<<24); break; case 7 : b+= ((u32)msg[6]<<16); break; case 6 : b+= ((u32)msg[5]<<8); break; case 5 : b+= msg[4]; break; case 4 : a+= ((u32)msg[3]<<24); break; case 3 : a+= ((u32)msg[2]<<16); break; case 2 : a+= ((u32)msg[1]<<8); break; case 1 : a+= msg[0]; break; /* case 0: nothing left to add */ } mix(a,b,c); /* report the result */ return c; } /** * xge_hal_spdm_entry_add - Add a new entry to the SPDM table. * @devh: HAL device handle. * @src_ip: Source ip address(IPv4/IPv6). * @dst_ip: Destination ip address(IPv4/IPv6). * @l4_sp: L4 source port. * @l4_dp: L4 destination port. * @is_tcp: Set to 1, if the protocol is TCP. * 0, if the protocol is UDP. * @is_ipv4: Set to 1, if the protocol is IPv4. * 0, if the protocol is IPv6. * @tgt_queue: Target queue to route the receive packet. * * This function add a new entry to the SPDM table. * * Returns: XGE_HAL_OK - success. * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled. * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to add a new entry with in * the time(timeout). * XGE_HAL_ERR_SPDM_TABLE_FULL - SPDM table is full. 
 * XGE_HAL_ERR_SPDM_INVALID_ENTRY - Invalid SPDM entry.
 *
 * See also: xge_hal_spdm_entry_remove{}.
 */
xge_hal_status_e
xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
	    xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
	    u8 is_tcp, u8 is_ipv4, u8 tgt_queue)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u32 jhash_value;
	u32 jhash_init_val;
	u32 jhash_golden_ratio;
	u64 val64;
	int off;
	u16 spdm_entry;
	u8  msg[XGE_HAL_JHASH_MSG_LEN];
	int ipaddr_len;
	xge_hal_status_e status;


	if (!hldev->config.rth_spdm_en) {
	    return XGE_HAL_ERR_SPDM_NOT_ENABLED;
	}

	if ((tgt_queue <  XGE_HAL_MIN_RING_NUM) ||
	    (tgt_queue  >  XGE_HAL_MAX_RING_NUM)) {
	    return XGE_HAL_ERR_SPDM_INVALID_ENTRY;
	}


	/*
	 * Calculate the jenkins hash.
	 */
	/*
	 * Create the Jenkins hash algorithm key.
	 * key = {L3SA, L3DA, L4SP, L4DP}, if SPDM is configured to
	 * use L4 information. Otherwize key = {L3SA, L3DA}.
	 */

	if (is_ipv4) {
	    ipaddr_len  = 4;   // In bytes
	} else {
	    ipaddr_len = 16;
	}

	/*
	 * Jenkins hash algorithm expects the key in the big endian
	 * format. Since key is the byte array, memcpy won't work in the
	 * case of little endian. So, the current code extracts each
	 * byte starting from MSB and store it in the key.
	 */
	if (is_ipv4) {
	    for (off = 0; off < ipaddr_len; off++) {
	        /* vBIT32 selects byte 'off' of the 32-bit address,
	         * MSB first; shift brings it down to 8 bits. */
	        u32 mask = vBIT32(0xff,(off*8),8);
	        int shift = 32-(off+1)*8;
	        msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift);
	        msg[off+ipaddr_len] =
	            (u8)((dst_ip->ipv4.addr & mask) >> shift);
	    }
	} else {
	    for (off = 0; off < ipaddr_len; off++) {
	        /* IPv6: two u64 words per address; loc is the byte
	         * position inside the current 64-bit word. */
	        int loc = off % 8;
	        u64 mask = vBIT(0xff,(loc*8),8);
	        int shift = 64-(loc+1)*8;

	        msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask)
	                    >> shift);
	        msg[off+ipaddr_len] =
	            (u8)((dst_ip->ipv6.addr[off/8] & mask) >> shift);
	    }
	}

	off = (2*ipaddr_len);

	if (hldev->config.rth_spdm_use_l4) {
	    /* append L4 ports to the key, MSB first */
	    msg[off] = (u8)((l4_sp & 0xff00) >> 8);
	    msg[off + 1] = (u8)(l4_sp & 0xff);
	    msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8);
	    msg[off + 3] = (u8)(l4_dp & 0xff);
	    off += 4;
	}

	/*
	 * Calculate jenkins hash for this configuration
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev,
	                  hldev->regh0,
	                  &bar0->rts_rth_jhash_cfg);
	jhash_golden_ratio = (u32)(val64 >> 32);
	jhash_init_val = (u32)(val64 & 0xffffffff);

	jhash_value = __hal_calc_jhash(msg, off,
	                   jhash_golden_ratio,
	                   jhash_init_val);

	xge_os_spin_lock(&hldev->spdm_lock);

	/*
	 * Locate a free slot in the SPDM table. To avoid a seach in the
	 * actual SPDM table, which is very expensive in terms of time,
	 * we are maintaining a local copy of the  table and the search for
	 * the free entry is performed in the local table.
	 */
	if ((status = __hal_get_free_spdm_entry(hldev,&spdm_entry))
	        != XGE_HAL_OK) {
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return status;
	}

	/*
	 * Add this entry to the SPDM table
	 */
	status =  __hal_spdm_entry_add(hldev, src_ip, dst_ip, l4_sp, l4_dp,
	                 is_tcp, is_ipv4, tgt_queue,
	                 jhash_value, /* calculated jhash */
	                 spdm_entry);

	xge_os_spin_unlock(&hldev->spdm_lock);

	return status;
}

/**
 * xge_hal_spdm_entry_remove - Remove an entry from the SPDM table.
 * @devh: HAL device handle.
 * @src_ip: Source ip address(IPv4/IPv6).
 * @dst_ip: Destination ip address(IPv4/IPv6).
 * @l4_sp: L4 source port.
 * @l4_dp: L4 destination port.
 * @is_tcp: Set to 1, if the protocol is TCP.
 *         0, if the protocol is UDP.
 * @is_ipv4: Set to 1, if the protocol is IPv4.
 *         0, if the protocol is IPv6.
 *
 * This function remove an entry from the SPDM table.
 *
 * Returns:  XGE_HAL_OK - success.
 * XGE_HAL_ERR_SPDM_NOT_ENABLED -  SPDM support is not enabled.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to remove an entry with in
 *                  the time(timeout).
 * XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND - Unable to locate the entry in the SPDM
 *                  table.
 *
 * See also: xge_hal_spdm_entry_add{}.
 */
xge_hal_status_e
xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
	    xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
	    u8 is_tcp, u8 is_ipv4)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	u16 spdm_entry;
	xge_hal_status_e status;
	u64 spdm_line_arr[8];
	u8 line_no;
	u8 spdm_is_tcp;
	u8 spdm_is_ipv4;
	u16 spdm_l4_sp;
	u16 spdm_l4_dp;

	if (!hldev->config.rth_spdm_en) {
	    return XGE_HAL_ERR_SPDM_NOT_ENABLED;
	}

	xge_os_spin_lock(&hldev->spdm_lock);

	/*
	 * Poll the rxpic_int_reg register until spdm ready bit is set or
	 * timeout happens.
	 */
	if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
	        XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {

	    /* upper layer may require to repeat */
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	/*
	 * Clear the SPDM READY bit.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	               &bar0->rxpic_int_reg);
	val64 &=  ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	              &bar0->rxpic_int_reg);

	/*
	 * Search in the local SPDM table to get the index of the
	 * corresponding entry in the SPDM table.
	 */
	spdm_entry = 0;
	for (;spdm_entry < hldev->spdm_max_entries; spdm_entry++) {
	    /* skip entries that cannot match on the scalar fields */
	    if ((!hldev->spdm_table[spdm_entry]->in_use) ||
	        (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) ||
	        (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) ||
	        (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) ||
	        (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) {
	        continue;
	    }

	    /*
	     * Compare the src/dst IP addresses of source and target
	     */
	    if (is_ipv4) {
	        if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr
	             != src_ip->ipv4.addr) ||
	            (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr
	             != dst_ip->ipv4.addr)) {
	            continue;
	        }
	    } else {
	        if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0]
	             != src_ip->ipv6.addr[0]) ||
	            (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1]
	             != src_ip->ipv6.addr[1]) ||
	            (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0]
	             != dst_ip->ipv6.addr[0]) ||
	            (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1]
	             != dst_ip->ipv6.addr[1])) {
	            continue;
	        }
	    }
	    break;
	}

	if (spdm_entry >= hldev->spdm_max_entries) {
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND;
	}

	/*
	 * Retrieve the corresponding entry from the SPDM table and
	 * make sure that the data is consistent.
	 */
	for(line_no = 0; line_no < 8; line_no++) {

	    /*
	     *  SPDM line 2,3,4 are valid only for IPv6 entry.
	     *  SPDM line 5 & 6 are reserved. We don't have to
	     *  read these entries in the above cases.
	     */
	    if (((is_ipv4) &&
	        ((line_no == 2)||(line_no == 3)||(line_no == 4))) ||
	         (line_no == 5) ||
	         (line_no == 6)) {
	        continue;
	    }

	    if ((status = __hal_read_spdm_entry_line(
	                hldev,
	                line_no,
	                spdm_entry,
	                &spdm_line_arr[line_no]))
	                != XGE_HAL_OK) {
	        xge_os_spin_unlock(&hldev->spdm_lock);
	        return status;
	    }
	}

	/*
	 * Seventh line of the spdm entry contains the entry_enable
	 * bit. Make sure that the entry_enable bit of this spdm entry
	 * is set.
	 * To remove an entry from the SPDM table, reset this
	 * bit.
	 */
	if (!(spdm_line_arr[7] & BIT(63))) {
	    /*
	     * Log a warning
	     */
	    xge_debug_device(XGE_ERR, "Local SPDM table is not "
	        "consistent with the actual one for the spdm "
	        "entry %d ", spdm_entry);
	    goto err_exit;
	}

	/*
	 *  Retreive the L4 SP/DP, src/dst ip addresses from the SPDM
	 *  table and do a comparision.
	 *  NOTE(review): BIT() here appears to be MSB-0 numbered
	 *  (BIT(63) is entry_enable above) - field extraction below
	 *  assumes the Xframe SPDM line layout; confirm against the
	 *  register header.
	 */
	spdm_is_tcp = (u8)((spdm_line_arr[0] & BIT(59)) >> 4);
	spdm_is_ipv4 = (u8)(spdm_line_arr[0] & BIT(63));
	spdm_l4_sp = (u16)(spdm_line_arr[0] >> 48);
	spdm_l4_dp = (u16)((spdm_line_arr[0] >> 32) & 0xffff);


	if ((spdm_is_tcp != is_tcp) ||
	    (spdm_is_ipv4 != is_ipv4) ||
	    (spdm_l4_sp != l4_sp) ||
	    (spdm_l4_dp != l4_dp)) {
	    /*
	     * Log a warning
	     */
	    xge_debug_device(XGE_ERR, "Local SPDM table is not "
	        "consistent with the actual one for the spdm "
	        "entry %d ", spdm_entry);
	    goto err_exit;
	}

	if (is_ipv4) {
	    /* Upper 32 bits of spdm_line(64 bit) contains the
	     * src IPv4 address. Lower 32 bits of spdm_line
	     * contains the destination IPv4 address.
	     */
	    u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32);
	    u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff);

	    if ((temp_src_ip != src_ip->ipv4.addr) ||
	        (temp_dst_ip != dst_ip->ipv4.addr)) {
	        xge_debug_device(XGE_ERR, "Local SPDM table is not "
	            "consistent with the actual one for the spdm "
	            "entry %d ", spdm_entry);
	        goto err_exit;
	    }

	} else {
	    /*
	     * SPDM line 1 & 2 contains the src IPv6 address.
	     * SPDM line 3 & 4 contains the dst IPv6 address.
	     */
	    if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) ||
	        (spdm_line_arr[2] != src_ip->ipv6.addr[1]) ||
	        (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) ||
	        (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) {

	        /*
	         * Log a warning
	         */
	        xge_debug_device(XGE_ERR, "Local SPDM table is not "
	            "consistent with the actual one for the spdm "
	            "entry %d ", spdm_entry);
	        goto err_exit;
	    }
	}

	/*
	 * Reset the entry_enable bit to zero
	 */
	spdm_line_arr[7] &= ~BIT(63);

	/*
	 * Write the modified spdm entry7 line to disable the entry in
	 * the on-chip table (each entry is 64 bytes; line 7 is at
	 * byte offset 7*8 within the entry).
	 */
	xge_os_pio_mem_write64(
	    hldev->pdev,
	    hldev->regh0,
	    spdm_line_arr[7],
	    (void *)((char *)hldev->spdm_mem_base +
	    (spdm_entry * 64) + (7 * 8)));

	/*
	 * Wait for the operation to be completed.
	 */
	if (__hal_device_register_poll(hldev,
	    &bar0->rxpic_int_reg, 1,
	    XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	}

	/*
	 * Make the corresponding spdm entry in the local SPDM table
	 * available for future use.
	 */
	hldev->spdm_table[spdm_entry]->in_use = 0;
	xge_os_spin_unlock(&hldev->spdm_lock);

	return XGE_HAL_OK;

err_exit:
	xge_os_spin_unlock(&hldev->spdm_lock);
	return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
}

/*
 * __hal_device_rti_set
 * @ring: The post_qid of the ring.
 * @channel: HAL channel of the ring.
 *
 * This function stores the RTI value associated for the MSI and
 * also unmasks this particular RTI in the rti_mask register.
 */
static void __hal_device_rti_set(int ring_qid, xge_hal_channel_t *channel)
{
	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
	u64 val64;

	/* remember the RTI only when running with message interrupts */
	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
	    hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
	    channel->rti = (u8)ring_qid;

	/* clear the ring's bit in the RX traffic mask to unmask it */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->rx_traffic_mask);
	val64 &= ~BIT(ring_qid);
	xge_os_pio_mem_write64(hldev->pdev,
	    hldev->regh0, val64, &bar0->rx_traffic_mask);
}

/*
 * __hal_device_tti_set
 * @ring: The post_qid of the FIFO.
 * @channel: HAL channel the FIFO.
 *
 * This function stores the TTI value associated for the MSI and
 * also unmasks this particular TTI in the tti_mask register.
 */
static void __hal_device_tti_set(int fifo_qid, xge_hal_channel_t *channel)
{
	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
	u64 val64;

	/* remember the TTI only when running with message interrupts */
	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
	    hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
	    channel->tti = (u8)fifo_qid;

	/* clear the fifo's bit in the TX traffic mask to unmask it */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->tx_traffic_mask);
	val64 &= ~BIT(fifo_qid);
	xge_os_pio_mem_write64(hldev->pdev,
	    hldev->regh0, val64, &bar0->tx_traffic_mask);
}

/**
 * xge_hal_channel_msi_set - Associate a RTI with a ring or TTI with a
 * FIFO for a given MSI.
 * @channelh: HAL channel handle.
 * @msi: MSI Number associated with the channel.
 * @msi_msg: The MSI message associated with the MSI number above.
 *
 * This API will associate a given channel (either Ring or FIFO) with the
 * given MSI number. It will alo program the Tx_Mat/Rx_Mat tables in the
 * hardware to indicate this association to the hardware.
 */
xge_hal_status_e
xge_hal_channel_msi_set(xge_hal_channel_h channelh, int msi, u32 msi_msg)
{
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
	u64 val64;

	channel->msi_msg = msi_msg;
	if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
	    int ring = channel->post_qid;
	    xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d,"
	        " MSI: %d", channel->msi_msg, ring, msi);
	    /* route this ring's interrupts to the given MSI */
	    val64 = xge_os_pio_mem_read64(hldev->pdev,
	        hldev->regh0, &bar0->rx_mat);
	    val64 |= XGE_HAL_SET_RX_MAT(ring, msi);
	    xge_os_pio_mem_write64(hldev->pdev,
	        hldev->regh0, val64, &bar0->rx_mat);
	    __hal_device_rti_set(ring, channel);
	} else {
	    int fifo = channel->post_qid;
	    xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d,"
	        " MSI: %d", channel->msi_msg, fifo, msi);
	    /* route this fifo's interrupts to the given MSI */
	    val64 = xge_os_pio_mem_read64(hldev->pdev,
	        hldev->regh0, &bar0->tx_mat[0]);
	    val64 |= XGE_HAL_SET_TX_MAT(fifo, msi);
	    xge_os_pio_mem_write64(hldev->pdev,
	        hldev->regh0, val64, &bar0->tx_mat[0]);
	    __hal_device_tti_set(fifo, channel);
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_mask_msix - Begin IRQ processing.
 * @hldev: HAL device handle.
 * @msi_id:  MSI ID
 *
 * The function masks the msix interrupt for the given msi_id
 *
 * Note:
 *
 * Returns: 0,
 * Otherwise, XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range
 * status.
 * See also:
 */
xge_hal_status_e
xge_hal_mask_msix(xge_hal_device_h devh, int msi_id)
{
	xge_hal_status_e  status = XGE_HAL_OK;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	u32 *bar2 = (u32 *)hldev->bar2;
	u32 val32;

	xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);

	/* Set bit 0 of the vector's mask word in the MSI-X table
	 * (each table entry is four 32-bit words; word 3 is control). */
	val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2,
	    &bar2[msi_id*4+3]);
	val32 |= 1;
	xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32,
	    &bar2[msi_id*4+3]);
	return status;
}

/**
 * xge_hal_unmask_msix - End IRQ processing.
 * @hldev: HAL device handle.
 * @msi_id:  MSI ID
 *
 * The function unmasks the msix interrupt for the given msi_id
 *
 * Note:
 *
 * Returns: 0,
 * Otherwise, XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range
 * status.
 * See also:
 */
xge_hal_status_e
xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id)
{
	xge_hal_status_e  status = XGE_HAL_OK;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	u32 *bar2 = (u32 *)hldev->bar2;
	u32 val32;

	xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);

	/* Clear bit 0 of the vector's mask word in the MSI-X table. */
	val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2,
	    &bar2[msi_id*4+3]);
	val32 &= ~1;
	xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32,
	    &bar2[msi_id*4+3]);
	return status;
}

/*
 * __hal_set_msix_vals
 * @devh: HAL device handle.
 * @msix_value: 32bit MSI-X value transferred across PCI to @msix_address.
 *              Filled in by this function.
 * @msix_address: 32bit MSI-X DMA address.
 *              Filled in by this function.
 * @msix_idx: index that corresponds to the (@msix_value, @msix_address)
 *            entry in the table of MSI-X (value, address) pairs.
* * This function will program the hardware associating the given * address/value cobination to the specified msi number. */ static void __hal_set_msix_vals (xge_hal_device_h devh, u32 *msix_value, u64 *msix_addr, int msix_idx) { int cnt = 0; xge_hal_device_t *hldev = (xge_hal_device_t*)devh; xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; u64 val64; val64 = XGE_HAL_XMSI_NO(msix_idx) | XGE_HAL_XMSI_STROBE; __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64 >> 32), &bar0->xmsi_access); __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)(val64), &bar0->xmsi_access); do { val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->xmsi_access); if (val64 & XGE_HAL_XMSI_STROBE) break; cnt++; xge_os_mdelay(20); } while(cnt < 5); *msix_value = (u32)(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->xmsi_data)); *msix_addr = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->xmsi_address); } /** * xge_hal_channel_msix_set - Associate MSI-X with a channel. * @channelh: HAL channel handle. * @msix_idx: index that corresponds to a particular (@msix_value, * @msix_address) entry in the MSI-X table. * * This API associates a given channel (either Ring or FIFO) with the * given MSI-X number. It programs the Xframe's Tx_Mat/Rx_Mat tables * to indicate this association. */ xge_hal_status_e xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx) { xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh; xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh; xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; u64 val64; if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) { /* Currently Ring and RTI is one on one. 
*/ int ring = channel->post_qid; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->rx_mat); val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_mat); __hal_device_rti_set(ring, channel); hldev->config.fifo.queue[channel->post_qid].intr_vector = msix_idx; } else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) { int fifo = channel->post_qid; val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, &bar0->tx_mat[0]); val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx); xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->tx_mat[0]); __hal_device_tti_set(fifo, channel); hldev->config.ring.queue[channel->post_qid].intr_vector = msix_idx; } channel->msix_idx = msix_idx; __hal_set_msix_vals(hldev, &channel->msix_data, &channel->msix_address, channel->msix_idx); return XGE_HAL_OK; } #if defined(XGE_HAL_CONFIG_LRO) /** * xge_hal_lro_terminate - Terminate lro resources. * @lro_scale: Amount of lro memory. * @hldev: Hal device structure. * */ void xge_hal_lro_terminate(u32 lro_scale, xge_hal_device_t *hldev) { } /** * xge_hal_lro_init - Initiate lro resources. * @lro_scale: Amount of lro memory. * @hldev: Hal device structure. * Note: For time being I am using only one LRO per device. Later on size * will be increased. */ xge_hal_status_e xge_hal_lro_init(u32 lro_scale, xge_hal_device_t *hldev) { int i; if (hldev->config.lro_sg_size == XGE_HAL_DEFAULT_USE_HARDCODE) hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE; if (hldev->config.lro_frm_len == XGE_HAL_DEFAULT_USE_HARDCODE) hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN; for (i=0; i < XGE_HAL_MAX_RING_NUM; i++) { xge_os_memzero(hldev->lro_desc[i].lro_pool, sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS); hldev->lro_desc[i].lro_next_idx = 0; hldev->lro_desc[i].lro_recent = NULL; } return XGE_HAL_OK; } #endif /** * xge_hal_device_poll - HAL device "polling" entry point. * @devh: HAL device. * * HAL "polling" entry point. 
 Note that this is part of HAL public API.
 * Upper-Layer driver _must_ periodically poll HAL via
 * xge_hal_device_poll().
 *
 * HAL uses caller's execution context to serially process accumulated
 * slow-path events, such as link state changes and hardware error
 * indications.
 *
 * The rate of polling could be somewhere between 500us to 10ms,
 * depending on requirements (e.g., the requirement to support fail-over
 * could mean that 500us or even 100us polling interval need to be used).
 *
 * The need and motivation for external polling includes
 *
 *   - remove the error-checking "burden" from the HAL interrupt handler
 *     (see xge_hal_device_handle_irq());
 *
 *   - remove the potential source of portability issues by _not_
 *     implementing separate polling thread within HAL itself.
 *
 * See also: xge_hal_event_e{}, xge_hal_driver_config_t{}.
 * Usage: See ex_slow_path{}.
 */
void
xge_hal_device_poll(xge_hal_device_h devh)
{
	unsigned char item_buf[sizeof(xge_queue_item_t) +
	    XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
	xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
	xge_queue_status_e qstatus;
	xge_hal_status_e hstatus;
	int i = 0;
	int queue_has_critical_event = 0;
	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;

	xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
	    XGE_DEFAULT_EVENT_MAX_DATA_SIZE));

_again:
	/* Bail out if the device went away between polls. */
	if (!hldev->is_initialized ||
	    hldev->terminating ||
	    hldev->magic != XGE_HAL_MAGIC)
		return;

	if(hldev->stats.sw_dev_err_stats.xpak_counter.tick_period < 72000)
	{
		/*
		 * Wait for an Hour
		 * (72000 poll ticks; assumes ~50ms per poll — TODO confirm)
		 */
		hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++;
	} else {
		/*
		 * Logging Error messages in the excess temperature,
		 * Bias current, laser output for three cycle
		 */
		__hal_updt_stats_xpak(hldev);
		hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0;
	}

	if (!queue_has_critical_event)
		queue_has_critical_event =
			__queue_get_reset_critical(hldev->queueh);

	hldev->in_poll = 1;
	/* Drain at most QUEUE_CONSUME_MAX events per poll, unless a
	 * critical event forces us to keep draining. */
	while (i++ < XGE_HAL_DRIVER_QUEUE_CONSUME_MAX || queue_has_critical_event) {

		qstatus = xge_queue_consume(hldev->queueh,
				    XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
				    item);
		if (qstatus == XGE_QUEUE_IS_EMPTY)
			break;

		xge_debug_queue(XGE_TRACE,
			 "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x"
			 XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type,
			 (u64)(ulong_t)item->context);

		/* Re-check liveness: a ULD callback below may tear the
		 * device down while we are iterating. */
		if (!hldev->is_initialized ||
		    hldev->magic != XGE_HAL_MAGIC) {
			hldev->in_poll = 0;
			return;
		}

		switch (item->event_type) {
			case XGE_HAL_EVENT_LINK_IS_UP: {
				/* Link notifications are suppressed while a
				 * critical event is pending. */
				if (!queue_has_critical_event &&
				    g_xge_hal_driver->uld_callbacks.link_up) {
					g_xge_hal_driver->uld_callbacks.link_up(
						hldev->upper_layer_info);
					hldev->link_state = XGE_HAL_LINK_UP;
				}
			} break;
			case XGE_HAL_EVENT_LINK_IS_DOWN: {
				if (!queue_has_critical_event &&
				    g_xge_hal_driver->uld_callbacks.link_down) {
					g_xge_hal_driver->uld_callbacks.link_down(
						hldev->upper_layer_info);
					hldev->link_state = XGE_HAL_LINK_DOWN;
				}
			} break;
			case XGE_HAL_EVENT_SERR:
			case XGE_HAL_EVENT_ECCERR:
			case XGE_HAL_EVENT_PARITYERR:
			case XGE_HAL_EVENT_TARGETABORT:
			case XGE_HAL_EVENT_SLOT_FREEZE: {
				void *item_data = xge_queue_item_data(item);
				xge_hal_event_e event_type = item->event_type;
				u64 val64 = *((u64*)item_data);

				/* A slot freeze can masquerade as another
				 * error; prefer reporting the freeze. */
				if (event_type != XGE_HAL_EVENT_SLOT_FREEZE)
					if (xge_hal_device_is_slot_freeze(hldev))
						event_type =
						    XGE_HAL_EVENT_SLOT_FREEZE;
				if (g_xge_hal_driver->uld_callbacks.crit_err) {
					g_xge_hal_driver->uld_callbacks.crit_err(
						hldev->upper_layer_info,
						event_type,
						val64);
					/* handle one critical event per poll cycle */
					hldev->in_poll = 0;
					return;
				}
			} break;
			default: {
				xge_debug_queue(XGE_TRACE,
					"got non-HAL event %d",
					item->event_type);
			} break;
		}

		/* broadcast this event */
		if (g_xge_hal_driver->uld_callbacks.event)
			g_xge_hal_driver->uld_callbacks.event(item);
	}

	/* The ULD may veto the hardware poll entirely. */
	if (g_xge_hal_driver->uld_callbacks.before_device_poll) {
		if (g_xge_hal_driver->uld_callbacks.before_device_poll(
					 hldev) != 0) {
			hldev->in_poll = 0;
			return;
		}
	}

	hstatus = __hal_device_poll(hldev);
	if (g_xge_hal_driver->uld_callbacks.after_device_poll)
		g_xge_hal_driver->uld_callbacks.after_device_poll(hldev);

	/*
	 * handle critical error right away:
	 *   - walk the device queue again
	 *   - drop non-critical events, if any
	 *   - look for the 1st critical
	 */
	if (hstatus == XGE_HAL_ERR_CRITICAL) {
		queue_has_critical_event = 1;
		goto _again;
	}

	hldev->in_poll = 0;
}

/**
 * xge_hal_rts_rth_init - Set enhanced mode for RTS hashing.
 * @hldev: HAL device handle.
 *
 * This function is used to set the adapter to enhanced mode.
 *
 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
 */
void
xge_hal_rts_rth_init(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/*
	 * Set the receive traffic steering mode from default(classic)
	 * to enhanced.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				      &bar0->rts_ctrl);
	val64 |=  XGE_HAL_RTS_CTRL_ENHANCED_MODE;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			       val64, &bar0->rts_ctrl);
}

/**
 * xge_hal_rts_rth_clr - Clear RTS hashing.
 * @hldev: HAL device handle.
 *
 * This function is used to clear all RTS hashing related stuff.
 * It brings the adapter out from enhanced mode to classic mode.
 * It also clears RTS_RTH_CFG register i.e clears hash type, function etc.
 *
 * See also: xge_hal_rts_rth_set(), xge_hal_rts_rth_itable_set().
 */
void
xge_hal_rts_rth_clr(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/*
	 * Return the receive traffic steering mode from enhanced back
	 * to default(classic). (Original comment was a copy-paste of the
	 * init routine's and described the opposite transition.)
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				      &bar0->rts_ctrl);
	val64 &=  ~XGE_HAL_RTS_CTRL_ENHANCED_MODE;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			       val64, &bar0->rts_ctrl);
	val64 = 0;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			       &bar0->rts_rth_cfg);
}

/**
 * xge_hal_rts_rth_set - Set/configure RTS hashing.
 * @hldev: HAL device handle.
 * @def_q: default queue
 * @hash_type: hash type i.e TcpIpV4, TcpIpV6 etc.
 * @bucket_size: no of least significant bits to be used for hashing.
 *
 * Used to set/configure all RTS hashing related stuff.
 * - set the steering mode to enhanced.
 * - set hash function i.e algo selection.
 * - set the default queue.
 *
 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set().
 */
void
xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type,
		    u16 bucket_size)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	/* Packets that miss the hash land on the default queue. */
	val64 = XGE_HAL_RTS_DEFAULT_Q(def_q);
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			       &bar0->rts_default_q);

	/* Enable RTH with the requested hash type, bucket size and the
	 * MS algorithm selection. */
	val64 = hash_type;
	val64 |= XGE_HAL_RTS_RTH_EN;
	val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(bucket_size);
	val64 |= XGE_HAL_RTS_RTH_ALG_SEL_MS;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			       &bar0->rts_rth_cfg);
}

/**
 * xge_hal_rts_rth_start - Start RTS hashing.
 * @hldev: HAL device handle.
 *
 * Used to start RTS hashing (sets the RTH enable bit, leaving the rest
 * of the previously written configuration intact).
 *
 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(),
 * xge_hal_rts_rth_stop().
 */
void
xge_hal_rts_rth_start(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				      &bar0->rts_rth_cfg);
	val64 |= XGE_HAL_RTS_RTH_EN;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			       &bar0->rts_rth_cfg);
}

/**
 * xge_hal_rts_rth_stop - Stop the RTS hashing.
 * @hldev: HAL device handle.
 *
 * Used to stop RTS hashing (clears only the RTH enable bit; the rest of
 * the configuration is preserved so hashing can be restarted).
 * (Fixed "Staop" typo in the original header.)
 *
 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(),
 * xge_hal_rts_rth_start().
 */
void
xge_hal_rts_rth_stop(xge_hal_device_t *hldev)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				      &bar0->rts_rth_cfg);
	val64 &=  ~XGE_HAL_RTS_RTH_EN;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
			       &bar0->rts_rth_cfg);
}

/**
 * xge_hal_rts_rth_itable_set - Set/configure indirection table (IT).
 * @hldev: HAL device handle.
 * @itable: Pointer to the indirection table
 * @itable_size: number of entries in @itable to program (the original
 *               header said "no of least significant bits", but the code
 *               iterates @itable_size entries)
 *
 * Used to set/configure indirection table.
 * It enables the required no of entries in the IT.
 * It adds entries to the IT.
 *
 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
 */
xge_hal_status_e
xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, u32 itable_size)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u64 val64;
	u32 idx;

	for (idx = 0; idx < itable_size; idx++) {
		/* Stage the data word for this IT entry... */
		val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
			XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]);

		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				       &bar0->rts_rth_map_mem_data);

		/* execute */
		val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
			 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
			 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx));

		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				       &bar0->rts_rth_map_mem_ctrl);

		/* poll until done */
		if (__hal_device_register_poll(hldev,
		       &bar0->rts_rth_map_mem_ctrl, 0,
		       XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
		       XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
			/* upper layer may require to repeat */
			return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
		}
	}

	return XGE_HAL_OK;
}

/**
 * xge_hal_device_rts_rth_key_set - Configure 40byte secret for hash calc.
 *
 * @hldev: HAL device handle.
 * @KeySize: Number of 64-bit words
 * @Key: upto 40-byte array of 8-bit values
 * This function configures the 40-byte secret which is used for hash
 * calculation.
 *
 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
 */
void
xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key)
{
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *) hldev->bar0;
	u64 val64;
	u32 entry, nreg, i;

	entry = 0;
	nreg = 0;

	while( KeySize ) {
		val64 = 0;
		for ( i = 0; i < 8 ; i++) {
			/* Prepare 64-bit word for 'nreg' containing 8 keys. */
			if (i)
				val64 <<= 8;
			val64 |= Key[entry++];
		}

		KeySize--;

		/* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3),
		   (n<<3)+7);*/
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				       &bar0->rts_rth_hash_mask[nreg++]);
	}

	while( nreg < 5 ) {
		/* Clear the rest if key is less than 40 bytes */
		val64 = 0;
		xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
				       &bar0->rts_rth_hash_mask[nreg++]);
	}
}

/**
 * xge_hal_device_is_closed - Device is closed
 *
 * @devh: HAL device handle.
 *
 * Returns 1 when no FIFO or ring channels remain open, 0 otherwise.
 */
int
xge_hal_device_is_closed(xge_hal_device_h devh)
{
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

	if (xge_list_is_empty(&hldev->fifo_channels) &&
	    xge_list_is_empty(&hldev->ring_channels))
		return 1;

	return 0;
}

/*
 * xge_hal_device_rts_section_enable - enable the RTS MAC section that
 * contains address slot @index. Each section covers 32 MAC addresses.
 */
xge_hal_status_e
xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index)
{
	u64 val64;
	int section;
	int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;

	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* Herc supports a larger address table than Xena. */
	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
		max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;

	if ( index >= max_addr )
		return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;

	/*
	 * Calculate the section value
	 */
	section = index / 32;

	xge_debug_device(XGE_TRACE, "the Section value is %d ", section);

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
				      &bar0->rts_mac_cfg);
	switch(section)
	{
		case 0:
			val64 |=  XGE_HAL_RTS_MAC_SECT0_EN;
			break;
		case 1:
			val64 |=  XGE_HAL_RTS_MAC_SECT1_EN;
			break;
		case 2:
			val64 |=  XGE_HAL_RTS_MAC_SECT2_EN;
			break;
		case 3:
			val64 |=  XGE_HAL_RTS_MAC_SECT3_EN;
			break;
		case 4:
			val64 |=  XGE_HAL_RTS_MAC_SECT4_EN;
			break;
		case 5:
			val64 |=  XGE_HAL_RTS_MAC_SECT5_EN;
			break;
		case 6:
			val64 |=  XGE_HAL_RTS_MAC_SECT6_EN;
			break;
		case 7:
			val64 |=  XGE_HAL_RTS_MAC_SECT7_EN;
			break;
		default:
			xge_debug_device(XGE_ERR, "Invalid Section value %d "
					, section);
	}

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
			       val64, &bar0->rts_mac_cfg);
	return XGE_HAL_OK;
}

/**
 * xge_hal_fix_rldram_ecc_error
 * @hldev: private member of the
device structure. * * SXE-02-010. This function will turn OFF the ECC error reporting for the * interface bet'n external Micron RLDRAM II device and memory controller. * The error would have been reported in RLD_ECC_DB_ERR_L and RLD_ECC_DB_ERR_U * fields of MC_ERR_REG register. Issue reported by HP-Unix folks during the * qualification of Herc. */ xge_hal_status_e xge_hal_fix_rldram_ecc_error(xge_hal_device_t * hldev) { xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0; u64 val64; // Enter Test Mode. val64 = XGE_HAL_MC_RLDRAM_TEST_MODE; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mc_rldram_test_ctrl); // Enable fg/bg tests. val64 = 0x0100000000000000ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mc_driver); // Enable RLDRAM configuration. val64 = 0x0000000000017B00ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mc_rldram_mrs); // Enable RLDRAM queues. val64 = 0x0000000001017B00ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mc_rldram_mrs); // Setup test ranges val64 = 0x00000000001E0100ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mc_rldram_test_add); val64 = 0x00000100001F0100ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mc_rldram_test_add_bkg); // Start Reads. 
val64 = 0x0001000000010000ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mc_rldram_test_ctrl); if (__hal_device_register_poll(hldev, &bar0->mc_rldram_test_ctrl, 1, XGE_HAL_MC_RLDRAM_TEST_DONE, XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK){ return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING; } // Exit test mode val64 = 0x0000000000000000ULL; xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->mc_rldram_test_ctrl); return XGE_HAL_OK; } Index: head/sys/dev/sound/isa/ad1816.c =================================================================== --- head/sys/dev/sound/isa/ad1816.c (revision 297861) +++ head/sys/dev/sound/isa/ad1816.c (revision 297862) @@ -1,687 +1,687 @@ /*- * Copyright (c) 1999 Cameron Grant * Copyright (c) 1997,1998 Luigi Rizzo * Copyright (c) 1994,1995 Hannu Savolainen * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include "mixer_if.h" SND_DECLARE_FILE("$FreeBSD$"); struct ad1816_info; struct ad1816_chinfo { struct ad1816_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; int dir, blksz; }; struct ad1816_info { struct resource *io_base; /* primary I/O address for the board */ int io_rid; struct resource *irq; int irq_rid; struct resource *drq1; /* play */ int drq1_rid; struct resource *drq2; /* rec */ int drq2_rid; void *ih; bus_dma_tag_t parent_dmat; struct mtx *lock; unsigned int bufsize; struct ad1816_chinfo pch, rch; }; static u_int32_t ad1816_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_MU_LAW, 1, 0), SND_FORMAT(AFMT_MU_LAW, 2, 0), SND_FORMAT(AFMT_A_LAW, 1, 0), SND_FORMAT(AFMT_A_LAW, 2, 0), 0 }; static struct pcmchan_caps ad1816_caps = {4000, 55200, ad1816_fmt, 0}; #define AD1816_MUTE 31 /* value for mute */ static void ad1816_lock(struct ad1816_info *ad1816) { snd_mtxlock(ad1816->lock); } static void ad1816_unlock(struct ad1816_info *ad1816) { snd_mtxunlock(ad1816->lock); } static int port_rd(struct resource *port, int off) { if (port) return bus_space_read_1(rman_get_bustag(port), rman_get_bushandle(port), off); else return -1; } static void port_wr(struct resource *port, int off, u_int8_t data) { if (port) bus_space_write_1(rman_get_bustag(port), 
rman_get_bushandle(port), off, data);
}

/* Read a byte from the codec's register window; see port_rd(). */
static int
io_rd(struct ad1816_info *ad1816, int reg)
{
	return port_rd(ad1816->io_base, reg);
}

/* Write a byte to the codec's register window; see port_wr(). */
static void
io_wr(struct ad1816_info *ad1816, int reg, u_int8_t data)
{
	port_wr(ad1816->io_base, reg, data);
}

/*
 * Interrupt handler: dispatch capture/playback interrupts to the PCM
 * layer, then acknowledge the serviced bits in the AD1816_INT register.
 */
static void
ad1816_intr(void *arg)
{
	struct ad1816_info *ad1816 = (struct ad1816_info *)arg;
	unsigned char   c, served = 0;

	ad1816_lock(ad1816);
	/* get interrupt status */
	c = io_rd(ad1816, AD1816_INT);

	/* check for stray interrupts */
	if (c & ~(AD1816_INTRCI | AD1816_INTRPI)) {
		printf("pcm: stray int (%x)\n", c);
		c &= AD1816_INTRCI | AD1816_INTRPI;
	}
	/* check for capture interrupt */
	if (sndbuf_runsz(ad1816->rch.buffer) && (c & AD1816_INTRCI)) {
		/* Drop the softc lock around chn_intr(): the channel code
		 * takes its own locks and may call back into this driver. */
		ad1816_unlock(ad1816);
		chn_intr(ad1816->rch.channel);
		ad1816_lock(ad1816);
		served |= AD1816_INTRCI;		/* cp served */
	}
	/* check for playback interrupt */
	if (sndbuf_runsz(ad1816->pch.buffer) && (c & AD1816_INTRPI)) {
		ad1816_unlock(ad1816);
		chn_intr(ad1816->pch.channel);
		ad1816_lock(ad1816);
		served |= AD1816_INTRPI;		/* pb served */
	}
	if (served == 0) {
		/* this probably means this is not a (working) ad1816 chip, */
		/* or an error in dma handling                              */
		printf("pcm: int without reason (%x)\n", c);
		c = 0;
	} else c &= ~served;
	/* Write back only the unserved bits; re-read to verify the
	 * hardware actually cleared the interrupt. */
	io_wr(ad1816, AD1816_INT, c);
	c = io_rd(ad1816, AD1816_INT);
	if (c != 0)
		printf("pcm: int clear failed (%x)\n", c);
	ad1816_unlock(ad1816);
}

/*
 * Spin until the codec's indirect-register interface reports ready.
 * Returns the non-zero busy/ready status on success, -1 on timeout
 * after x probes.
 */
static int
ad1816_wait_init(struct ad1816_info *ad1816, int x)
{
	int             n = 0;	/* to shut up the compiler...
*/ for (; x--;) if ((n = (io_rd(ad1816, AD1816_ALE) & AD1816_BUSY)) == 0) DELAY(10); else return n; printf("ad1816_wait_init failed 0x%02x.\n", n); return -1; } static unsigned short ad1816_read(struct ad1816_info *ad1816, unsigned int reg) { u_short x = 0; if (ad1816_wait_init(ad1816, 100) == -1) return 0; io_wr(ad1816, AD1816_ALE, 0); io_wr(ad1816, AD1816_ALE, (reg & AD1816_ALEMASK)); if (ad1816_wait_init(ad1816, 100) == -1) return 0; x = (io_rd(ad1816, AD1816_HIGH) << 8) | io_rd(ad1816, AD1816_LOW); return x; } static void ad1816_write(struct ad1816_info *ad1816, unsigned int reg, unsigned short data) { if (ad1816_wait_init(ad1816, 100) == -1) return; io_wr(ad1816, AD1816_ALE, (reg & AD1816_ALEMASK)); io_wr(ad1816, AD1816_LOW, (data & 0x000000ff)); io_wr(ad1816, AD1816_HIGH, (data & 0x0000ff00) >> 8); } /* -------------------------------------------------------------------- */ static int ad1816mix_init(struct snd_mixer *m) { mix_setdevs(m, AD1816_MIXER_DEVICES); mix_setrecdevs(m, AD1816_REC_DEVICES); return 0; } static int ad1816mix_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct ad1816_info *ad1816 = mix_getdevinfo(m); u_short reg = 0; /* Scale volumes */ left = AD1816_MUTE - (AD1816_MUTE * left) / 100; right = AD1816_MUTE - (AD1816_MUTE * right) / 100; reg = (left << 8) | right; /* do channel selective muting if volume is zero */ if (left == AD1816_MUTE) reg |= 0x8000; if (right == AD1816_MUTE) reg |= 0x0080; ad1816_lock(ad1816); switch (dev) { case SOUND_MIXER_VOLUME: /* Register 14 master volume */ ad1816_write(ad1816, 14, reg); break; case SOUND_MIXER_CD: /* Register 15 cd */ case SOUND_MIXER_LINE1: ad1816_write(ad1816, 15, reg); break; case SOUND_MIXER_SYNTH: /* Register 16 synth */ ad1816_write(ad1816, 16, reg); break; case SOUND_MIXER_PCM: /* Register 4 pcm */ ad1816_write(ad1816, 4, reg); break; case SOUND_MIXER_LINE: case SOUND_MIXER_LINE3: /* Register 18 line in */ ad1816_write(ad1816, 18, reg); break; case 
SOUND_MIXER_MIC: /* Register 19 mic volume */ ad1816_write(ad1816, 19, reg & ~0xff); /* mic is mono */ break; case SOUND_MIXER_IGAIN: /* and now to something completely different ... */ ad1816_write(ad1816, 20, ((ad1816_read(ad1816, 20) & ~0x0f0f) | (((AD1816_MUTE - left) / 2) << 8) /* four bits of adc gain */ | ((AD1816_MUTE - right) / 2))); break; default: printf("ad1816_mixer_set(): unknown device.\n"); break; } ad1816_unlock(ad1816); left = ((AD1816_MUTE - left) * 100) / AD1816_MUTE; right = ((AD1816_MUTE - right) * 100) / AD1816_MUTE; return left | (right << 8); } static u_int32_t ad1816mix_setrecsrc(struct snd_mixer *m, u_int32_t src) { struct ad1816_info *ad1816 = mix_getdevinfo(m); int dev; switch (src) { case SOUND_MASK_LINE: case SOUND_MASK_LINE3: dev = 0x00; break; case SOUND_MASK_CD: case SOUND_MASK_LINE1: dev = 0x20; break; case SOUND_MASK_MIC: default: dev = 0x50; src = SOUND_MASK_MIC; } dev |= dev << 8; ad1816_lock(ad1816); ad1816_write(ad1816, 20, (ad1816_read(ad1816, 20) & ~0x7070) | dev); ad1816_unlock(ad1816); return src; } static kobj_method_t ad1816mixer_methods[] = { KOBJMETHOD(mixer_init, ad1816mix_init), KOBJMETHOD(mixer_set, ad1816mix_set), KOBJMETHOD(mixer_setrecsrc, ad1816mix_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(ad1816mixer); /* -------------------------------------------------------------------- */ /* channel interface */ static void * ad1816chan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct ad1816_info *ad1816 = devinfo; struct ad1816_chinfo *ch = (dir == PCMDIR_PLAY)? &ad1816->pch : &ad1816->rch; ch->dir = dir; ch->parent = ad1816; ch->channel = c; ch->buffer = b; if (sndbuf_alloc(ch->buffer, ad1816->parent_dmat, 0, ad1816->bufsize) != 0) return NULL; sndbuf_dmasetup(ch->buffer, (dir == PCMDIR_PLAY) ? 
ad1816->drq1 : ad1816->drq2); if (SND_DMA(ch->buffer)) sndbuf_dmasetdir(ch->buffer, dir); return ch; } static int ad1816chan_setformat(kobj_t obj, void *data, u_int32_t format) { struct ad1816_chinfo *ch = data; struct ad1816_info *ad1816 = ch->parent; int fmt = AD1816_U8, reg; ad1816_lock(ad1816); if (ch->dir == PCMDIR_PLAY) { reg = AD1816_PLAY; ad1816_write(ad1816, 8, 0x0000); /* reset base and current counter */ ad1816_write(ad1816, 9, 0x0000); /* for playback and capture */ } else { reg = AD1816_CAPT; ad1816_write(ad1816, 10, 0x0000); ad1816_write(ad1816, 11, 0x0000); } switch (AFMT_ENCODING(format)) { case AFMT_A_LAW: fmt = AD1816_ALAW; break; case AFMT_MU_LAW: fmt = AD1816_MULAW; break; case AFMT_S16_LE: fmt = AD1816_S16LE; break; case AFMT_S16_BE: fmt = AD1816_S16BE; break; case AFMT_U8: fmt = AD1816_U8; break; } if (AFMT_CHANNEL(format) > 1) fmt |= AD1816_STEREO; io_wr(ad1816, reg, fmt); ad1816_unlock(ad1816); #if 0 return format; #else return 0; #endif } static u_int32_t ad1816chan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct ad1816_chinfo *ch = data; struct ad1816_info *ad1816 = ch->parent; RANGE(speed, 4000, 55200); ad1816_lock(ad1816); ad1816_write(ad1816, (ch->dir == PCMDIR_PLAY)? 2 : 3, speed); ad1816_unlock(ad1816); return speed; } static u_int32_t ad1816chan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct ad1816_chinfo *ch = data; ch->blksz = blocksize; return ch->blksz; } static int ad1816chan_trigger(kobj_t obj, void *data, int go) { struct ad1816_chinfo *ch = data; struct ad1816_info *ad1816 = ch->parent; int wr, reg; if (!PCMTRIG_COMMON(go)) return 0; sndbuf_dma(ch->buffer, go); wr = (ch->dir == PCMDIR_PLAY); reg = wr? AD1816_PLAY : AD1816_CAPT; ad1816_lock(ad1816); switch (go) { case PCMTRIG_START: /* start only if not already running */ if (!(io_rd(ad1816, reg) & AD1816_ENABLE)) { int cnt = ((ch->blksz) >> 2) - 1; ad1816_write(ad1816, wr? 8 : 10, cnt); /* count */ ad1816_write(ad1816, wr? 
9 : 11, 0); /* reset cur cnt */ ad1816_write(ad1816, 1, ad1816_read(ad1816, 1) | (wr? 0x8000 : 0x4000)); /* enable int */ /* enable playback */ io_wr(ad1816, reg, io_rd(ad1816, reg) | AD1816_ENABLE); if (!(io_rd(ad1816, reg) & AD1816_ENABLE)) printf("ad1816: failed to start %s DMA!\n", wr? "play" : "rec"); } break; case PCMTRIG_STOP: case PCMTRIG_ABORT: /* XXX check this... */ /* we don't test here if it is running... */ if (wr) { ad1816_write(ad1816, 1, ad1816_read(ad1816, 1) & ~(wr? 0x8000 : 0x4000)); /* disable int */ io_wr(ad1816, reg, io_rd(ad1816, reg) & ~AD1816_ENABLE); /* disable playback */ if (io_rd(ad1816, reg) & AD1816_ENABLE) printf("ad1816: failed to stop %s DMA!\n", wr? "play" : "rec"); ad1816_write(ad1816, wr? 8 : 10, 0); /* reset base cnt */ ad1816_write(ad1816, wr? 9 : 11, 0); /* reset cur cnt */ } break; } ad1816_unlock(ad1816); return 0; } static u_int32_t ad1816chan_getptr(kobj_t obj, void *data) { struct ad1816_chinfo *ch = data; return sndbuf_dmaptr(ch->buffer); } static struct pcmchan_caps * ad1816chan_getcaps(kobj_t obj, void *data) { return &ad1816_caps; } static kobj_method_t ad1816chan_methods[] = { KOBJMETHOD(channel_init, ad1816chan_init), KOBJMETHOD(channel_setformat, ad1816chan_setformat), KOBJMETHOD(channel_setspeed, ad1816chan_setspeed), KOBJMETHOD(channel_setblocksize, ad1816chan_setblocksize), KOBJMETHOD(channel_trigger, ad1816chan_trigger), KOBJMETHOD(channel_getptr, ad1816chan_getptr), KOBJMETHOD(channel_getcaps, ad1816chan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(ad1816chan); /* -------------------------------------------------------------------- */ static void ad1816_release_resources(struct ad1816_info *ad1816, device_t dev) { if (ad1816->irq) { if (ad1816->ih) bus_teardown_intr(dev, ad1816->irq, ad1816->ih); bus_release_resource(dev, SYS_RES_IRQ, ad1816->irq_rid, ad1816->irq); - ad1816->irq = 0; + ad1816->irq = NULL; } if (ad1816->drq1) { isa_dma_release(rman_get_start(ad1816->drq1)); bus_release_resource(dev, 
SYS_RES_DRQ, ad1816->drq1_rid, ad1816->drq1); - ad1816->drq1 = 0; + ad1816->drq1 = NULL; } if (ad1816->drq2) { isa_dma_release(rman_get_start(ad1816->drq2)); bus_release_resource(dev, SYS_RES_DRQ, ad1816->drq2_rid, ad1816->drq2); - ad1816->drq2 = 0; + ad1816->drq2 = NULL; } if (ad1816->io_base) { bus_release_resource(dev, SYS_RES_IOPORT, ad1816->io_rid, ad1816->io_base); - ad1816->io_base = 0; + ad1816->io_base = NULL; } if (ad1816->parent_dmat) { bus_dma_tag_destroy(ad1816->parent_dmat); ad1816->parent_dmat = 0; } if (ad1816->lock) snd_mtxfree(ad1816->lock); free(ad1816, M_DEVBUF); } static int ad1816_alloc_resources(struct ad1816_info *ad1816, device_t dev) { int ok = 1, pdma, rdma; if (!ad1816->io_base) ad1816->io_base = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &ad1816->io_rid, RF_ACTIVE); if (!ad1816->irq) ad1816->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ad1816->irq_rid, RF_ACTIVE); if (!ad1816->drq1) ad1816->drq1 = bus_alloc_resource_any(dev, SYS_RES_DRQ, &ad1816->drq1_rid, RF_ACTIVE); if (!ad1816->drq2) ad1816->drq2 = bus_alloc_resource_any(dev, SYS_RES_DRQ, &ad1816->drq2_rid, RF_ACTIVE); if (!ad1816->io_base || !ad1816->drq1 || !ad1816->irq) ok = 0; if (ok) { pdma = rman_get_start(ad1816->drq1); isa_dma_acquire(pdma); isa_dmainit(pdma, ad1816->bufsize); if (ad1816->drq2) { rdma = rman_get_start(ad1816->drq2); isa_dma_acquire(rdma); isa_dmainit(rdma, ad1816->bufsize); } else rdma = pdma; if (pdma == rdma) pcm_setflags(dev, pcm_getflags(dev) | SD_F_SIMPLEX); } return ok; } static int ad1816_init(struct ad1816_info *ad1816, device_t dev) { ad1816_write(ad1816, 1, 0x2); /* disable interrupts */ ad1816_write(ad1816, 32, 0x90F0); /* SoundSys Mode, split fmt */ ad1816_write(ad1816, 5, 0x8080); /* FM volume mute */ ad1816_write(ad1816, 6, 0x8080); /* I2S1 volume mute */ ad1816_write(ad1816, 7, 0x8080); /* I2S0 volume mute */ ad1816_write(ad1816, 17, 0x8888); /* VID Volume mute */ ad1816_write(ad1816, 20, 0x5050); /* recsrc mic, agc off */ /* adc gain is 
set to 0 */ return 0; } static int ad1816_probe(device_t dev) { char *s = NULL; u_int32_t logical_id = isa_get_logicalid(dev); switch (logical_id) { case 0x80719304: /* ADS7180 */ s = "AD1816"; break; case 0x50719304: /* ADS7150 */ s = "AD1815"; break; } if (s) { device_set_desc(dev, s); return BUS_PROBE_DEFAULT; } return ENXIO; } static int ad1816_attach(device_t dev) { struct ad1816_info *ad1816; char status[SND_STATUSLEN], status2[SND_STATUSLEN]; ad1816 = malloc(sizeof(*ad1816), M_DEVBUF, M_WAITOK | M_ZERO); ad1816->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_ad1816 softc"); ad1816->io_rid = 2; ad1816->irq_rid = 0; ad1816->drq1_rid = 0; ad1816->drq2_rid = 1; ad1816->bufsize = pcm_getbuffersize(dev, 4096, DSP_BUFFSIZE, 65536); if (!ad1816_alloc_resources(ad1816, dev)) goto no; ad1816_init(ad1816, dev); if (mixer_init(dev, &ad1816mixer_class, ad1816)) goto no; snd_setup_intr(dev, ad1816->irq, 0, ad1816_intr, ad1816, &ad1816->ih); if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_24BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/ad1816->bufsize, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/ &Giant, &ad1816->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto no; } if (ad1816->drq2) snprintf(status2, SND_STATUSLEN, ":%jd", rman_get_start(ad1816->drq2)); else status2[0] = '\0'; snprintf(status, SND_STATUSLEN, "at io 0x%jx irq %jd drq %jd%s bufsz %u %s", rman_get_start(ad1816->io_base), rman_get_start(ad1816->irq), rman_get_start(ad1816->drq1), status2, ad1816->bufsize, PCM_KLDSTRING(snd_ad1816)); if (pcm_register(dev, ad1816, 1, 1)) goto no; pcm_addchan(dev, PCMDIR_REC, &ad1816chan_class, ad1816); pcm_addchan(dev, PCMDIR_PLAY, &ad1816chan_class, ad1816); pcm_setstatus(dev, status); return 0; no: ad1816_release_resources(ad1816, dev); return ENXIO; } static int ad1816_detach(device_t dev) { int 
r; struct ad1816_info *ad1816; r = pcm_unregister(dev); if (r) return r; ad1816 = pcm_getdevinfo(dev); ad1816_release_resources(ad1816, dev); return 0; } static device_method_t ad1816_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ad1816_probe), DEVMETHOD(device_attach, ad1816_attach), DEVMETHOD(device_detach, ad1816_detach), { 0, 0 } }; static driver_t ad1816_driver = { "pcm", ad1816_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_ad1816, isa, ad1816_driver, pcm_devclass, 0, 0); DRIVER_MODULE(snd_ad1816, acpi, ad1816_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_ad1816, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_ad1816, 1); Index: head/sys/dev/sound/isa/ess.c =================================================================== --- head/sys/dev/sound/isa/ess.c (revision 297861) +++ head/sys/dev/sound/isa/ess.c (revision 297862) @@ -1,1016 +1,1016 @@ /*- * Copyright (c) 1999 Cameron Grant * Copyright (c) 1997,1998 Luigi Rizzo * * Derived from files in the Voxware 3.5 distribution, * Copyright by Hannu Savolainen 1994, under the same copyright * conditions. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include "mixer_if.h" SND_DECLARE_FILE("$FreeBSD$"); #define ESS_BUFFSIZE (4096) #define ABS(x) (((x) < 0)? -(x) : (x)) /* audio2 never generates irqs and sounds very noisy */ #undef ESS18XX_DUPLEX /* more accurate clocks and split audio1/audio2 rates */ #define ESS18XX_NEWSPEED static u_int32_t ess_pfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S8, 1, 0), SND_FORMAT(AFMT_S8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_U16_LE, 1, 0), SND_FORMAT(AFMT_U16_LE, 2, 0), 0 }; static struct pcmchan_caps ess_playcaps = {6000, 48000, ess_pfmt, 0}; static u_int32_t ess_rfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S8, 1, 0), SND_FORMAT(AFMT_S8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_U16_LE, 1, 0), SND_FORMAT(AFMT_U16_LE, 2, 0), 0 }; static struct pcmchan_caps ess_reccaps = {6000, 48000, ess_rfmt, 0}; struct ess_info; struct ess_chinfo { struct ess_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; int dir, hwch, stopping, run; u_int32_t fmt, spd, blksz; }; struct ess_info { device_t parent_dev; struct resource *io_base; /* I/O address for the board */ struct resource *irq; struct resource *drq1; struct resource *drq2; void *ih; bus_dma_tag_t parent_dmat; unsigned int bufsize; int type; unsigned int 
duplex:1, newspeed:1; u_long bd_flags; /* board-specific flags */ struct ess_chinfo pch, rch; }; #if 0 static int ess_rd(struct ess_info *sc, int reg); static void ess_wr(struct ess_info *sc, int reg, u_int8_t val); static int ess_dspready(struct ess_info *sc); static int ess_cmd(struct ess_info *sc, u_char val); static int ess_cmd1(struct ess_info *sc, u_char cmd, int val); static int ess_get_byte(struct ess_info *sc); static void ess_setmixer(struct ess_info *sc, u_int port, u_int value); static int ess_getmixer(struct ess_info *sc, u_int port); static int ess_reset_dsp(struct ess_info *sc); static int ess_write(struct ess_info *sc, u_char reg, int val); static int ess_read(struct ess_info *sc, u_char reg); static void ess_intr(void *arg); static int ess_setupch(struct ess_info *sc, int ch, int dir, int spd, u_int32_t fmt, int len); static int ess_start(struct ess_chinfo *ch); static int ess_stop(struct ess_chinfo *ch); #endif /* * Common code for the midi and pcm functions * * ess_cmd write a single byte to the CMD port. * ess_cmd1 write a CMD + 1 byte arg * ess_cmd2 write a CMD + 2 byte arg * ess_get_byte returns a single byte from the DSP data port * * ess_write is actually ess_cmd1 * ess_read access ext. 
regs via ess_cmd(0xc0, reg) followed by ess_get_byte */ static void ess_lock(struct ess_info *sc) { sbc_lock(device_get_softc(sc->parent_dev)); } static void ess_unlock(struct ess_info *sc) { sbc_unlock(device_get_softc(sc->parent_dev)); } static int port_rd(struct resource *port, int off) { return bus_space_read_1(rman_get_bustag(port), rman_get_bushandle(port), off); } static void port_wr(struct resource *port, int off, u_int8_t data) { bus_space_write_1(rman_get_bustag(port), rman_get_bushandle(port), off, data); } static int ess_rd(struct ess_info *sc, int reg) { return port_rd(sc->io_base, reg); } static void ess_wr(struct ess_info *sc, int reg, u_int8_t val) { port_wr(sc->io_base, reg, val); } static int ess_dspready(struct ess_info *sc) { return ((ess_rd(sc, SBDSP_STATUS) & 0x80) == 0); } static int ess_dspwr(struct ess_info *sc, u_char val) { int i; for (i = 0; i < 1000; i++) { if (ess_dspready(sc)) { ess_wr(sc, SBDSP_CMD, val); return 1; } if (i > 10) DELAY((i > 100)? 1000 : 10); } printf("ess_dspwr(0x%02x) timed out.\n", val); return 0; } static int ess_cmd(struct ess_info *sc, u_char val) { #if 0 printf("ess_cmd: %x\n", val); #endif return ess_dspwr(sc, val); } static int ess_cmd1(struct ess_info *sc, u_char cmd, int val) { #if 0 printf("ess_cmd1: %x, %x\n", cmd, val); #endif if (ess_dspwr(sc, cmd)) { return ess_dspwr(sc, val & 0xff); } else return 0; } static void ess_setmixer(struct ess_info *sc, u_int port, u_int value) { DEB(printf("ess_setmixer: reg=%x, val=%x\n", port, value);) ess_wr(sc, SB_MIX_ADDR, (u_char) (port & 0xff)); /* Select register */ DELAY(10); ess_wr(sc, SB_MIX_DATA, (u_char) (value & 0xff)); DELAY(10); } static int ess_getmixer(struct ess_info *sc, u_int port) { int val; ess_wr(sc, SB_MIX_ADDR, (u_char) (port & 0xff)); /* Select register */ DELAY(10); val = ess_rd(sc, SB_MIX_DATA); DELAY(10); return val; } static int ess_get_byte(struct ess_info *sc) { int i; for (i = 1000; i > 0; i--) { if (ess_rd(sc, DSP_DATA_AVAIL) & 0x80) return 
ess_rd(sc, DSP_READ); else DELAY(20); } return -1; } static int ess_write(struct ess_info *sc, u_char reg, int val) { return ess_cmd1(sc, reg, val); } static int ess_read(struct ess_info *sc, u_char reg) { return (ess_cmd(sc, 0xc0) && ess_cmd(sc, reg))? ess_get_byte(sc) : -1; } static int ess_reset_dsp(struct ess_info *sc) { ess_wr(sc, SBDSP_RST, 3); DELAY(100); ess_wr(sc, SBDSP_RST, 0); if (ess_get_byte(sc) != 0xAA) { DEB(printf("ess_reset_dsp 0x%lx failed\n", rman_get_start(sc->io_base))); return ENXIO; /* Sorry */ } ess_cmd(sc, 0xc6); return 0; } static void ess_release_resources(struct ess_info *sc, device_t dev) { if (sc->irq) { if (sc->ih) bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); - sc->irq = 0; + sc->irq = NULL; } if (sc->drq1) { isa_dma_release(rman_get_start(sc->drq1)); bus_release_resource(dev, SYS_RES_DRQ, 0, sc->drq1); - sc->drq1 = 0; + sc->drq1 = NULL; } if (sc->drq2) { isa_dma_release(rman_get_start(sc->drq2)); bus_release_resource(dev, SYS_RES_DRQ, 1, sc->drq2); - sc->drq2 = 0; + sc->drq2 = NULL; } if (sc->io_base) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sc->io_base); - sc->io_base = 0; + sc->io_base = NULL; } if (sc->parent_dmat) { bus_dma_tag_destroy(sc->parent_dmat); sc->parent_dmat = 0; } free(sc, M_DEVBUF); } static int ess_alloc_resources(struct ess_info *sc, device_t dev) { int rid; rid = 0; if (!sc->io_base) sc->io_base = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); rid = 0; if (!sc->irq) sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); rid = 0; if (!sc->drq1) sc->drq1 = bus_alloc_resource_any(dev, SYS_RES_DRQ, &rid, RF_ACTIVE); rid = 1; if (!sc->drq2) sc->drq2 = bus_alloc_resource_any(dev, SYS_RES_DRQ, &rid, RF_ACTIVE); if (sc->io_base && sc->drq1 && sc->irq) { isa_dma_acquire(rman_get_start(sc->drq1)); isa_dmainit(rman_get_start(sc->drq1), sc->bufsize); if (sc->drq2) { isa_dma_acquire(rman_get_start(sc->drq2)); 
isa_dmainit(rman_get_start(sc->drq2), sc->bufsize); } return 0; } else return ENXIO; } static void ess_intr(void *arg) { struct ess_info *sc = (struct ess_info *)arg; int src, pirq, rirq; ess_lock(sc); src = 0; if (ess_getmixer(sc, 0x7a) & 0x80) src |= 2; if (ess_rd(sc, 0x0c) & 0x01) src |= 1; pirq = (src & sc->pch.hwch)? 1 : 0; rirq = (src & sc->rch.hwch)? 1 : 0; if (pirq) { if (sc->pch.run) { ess_unlock(sc); chn_intr(sc->pch.channel); ess_lock(sc); } if (sc->pch.stopping) { sc->pch.run = 0; sndbuf_dma(sc->pch.buffer, PCMTRIG_STOP); sc->pch.stopping = 0; if (sc->pch.hwch == 1) ess_write(sc, 0xb8, ess_read(sc, 0xb8) & ~0x01); else ess_setmixer(sc, 0x78, ess_getmixer(sc, 0x78) & ~0x03); } } if (rirq) { if (sc->rch.run) { ess_unlock(sc); chn_intr(sc->rch.channel); ess_lock(sc); } if (sc->rch.stopping) { sc->rch.run = 0; sndbuf_dma(sc->rch.buffer, PCMTRIG_STOP); sc->rch.stopping = 0; /* XXX: will this stop audio2? */ ess_write(sc, 0xb8, ess_read(sc, 0xb8) & ~0x01); } } if (src & 2) ess_setmixer(sc, 0x7a, ess_getmixer(sc, 0x7a) & ~0x80); if (src & 1) ess_rd(sc, DSP_DATA_AVAIL); ess_unlock(sc); } /* utility functions for ESS */ static u_int8_t ess_calcspeed8(int *spd) { int speed = *spd; u_int32_t t; if (speed > 22000) { t = (795500 + speed / 2) / speed; speed = (795500 + t / 2) / t; t = (256 - t) | 0x80; } else { t = (397700 + speed / 2) / speed; speed = (397700 + t / 2) / t; t = 128 - t; } *spd = speed; return t & 0x000000ff; } static u_int8_t ess_calcspeed9(int *spd) { int speed, s0, s1, use0; u_int8_t t0, t1; /* rate = source / (256 - divisor) */ /* divisor = 256 - (source / rate) */ speed = *spd; t0 = 128 - (793800 / speed); s0 = 793800 / (128 - t0); t1 = 128 - (768000 / speed); s1 = 768000 / (128 - t1); t1 |= 0x80; use0 = (ABS(speed - s0) < ABS(speed - s1))? 1 : 0; *spd = use0? s0 : s1; return use0? 
t0 : t1; } static u_int8_t ess_calcfilter(int spd) { int cutoff; /* cutoff = 7160000 / (256 - divisor) */ /* divisor = 256 - (7160000 / cutoff) */ cutoff = (spd * 9 * 82) / 20; return (256 - (7160000 / cutoff)); } static int ess_setupch(struct ess_info *sc, int ch, int dir, int spd, u_int32_t fmt, int len) { int play = (dir == PCMDIR_PLAY)? 1 : 0; int b16 = (fmt & AFMT_16BIT)? 1 : 0; int stereo = (AFMT_CHANNEL(fmt) > 1)? 1 : 0; int unsign = (fmt == AFMT_U8 || fmt == AFMT_U16_LE)? 1 : 0; u_int8_t spdval, fmtval; spdval = (sc->newspeed)? ess_calcspeed9(&spd) : ess_calcspeed8(&spd); len = -len; if (ch == 1) { KASSERT((dir == PCMDIR_PLAY) || (dir == PCMDIR_REC), ("ess_setupch: dir1 bad")); /* transfer length low */ ess_write(sc, 0xa4, len & 0x00ff); /* transfer length high */ ess_write(sc, 0xa5, (len & 0xff00) >> 8); /* autoinit, dma dir */ ess_write(sc, 0xb8, 0x04 | (play? 0x00 : 0x0a)); /* mono/stereo */ ess_write(sc, 0xa8, (ess_read(sc, 0xa8) & ~0x03) | (stereo? 0x01 : 0x02)); /* demand mode, 4 bytes/xfer */ ess_write(sc, 0xb9, 0x02); /* sample rate */ ess_write(sc, 0xa1, spdval); /* filter cutoff */ ess_write(sc, 0xa2, ess_calcfilter(spd)); /* setup dac/adc */ if (play) ess_write(sc, 0xb6, unsign? 0x80 : 0x00); /* mono, b16: signed, load signal */ ess_write(sc, 0xb7, 0x51 | (unsign? 0x00 : 0x20)); /* setup fifo */ ess_write(sc, 0xb7, 0x90 | (unsign? 0x00 : 0x20) | (b16? 0x04 : 0x00) | (stereo? 
0x08 : 0x40)); /* irq control */ ess_write(sc, 0xb1, (ess_read(sc, 0xb1) & 0x0f) | 0x50); /* drq control */ ess_write(sc, 0xb2, (ess_read(sc, 0xb2) & 0x0f) | 0x50); } else if (ch == 2) { KASSERT(dir == PCMDIR_PLAY, ("ess_setupch: dir2 bad")); /* transfer length low */ ess_setmixer(sc, 0x74, len & 0x00ff); /* transfer length high */ ess_setmixer(sc, 0x76, (len & 0xff00) >> 8); /* autoinit, 4 bytes/req */ ess_setmixer(sc, 0x78, 0x90); fmtval = b16 | (stereo << 1) | (unsign << 2); /* enable irq, set format */ ess_setmixer(sc, 0x7a, 0x40 | fmtval); if (sc->newspeed) { /* sample rate */ ess_setmixer(sc, 0x70, spdval); /* filter cutoff */ ess_setmixer(sc, 0x72, ess_calcfilter(spd)); } } return 0; } static int ess_start(struct ess_chinfo *ch) { struct ess_info *sc = ch->parent; int play = (ch->dir == PCMDIR_PLAY)? 1 : 0; ess_lock(sc); ess_setupch(sc, ch->hwch, ch->dir, ch->spd, ch->fmt, ch->blksz); ch->stopping = 0; if (ch->hwch == 1) ess_write(sc, 0xb8, ess_read(sc, 0xb8) | 0x01); else ess_setmixer(sc, 0x78, ess_getmixer(sc, 0x78) | 0x03); if (play) ess_cmd(sc, DSP_CMD_SPKON); ess_unlock(sc); return 0; } static int ess_stop(struct ess_chinfo *ch) { struct ess_info *sc = ch->parent; int play = (ch->dir == PCMDIR_PLAY)? 1 : 0; ess_lock(sc); ch->stopping = 1; if (ch->hwch == 1) ess_write(sc, 0xb8, ess_read(sc, 0xb8) & ~0x04); else ess_setmixer(sc, 0x78, ess_getmixer(sc, 0x78) & ~0x10); if (play) ess_cmd(sc, DSP_CMD_SPKOFF); ess_unlock(sc); return 0; } /* -------------------------------------------------------------------- */ /* channel interface for ESS18xx */ static void * esschan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct ess_info *sc = devinfo; struct ess_chinfo *ch = (dir == PCMDIR_PLAY)? 
&sc->pch : &sc->rch; ch->parent = sc; ch->channel = c; ch->buffer = b; if (sndbuf_alloc(ch->buffer, sc->parent_dmat, 0, sc->bufsize) != 0) return NULL; ch->dir = dir; ch->hwch = 1; if ((dir == PCMDIR_PLAY) && (sc->duplex)) ch->hwch = 2; sndbuf_dmasetup(ch->buffer, (ch->hwch == 1)? sc->drq1 : sc->drq2); return ch; } static int esschan_setformat(kobj_t obj, void *data, u_int32_t format) { struct ess_chinfo *ch = data; ch->fmt = format; return 0; } static u_int32_t esschan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct ess_chinfo *ch = data; struct ess_info *sc = ch->parent; ch->spd = speed; if (sc->newspeed) ess_calcspeed9(&ch->spd); else ess_calcspeed8(&ch->spd); return ch->spd; } static u_int32_t esschan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct ess_chinfo *ch = data; ch->blksz = blocksize; return ch->blksz; } static int esschan_trigger(kobj_t obj, void *data, int go) { struct ess_chinfo *ch = data; if (!PCMTRIG_COMMON(go)) return 0; switch (go) { case PCMTRIG_START: ch->run = 1; sndbuf_dma(ch->buffer, go); ess_start(ch); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: default: ess_stop(ch); break; } return 0; } static u_int32_t esschan_getptr(kobj_t obj, void *data) { struct ess_chinfo *ch = data; return sndbuf_dmaptr(ch->buffer); } static struct pcmchan_caps * esschan_getcaps(kobj_t obj, void *data) { struct ess_chinfo *ch = data; return (ch->dir == PCMDIR_PLAY)? 
&ess_playcaps : &ess_reccaps; } static kobj_method_t esschan_methods[] = { KOBJMETHOD(channel_init, esschan_init), KOBJMETHOD(channel_setformat, esschan_setformat), KOBJMETHOD(channel_setspeed, esschan_setspeed), KOBJMETHOD(channel_setblocksize, esschan_setblocksize), KOBJMETHOD(channel_trigger, esschan_trigger), KOBJMETHOD(channel_getptr, esschan_getptr), KOBJMETHOD(channel_getcaps, esschan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(esschan); /************************************************************/ static int essmix_init(struct snd_mixer *m) { struct ess_info *sc = mix_getdevinfo(m); mix_setrecdevs(m, SOUND_MASK_CD | SOUND_MASK_MIC | SOUND_MASK_LINE | SOUND_MASK_IMIX); mix_setdevs(m, SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD | SOUND_MASK_VOLUME | SOUND_MASK_LINE1 | SOUND_MASK_SPEAKER); ess_setmixer(sc, 0, 0); /* reset */ return 0; } static int essmix_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct ess_info *sc = mix_getdevinfo(m); int preg = 0, rreg = 0, l, r; l = (left * 15) / 100; r = (right * 15) / 100; switch (dev) { case SOUND_MIXER_SYNTH: preg = 0x36; rreg = 0x6b; break; case SOUND_MIXER_PCM: preg = 0x14; rreg = 0x7c; break; case SOUND_MIXER_LINE: preg = 0x3e; rreg = 0x6e; break; case SOUND_MIXER_MIC: preg = 0x1a; rreg = 0x68; break; case SOUND_MIXER_LINE1: preg = 0x3a; rreg = 0x6c; break; case SOUND_MIXER_CD: preg = 0x38; rreg = 0x6a; break; case SOUND_MIXER_SPEAKER: preg = 0x3c; break; case SOUND_MIXER_VOLUME: l = left? (left * 63) / 100 : 64; r = right? (right * 63) / 100 : 64; ess_setmixer(sc, 0x60, l); ess_setmixer(sc, 0x62, r); left = (l == 64)? 0 : (l * 100) / 63; right = (r == 64)? 
0 : (r * 100) / 63; return left | (right << 8); } if (preg) ess_setmixer(sc, preg, (l << 4) | r); if (rreg) ess_setmixer(sc, rreg, (l << 4) | r); left = (l * 100) / 15; right = (r * 100) / 15; return left | (right << 8); } static u_int32_t essmix_setrecsrc(struct snd_mixer *m, u_int32_t src) { struct ess_info *sc = mix_getdevinfo(m); u_char recdev; switch (src) { case SOUND_MASK_CD: recdev = 0x02; break; case SOUND_MASK_LINE: recdev = 0x06; break; case SOUND_MASK_IMIX: recdev = 0x05; break; case SOUND_MASK_MIC: default: recdev = 0x00; src = SOUND_MASK_MIC; break; } ess_setmixer(sc, 0x1c, recdev); return src; } static kobj_method_t essmixer_methods[] = { KOBJMETHOD(mixer_init, essmix_init), KOBJMETHOD(mixer_set, essmix_set), KOBJMETHOD(mixer_setrecsrc, essmix_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(essmixer); /************************************************************/ static int ess_probe(device_t dev) { uintptr_t func, ver, r, f; /* The parent device has already been probed. */ r = BUS_READ_IVAR(device_get_parent(dev), dev, 0, &func); if (func != SCF_PCM) return (ENXIO); r = BUS_READ_IVAR(device_get_parent(dev), dev, 1, &ver); f = (ver & 0xffff0000) >> 16; if (!(f & BD_F_ESS)) return (ENXIO); device_set_desc(dev, "ESS 18xx DSP"); return 0; } static int ess_attach(device_t dev) { struct ess_info *sc; char status[SND_STATUSLEN], buf[64]; int ver; sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO); sc->parent_dev = device_get_parent(dev); sc->bufsize = pcm_getbuffersize(dev, 4096, ESS_BUFFSIZE, 65536); if (ess_alloc_resources(sc, dev)) goto no; if (ess_reset_dsp(sc)) goto no; if (mixer_init(dev, &essmixer_class, sc)) goto no; sc->duplex = 0; sc->newspeed = 0; ver = (ess_getmixer(sc, 0x40) << 8) | ess_rd(sc, SB_MIX_DATA); snprintf(buf, sizeof buf, "ESS %x DSP", ver); device_set_desc_copy(dev, buf); if (bootverbose) device_printf(dev, "ESS%x detected", ver); switch (ver) { case 0x1869: case 0x1879: #ifdef ESS18XX_DUPLEX sc->duplex = sc->drq2? 
1 : 0; #endif #ifdef ESS18XX_NEWSPEED sc->newspeed = 1; #endif break; } if (bootverbose) printf("%s%s\n", sc->duplex? ", duplex" : "", sc->newspeed? ", newspeed" : ""); if (sc->newspeed) ess_setmixer(sc, 0x71, 0x22); snd_setup_intr(dev, sc->irq, 0, ess_intr, sc, &sc->ih); if (!sc->duplex) pcm_setflags(dev, pcm_getflags(dev) | SD_F_SIMPLEX); if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_24BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/sc->bufsize, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &sc->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto no; } if (sc->drq2) snprintf(buf, SND_STATUSLEN, ":%jd", rman_get_start(sc->drq2)); else buf[0] = '\0'; snprintf(status, SND_STATUSLEN, "at io 0x%jx irq %jd drq %jd%s bufsz %u %s", rman_get_start(sc->io_base), rman_get_start(sc->irq), rman_get_start(sc->drq1), buf, sc->bufsize, PCM_KLDSTRING(snd_ess)); if (pcm_register(dev, sc, 1, 1)) goto no; pcm_addchan(dev, PCMDIR_REC, &esschan_class, sc); pcm_addchan(dev, PCMDIR_PLAY, &esschan_class, sc); pcm_setstatus(dev, status); return 0; no: ess_release_resources(sc, dev); return ENXIO; } static int ess_detach(device_t dev) { int r; struct ess_info *sc; r = pcm_unregister(dev); if (r) return r; sc = pcm_getdevinfo(dev); ess_release_resources(sc, dev); return 0; } static int ess_resume(device_t dev) { struct ess_info *sc; sc = pcm_getdevinfo(dev); if (ess_reset_dsp(sc)) { device_printf(dev, "unable to reset DSP at resume\n"); return ENXIO; } if (mixer_reinit(dev)) { device_printf(dev, "unable to reinitialize mixer at resume\n"); return ENXIO; } return 0; } static device_method_t ess_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ess_probe), DEVMETHOD(device_attach, ess_attach), DEVMETHOD(device_detach, ess_detach), DEVMETHOD(device_resume, ess_resume), { 0, 0 } }; static driver_t ess_driver 
= { "pcm", ess_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_ess, sbc, ess_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_ess, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_DEPEND(snd_ess, snd_sbc, 1, 1, 1); MODULE_VERSION(snd_ess, 1); /************************************************************/ static devclass_t esscontrol_devclass; static struct isa_pnp_id essc_ids[] = { {0x06007316, "ESS Control"}, {0} }; static int esscontrol_probe(device_t dev) { int i; i = ISA_PNP_PROBE(device_get_parent(dev), dev, essc_ids); if (i == 0) device_quiet(dev); return i; } static int esscontrol_attach(device_t dev) { #ifdef notyet struct resource *io; int rid, i, x; rid = 0; io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); x = 0; for (i = 0; i < 0x100; i++) { port_wr(io, 0, i); x = port_rd(io, 1); if ((i & 0x0f) == 0) printf("%3.3x: ", i); printf("%2.2x ", x); if ((i & 0x0f) == 0x0f) printf("\n"); } bus_release_resource(dev, SYS_RES_IOPORT, 0, io); io = NULL; #endif return 0; } static int esscontrol_detach(device_t dev) { return 0; } static device_method_t esscontrol_methods[] = { /* Device interface */ DEVMETHOD(device_probe, esscontrol_probe), DEVMETHOD(device_attach, esscontrol_attach), DEVMETHOD(device_detach, esscontrol_detach), { 0, 0 } }; static driver_t esscontrol_driver = { "esscontrol", esscontrol_methods, 1, }; DRIVER_MODULE(esscontrol, isa, esscontrol_driver, esscontrol_devclass, 0, 0); DRIVER_MODULE(esscontrol, acpi, esscontrol_driver, esscontrol_devclass, 0, 0); Index: head/sys/dev/sound/isa/mss.c =================================================================== --- head/sys/dev/sound/isa/mss.c (revision 297861) +++ head/sys/dev/sound/isa/mss.c (revision 297862) @@ -1,2320 +1,2320 @@ /*- * Copyright (c) 2001 George Reid * Copyright (c) 1999 Cameron Grant * Copyright (c) 1997,1998 Luigi Rizzo * Copyright (c) 1994,1995 Hannu Savolainen * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include SND_DECLARE_FILE("$FreeBSD$"); /* board-specific include files */ #include #include #include #include #include "mixer_if.h" #define MSS_DEFAULT_BUFSZ (4096) #define MSS_INDEXED_REGS 0x20 #define OPL_INDEXED_REGS 0x19 struct mss_info; struct mss_chinfo { struct mss_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; int dir; u_int32_t fmt, blksz; }; struct mss_info { struct resource *io_base; /* primary I/O address for the board */ int io_rid; struct resource *conf_base; /* and the opti931 also has a config space */ int conf_rid; struct resource *irq; int irq_rid; struct resource *drq1; /* play */ int drq1_rid; struct resource *drq2; /* rec */ int drq2_rid; void *ih; bus_dma_tag_t parent_dmat; struct mtx *lock; char mss_indexed_regs[MSS_INDEXED_REGS]; char opl_indexed_regs[OPL_INDEXED_REGS]; int bd_id; /* used to hold board-id info, eg. sb version, * mss codec type, etc. etc. */ int opti_offset; /* offset from config_base for opti931 */ u_long bd_flags; /* board-specific flags */ int optibase; /* base address for OPTi9xx config */ struct resource *indir; /* Indirect register index address */ int indir_rid; int password; /* password for opti9xx cards */ int passwdreg; /* password register */ unsigned int bufsize; struct mss_chinfo pch, rch; }; static int mss_probe(device_t dev); static int mss_attach(device_t dev); static driver_intr_t mss_intr; /* prototypes for local functions */ static int mss_detect(device_t dev, struct mss_info *mss); #ifndef PC98 static int opti_detect(device_t dev, struct mss_info *mss); #endif static char *ymf_test(device_t dev, struct mss_info *mss); static void ad_unmute(struct mss_info *mss); /* mixer set funcs */ static int mss_mixer_set(struct mss_info *mss, int dev, int left, int right); static int mss_set_recsrc(struct mss_info *mss, int mask); /* io funcs */ static int ad_wait_init(struct mss_info *mss, int x); static int ad_read(struct mss_info 
*mss, int reg); static void ad_write(struct mss_info *mss, int reg, u_char data); static void ad_write_cnt(struct mss_info *mss, int reg, u_short data); static void ad_enter_MCE(struct mss_info *mss); static void ad_leave_MCE(struct mss_info *mss); /* OPTi-specific functions */ static void opti_write(struct mss_info *mss, u_char reg, u_char data); #ifndef PC98 static u_char opti_read(struct mss_info *mss, u_char reg); #endif static int opti_init(device_t dev, struct mss_info *mss); /* io primitives */ static void conf_wr(struct mss_info *mss, u_char reg, u_char data); static u_char conf_rd(struct mss_info *mss, u_char reg); static int pnpmss_probe(device_t dev); static int pnpmss_attach(device_t dev); static driver_intr_t opti931_intr; static u_int32_t mss_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_MU_LAW, 1, 0), SND_FORMAT(AFMT_MU_LAW, 2, 0), SND_FORMAT(AFMT_A_LAW, 1, 0), SND_FORMAT(AFMT_A_LAW, 2, 0), 0 }; static struct pcmchan_caps mss_caps = {4000, 48000, mss_fmt, 0}; static u_int32_t guspnp_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_A_LAW, 1, 0), SND_FORMAT(AFMT_A_LAW, 2, 0), 0 }; static struct pcmchan_caps guspnp_caps = {4000, 48000, guspnp_fmt, 0}; static u_int32_t opti931_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps opti931_caps = {4000, 48000, opti931_fmt, 0}; #define MD_AD1848 0x91 #define MD_AD1845 0x92 #define MD_CS42XX 0xA1 #define MD_CS423X 0xA2 #define MD_OPTI930 0xB0 #define MD_OPTI931 0xB1 #define MD_OPTI925 0xB2 #define MD_OPTI924 0xB3 #define MD_GUSPNP 0xB8 #define MD_GUSMAX 0xB9 #define MD_YM0020 0xC1 #define MD_VIVO 0xD1 #define DV_F_TRUE_MSS 0x00010000 /* mss _with_ base regs */ #define FULL_DUPLEX(x) ((x)->bd_flags & BD_F_DUPLEX) static 
void mss_lock(struct mss_info *mss) { snd_mtxlock(mss->lock); } static void mss_unlock(struct mss_info *mss) { snd_mtxunlock(mss->lock); } static int port_rd(struct resource *port, int off) { if (port) return bus_space_read_1(rman_get_bustag(port), rman_get_bushandle(port), off); else return -1; } static void port_wr(struct resource *port, int off, u_int8_t data) { if (port) bus_space_write_1(rman_get_bustag(port), rman_get_bushandle(port), off, data); } static int io_rd(struct mss_info *mss, int reg) { if (mss->bd_flags & BD_F_MSS_OFFSET) reg -= 4; return port_rd(mss->io_base, reg); } static void io_wr(struct mss_info *mss, int reg, u_int8_t data) { if (mss->bd_flags & BD_F_MSS_OFFSET) reg -= 4; port_wr(mss->io_base, reg, data); } static void conf_wr(struct mss_info *mss, u_char reg, u_char value) { port_wr(mss->conf_base, 0, reg); port_wr(mss->conf_base, 1, value); } static u_char conf_rd(struct mss_info *mss, u_char reg) { port_wr(mss->conf_base, 0, reg); return port_rd(mss->conf_base, 1); } static void opti_wr(struct mss_info *mss, u_char reg, u_char value) { port_wr(mss->conf_base, mss->opti_offset + 0, reg); port_wr(mss->conf_base, mss->opti_offset + 1, value); } static u_char opti_rd(struct mss_info *mss, u_char reg) { port_wr(mss->conf_base, mss->opti_offset + 0, reg); return port_rd(mss->conf_base, mss->opti_offset + 1); } static void gus_wr(struct mss_info *mss, u_char reg, u_char value) { port_wr(mss->conf_base, 3, reg); port_wr(mss->conf_base, 5, value); } static u_char gus_rd(struct mss_info *mss, u_char reg) { port_wr(mss->conf_base, 3, reg); return port_rd(mss->conf_base, 5); } static void mss_release_resources(struct mss_info *mss, device_t dev) { if (mss->irq) { if (mss->ih) bus_teardown_intr(dev, mss->irq, mss->ih); bus_release_resource(dev, SYS_RES_IRQ, mss->irq_rid, mss->irq); - mss->irq = 0; + mss->irq = NULL; } if (mss->drq2) { if (mss->drq2 != mss->drq1) { isa_dma_release(rman_get_start(mss->drq2)); bus_release_resource(dev, SYS_RES_DRQ, 
mss->drq2_rid, mss->drq2); } - mss->drq2 = 0; + mss->drq2 = NULL; } if (mss->drq1) { isa_dma_release(rman_get_start(mss->drq1)); bus_release_resource(dev, SYS_RES_DRQ, mss->drq1_rid, mss->drq1); - mss->drq1 = 0; + mss->drq1 = NULL; } if (mss->io_base) { bus_release_resource(dev, SYS_RES_IOPORT, mss->io_rid, mss->io_base); - mss->io_base = 0; + mss->io_base = NULL; } if (mss->conf_base) { bus_release_resource(dev, SYS_RES_IOPORT, mss->conf_rid, mss->conf_base); - mss->conf_base = 0; + mss->conf_base = NULL; } if (mss->indir) { bus_release_resource(dev, SYS_RES_IOPORT, mss->indir_rid, mss->indir); - mss->indir = 0; + mss->indir = NULL; } if (mss->parent_dmat) { bus_dma_tag_destroy(mss->parent_dmat); mss->parent_dmat = 0; } if (mss->lock) snd_mtxfree(mss->lock); free(mss, M_DEVBUF); } static int mss_alloc_resources(struct mss_info *mss, device_t dev) { int pdma, rdma, ok = 1; if (!mss->io_base) mss->io_base = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &mss->io_rid, RF_ACTIVE); if (!mss->irq) mss->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &mss->irq_rid, RF_ACTIVE); if (!mss->drq1) mss->drq1 = bus_alloc_resource_any(dev, SYS_RES_DRQ, &mss->drq1_rid, RF_ACTIVE); if (mss->conf_rid >= 0 && !mss->conf_base) mss->conf_base = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &mss->conf_rid, RF_ACTIVE); if (mss->drq2_rid >= 0 && !mss->drq2) mss->drq2 = bus_alloc_resource_any(dev, SYS_RES_DRQ, &mss->drq2_rid, RF_ACTIVE); if (!mss->io_base || !mss->drq1 || !mss->irq) ok = 0; if (mss->conf_rid >= 0 && !mss->conf_base) ok = 0; if (mss->drq2_rid >= 0 && !mss->drq2) ok = 0; if (ok) { pdma = rman_get_start(mss->drq1); isa_dma_acquire(pdma); isa_dmainit(pdma, mss->bufsize); mss->bd_flags &= ~BD_F_DUPLEX; if (mss->drq2) { rdma = rman_get_start(mss->drq2); isa_dma_acquire(rdma); isa_dmainit(rdma, mss->bufsize); mss->bd_flags |= BD_F_DUPLEX; } else mss->drq2 = mss->drq1; } return ok; } /* * The various mixers use a variety of bitmasks etc. 
The Voxware * driver had a very nice technique to describe a mixer and interface * to it. A table defines, for each channel, which register, bits, * offset, polarity to use. This procedure creates the new value * using the table and the old value. */ static void change_bits(mixer_tab *t, u_char *regval, int dev, int chn, int newval) { u_char mask; int shift; DEB(printf("ch_bits dev %d ch %d val %d old 0x%02x " "r %d p %d bit %d off %d\n", dev, chn, newval, *regval, (*t)[dev][chn].regno, (*t)[dev][chn].polarity, (*t)[dev][chn].nbits, (*t)[dev][chn].bitoffs ) ); if ( (*t)[dev][chn].polarity == 1) /* reverse */ newval = 100 - newval ; mask = (1 << (*t)[dev][chn].nbits) - 1; newval = (int) ((newval * mask) + 50) / 100; /* Scale it */ shift = (*t)[dev][chn].bitoffs /*- (*t)[dev][LEFT_CHN].nbits + 1*/; *regval &= ~(mask << shift); /* Filter out the previous value */ *regval |= (newval & mask) << shift; /* Set the new value */ } /* -------------------------------------------------------------------- */ /* only one source can be set... */ static int mss_set_recsrc(struct mss_info *mss, int mask) { u_char recdev; switch (mask) { case SOUND_MASK_LINE: case SOUND_MASK_LINE3: recdev = 0; break; case SOUND_MASK_CD: case SOUND_MASK_LINE1: recdev = 0x40; break; case SOUND_MASK_IMIX: recdev = 0xc0; break; case SOUND_MASK_MIC: default: mask = SOUND_MASK_MIC; recdev = 0x80; } ad_write(mss, 0, (ad_read(mss, 0) & 0x3f) | recdev); ad_write(mss, 1, (ad_read(mss, 1) & 0x3f) | recdev); return mask; } /* there are differences in the mixer depending on the actual sound card. 
*/ static int mss_mixer_set(struct mss_info *mss, int dev, int left, int right) { int regoffs; mixer_tab *mix_d; u_char old, val; switch (mss->bd_id) { case MD_OPTI931: mix_d = &opti931_devices; break; case MD_OPTI930: mix_d = &opti930_devices; break; default: mix_d = &mix_devices; } if ((*mix_d)[dev][LEFT_CHN].nbits == 0) { DEB(printf("nbits = 0 for dev %d\n", dev)); return -1; } if ((*mix_d)[dev][RIGHT_CHN].nbits == 0) right = left; /* mono */ /* Set the left channel */ regoffs = (*mix_d)[dev][LEFT_CHN].regno; old = val = ad_read(mss, regoffs); /* if volume is 0, mute chan. Otherwise, unmute. */ if (regoffs != 0) val = (left == 0)? old | 0x80 : old & 0x7f; change_bits(mix_d, &val, dev, LEFT_CHN, left); ad_write(mss, regoffs, val); DEB(printf("LEFT: dev %d reg %d old 0x%02x new 0x%02x\n", dev, regoffs, old, val)); if ((*mix_d)[dev][RIGHT_CHN].nbits != 0) { /* have stereo */ /* Set the right channel */ regoffs = (*mix_d)[dev][RIGHT_CHN].regno; old = val = ad_read(mss, regoffs); if (regoffs != 1) val = (right == 0)? old | 0x80 : old & 0x7f; change_bits(mix_d, &val, dev, RIGHT_CHN, right); ad_write(mss, regoffs, val); DEB(printf("RIGHT: dev %d reg %d old 0x%02x new 0x%02x\n", dev, regoffs, old, val)); } return 0; /* success */ } /* -------------------------------------------------------------------- */ static int mssmix_init(struct snd_mixer *m) { struct mss_info *mss = mix_getdevinfo(m); mix_setdevs(m, MODE2_MIXER_DEVICES); mix_setrecdevs(m, MSS_REC_DEVICES); switch(mss->bd_id) { case MD_OPTI930: mix_setdevs(m, OPTI930_MIXER_DEVICES); break; case MD_OPTI931: mix_setdevs(m, OPTI931_MIXER_DEVICES); mss_lock(mss); ad_write(mss, 20, 0x88); ad_write(mss, 21, 0x88); mss_unlock(mss); break; case MD_AD1848: mix_setdevs(m, MODE1_MIXER_DEVICES); break; case MD_GUSPNP: case MD_GUSMAX: /* this is only necessary in mode 3 ... 
*/ mss_lock(mss); ad_write(mss, 22, 0x88); ad_write(mss, 23, 0x88); mss_unlock(mss); break; } return 0; } static int mssmix_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct mss_info *mss = mix_getdevinfo(m); mss_lock(mss); mss_mixer_set(mss, dev, left, right); mss_unlock(mss); return left | (right << 8); } static u_int32_t mssmix_setrecsrc(struct snd_mixer *m, u_int32_t src) { struct mss_info *mss = mix_getdevinfo(m); mss_lock(mss); src = mss_set_recsrc(mss, src); mss_unlock(mss); return src; } static kobj_method_t mssmix_mixer_methods[] = { KOBJMETHOD(mixer_init, mssmix_init), KOBJMETHOD(mixer_set, mssmix_set), KOBJMETHOD(mixer_setrecsrc, mssmix_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(mssmix_mixer); /* -------------------------------------------------------------------- */ static int ymmix_init(struct snd_mixer *m) { struct mss_info *mss = mix_getdevinfo(m); mssmix_init(m); mix_setdevs(m, mix_getdevs(m) | SOUND_MASK_VOLUME | SOUND_MASK_MIC | SOUND_MASK_BASS | SOUND_MASK_TREBLE); /* Set master volume */ mss_lock(mss); conf_wr(mss, OPL3SAx_VOLUMEL, 7); conf_wr(mss, OPL3SAx_VOLUMER, 7); mss_unlock(mss); return 0; } static int ymmix_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct mss_info *mss = mix_getdevinfo(m); int t, l, r; mss_lock(mss); switch (dev) { case SOUND_MIXER_VOLUME: if (left) t = 15 - (left * 15) / 100; else t = 0x80; /* mute */ conf_wr(mss, OPL3SAx_VOLUMEL, t); if (right) t = 15 - (right * 15) / 100; else t = 0x80; /* mute */ conf_wr(mss, OPL3SAx_VOLUMER, t); break; case SOUND_MIXER_MIC: t = left; if (left) t = 31 - (left * 31) / 100; else t = 0x80; /* mute */ conf_wr(mss, OPL3SAx_MIC, t); break; case SOUND_MIXER_BASS: l = (left * 7) / 100; r = (right * 7) / 100; t = (r << 4) | l; conf_wr(mss, OPL3SAx_BASS, t); break; case SOUND_MIXER_TREBLE: l = (left * 7) / 100; r = (right * 7) / 100; t = (r << 4) | l; conf_wr(mss, OPL3SAx_TREBLE, t); break; default: mss_mixer_set(mss, dev, left, 
right); } mss_unlock(mss); return left | (right << 8); } static u_int32_t ymmix_setrecsrc(struct snd_mixer *m, u_int32_t src) { struct mss_info *mss = mix_getdevinfo(m); mss_lock(mss); src = mss_set_recsrc(mss, src); mss_unlock(mss); return src; } static kobj_method_t ymmix_mixer_methods[] = { KOBJMETHOD(mixer_init, ymmix_init), KOBJMETHOD(mixer_set, ymmix_set), KOBJMETHOD(mixer_setrecsrc, ymmix_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(ymmix_mixer); /* -------------------------------------------------------------------- */ /* * XXX This might be better off in the gusc driver. */ static void gusmax_setup(struct mss_info *mss, device_t dev, struct resource *alt) { static const unsigned char irq_bits[16] = { 0, 0, 0, 3, 0, 2, 0, 4, 0, 1, 0, 5, 6, 0, 0, 7 }; static const unsigned char dma_bits[8] = { 0, 1, 0, 2, 0, 3, 4, 5 }; device_t parent = device_get_parent(dev); unsigned char irqctl, dmactl; int s; s = splhigh(); port_wr(alt, 0x0f, 0x05); port_wr(alt, 0x00, 0x0c); port_wr(alt, 0x0b, 0x00); port_wr(alt, 0x0f, 0x00); irqctl = irq_bits[isa_get_irq(parent)]; /* Share the IRQ with the MIDI driver. */ irqctl |= 0x40; dmactl = dma_bits[isa_get_drq(parent)]; if (device_get_flags(parent) & DV_F_DUAL_DMA) dmactl |= dma_bits[device_get_flags(parent) & DV_F_DRQ_MASK] << 3; /* * Set the DMA and IRQ control latches. */ port_wr(alt, 0x00, 0x0c); port_wr(alt, 0x0b, dmactl | 0x80); port_wr(alt, 0x00, 0x4c); port_wr(alt, 0x0b, irqctl); port_wr(alt, 0x00, 0x0c); port_wr(alt, 0x0b, dmactl); port_wr(alt, 0x00, 0x4c); port_wr(alt, 0x0b, irqctl); port_wr(mss->conf_base, 2, 0); port_wr(alt, 0x00, 0x0c); port_wr(mss->conf_base, 2, 0); splx(s); } static int mss_init(struct mss_info *mss, device_t dev) { u_char r6, r9; struct resource *alt; int rid, tmp; mss->bd_flags |= BD_F_MCE_BIT; switch(mss->bd_id) { case MD_OPTI931: /* * The MED3931 v.1.0 allocates 3 bytes for the config * space, whereas v.2.0 allocates 4 bytes. 
What I know * for sure is that the upper two ports must be used, * and they should end on a boundary of 4 bytes. So I * need the following trick. */ mss->opti_offset = (rman_get_start(mss->conf_base) & ~3) + 2 - rman_get_start(mss->conf_base); BVDDB(printf("mss_init: opti_offset=%d\n", mss->opti_offset)); opti_wr(mss, 4, 0xd6); /* fifo empty, OPL3, audio enable, SB3.2 */ ad_write(mss, 10, 2); /* enable interrupts */ opti_wr(mss, 6, 2); /* MCIR6: mss enable, sb disable */ opti_wr(mss, 5, 0x28); /* MCIR5: codec in exp. mode,fifo */ break; case MD_GUSPNP: case MD_GUSMAX: gus_wr(mss, 0x4c /* _URSTI */, 0);/* Pull reset */ DELAY(1000 * 30); /* release reset and enable DAC */ gus_wr(mss, 0x4c /* _URSTI */, 3); DELAY(1000 * 30); /* end of reset */ rid = 0; alt = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (alt == NULL) { printf("XXX couldn't init GUS PnP/MAX\n"); break; } port_wr(alt, 0, 0xC); /* enable int and dma */ if (mss->bd_id == MD_GUSMAX) gusmax_setup(mss, dev, alt); bus_release_resource(dev, SYS_RES_IOPORT, rid, alt); /* * unmute left & right line. Need to go in mode3, unmute, * and back to mode 2 */ tmp = ad_read(mss, 0x0c); ad_write(mss, 0x0c, 0x6c); /* special value to enter mode 3 */ ad_write(mss, 0x19, 0); /* unmute left */ ad_write(mss, 0x1b, 0); /* unmute right */ ad_write(mss, 0x0c, tmp); /* restore old mode */ /* send codec interrupts on irq1 and only use that one */ gus_wr(mss, 0x5a, 0x4f); /* enable access to hidden regs */ tmp = gus_rd(mss, 0x5b /* IVERI */); gus_wr(mss, 0x5b, tmp | 1); BVDDB(printf("GUS: silicon rev %c\n", 'A' + ((tmp & 0xf) >> 4))); break; case MD_YM0020: conf_wr(mss, OPL3SAx_DMACONF, 0xa9); /* dma-b rec, dma-a play */ r6 = conf_rd(mss, OPL3SAx_DMACONF); r9 = conf_rd(mss, OPL3SAx_MISC); /* version */ BVDDB(printf("Yamaha: ver 0x%x DMA config 0x%x\n", r6, r9);) /* yamaha - set volume to max */ conf_wr(mss, OPL3SAx_VOLUMEL, 0); conf_wr(mss, OPL3SAx_VOLUMER, 0); conf_wr(mss, OPL3SAx_DMACONF, FULL_DUPLEX(mss)? 
0xa9 : 0x8b); break; } if (FULL_DUPLEX(mss) && mss->bd_id != MD_OPTI931) ad_write(mss, 12, ad_read(mss, 12) | 0x40); /* mode 2 */ ad_enter_MCE(mss); ad_write(mss, 9, FULL_DUPLEX(mss)? 0 : 4); ad_leave_MCE(mss); ad_write(mss, 10, 2); /* int enable */ io_wr(mss, MSS_STATUS, 0); /* Clear interrupt status */ /* the following seem required on the CS4232 */ ad_unmute(mss); return 0; } /* * main irq handler for the CS423x. The OPTi931 code is * a separate one. * The correct way to operate for a device with multiple internal * interrupt sources is to loop on the status register and ack * interrupts until all interrupts are served and none are reported. At * this point the IRQ line to the ISA IRQ controller should go low * and be raised at the next interrupt. * * Since the ISA IRQ controller is sent EOI _before_ passing control * to the isr, it might happen that we serve an interrupt early, in * which case the status register at the next interrupt should just * say that there are no more interrupts... */ static void mss_intr(void *arg) { struct mss_info *mss = arg; u_char c = 0, served = 0; int i; DEB(printf("mss_intr\n")); mss_lock(mss); ad_read(mss, 11); /* fake read of status bits */ /* loop until there are interrupts, but no more than 10 times. */ for (i = 10; i > 0 && io_rd(mss, MSS_STATUS) & 1; i--) { /* get exact reason for full-duplex boards */ c = FULL_DUPLEX(mss)? 
ad_read(mss, 24) : 0x30; c &= ~served; if (sndbuf_runsz(mss->pch.buffer) && (c & 0x10)) { served |= 0x10; mss_unlock(mss); chn_intr(mss->pch.channel); mss_lock(mss); } if (sndbuf_runsz(mss->rch.buffer) && (c & 0x20)) { served |= 0x20; mss_unlock(mss); chn_intr(mss->rch.channel); mss_lock(mss); } /* now ack the interrupt */ if (FULL_DUPLEX(mss)) ad_write(mss, 24, ~c); /* ack selectively */ else io_wr(mss, MSS_STATUS, 0); /* Clear interrupt status */ } if (i == 10) { BVDDB(printf("mss_intr: irq, but not from mss\n")); } else if (served == 0) { BVDDB(printf("mss_intr: unexpected irq with reason %x\n", c)); /* * this should not happen... I have no idea what to do now. * maybe should do a sanity check and restart dmas ? */ io_wr(mss, MSS_STATUS, 0); /* Clear interrupt status */ } mss_unlock(mss); } /* * AD_WAIT_INIT waits if we are initializing the board and * we cannot modify its settings */ static int ad_wait_init(struct mss_info *mss, int x) { int arg = x, n = 0; /* to shut up the compiler... */ for (; x > 0; x--) if ((n = io_rd(mss, MSS_INDEX)) & MSS_IDXBUSY) DELAY(10); else return n; printf("AD_WAIT_INIT FAILED %d 0x%02x\n", arg, n); return n; } static int ad_read(struct mss_info *mss, int reg) { int x; ad_wait_init(mss, 201000); x = io_rd(mss, MSS_INDEX) & ~MSS_IDXMASK; io_wr(mss, MSS_INDEX, (u_char)(reg & MSS_IDXMASK) | x); x = io_rd(mss, MSS_IDATA); /* printf("ad_read %d, %x\n", reg, x); */ return x; } static void ad_write(struct mss_info *mss, int reg, u_char data) { int x; /* printf("ad_write %d, %x\n", reg, data); */ ad_wait_init(mss, 1002000); x = io_rd(mss, MSS_INDEX) & ~MSS_IDXMASK; io_wr(mss, MSS_INDEX, (u_char)(reg & MSS_IDXMASK) | x); io_wr(mss, MSS_IDATA, data); } static void ad_write_cnt(struct mss_info *mss, int reg, u_short cnt) { ad_write(mss, reg+1, cnt & 0xff); ad_write(mss, reg, cnt >> 8); /* upper base must be last */ } static void wait_for_calibration(struct mss_info *mss) { int t; /* * Wait until the auto calibration process has finished. 
* * 1) Wait until the chip becomes ready (reads don't return 0x80). * 2) Wait until the ACI bit of I11 gets on * 3) Wait until the ACI bit of I11 gets off */ t = ad_wait_init(mss, 1000000); if (t & MSS_IDXBUSY) printf("mss: Auto calibration timed out(1).\n"); /* * The calibration mode for chips that support it is set so that * we never see ACI go on. */ if (mss->bd_id == MD_GUSMAX || mss->bd_id == MD_GUSPNP) { for (t = 100; t > 0 && (ad_read(mss, 11) & 0x20) == 0; t--); } else { /* * XXX This should only be enabled for cards that *really* * need it. Are there any? */ for (t = 100; t > 0 && (ad_read(mss, 11) & 0x20) == 0; t--) DELAY(100); } for (t = 100; t > 0 && ad_read(mss, 11) & 0x20; t--) DELAY(100); } static void ad_unmute(struct mss_info *mss) { ad_write(mss, 6, ad_read(mss, 6) & ~I6_MUTE); ad_write(mss, 7, ad_read(mss, 7) & ~I6_MUTE); } static void ad_enter_MCE(struct mss_info *mss) { int prev; mss->bd_flags |= BD_F_MCE_BIT; ad_wait_init(mss, 203000); prev = io_rd(mss, MSS_INDEX); prev &= ~MSS_TRD; io_wr(mss, MSS_INDEX, prev | MSS_MCE); } static void ad_leave_MCE(struct mss_info *mss) { u_char prev; if ((mss->bd_flags & BD_F_MCE_BIT) == 0) { DEB(printf("--- hey, leave_MCE: MCE bit was not set!\n")); return; } ad_wait_init(mss, 1000000); mss->bd_flags &= ~BD_F_MCE_BIT; prev = io_rd(mss, MSS_INDEX); prev &= ~MSS_TRD; io_wr(mss, MSS_INDEX, prev & ~MSS_MCE); /* Clear the MCE bit */ wait_for_calibration(mss); } static int mss_speed(struct mss_chinfo *ch, int speed) { struct mss_info *mss = ch->parent; /* * In the CS4231, the low 4 bits of I8 are used to hold the * sample rate. Only a fixed number of values is allowed. This * table lists them. The speed-setting routines scans the table * looking for the closest match. This is the only supported method. * * In the CS4236, there is an alternate metod (which we do not * support yet) which provides almost arbitrary frequency setting. 
* In the AD1845, it looks like the sample rate can be * almost arbitrary, and written directly to a register. * In the OPTi931, there is a SB command which provides for * almost arbitrary frequency setting. * */ ad_enter_MCE(mss); if (mss->bd_id == MD_AD1845) { /* Use alternate speed select regs */ ad_write(mss, 22, (speed >> 8) & 0xff); /* Speed MSB */ ad_write(mss, 23, speed & 0xff); /* Speed LSB */ /* XXX must also do something in I27 for the ad1845 */ } else { int i, sel = 0; /* assume entry 0 does not contain -1 */ static int speeds[] = {8000, 5512, 16000, 11025, 27429, 18900, 32000, 22050, -1, 37800, -1, 44100, 48000, 33075, 9600, 6615}; for (i = 1; i < 16; i++) if (speeds[i] > 0 && abs(speed-speeds[i]) < abs(speed-speeds[sel])) sel = i; speed = speeds[sel]; ad_write(mss, 8, (ad_read(mss, 8) & 0xf0) | sel); ad_wait_init(mss, 10000); } ad_leave_MCE(mss); return speed; } /* * mss_format checks that the format is supported (or defaults to AFMT_U8) * and returns the bit setting for the 1848 register corresponding to * the desired format. * * fixed lr970724 */ static int mss_format(struct mss_chinfo *ch, u_int32_t format) { struct mss_info *mss = ch->parent; int i, arg = AFMT_ENCODING(format); /* * The data format uses 3 bits (just 2 on the 1848). For each * bit setting, the following array returns the corresponding format. * The code scans the array looking for a suitable format. In * case it is not found, default to AFMT_U8 (not such a good * choice, but let's do it for compatibility...). */ static int fmts[] = {AFMT_U8, AFMT_MU_LAW, AFMT_S16_LE, AFMT_A_LAW, -1, AFMT_IMA_ADPCM, AFMT_U16_BE, -1}; ch->fmt = format; for (i = 0; i < 8; i++) if (arg == fmts[i]) break; arg = i << 1; if (AFMT_CHANNEL(format) > 1) arg |= 1; arg <<= 4; ad_enter_MCE(mss); ad_write(mss, 8, (ad_read(mss, 8) & 0x0f) | arg); ad_wait_init(mss, 10000); if (ad_read(mss, 12) & 0x40) { /* mode2? 
*/ ad_write(mss, 28, arg); /* capture mode */ ad_wait_init(mss, 10000); } ad_leave_MCE(mss); return format; } static int mss_trigger(struct mss_chinfo *ch, int go) { struct mss_info *mss = ch->parent; u_char m; int retry, wr, cnt, ss; ss = 1; ss <<= (AFMT_CHANNEL(ch->fmt) > 1)? 1 : 0; ss <<= (ch->fmt & AFMT_16BIT)? 1 : 0; wr = (ch->dir == PCMDIR_PLAY)? 1 : 0; m = ad_read(mss, 9); switch (go) { case PCMTRIG_START: cnt = (ch->blksz / ss) - 1; DEB(if (m & 4) printf("OUCH! reg 9 0x%02x\n", m);); m |= wr? I9_PEN : I9_CEN; /* enable DMA */ ad_write_cnt(mss, (wr || !FULL_DUPLEX(mss))? 14 : 30, cnt); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: /* XXX check this... */ m &= ~(wr? I9_PEN : I9_CEN); /* Stop DMA */ #if 0 /* * try to disable DMA by clearing count registers. Not sure it * is needed, and it might cause false interrupts when the * DMA is re-enabled later. */ ad_write_cnt(mss, (wr || !FULL_DUPLEX(mss))? 14 : 30, 0); #endif } /* on the OPTi931 the enable bit seems hard to set... */ for (retry = 10; retry > 0; retry--) { ad_write(mss, 9, m); if (ad_read(mss, 9) == m) break; } if (retry == 0) BVDDB(printf("stop dma, failed to set bit 0x%02x 0x%02x\n", \ m, ad_read(mss, 9))); return 0; } /* * the opti931 seems to miss interrupts when working in full * duplex, so we try some heuristics to catch them. */ static void opti931_intr(void *arg) { struct mss_info *mss = (struct mss_info *)arg; u_char masked = 0, i11, mc11, c = 0; u_char reason; /* b0 = playback, b1 = capture, b2 = timer */ int loops = 10; #if 0 reason = io_rd(mss, MSS_STATUS); if (!(reason & 1)) {/* no int, maybe a shared line ? */ DEB(printf("intr: flag 0, mcir11 0x%02x\n", ad_read(mss, 11))); return; } #endif mss_lock(mss); i11 = ad_read(mss, 11); /* XXX what's for ? */ again: c = mc11 = FULL_DUPLEX(mss)? 
opti_rd(mss, 11) : 0xc; mc11 &= 0x0c; if (c & 0x10) { DEB(printf("Warning: CD interrupt\n");) mc11 |= 0x10; } if (c & 0x20) { DEB(printf("Warning: MPU interrupt\n");) mc11 |= 0x20; } if (mc11 & masked) BVDDB(printf("irq reset failed, mc11 0x%02x, 0x%02x\n",\ mc11, masked)); masked |= mc11; /* * the nice OPTi931 sets the IRQ line before setting the bits in * mc11. So, on some occasions I have to retry (max 10 times). */ if (mc11 == 0) { /* perhaps can return ... */ reason = io_rd(mss, MSS_STATUS); if (reason & 1) { DEB(printf("one more try...\n");) if (--loops) goto again; else BVDDB(printf("intr, but mc11 not set\n");) } if (loops == 0) BVDDB(printf("intr, nothing in mcir11 0x%02x\n", mc11)); mss_unlock(mss); return; } if (sndbuf_runsz(mss->rch.buffer) && (mc11 & 8)) { mss_unlock(mss); chn_intr(mss->rch.channel); mss_lock(mss); } if (sndbuf_runsz(mss->pch.buffer) && (mc11 & 4)) { mss_unlock(mss); chn_intr(mss->pch.channel); mss_lock(mss); } opti_wr(mss, 11, ~mc11); /* ack */ if (--loops) goto again; mss_unlock(mss); DEB(printf("xxx too many loops\n");) } /* -------------------------------------------------------------------- */ /* channel interface */ static void * msschan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct mss_info *mss = devinfo; struct mss_chinfo *ch = (dir == PCMDIR_PLAY)? &mss->pch : &mss->rch; ch->parent = mss; ch->channel = c; ch->buffer = b; ch->dir = dir; if (sndbuf_alloc(ch->buffer, mss->parent_dmat, 0, mss->bufsize) != 0) return NULL; sndbuf_dmasetup(ch->buffer, (dir == PCMDIR_PLAY)? 
mss->drq1 : mss->drq2); return ch; } static int msschan_setformat(kobj_t obj, void *data, u_int32_t format) { struct mss_chinfo *ch = data; struct mss_info *mss = ch->parent; mss_lock(mss); mss_format(ch, format); mss_unlock(mss); return 0; } static u_int32_t msschan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct mss_chinfo *ch = data; struct mss_info *mss = ch->parent; u_int32_t r; mss_lock(mss); r = mss_speed(ch, speed); mss_unlock(mss); return r; } static u_int32_t msschan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct mss_chinfo *ch = data; ch->blksz = blocksize; sndbuf_resize(ch->buffer, 2, ch->blksz); return ch->blksz; } static int msschan_trigger(kobj_t obj, void *data, int go) { struct mss_chinfo *ch = data; struct mss_info *mss = ch->parent; if (!PCMTRIG_COMMON(go)) return 0; sndbuf_dma(ch->buffer, go); mss_lock(mss); mss_trigger(ch, go); mss_unlock(mss); return 0; } static u_int32_t msschan_getptr(kobj_t obj, void *data) { struct mss_chinfo *ch = data; return sndbuf_dmaptr(ch->buffer); } static struct pcmchan_caps * msschan_getcaps(kobj_t obj, void *data) { struct mss_chinfo *ch = data; switch(ch->parent->bd_id) { case MD_OPTI931: return &opti931_caps; break; case MD_GUSPNP: case MD_GUSMAX: return &guspnp_caps; break; default: return &mss_caps; break; } } static kobj_method_t msschan_methods[] = { KOBJMETHOD(channel_init, msschan_init), KOBJMETHOD(channel_setformat, msschan_setformat), KOBJMETHOD(channel_setspeed, msschan_setspeed), KOBJMETHOD(channel_setblocksize, msschan_setblocksize), KOBJMETHOD(channel_trigger, msschan_trigger), KOBJMETHOD(channel_getptr, msschan_getptr), KOBJMETHOD(channel_getcaps, msschan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(msschan); /* -------------------------------------------------------------------- */ /* * mss_probe() is the probe routine. Note, it is not necessary to * go through this for PnP devices, since they are already * indentified precisely using their PnP id. 
* * The base address supplied in the device refers to the old MSS * specs where the four 4 registers in io space contain configuration * information. Some boards (as an example, early MSS boards) * has such a block of registers, whereas others (generally CS42xx) * do not. In order to distinguish between the two and do not have * to supply two separate probe routines, the flags entry in isa_device * has a bit to mark this. * */ static int mss_probe(device_t dev) { u_char tmp, tmpx; int flags, irq, drq, result = ENXIO, setres = 0; struct mss_info *mss; if (isa_get_logicalid(dev)) return ENXIO; /* not yet */ mss = (struct mss_info *)malloc(sizeof *mss, M_DEVBUF, M_NOWAIT | M_ZERO); if (!mss) return ENXIO; mss->io_rid = 0; mss->conf_rid = -1; mss->irq_rid = 0; mss->drq1_rid = 0; mss->drq2_rid = -1; mss->io_base = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &mss->io_rid, 8, RF_ACTIVE); if (!mss->io_base) { BVDDB(printf("mss_probe: no address given, try 0x%x\n", 0x530)); mss->io_rid = 0; /* XXX verify this */ setres = 1; bus_set_resource(dev, SYS_RES_IOPORT, mss->io_rid, 0x530, 8); mss->io_base = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &mss->io_rid, 8, RF_ACTIVE); } if (!mss->io_base) goto no; /* got irq/dma regs? */ flags = device_get_flags(dev); irq = isa_get_irq(dev); drq = isa_get_drq(dev); if (!(device_get_flags(dev) & DV_F_TRUE_MSS)) goto mss_probe_end; /* * Check if the IO port returns valid signature. The original MS * Sound system returns 0x04 while some cards * (AudioTriX Pro for example) return 0x00 or 0x0f. 
*/ device_set_desc(dev, "MSS"); tmpx = tmp = io_rd(mss, 3); if (tmp == 0xff) { /* Bus float */ BVDDB(printf("I/O addr inactive (%x), try pseudo_mss\n", tmp)); device_set_flags(dev, flags & ~DV_F_TRUE_MSS); goto mss_probe_end; } tmp &= 0x3f; if (!(tmp == 0x04 || tmp == 0x0f || tmp == 0x00 || tmp == 0x05)) { BVDDB(printf("No MSS signature detected on port 0x%jx (0x%x)\n", rman_get_start(mss->io_base), tmpx)); goto no; } #ifdef PC98 if (irq > 12) { #else if (irq > 11) { #endif printf("MSS: Bad IRQ %d\n", irq); goto no; } if (!(drq == 0 || drq == 1 || drq == 3)) { printf("MSS: Bad DMA %d\n", drq); goto no; } if (tmpx & 0x80) { /* 8-bit board: only drq1/3 and irq7/9 */ if (drq == 0) { printf("MSS: Can't use DMA0 with a 8 bit card/slot\n"); goto no; } if (!(irq == 7 || irq == 9)) { printf("MSS: Can't use IRQ%d with a 8 bit card/slot\n", irq); goto no; } } mss_probe_end: result = mss_detect(dev, mss); no: mss_release_resources(mss, dev); #if 0 if (setres) ISA_DELETE_RESOURCE(device_get_parent(dev), dev, SYS_RES_IOPORT, mss->io_rid); /* XXX ? */ #endif return result; } static int mss_detect(device_t dev, struct mss_info *mss) { int i; u_char tmp = 0, tmp1, tmp2; char *name, *yamaha; if (mss->bd_id != 0) { device_printf(dev, "presel bd_id 0x%04x -- %s\n", mss->bd_id, device_get_desc(dev)); return 0; } name = "AD1848"; mss->bd_id = MD_AD1848; /* AD1848 or CS4248 */ #ifndef PC98 if (opti_detect(dev, mss)) { switch (mss->bd_id) { case MD_OPTI924: name = "OPTi924"; break; case MD_OPTI930: name = "OPTi930"; break; } printf("Found OPTi device %s\n", name); if (opti_init(dev, mss) == 0) goto gotit; } #endif /* * Check that the I/O address is in use. * * bit 7 of the base I/O port is known to be 0 after the chip has * performed its power on initialization. Just assume this has * happened before the OS is starting. * * If the I/O address is unused, it typically returns 0xff. 
*/ for (i = 0; i < 10; i++) if ((tmp = io_rd(mss, MSS_INDEX)) & MSS_IDXBUSY) DELAY(10000); else break; if (i >= 10) { /* Not an AD1848 */ BVDDB(printf("mss_detect, busy still set (0x%02x)\n", tmp)); goto no; } /* * Test if it's possible to change contents of the indirect * registers. Registers 0 and 1 are ADC volume registers. The bit * 0x10 is read only so try to avoid using it. */ ad_write(mss, 0, 0xaa); ad_write(mss, 1, 0x45);/* 0x55 with bit 0x10 clear */ tmp1 = ad_read(mss, 0); tmp2 = ad_read(mss, 1); if (tmp1 != 0xaa || tmp2 != 0x45) { BVDDB(printf("mss_detect error - IREG (%x/%x)\n", tmp1, tmp2)); goto no; } ad_write(mss, 0, 0x45); ad_write(mss, 1, 0xaa); tmp1 = ad_read(mss, 0); tmp2 = ad_read(mss, 1); if (tmp1 != 0x45 || tmp2 != 0xaa) { BVDDB(printf("mss_detect error - IREG2 (%x/%x)\n", tmp1, tmp2)); goto no; } /* * The indirect register I12 has some read only bits. Lets try to * change them. */ tmp = ad_read(mss, 12); ad_write(mss, 12, (~tmp) & 0x0f); tmp1 = ad_read(mss, 12); if ((tmp & 0x0f) != (tmp1 & 0x0f)) { BVDDB(printf("mss_detect - I12 (0x%02x was 0x%02x)\n", tmp1, tmp)); goto no; } /* * NOTE! Last 4 bits of the reg I12 tell the chip revision. * 0x01=RevB * 0x0A=RevC. also CS4231/CS4231A and OPTi931 */ BVDDB(printf("mss_detect - chip revision 0x%02x\n", tmp & 0x0f);) /* * The original AD1848/CS4248 has just 16 indirect registers. This * means that I0 and I16 should return the same value (etc.). Ensure * that the Mode2 enable bit of I12 is 0. Otherwise this test fails * with new parts. */ ad_write(mss, 12, 0); /* Mode2=disabled */ #if 0 for (i = 0; i < 16; i++) { if ((tmp1 = ad_read(mss, i)) != (tmp2 = ad_read(mss, i + 16))) { BVDDB(printf("mss_detect warning - I%d: 0x%02x/0x%02x\n", i, tmp1, tmp2)); /* * note - this seems to fail on the 4232 on I11. So we just break * rather than fail. (which makes this test pointless - cg) */ break; /* return 0; */ } } #endif /* * Try to switch the chip to mode2 (CS4231) by setting the MODE2 bit * (0x40). 
The bit 0x80 is always 1 in CS4248 and CS4231. * * On the OPTi931, however, I12 is readonly and only contains the * chip revision ID (as in the CS4231A). The upper bits return 0. */ ad_write(mss, 12, 0x40); /* Set mode2, clear 0x80 */ tmp1 = ad_read(mss, 12); if (tmp1 & 0x80) name = "CS4248"; /* Our best knowledge just now */ if ((tmp1 & 0xf0) == 0x00) { BVDDB(printf("this should be an OPTi931\n");) } else if ((tmp1 & 0xc0) != 0xC0) goto gotit; /* * The 4231 has bit7=1 always, and bit6 we just set to 1. * We want to check that this is really a CS4231 * Verify that setting I0 doesn't change I16. */ ad_write(mss, 16, 0); /* Set I16 to known value */ ad_write(mss, 0, 0x45); if ((tmp1 = ad_read(mss, 16)) == 0x45) goto gotit; ad_write(mss, 0, 0xaa); if ((tmp1 = ad_read(mss, 16)) == 0xaa) { /* Rotten bits? */ BVDDB(printf("mss_detect error - step H(%x)\n", tmp1)); goto no; } /* Verify that some bits of I25 are read only. */ tmp1 = ad_read(mss, 25); /* Original bits */ ad_write(mss, 25, ~tmp1); /* Invert all bits */ if ((ad_read(mss, 25) & 0xe7) == (tmp1 & 0xe7)) { int id; /* It's at least CS4231 */ name = "CS4231"; mss->bd_id = MD_CS42XX; /* * It could be an AD1845 or CS4231A as well. * CS4231 and AD1845 report the same revision info in I25 * while the CS4231A reports different. */ id = ad_read(mss, 25) & 0xe7; /* * b7-b5 = version number; * 100 : all CS4231 * 101 : CS4231A * * b2-b0 = chip id; */ switch (id) { case 0xa0: name = "CS4231A"; mss->bd_id = MD_CS42XX; break; case 0xa2: name = "CS4232"; mss->bd_id = MD_CS42XX; break; case 0xb2: /* strange: the 4231 data sheet says b4-b3 are XX * so this should be the same as 0xa2 */ name = "CS4232A"; mss->bd_id = MD_CS42XX; break; case 0x80: /* * It must be a CS4231 or AD1845. The register I23 * of CS4231 is undefined and it appears to be read * only. AD1845 uses I23 for setting sample rate. * Assume the chip is AD1845 if I23 is changeable. 
*/ tmp = ad_read(mss, 23); ad_write(mss, 23, ~tmp); if (ad_read(mss, 23) != tmp) { /* AD1845 ? */ name = "AD1845"; mss->bd_id = MD_AD1845; } ad_write(mss, 23, tmp); /* Restore */ yamaha = ymf_test(dev, mss); if (yamaha) { mss->bd_id = MD_YM0020; name = yamaha; } break; case 0x83: /* CS4236 */ case 0x03: /* CS4236 on Intel PR440FX motherboard XXX */ name = "CS4236"; mss->bd_id = MD_CS42XX; break; default: /* Assume CS4231 */ BVDDB(printf("unknown id 0x%02x, assuming CS4231\n", id);) mss->bd_id = MD_CS42XX; } } ad_write(mss, 25, tmp1); /* Restore bits */ gotit: BVDDB(printf("mss_detect() - Detected %s\n", name)); device_set_desc(dev, name); device_set_flags(dev, ((device_get_flags(dev) & ~DV_F_DEV_MASK) | ((mss->bd_id << DV_F_DEV_SHIFT) & DV_F_DEV_MASK))); return 0; no: return ENXIO; } #ifndef PC98 static int opti_detect(device_t dev, struct mss_info *mss) { int c; static const struct opticard { int boardid; int passwdreg; int password; int base; int indir_reg; } cards[] = { { MD_OPTI930, 0, 0xe4, 0xf8f, 0xe0e }, /* 930 */ { MD_OPTI924, 3, 0xe5, 0xf8c, 0, }, /* 924 */ { 0 }, }; mss->conf_rid = 3; mss->indir_rid = 4; for (c = 0; cards[c].base; c++) { mss->optibase = cards[c].base; mss->password = cards[c].password; mss->passwdreg = cards[c].passwdreg; mss->bd_id = cards[c].boardid; if (cards[c].indir_reg) mss->indir = bus_alloc_resource(dev, SYS_RES_IOPORT, &mss->indir_rid, cards[c].indir_reg, cards[c].indir_reg+1, 1, RF_ACTIVE); mss->conf_base = bus_alloc_resource(dev, SYS_RES_IOPORT, &mss->conf_rid, mss->optibase, mss->optibase+9, 9, RF_ACTIVE); if (opti_read(mss, 1) != 0xff) { return 1; } else { if (mss->indir) bus_release_resource(dev, SYS_RES_IOPORT, mss->indir_rid, mss->indir); mss->indir = NULL; if (mss->conf_base) bus_release_resource(dev, SYS_RES_IOPORT, mss->conf_rid, mss->conf_base); mss->conf_base = NULL; } } return 0; } #endif static char * ymf_test(device_t dev, struct mss_info *mss) { static int ports[] = {0x370, 0x310, 0x538}; int p, i, j, version; 
static char *chipset[] = { NULL, /* 0 */ "OPL3-SA2 (YMF711)", /* 1 */ "OPL3-SA3 (YMF715)", /* 2 */ "OPL3-SA3 (YMF715)", /* 3 */ "OPL3-SAx (YMF719)", /* 4 */ "OPL3-SAx (YMF719)", /* 5 */ "OPL3-SAx (YMF719)", /* 6 */ "OPL3-SAx (YMF719)", /* 7 */ }; for (p = 0; p < 3; p++) { mss->conf_rid = 1; mss->conf_base = bus_alloc_resource(dev, SYS_RES_IOPORT, &mss->conf_rid, ports[p], ports[p] + 1, 2, RF_ACTIVE); if (!mss->conf_base) return 0; /* Test the index port of the config registers */ i = port_rd(mss->conf_base, 0); port_wr(mss->conf_base, 0, OPL3SAx_DMACONF); j = (port_rd(mss->conf_base, 0) == OPL3SAx_DMACONF)? 1 : 0; port_wr(mss->conf_base, 0, i); if (!j) { bus_release_resource(dev, SYS_RES_IOPORT, mss->conf_rid, mss->conf_base); #ifdef PC98 /* PC98 need this. I don't know reason why. */ bus_delete_resource(dev, SYS_RES_IOPORT, mss->conf_rid); #endif - mss->conf_base = 0; + mss->conf_base = NULL; continue; } version = conf_rd(mss, OPL3SAx_MISC) & 0x07; return chipset[version]; } return NULL; } static int mss_doattach(device_t dev, struct mss_info *mss) { int pdma, rdma, flags = device_get_flags(dev); char status[SND_STATUSLEN], status2[SND_STATUSLEN]; mss->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_mss softc"); mss->bufsize = pcm_getbuffersize(dev, 4096, MSS_DEFAULT_BUFSZ, 65536); if (!mss_alloc_resources(mss, dev)) goto no; mss_init(mss, dev); pdma = rman_get_start(mss->drq1); rdma = rman_get_start(mss->drq2); if (flags & DV_F_TRUE_MSS) { /* has IRQ/DMA registers, set IRQ and DMA addr */ #ifdef PC98 /* CS423[12] in PC98 can use IRQ3,5,10,12 */ static char interrupt_bits[13] = {-1, -1, -1, 0x08, -1, 0x10, -1, -1, -1, -1, 0x18, -1, 0x20}; #else static char interrupt_bits[12] = {-1, -1, -1, -1, -1, 0x28, -1, 0x08, -1, 0x10, 0x18, 0x20}; #endif static char pdma_bits[4] = {1, 2, -1, 3}; static char valid_rdma[4] = {1, 0, -1, 0}; char bits; if (!mss->irq || (bits = interrupt_bits[rman_get_start(mss->irq)]) == -1) goto no; #ifndef PC98 /* CS423[12] in PC98 don't 
support this. */ io_wr(mss, 0, bits | 0x40); /* config port */ if ((io_rd(mss, 3) & 0x40) == 0) device_printf(dev, "IRQ Conflict?\n"); #endif /* Write IRQ+DMA setup */ if (pdma_bits[pdma] == -1) goto no; bits |= pdma_bits[pdma]; if (pdma != rdma) { if (rdma == valid_rdma[pdma]) bits |= 4; else { printf("invalid dual dma config %d:%d\n", pdma, rdma); goto no; } } io_wr(mss, 0, bits); printf("drq/irq conf %x\n", io_rd(mss, 0)); } mixer_init(dev, (mss->bd_id == MD_YM0020)? &ymmix_mixer_class : &mssmix_mixer_class, mss); switch (mss->bd_id) { case MD_OPTI931: snd_setup_intr(dev, mss->irq, 0, opti931_intr, mss, &mss->ih); break; default: snd_setup_intr(dev, mss->irq, 0, mss_intr, mss, &mss->ih); } if (pdma == rdma) pcm_setflags(dev, pcm_getflags(dev) | SD_F_SIMPLEX); if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_24BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/mss->bufsize, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &mss->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto no; } if (pdma != rdma) snprintf(status2, SND_STATUSLEN, ":%d", rdma); else status2[0] = '\0'; snprintf(status, SND_STATUSLEN, "at io 0x%jx irq %jd drq %d%s bufsz %u", rman_get_start(mss->io_base), rman_get_start(mss->irq), pdma, status2, mss->bufsize); if (pcm_register(dev, mss, 1, 1)) goto no; pcm_addchan(dev, PCMDIR_REC, &msschan_class, mss); pcm_addchan(dev, PCMDIR_PLAY, &msschan_class, mss); pcm_setstatus(dev, status); return 0; no: mss_release_resources(mss, dev); return ENXIO; } static int mss_detach(device_t dev) { int r; struct mss_info *mss; r = pcm_unregister(dev); if (r) return r; mss = pcm_getdevinfo(dev); mss_release_resources(mss, dev); return 0; } static int mss_attach(device_t dev) { struct mss_info *mss; int flags = device_get_flags(dev); mss = (struct mss_info *)malloc(sizeof *mss, M_DEVBUF, M_NOWAIT | 
M_ZERO); if (!mss) return ENXIO; mss->io_rid = 0; mss->conf_rid = -1; mss->irq_rid = 0; mss->drq1_rid = 0; mss->drq2_rid = -1; if (flags & DV_F_DUAL_DMA) { bus_set_resource(dev, SYS_RES_DRQ, 1, flags & DV_F_DRQ_MASK, 1); mss->drq2_rid = 1; } mss->bd_id = (device_get_flags(dev) & DV_F_DEV_MASK) >> DV_F_DEV_SHIFT; if (mss->bd_id == MD_YM0020) ymf_test(dev, mss); return mss_doattach(dev, mss); } /* * mss_resume() is the code to allow a laptop to resume using the sound * card. * * This routine re-sets the state of the board to the state before going * to sleep. According to the yamaha docs this is the right thing to do, * but getting DMA restarted appears to be a bit of a trick, so the device * has to be closed and re-opened to be re-used, but there is no skipping * problem, and volume, bass/treble and most other things are restored * properly. * */ static int mss_resume(device_t dev) { /* * Restore the state taken below. */ struct mss_info *mss; int i; mss = pcm_getdevinfo(dev); if(mss->bd_id == MD_YM0020 || mss->bd_id == MD_CS423X) { /* This works on a Toshiba Libretto 100CT. */ for (i = 0; i < MSS_INDEXED_REGS; i++) ad_write(mss, i, mss->mss_indexed_regs[i]); for (i = 0; i < OPL_INDEXED_REGS; i++) conf_wr(mss, i, mss->opl_indexed_regs[i]); mss_intr(mss); } if (mss->bd_id == MD_CS423X) { /* Needed on IBM Thinkpad 600E */ mss_lock(mss); mss_format(&mss->pch, mss->pch.channel->format); mss_speed(&mss->pch, mss->pch.channel->speed); mss_unlock(mss); } return 0; } /* * mss_suspend() is the code that gets called right before a laptop * suspends. * * This code saves the state of the sound card right before shutdown * so it can be restored above. * */ static int mss_suspend(device_t dev) { int i; struct mss_info *mss; mss = pcm_getdevinfo(dev); if(mss->bd_id == MD_YM0020 || mss->bd_id == MD_CS423X) { /* this stops playback. 
*/ conf_wr(mss, 0x12, 0x0c); for(i = 0; i < MSS_INDEXED_REGS; i++) mss->mss_indexed_regs[i] = ad_read(mss, i); for(i = 0; i < OPL_INDEXED_REGS; i++) mss->opl_indexed_regs[i] = conf_rd(mss, i); mss->opl_indexed_regs[0x12] = 0x0; } return 0; } static device_method_t mss_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mss_probe), DEVMETHOD(device_attach, mss_attach), DEVMETHOD(device_detach, mss_detach), DEVMETHOD(device_suspend, mss_suspend), DEVMETHOD(device_resume, mss_resume), { 0, 0 } }; static driver_t mss_driver = { "pcm", mss_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_mss, isa, mss_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_mss, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_mss, 1); static int azt2320_mss_mode(struct mss_info *mss, device_t dev) { struct resource *sbport; int i, ret, rid; rid = 0; ret = -1; sbport = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sbport) { for (i = 0; i < 1000; i++) { if ((port_rd(sbport, SBDSP_STATUS) & 0x80)) DELAY((i > 100) ? 1000 : 10); else { port_wr(sbport, SBDSP_CMD, 0x09); break; } } for (i = 0; i < 1000; i++) { if ((port_rd(sbport, SBDSP_STATUS) & 0x80)) DELAY((i > 100) ? 
1000 : 10); else { port_wr(sbport, SBDSP_CMD, 0x00); ret = 0; break; } } DELAY(1000); bus_release_resource(dev, SYS_RES_IOPORT, rid, sbport); } return ret; } static struct isa_pnp_id pnpmss_ids[] = { {0x0000630e, "CS423x"}, /* CSC0000 */ {0x0001630e, "CS423x-PCI"}, /* CSC0100 */ {0x01000000, "CMI8330"}, /* @@@0001 */ {0x2100a865, "Yamaha OPL-SAx"}, /* YMH0021 */ {0x1110d315, "ENSONIQ SoundscapeVIVO"}, /* ENS1011 */ {0x1093143e, "OPTi931"}, /* OPT9310 */ {0x5092143e, "OPTi925"}, /* OPT9250 XXX guess */ {0x0000143e, "OPTi924"}, /* OPT0924 */ {0x1022b839, "Neomagic 256AV (non-ac97)"}, /* NMX2210 */ {0x01005407, "Aztech 2320"}, /* AZT0001 */ #if 0 {0x0000561e, "GusPnP"}, /* GRV0000 */ #endif {0}, }; static int pnpmss_probe(device_t dev) { u_int32_t lid, vid; lid = isa_get_logicalid(dev); vid = isa_get_vendorid(dev); if (lid == 0x01000000 && vid != 0x0100a90d) /* CMI0001 */ return ENXIO; return ISA_PNP_PROBE(device_get_parent(dev), dev, pnpmss_ids); } static int pnpmss_attach(device_t dev) { struct mss_info *mss; mss = malloc(sizeof(*mss), M_DEVBUF, M_WAITOK | M_ZERO); mss->io_rid = 0; mss->conf_rid = -1; mss->irq_rid = 0; mss->drq1_rid = 0; mss->drq2_rid = 1; mss->bd_id = MD_CS42XX; switch (isa_get_logicalid(dev)) { case 0x0000630e: /* CSC0000 */ case 0x0001630e: /* CSC0100 */ mss->bd_flags |= BD_F_MSS_OFFSET; mss->bd_id = MD_CS423X; break; case 0x2100a865: /* YHM0021 */ mss->io_rid = 1; mss->conf_rid = 4; mss->bd_id = MD_YM0020; break; case 0x1110d315: /* ENS1011 */ mss->io_rid = 1; mss->bd_id = MD_VIVO; break; case 0x1093143e: /* OPT9310 */ mss->bd_flags |= BD_F_MSS_OFFSET; mss->conf_rid = 3; mss->bd_id = MD_OPTI931; break; case 0x5092143e: /* OPT9250 XXX guess */ mss->io_rid = 1; mss->conf_rid = 3; mss->bd_id = MD_OPTI925; break; case 0x0000143e: /* OPT0924 */ mss->password = 0xe5; mss->passwdreg = 3; mss->optibase = 0xf0c; mss->io_rid = 2; mss->conf_rid = 3; mss->bd_id = MD_OPTI924; mss->bd_flags |= BD_F_924PNP; if(opti_init(dev, mss) != 0) { free(mss, M_DEVBUF); 
return ENXIO; } break; case 0x1022b839: /* NMX2210 */ mss->io_rid = 1; break; case 0x01005407: /* AZT0001 */ /* put into MSS mode first (snatched from NetBSD) */ if (azt2320_mss_mode(mss, dev) == -1) { free(mss, M_DEVBUF); return ENXIO; } mss->bd_flags |= BD_F_MSS_OFFSET; mss->io_rid = 2; break; #if 0 case 0x0000561e: /* GRV0000 */ mss->bd_flags |= BD_F_MSS_OFFSET; mss->io_rid = 2; mss->conf_rid = 1; mss->drq1_rid = 1; mss->drq2_rid = 0; mss->bd_id = MD_GUSPNP; break; #endif case 0x01000000: /* @@@0001 */ mss->drq2_rid = -1; break; /* Unknown MSS default. We could let the CSC0000 stuff match too */ default: mss->bd_flags |= BD_F_MSS_OFFSET; break; } return mss_doattach(dev, mss); } static int opti_init(device_t dev, struct mss_info *mss) { int flags = device_get_flags(dev); int basebits = 0; if (!mss->conf_base) { bus_set_resource(dev, SYS_RES_IOPORT, mss->conf_rid, mss->optibase, 0x9); mss->conf_base = bus_alloc_resource(dev, SYS_RES_IOPORT, &mss->conf_rid, mss->optibase, mss->optibase+0x9, 0x9, RF_ACTIVE); } if (!mss->conf_base) return ENXIO; if (!mss->io_base) mss->io_base = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &mss->io_rid, 8, RF_ACTIVE); if (!mss->io_base) /* No hint specified, use 0x530 */ mss->io_base = bus_alloc_resource(dev, SYS_RES_IOPORT, &mss->io_rid, 0x530, 0x537, 8, RF_ACTIVE); if (!mss->io_base) return ENXIO; switch (rman_get_start(mss->io_base)) { case 0x530: basebits = 0x0; break; case 0xe80: basebits = 0x10; break; case 0xf40: basebits = 0x20; break; case 0x604: basebits = 0x30; break; default: printf("opti_init: invalid MSS base address!\n"); return ENXIO; } switch (mss->bd_id) { case MD_OPTI924: opti_write(mss, 1, 0x80 | basebits); /* MSS mode */ opti_write(mss, 2, 0x00); /* Disable CD */ opti_write(mss, 3, 0xf0); /* Disable SB IRQ */ opti_write(mss, 4, 0xf0); opti_write(mss, 5, 0x00); opti_write(mss, 6, 0x02); /* MPU stuff */ break; case MD_OPTI930: opti_write(mss, 1, 0x00 | basebits); opti_write(mss, 3, 0x00); /* Disable SB IRQ/DMA 
*/ opti_write(mss, 4, 0x52); /* Empty FIFO */ opti_write(mss, 5, 0x3c); /* Mode 2 */ opti_write(mss, 6, 0x02); /* Enable MSS */ break; } if (mss->bd_flags & BD_F_924PNP) { u_int32_t irq = isa_get_irq(dev); u_int32_t drq = isa_get_drq(dev); bus_set_resource(dev, SYS_RES_IRQ, 0, irq, 1); bus_set_resource(dev, SYS_RES_DRQ, mss->drq1_rid, drq, 1); if (flags & DV_F_DUAL_DMA) { bus_set_resource(dev, SYS_RES_DRQ, 1, flags & DV_F_DRQ_MASK, 1); mss->drq2_rid = 1; } } /* OPTixxx has I/DRQ registers */ device_set_flags(dev, device_get_flags(dev) | DV_F_TRUE_MSS); return 0; } static void opti_write(struct mss_info *mss, u_char reg, u_char val) { port_wr(mss->conf_base, mss->passwdreg, mss->password); switch(mss->bd_id) { case MD_OPTI924: if (reg > 7) { /* Indirect register */ port_wr(mss->conf_base, mss->passwdreg, reg); port_wr(mss->conf_base, mss->passwdreg, mss->password); port_wr(mss->conf_base, 9, val); return; } port_wr(mss->conf_base, reg, val); break; case MD_OPTI930: port_wr(mss->indir, 0, reg); port_wr(mss->conf_base, mss->passwdreg, mss->password); port_wr(mss->indir, 1, val); break; } } #ifndef PC98 u_char opti_read(struct mss_info *mss, u_char reg) { port_wr(mss->conf_base, mss->passwdreg, mss->password); switch(mss->bd_id) { case MD_OPTI924: if (reg > 7) { /* Indirect register */ port_wr(mss->conf_base, mss->passwdreg, reg); port_wr(mss->conf_base, mss->passwdreg, mss->password); return(port_rd(mss->conf_base, 9)); } return(port_rd(mss->conf_base, reg)); break; case MD_OPTI930: port_wr(mss->indir, 0, reg); port_wr(mss->conf_base, mss->passwdreg, mss->password); return port_rd(mss->indir, 1); break; } return -1; } #endif static device_method_t pnpmss_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pnpmss_probe), DEVMETHOD(device_attach, pnpmss_attach), DEVMETHOD(device_detach, mss_detach), DEVMETHOD(device_suspend, mss_suspend), DEVMETHOD(device_resume, mss_resume), { 0, 0 } }; static driver_t pnpmss_driver = { "pcm", pnpmss_methods, PCM_SOFTC_SIZE, 
}; DRIVER_MODULE(snd_pnpmss, isa, pnpmss_driver, pcm_devclass, 0, 0); DRIVER_MODULE(snd_pnpmss, acpi, pnpmss_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_pnpmss, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_pnpmss, 1); static int guspcm_probe(device_t dev) { struct sndcard_func *func; func = device_get_ivars(dev); if (func == NULL || func->func != SCF_PCM) return ENXIO; device_set_desc(dev, "GUS CS4231"); return 0; } static int guspcm_attach(device_t dev) { device_t parent = device_get_parent(dev); struct mss_info *mss; int base, flags; unsigned char ctl; mss = (struct mss_info *)malloc(sizeof *mss, M_DEVBUF, M_NOWAIT | M_ZERO); if (mss == NULL) return ENOMEM; mss->bd_flags = BD_F_MSS_OFFSET; mss->io_rid = 2; mss->conf_rid = 1; mss->irq_rid = 0; mss->drq1_rid = 1; mss->drq2_rid = -1; if (isa_get_logicalid(parent) == 0) mss->bd_id = MD_GUSMAX; else { mss->bd_id = MD_GUSPNP; mss->drq2_rid = 0; goto skip_setup; } flags = device_get_flags(parent); if (flags & DV_F_DUAL_DMA) mss->drq2_rid = 0; mss->conf_base = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &mss->conf_rid, 8, RF_ACTIVE); if (mss->conf_base == NULL) { mss_release_resources(mss, dev); return ENXIO; } base = isa_get_port(parent); ctl = 0x40; /* CS4231 enable */ if (isa_get_drq(dev) > 3) ctl |= 0x10; /* 16-bit dma channel 1 */ if ((flags & DV_F_DUAL_DMA) != 0 && (flags & DV_F_DRQ_MASK) > 3) ctl |= 0x20; /* 16-bit dma channel 2 */ ctl |= (base >> 4) & 0x0f; /* 2X0 -> 3XC */ port_wr(mss->conf_base, 6, ctl); skip_setup: return mss_doattach(dev, mss); } static device_method_t guspcm_methods[] = { DEVMETHOD(device_probe, guspcm_probe), DEVMETHOD(device_attach, guspcm_attach), DEVMETHOD(device_detach, mss_detach), { 0, 0 } }; static driver_t guspcm_driver = { "pcm", guspcm_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_guspcm, gusc, guspcm_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_guspcm, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_guspcm, 1); Index: 
head/sys/dev/sound/isa/sb16.c =================================================================== --- head/sys/dev/sound/isa/sb16.c (revision 297861) +++ head/sys/dev/sound/isa/sb16.c (revision 297862) @@ -1,913 +1,913 @@ /*- * Copyright (c) 1999 Cameron Grant * Copyright (c) 1997,1998 Luigi Rizzo * * Derived from files in the Voxware 3.5 distribution, * Copyright by Hannu Savolainen 1994, under the same copyright * conditions. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include "mixer_if.h" SND_DECLARE_FILE("$FreeBSD$"); #define SB16_BUFFSIZE 4096 #define PLAIN_SB16(x) ((((x)->bd_flags) & (BD_F_SB16|BD_F_SB16X)) == BD_F_SB16) static u_int32_t sb16_fmt8[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), 0 }; static struct pcmchan_caps sb16_caps8 = {5000, 45000, sb16_fmt8, 0}; static u_int32_t sb16_fmt16[] = { SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps sb16_caps16 = {5000, 45000, sb16_fmt16, 0}; static u_int32_t sb16x_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps sb16x_caps = {5000, 49000, sb16x_fmt, 0}; struct sb_info; struct sb_chinfo { struct sb_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; int dir, run, dch; u_int32_t fmt, spd, blksz; }; struct sb_info { struct resource *io_base; /* I/O address for the board */ struct resource *irq; struct resource *drq1; struct resource *drq2; void *ih; bus_dma_tag_t parent_dmat; unsigned int bufsize; int bd_id; u_long bd_flags; /* board-specific flags */ int prio, prio16; struct sb_chinfo pch, rch; device_t parent_dev; }; #if 0 static void sb_lock(struct sb_info *sb); static void sb_unlock(struct sb_info *sb); static int sb_rd(struct sb_info *sb, int reg); static void sb_wr(struct sb_info *sb, int reg, u_int8_t val); static int sb_cmd(struct sb_info *sb, u_char val); /* static int sb_cmd1(struct sb_info *sb, u_char cmd, int val); */ static int sb_cmd2(struct sb_info *sb, u_char cmd, int val); static u_int sb_get_byte(struct sb_info *sb); static void sb_setmixer(struct sb_info *sb, u_int port, u_int value); static int sb_getmixer(struct sb_info *sb, u_int port); static int sb_reset_dsp(struct sb_info *sb); static void sb_intr(void *arg); #endif /* * Common code for the midi and pcm functions * * sb_cmd 
write a single byte to the CMD port. * sb_cmd1 write a CMD + 1 byte arg * sb_cmd2 write a CMD + 2 byte arg * sb_get_byte returns a single byte from the DSP data port */ static void sb_lock(struct sb_info *sb) { sbc_lock(device_get_softc(sb->parent_dev)); } static void sb_lockassert(struct sb_info *sb) { sbc_lockassert(device_get_softc(sb->parent_dev)); } static void sb_unlock(struct sb_info *sb) { sbc_unlock(device_get_softc(sb->parent_dev)); } static int port_rd(struct resource *port, int off) { return bus_space_read_1(rman_get_bustag(port), rman_get_bushandle(port), off); } static void port_wr(struct resource *port, int off, u_int8_t data) { bus_space_write_1(rman_get_bustag(port), rman_get_bushandle(port), off, data); } static int sb_rd(struct sb_info *sb, int reg) { return port_rd(sb->io_base, reg); } static void sb_wr(struct sb_info *sb, int reg, u_int8_t val) { port_wr(sb->io_base, reg, val); } static int sb_dspwr(struct sb_info *sb, u_char val) { int i; for (i = 0; i < 1000; i++) { if ((sb_rd(sb, SBDSP_STATUS) & 0x80)) DELAY((i > 100)? 
1000 : 10); else { sb_wr(sb, SBDSP_CMD, val); return 1; } } if (curthread->td_intr_nesting_level == 0) printf("sb_dspwr(0x%02x) timed out.\n", val); return 0; } static int sb_cmd(struct sb_info *sb, u_char val) { #if 0 printf("sb_cmd: %x\n", val); #endif return sb_dspwr(sb, val); } /* static int sb_cmd1(struct sb_info *sb, u_char cmd, int val) { #if 0 printf("sb_cmd1: %x, %x\n", cmd, val); #endif if (sb_dspwr(sb, cmd)) { return sb_dspwr(sb, val & 0xff); } else return 0; } */ static int sb_cmd2(struct sb_info *sb, u_char cmd, int val) { int r; #if 0 printf("sb_cmd2: %x, %x\n", cmd, val); #endif sb_lockassert(sb); r = 0; if (sb_dspwr(sb, cmd)) { if (sb_dspwr(sb, val & 0xff)) { if (sb_dspwr(sb, (val >> 8) & 0xff)) { r = 1; } } } return r; } /* * in the SB, there is a set of indirect "mixer" registers with * address at offset 4, data at offset 5 */ static void sb_setmixer(struct sb_info *sb, u_int port, u_int value) { sb_lock(sb); sb_wr(sb, SB_MIX_ADDR, (u_char) (port & 0xff)); /* Select register */ DELAY(10); sb_wr(sb, SB_MIX_DATA, (u_char) (value & 0xff)); DELAY(10); sb_unlock(sb); } static int sb_getmixer(struct sb_info *sb, u_int port) { int val; sb_lockassert(sb); sb_wr(sb, SB_MIX_ADDR, (u_char) (port & 0xff)); /* Select register */ DELAY(10); val = sb_rd(sb, SB_MIX_DATA); DELAY(10); return val; } static u_int sb_get_byte(struct sb_info *sb) { int i; for (i = 1000; i > 0; i--) { if (sb_rd(sb, DSP_DATA_AVAIL) & 0x80) return sb_rd(sb, DSP_READ); else DELAY(20); } return 0xffff; } static int sb_reset_dsp(struct sb_info *sb) { u_char b; sb_lockassert(sb); sb_wr(sb, SBDSP_RST, 3); DELAY(100); sb_wr(sb, SBDSP_RST, 0); b = sb_get_byte(sb); if (b != 0xAA) { DEB(printf("sb_reset_dsp 0x%lx failed\n", rman_get_start(sb->io_base))); return ENXIO; /* Sorry */ } return 0; } /************************************************************/ struct sb16_mixent { int reg; int bits; int ofs; int stereo; }; static const struct sb16_mixent sb16_mixtab[32] = { [SOUND_MIXER_VOLUME] = { 
0x30, 5, 3, 1 }, [SOUND_MIXER_PCM] = { 0x32, 5, 3, 1 }, [SOUND_MIXER_SYNTH] = { 0x34, 5, 3, 1 }, [SOUND_MIXER_CD] = { 0x36, 5, 3, 1 }, [SOUND_MIXER_LINE] = { 0x38, 5, 3, 1 }, [SOUND_MIXER_MIC] = { 0x3a, 5, 3, 0 }, [SOUND_MIXER_SPEAKER] = { 0x3b, 5, 3, 0 }, [SOUND_MIXER_IGAIN] = { 0x3f, 2, 6, 1 }, [SOUND_MIXER_OGAIN] = { 0x41, 2, 6, 1 }, [SOUND_MIXER_TREBLE] = { 0x44, 4, 4, 1 }, [SOUND_MIXER_BASS] = { 0x46, 4, 4, 1 }, [SOUND_MIXER_LINE1] = { 0x52, 5, 3, 1 } }; static int sb16mix_init(struct snd_mixer *m) { struct sb_info *sb = mix_getdevinfo(m); mix_setdevs(m, SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_SPEAKER | SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD | SOUND_MASK_IGAIN | SOUND_MASK_OGAIN | SOUND_MASK_LINE1 | SOUND_MASK_VOLUME | SOUND_MASK_BASS | SOUND_MASK_TREBLE); mix_setrecdevs(m, SOUND_MASK_SYNTH | SOUND_MASK_LINE | SOUND_MASK_LINE1 | SOUND_MASK_MIC | SOUND_MASK_CD); sb_setmixer(sb, 0x3c, 0x1f); /* make all output active */ sb_setmixer(sb, 0x3d, 0); /* make all inputs-l off */ sb_setmixer(sb, 0x3e, 0); /* make all inputs-r off */ return 0; } static int rel2abs_volume(int x, int max) { int temp; temp = ((x * max) + 50) / 100; if (temp > max) temp = max; else if (temp < 0) temp = 0; return (temp); } static int sb16mix_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct sb_info *sb = mix_getdevinfo(m); const struct sb16_mixent *e; int max; e = &sb16_mixtab[dev]; max = (1 << e->bits) - 1; left = rel2abs_volume(left, max); right = rel2abs_volume(right, max); sb_setmixer(sb, e->reg, left << e->ofs); if (e->stereo) sb_setmixer(sb, e->reg + 1, right << e->ofs); else right = left; left = (left * 100) / max; right = (right * 100) / max; return left | (right << 8); } static u_int32_t sb16mix_setrecsrc(struct snd_mixer *m, u_int32_t src) { struct sb_info *sb = mix_getdevinfo(m); u_char recdev_l, recdev_r; recdev_l = 0; recdev_r = 0; if (src & SOUND_MASK_MIC) { recdev_l |= 0x01; /* mono mic */ recdev_r |= 0x01; } if (src & 
SOUND_MASK_CD) { recdev_l |= 0x04; /* l cd */ recdev_r |= 0x02; /* r cd */ } if (src & SOUND_MASK_LINE) { recdev_l |= 0x10; /* l line */ recdev_r |= 0x08; /* r line */ } if (src & SOUND_MASK_SYNTH) { recdev_l |= 0x40; /* l midi */ recdev_r |= 0x20; /* r midi */ } sb_setmixer(sb, SB16_IMASK_L, recdev_l); sb_setmixer(sb, SB16_IMASK_R, recdev_r); /* Switch on/off FM tuner source */ if (src & SOUND_MASK_LINE1) sb_setmixer(sb, 0x4a, 0x0c); else sb_setmixer(sb, 0x4a, 0x00); /* * since the same volume controls apply to the input and * output sections, the best approach to have a consistent * behaviour among cards would be to disable the output path * on devices which are used to record. * However, since users like to have feedback, we only disable * the mic -- permanently. */ sb_setmixer(sb, SB16_OMASK, 0x1f & ~1); return src; } static kobj_method_t sb16mix_mixer_methods[] = { KOBJMETHOD(mixer_init, sb16mix_init), KOBJMETHOD(mixer_set, sb16mix_set), KOBJMETHOD(mixer_setrecsrc, sb16mix_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(sb16mix_mixer); /************************************************************/ static void sb16_release_resources(struct sb_info *sb, device_t dev) { if (sb->irq) { if (sb->ih) bus_teardown_intr(dev, sb->irq, sb->ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sb->irq); - sb->irq = 0; + sb->irq = NULL; } if (sb->drq2) { if (sb->drq2 != sb->drq1) { isa_dma_release(rman_get_start(sb->drq2)); bus_release_resource(dev, SYS_RES_DRQ, 1, sb->drq2); } - sb->drq2 = 0; + sb->drq2 = NULL; } if (sb->drq1) { isa_dma_release(rman_get_start(sb->drq1)); bus_release_resource(dev, SYS_RES_DRQ, 0, sb->drq1); - sb->drq1 = 0; + sb->drq1 = NULL; } if (sb->io_base) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sb->io_base); - sb->io_base = 0; + sb->io_base = NULL; } if (sb->parent_dmat) { bus_dma_tag_destroy(sb->parent_dmat); sb->parent_dmat = 0; } free(sb, M_DEVBUF); } static int sb16_alloc_resources(struct sb_info *sb, device_t dev) { int rid; rid = 0; if 
(!sb->io_base) sb->io_base = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); rid = 0; if (!sb->irq) sb->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); rid = 0; if (!sb->drq1) sb->drq1 = bus_alloc_resource_any(dev, SYS_RES_DRQ, &rid, RF_ACTIVE); rid = 1; if (!sb->drq2) sb->drq2 = bus_alloc_resource_any(dev, SYS_RES_DRQ, &rid, RF_ACTIVE); if (sb->io_base && sb->drq1 && sb->irq) { isa_dma_acquire(rman_get_start(sb->drq1)); isa_dmainit(rman_get_start(sb->drq1), sb->bufsize); if (sb->drq2) { isa_dma_acquire(rman_get_start(sb->drq2)); isa_dmainit(rman_get_start(sb->drq2), sb->bufsize); } else { sb->drq2 = sb->drq1; pcm_setflags(dev, pcm_getflags(dev) | SD_F_SIMPLEX); } return 0; } else return ENXIO; } /* sbc does locking for us */ static void sb_intr(void *arg) { struct sb_info *sb = (struct sb_info *)arg; int reason, c; /* * The Vibra16X has separate flags for 8 and 16 bit transfers, but * I have no idea how to tell capture from playback interrupts... */ reason = 0; sb_lock(sb); c = sb_getmixer(sb, IRQ_STAT); if (c & 1) sb_rd(sb, DSP_DATA_AVAIL); /* 8-bit int ack */ if (c & 2) sb_rd(sb, DSP_DATA_AVL16); /* 16-bit int ack */ sb_unlock(sb); /* * this tells us if the source is 8-bit or 16-bit dma. We * have to check the io channel to map it to read or write... 
*/ if (sb->bd_flags & BD_F_SB16X) { if (c & 1) { /* 8-bit format */ if (sb->pch.fmt & AFMT_8BIT) reason |= 1; if (sb->rch.fmt & AFMT_8BIT) reason |= 2; } if (c & 2) { /* 16-bit format */ if (sb->pch.fmt & AFMT_16BIT) reason |= 1; if (sb->rch.fmt & AFMT_16BIT) reason |= 2; } } else { if (c & 1) { /* 8-bit dma */ if (sb->pch.dch == 1) reason |= 1; if (sb->rch.dch == 1) reason |= 2; } if (c & 2) { /* 16-bit dma */ if (sb->pch.dch == 2) reason |= 1; if (sb->rch.dch == 2) reason |= 2; } } #if 0 printf("sb_intr: reason=%d c=0x%x\n", reason, c); #endif if ((reason & 1) && (sb->pch.run)) chn_intr(sb->pch.channel); if ((reason & 2) && (sb->rch.run)) chn_intr(sb->rch.channel); } static int sb_setup(struct sb_info *sb) { struct sb_chinfo *ch; u_int8_t v; int l, pprio; sb_lock(sb); if (sb->bd_flags & BD_F_DMARUN) sndbuf_dma(sb->pch.buffer, PCMTRIG_STOP); if (sb->bd_flags & BD_F_DMARUN2) sndbuf_dma(sb->rch.buffer, PCMTRIG_STOP); sb->bd_flags &= ~(BD_F_DMARUN | BD_F_DMARUN2); sb_reset_dsp(sb); if (sb->bd_flags & BD_F_SB16X) { /* full-duplex doesn't work! */ pprio = sb->pch.run? 1 : 0; sndbuf_dmasetup(sb->pch.buffer, pprio? sb->drq1 : sb->drq2); sb->pch.dch = pprio? 1 : 0; sndbuf_dmasetup(sb->rch.buffer, pprio? sb->drq2 : sb->drq1); sb->rch.dch = pprio? 2 : 1; } else { if (sb->pch.run && sb->rch.run) { pprio = (sb->rch.fmt & AFMT_16BIT)? 0 : 1; sndbuf_dmasetup(sb->pch.buffer, pprio? sb->drq2 : sb->drq1); sb->pch.dch = pprio? 2 : 1; sndbuf_dmasetup(sb->rch.buffer, pprio? sb->drq1 : sb->drq2); sb->rch.dch = pprio? 1 : 2; } else { if (sb->pch.run) { sndbuf_dmasetup(sb->pch.buffer, (sb->pch.fmt & AFMT_16BIT)? sb->drq2 : sb->drq1); sb->pch.dch = (sb->pch.fmt & AFMT_16BIT)? 2 : 1; sndbuf_dmasetup(sb->rch.buffer, (sb->pch.fmt & AFMT_16BIT)? sb->drq1 : sb->drq2); sb->rch.dch = (sb->pch.fmt & AFMT_16BIT)? 1 : 2; } else if (sb->rch.run) { sndbuf_dmasetup(sb->pch.buffer, (sb->rch.fmt & AFMT_16BIT)? sb->drq1 : sb->drq2); sb->pch.dch = (sb->rch.fmt & AFMT_16BIT)? 
1 : 2; sndbuf_dmasetup(sb->rch.buffer, (sb->rch.fmt & AFMT_16BIT)? sb->drq2 : sb->drq1); sb->rch.dch = (sb->rch.fmt & AFMT_16BIT)? 2 : 1; } } } sndbuf_dmasetdir(sb->pch.buffer, PCMDIR_PLAY); sndbuf_dmasetdir(sb->rch.buffer, PCMDIR_REC); /* printf("setup: [pch = %d, pfmt = %d, pgo = %d] [rch = %d, rfmt = %d, rgo = %d]\n", sb->pch.dch, sb->pch.fmt, sb->pch.run, sb->rch.dch, sb->rch.fmt, sb->rch.run); */ ch = &sb->pch; if (ch->run) { l = ch->blksz; if (ch->fmt & AFMT_16BIT) l >>= 1; l--; /* play speed */ RANGE(ch->spd, 5000, 45000); sb_cmd(sb, DSP_CMD_OUT16); sb_cmd(sb, ch->spd >> 8); sb_cmd(sb, ch->spd & 0xff); /* play format, length */ v = DSP_F16_AUTO | DSP_F16_FIFO_ON | DSP_F16_DAC; v |= (ch->fmt & AFMT_16BIT)? DSP_DMA16 : DSP_DMA8; sb_cmd(sb, v); v = (AFMT_CHANNEL(ch->fmt) > 1)? DSP_F16_STEREO : 0; v |= (ch->fmt & AFMT_SIGNED)? DSP_F16_SIGNED : 0; sb_cmd2(sb, v, l); sndbuf_dma(ch->buffer, PCMTRIG_START); sb->bd_flags |= BD_F_DMARUN; } ch = &sb->rch; if (ch->run) { l = ch->blksz; if (ch->fmt & AFMT_16BIT) l >>= 1; l--; /* record speed */ RANGE(ch->spd, 5000, 45000); sb_cmd(sb, DSP_CMD_IN16); sb_cmd(sb, ch->spd >> 8); sb_cmd(sb, ch->spd & 0xff); /* record format, length */ v = DSP_F16_AUTO | DSP_F16_FIFO_ON | DSP_F16_ADC; v |= (ch->fmt & AFMT_16BIT)? DSP_DMA16 : DSP_DMA8; sb_cmd(sb, v); v = (AFMT_CHANNEL(ch->fmt) > 1)? DSP_F16_STEREO : 0; v |= (ch->fmt & AFMT_SIGNED)? DSP_F16_SIGNED : 0; sb_cmd2(sb, v, l); sndbuf_dma(ch->buffer, PCMTRIG_START); sb->bd_flags |= BD_F_DMARUN2; } sb_unlock(sb); return 0; } /* channel interface */ static void * sb16chan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sb_info *sb = devinfo; struct sb_chinfo *ch = (dir == PCMDIR_PLAY)? 
&sb->pch : &sb->rch; ch->parent = sb; ch->channel = c; ch->buffer = b; ch->dir = dir; if (sndbuf_alloc(ch->buffer, sb->parent_dmat, 0, sb->bufsize) != 0) return NULL; return ch; } static int sb16chan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sb_chinfo *ch = data; struct sb_info *sb = ch->parent; ch->fmt = format; sb->prio = ch->dir; sb->prio16 = (ch->fmt & AFMT_16BIT)? 1 : 0; return 0; } static u_int32_t sb16chan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct sb_chinfo *ch = data; ch->spd = speed; return speed; } static u_int32_t sb16chan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sb_chinfo *ch = data; ch->blksz = blocksize; return ch->blksz; } static int sb16chan_trigger(kobj_t obj, void *data, int go) { struct sb_chinfo *ch = data; struct sb_info *sb = ch->parent; if (!PCMTRIG_COMMON(go)) return 0; if (go == PCMTRIG_START) ch->run = 1; else ch->run = 0; sb_setup(sb); return 0; } static u_int32_t sb16chan_getptr(kobj_t obj, void *data) { struct sb_chinfo *ch = data; return sndbuf_dmaptr(ch->buffer); } static struct pcmchan_caps * sb16chan_getcaps(kobj_t obj, void *data) { struct sb_chinfo *ch = data; struct sb_info *sb = ch->parent; if ((sb->prio == 0) || (sb->prio == ch->dir)) return &sb16x_caps; else return sb->prio16? 
&sb16_caps8 : &sb16_caps16; } static int sb16chan_resetdone(kobj_t obj, void *data) { struct sb_chinfo *ch = data; struct sb_info *sb = ch->parent; sb->prio = 0; return 0; } static kobj_method_t sb16chan_methods[] = { KOBJMETHOD(channel_init, sb16chan_init), KOBJMETHOD(channel_resetdone, sb16chan_resetdone), KOBJMETHOD(channel_setformat, sb16chan_setformat), KOBJMETHOD(channel_setspeed, sb16chan_setspeed), KOBJMETHOD(channel_setblocksize, sb16chan_setblocksize), KOBJMETHOD(channel_trigger, sb16chan_trigger), KOBJMETHOD(channel_getptr, sb16chan_getptr), KOBJMETHOD(channel_getcaps, sb16chan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(sb16chan); /************************************************************/ static int sb16_probe(device_t dev) { char buf[64]; uintptr_t func, ver, r, f; /* The parent device has already been probed. */ r = BUS_READ_IVAR(device_get_parent(dev), dev, 0, &func); if (func != SCF_PCM) return (ENXIO); r = BUS_READ_IVAR(device_get_parent(dev), dev, 1, &ver); f = (ver & 0xffff0000) >> 16; ver &= 0x0000ffff; if (f & BD_F_SB16) { snprintf(buf, sizeof buf, "SB16 DSP %d.%02d%s", (int) ver >> 8, (int) ver & 0xff, (f & BD_F_SB16X)? 
" (ViBRA16X)" : ""); device_set_desc_copy(dev, buf); return 0; } else return (ENXIO); } static int sb16_attach(device_t dev) { struct sb_info *sb; uintptr_t ver; char status[SND_STATUSLEN], status2[SND_STATUSLEN]; sb = malloc(sizeof(*sb), M_DEVBUF, M_WAITOK | M_ZERO); sb->parent_dev = device_get_parent(dev); BUS_READ_IVAR(sb->parent_dev, dev, 1, &ver); sb->bd_id = ver & 0x0000ffff; sb->bd_flags = (ver & 0xffff0000) >> 16; sb->bufsize = pcm_getbuffersize(dev, 4096, SB16_BUFFSIZE, 65536); if (sb16_alloc_resources(sb, dev)) goto no; sb_lock(sb); if (sb_reset_dsp(sb)) { sb_unlock(sb); goto no; } sb_unlock(sb); if (mixer_init(dev, &sb16mix_mixer_class, sb)) goto no; if (snd_setup_intr(dev, sb->irq, 0, sb_intr, sb, &sb->ih)) goto no; if (sb->bd_flags & BD_F_SB16X) pcm_setflags(dev, pcm_getflags(dev) | SD_F_SIMPLEX); sb->prio = 0; if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_24BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/sb->bufsize, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &sb->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto no; } if (!(pcm_getflags(dev) & SD_F_SIMPLEX)) snprintf(status2, SND_STATUSLEN, ":%jd", rman_get_start(sb->drq2)); else status2[0] = '\0'; snprintf(status, SND_STATUSLEN, "at io 0x%jx irq %jd drq %jd%s bufsz %u %s", rman_get_start(sb->io_base), rman_get_start(sb->irq), rman_get_start(sb->drq1), status2, sb->bufsize, PCM_KLDSTRING(snd_sb16)); if (pcm_register(dev, sb, 1, 1)) goto no; pcm_addchan(dev, PCMDIR_REC, &sb16chan_class, sb); pcm_addchan(dev, PCMDIR_PLAY, &sb16chan_class, sb); pcm_setstatus(dev, status); return 0; no: sb16_release_resources(sb, dev); return ENXIO; } static int sb16_detach(device_t dev) { int r; struct sb_info *sb; r = pcm_unregister(dev); if (r) return r; sb = pcm_getdevinfo(dev); sb16_release_resources(sb, dev); return 0; } static 
device_method_t sb16_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sb16_probe), DEVMETHOD(device_attach, sb16_attach), DEVMETHOD(device_detach, sb16_detach), { 0, 0 } }; static driver_t sb16_driver = { "pcm", sb16_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_sb16, sbc, sb16_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_sb16, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_DEPEND(snd_sb16, snd_sbc, 1, 1, 1); MODULE_VERSION(snd_sb16, 1); Index: head/sys/dev/sound/isa/sb8.c =================================================================== --- head/sys/dev/sound/isa/sb8.c (revision 297861) +++ head/sys/dev/sound/isa/sb8.c (revision 297862) @@ -1,807 +1,807 @@ /*- * Copyright (c) 1999 Cameron Grant * Copyright (c) 1997,1998 Luigi Rizzo * * Derived from files in the Voxware 3.5 distribution, * Copyright by Hannu Savolainen 1994, under the same copyright * conditions. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include "mixer_if.h" SND_DECLARE_FILE("$FreeBSD$"); #define SB_DEFAULT_BUFSZ 4096 static u_int32_t sb_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), 0 }; static struct pcmchan_caps sb200_playcaps = {4000, 23000, sb_fmt, 0}; static struct pcmchan_caps sb200_reccaps = {4000, 13000, sb_fmt, 0}; static struct pcmchan_caps sb201_playcaps = {4000, 44100, sb_fmt, 0}; static struct pcmchan_caps sb201_reccaps = {4000, 15000, sb_fmt, 0}; static u_int32_t sbpro_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), 0 }; static struct pcmchan_caps sbpro_playcaps = {4000, 44100, sbpro_fmt, 0}; static struct pcmchan_caps sbpro_reccaps = {4000, 44100, sbpro_fmt, 0}; struct sb_info; struct sb_chinfo { struct sb_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; int dir; u_int32_t fmt, spd, blksz; }; struct sb_info { device_t parent_dev; struct resource *io_base; /* I/O address for the board */ struct resource *irq; struct resource *drq; void *ih; bus_dma_tag_t parent_dmat; unsigned int bufsize; int bd_id; u_long bd_flags; /* board-specific flags */ struct sb_chinfo pch, rch; }; static int sb_rd(struct sb_info *sb, int reg); static void sb_wr(struct sb_info *sb, int reg, u_int8_t val); static int sb_dspready(struct sb_info *sb); static int sb_cmd(struct sb_info *sb, u_char val); static int sb_cmd1(struct sb_info *sb, u_char cmd, int val); static int 
sb_cmd2(struct sb_info *sb, u_char cmd, int val); static u_int sb_get_byte(struct sb_info *sb); static void sb_setmixer(struct sb_info *sb, u_int port, u_int value); static int sb_getmixer(struct sb_info *sb, u_int port); static int sb_reset_dsp(struct sb_info *sb); static void sb_intr(void *arg); static int sb_speed(struct sb_chinfo *ch); static int sb_start(struct sb_chinfo *ch); static int sb_stop(struct sb_chinfo *ch); /* * Common code for the midi and pcm functions * * sb_cmd write a single byte to the CMD port. * sb_cmd1 write a CMD + 1 byte arg * sb_cmd2 write a CMD + 2 byte arg * sb_get_byte returns a single byte from the DSP data port */ static void sb_lock(struct sb_info *sb) { sbc_lock(device_get_softc(sb->parent_dev)); } static void sb_unlock(struct sb_info *sb) { sbc_unlock(device_get_softc(sb->parent_dev)); } static int port_rd(struct resource *port, int off) { return bus_space_read_1(rman_get_bustag(port), rman_get_bushandle(port), off); } static void port_wr(struct resource *port, int off, u_int8_t data) { bus_space_write_1(rman_get_bustag(port), rman_get_bushandle(port), off, data); } static int sb_rd(struct sb_info *sb, int reg) { return port_rd(sb->io_base, reg); } static void sb_wr(struct sb_info *sb, int reg, u_int8_t val) { port_wr(sb->io_base, reg, val); } static int sb_dspready(struct sb_info *sb) { return ((sb_rd(sb, SBDSP_STATUS) & 0x80) == 0); } static int sb_dspwr(struct sb_info *sb, u_char val) { int i; for (i = 0; i < 1000; i++) { if (sb_dspready(sb)) { sb_wr(sb, SBDSP_CMD, val); return 1; } if (i > 10) DELAY((i > 100)? 
1000 : 10); } printf("sb_dspwr(0x%02x) timed out.\n", val); return 0; } static int sb_cmd(struct sb_info *sb, u_char val) { #if 0 printf("sb_cmd: %x\n", val); #endif return sb_dspwr(sb, val); } static int sb_cmd1(struct sb_info *sb, u_char cmd, int val) { #if 0 printf("sb_cmd1: %x, %x\n", cmd, val); #endif if (sb_dspwr(sb, cmd)) { return sb_dspwr(sb, val & 0xff); } else return 0; } static int sb_cmd2(struct sb_info *sb, u_char cmd, int val) { #if 0 printf("sb_cmd2: %x, %x\n", cmd, val); #endif if (sb_dspwr(sb, cmd)) { return sb_dspwr(sb, val & 0xff) && sb_dspwr(sb, (val >> 8) & 0xff); } else return 0; } /* * in the SB, there is a set of indirect "mixer" registers with * address at offset 4, data at offset 5 * * we don't need to interlock these, the mixer lock will suffice. */ static void sb_setmixer(struct sb_info *sb, u_int port, u_int value) { sb_wr(sb, SB_MIX_ADDR, (u_char) (port & 0xff)); /* Select register */ DELAY(10); sb_wr(sb, SB_MIX_DATA, (u_char) (value & 0xff)); DELAY(10); } static int sb_getmixer(struct sb_info *sb, u_int port) { int val; sb_wr(sb, SB_MIX_ADDR, (u_char) (port & 0xff)); /* Select register */ DELAY(10); val = sb_rd(sb, SB_MIX_DATA); DELAY(10); return val; } static u_int sb_get_byte(struct sb_info *sb) { int i; for (i = 1000; i > 0; i--) { if (sb_rd(sb, DSP_DATA_AVAIL) & 0x80) return sb_rd(sb, DSP_READ); else DELAY(20); } return 0xffff; } static int sb_reset_dsp(struct sb_info *sb) { sb_wr(sb, SBDSP_RST, 3); DELAY(100); sb_wr(sb, SBDSP_RST, 0); if (sb_get_byte(sb) != 0xAA) { DEB(printf("sb_reset_dsp 0x%lx failed\n", rman_get_start(sb->io_base))); return ENXIO; /* Sorry */ } return 0; } static void sb_release_resources(struct sb_info *sb, device_t dev) { if (sb->irq) { if (sb->ih) bus_teardown_intr(dev, sb->irq, sb->ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sb->irq); - sb->irq = 0; + sb->irq = NULL; } if (sb->drq) { isa_dma_release(rman_get_start(sb->drq)); bus_release_resource(dev, SYS_RES_DRQ, 0, sb->drq); - sb->drq = 0; + sb->drq = 
NULL; } if (sb->io_base) { bus_release_resource(dev, SYS_RES_IOPORT, 0, sb->io_base); - sb->io_base = 0; + sb->io_base = NULL; } if (sb->parent_dmat) { bus_dma_tag_destroy(sb->parent_dmat); sb->parent_dmat = 0; } free(sb, M_DEVBUF); } static int sb_alloc_resources(struct sb_info *sb, device_t dev) { int rid; rid = 0; if (!sb->io_base) sb->io_base = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); rid = 0; if (!sb->irq) sb->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); rid = 0; if (!sb->drq) sb->drq = bus_alloc_resource_any(dev, SYS_RES_DRQ, &rid, RF_ACTIVE); if (sb->io_base && sb->drq && sb->irq) { isa_dma_acquire(rman_get_start(sb->drq)); isa_dmainit(rman_get_start(sb->drq), sb->bufsize); return 0; } else return ENXIO; } /************************************************************/ static int sbpromix_init(struct snd_mixer *m) { struct sb_info *sb = mix_getdevinfo(m); mix_setdevs(m, SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD | SOUND_MASK_VOLUME); mix_setrecdevs(m, SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD); sb_setmixer(sb, 0, 1); /* reset mixer */ return 0; } static int sbpromix_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct sb_info *sb = mix_getdevinfo(m); int reg, max; u_char val; max = 7; switch (dev) { case SOUND_MIXER_PCM: reg = 0x04; break; case SOUND_MIXER_MIC: reg = 0x0a; max = 3; break; case SOUND_MIXER_VOLUME: reg = 0x22; break; case SOUND_MIXER_SYNTH: reg = 0x26; break; case SOUND_MIXER_CD: reg = 0x28; break; case SOUND_MIXER_LINE: reg = 0x2e; break; default: return -1; } left = (left * max) / 100; right = (dev == SOUND_MIXER_MIC)? left : ((right * max) / 100); val = (dev == SOUND_MIXER_MIC)? 
(left << 1) : (left << 5 | right << 1); sb_setmixer(sb, reg, val); left = (left * 100) / max; right = (right * 100) / max; return left | (right << 8); } static u_int32_t sbpromix_setrecsrc(struct snd_mixer *m, u_int32_t src) { struct sb_info *sb = mix_getdevinfo(m); u_char recdev; if (src == SOUND_MASK_LINE) recdev = 0x06; else if (src == SOUND_MASK_CD) recdev = 0x02; else { /* default: mic */ src = SOUND_MASK_MIC; recdev = 0; } sb_setmixer(sb, RECORD_SRC, recdev | (sb_getmixer(sb, RECORD_SRC) & ~0x07)); return src; } static kobj_method_t sbpromix_mixer_methods[] = { KOBJMETHOD(mixer_init, sbpromix_init), KOBJMETHOD(mixer_set, sbpromix_set), KOBJMETHOD(mixer_setrecsrc, sbpromix_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(sbpromix_mixer); /************************************************************/ static int sbmix_init(struct snd_mixer *m) { struct sb_info *sb = mix_getdevinfo(m); mix_setdevs(m, SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_CD | SOUND_MASK_VOLUME); mix_setrecdevs(m, 0); sb_setmixer(sb, 0, 1); /* reset mixer */ return 0; } static int sbmix_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct sb_info *sb = mix_getdevinfo(m); int reg, max; max = 7; switch (dev) { case SOUND_MIXER_VOLUME: reg = 0x2; break; case SOUND_MIXER_SYNTH: reg = 0x6; break; case SOUND_MIXER_CD: reg = 0x8; break; case SOUND_MIXER_PCM: reg = 0x0a; max = 3; break; default: return -1; } left = (left * max) / 100; sb_setmixer(sb, reg, left << 1); left = (left * 100) / max; return left | (left << 8); } static u_int32_t sbmix_setrecsrc(struct snd_mixer *m, u_int32_t src) { return 0; } static kobj_method_t sbmix_mixer_methods[] = { KOBJMETHOD(mixer_init, sbmix_init), KOBJMETHOD(mixer_set, sbmix_set), KOBJMETHOD(mixer_setrecsrc, sbmix_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(sbmix_mixer); /************************************************************/ static void sb_intr(void *arg) { struct sb_info *sb = (struct sb_info *)arg; sb_lock(sb); if 
(sndbuf_runsz(sb->pch.buffer) > 0) { sb_unlock(sb); chn_intr(sb->pch.channel); sb_lock(sb); } if (sndbuf_runsz(sb->rch.buffer) > 0) { sb_unlock(sb); chn_intr(sb->rch.channel); sb_lock(sb); } sb_rd(sb, DSP_DATA_AVAIL); /* int ack */ sb_unlock(sb); } static int sb_speed(struct sb_chinfo *ch) { struct sb_info *sb = ch->parent; int play = (ch->dir == PCMDIR_PLAY)? 1 : 0; int stereo = (AFMT_CHANNEL(ch->fmt) > 1)? 1 : 0; int speed, tmp, thresh, max; u_char tconst; if (sb->bd_id >= 0x300) { thresh = stereo? 11025 : 23000; max = stereo? 22050 : 44100; } else if (sb->bd_id > 0x200) { thresh = play? 23000 : 13000; max = play? 44100 : 15000; } else { thresh = 999999; max = play? 23000 : 13000; } speed = ch->spd; if (speed > max) speed = max; sb_lock(sb); sb->bd_flags &= ~BD_F_HISPEED; if (speed > thresh) sb->bd_flags |= BD_F_HISPEED; tmp = 65536 - (256000000 / (speed << stereo)); tconst = tmp >> 8; sb_cmd1(sb, 0x40, tconst); /* set time constant */ speed = (256000000 / (65536 - tmp)) >> stereo; ch->spd = speed; sb_unlock(sb); return speed; } static int sb_start(struct sb_chinfo *ch) { struct sb_info *sb = ch->parent; int play = (ch->dir == PCMDIR_PLAY)? 1 : 0; int stereo = (AFMT_CHANNEL(ch->fmt) > 1)? 1 : 0; int l = ch->blksz; u_char i; l--; sb_lock(sb); if (play) sb_cmd(sb, DSP_CMD_SPKON); if (sb->bd_flags & BD_F_HISPEED) i = play? 0x90 : 0x98; else i = play? 0x1c : 0x2c; sb_setmixer(sb, 0x0e, stereo? 2 : 0); sb_cmd2(sb, 0x48, l); sb_cmd(sb, i); sb->bd_flags |= BD_F_DMARUN; sb_unlock(sb); return 0; } static int sb_stop(struct sb_chinfo *ch) { struct sb_info *sb = ch->parent; int play = (ch->dir == PCMDIR_PLAY)? 1 : 0; sb_lock(sb); if (sb->bd_flags & BD_F_HISPEED) sb_reset_dsp(sb); else { #if 0 /* * NOTE: DSP_CMD_DMAEXIT_8 does not work with old * soundblaster. 
*/ sb_cmd(sb, DSP_CMD_DMAEXIT_8); #endif sb_reset_dsp(sb); } if (play) sb_cmd(sb, DSP_CMD_SPKOFF); /* speaker off */ sb_unlock(sb); sb->bd_flags &= ~BD_F_DMARUN; return 0; } /* channel interface */ static void * sbchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sb_info *sb = devinfo; struct sb_chinfo *ch = (dir == PCMDIR_PLAY)? &sb->pch : &sb->rch; ch->parent = sb; ch->channel = c; ch->dir = dir; ch->buffer = b; if (sndbuf_alloc(ch->buffer, sb->parent_dmat, 0, sb->bufsize) != 0) return NULL; sndbuf_dmasetup(ch->buffer, sb->drq); return ch; } static int sbchan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sb_chinfo *ch = data; ch->fmt = format; return 0; } static u_int32_t sbchan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct sb_chinfo *ch = data; ch->spd = speed; return sb_speed(ch); } static u_int32_t sbchan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sb_chinfo *ch = data; ch->blksz = blocksize; return ch->blksz; } static int sbchan_trigger(kobj_t obj, void *data, int go) { struct sb_chinfo *ch = data; if (!PCMTRIG_COMMON(go)) return 0; sndbuf_dma(ch->buffer, go); if (go == PCMTRIG_START) sb_start(ch); else sb_stop(ch); return 0; } static u_int32_t sbchan_getptr(kobj_t obj, void *data) { struct sb_chinfo *ch = data; return sndbuf_dmaptr(ch->buffer); } static struct pcmchan_caps * sbchan_getcaps(kobj_t obj, void *data) { struct sb_chinfo *ch = data; int p = (ch->dir == PCMDIR_PLAY)? 1 : 0; if (ch->parent->bd_id == 0x200) return p? &sb200_playcaps : &sb200_reccaps; if (ch->parent->bd_id < 0x300) return p? &sb201_playcaps : &sb201_reccaps; return p? 
&sbpro_playcaps : &sbpro_reccaps; } static kobj_method_t sbchan_methods[] = { KOBJMETHOD(channel_init, sbchan_init), KOBJMETHOD(channel_setformat, sbchan_setformat), KOBJMETHOD(channel_setspeed, sbchan_setspeed), KOBJMETHOD(channel_setblocksize, sbchan_setblocksize), KOBJMETHOD(channel_trigger, sbchan_trigger), KOBJMETHOD(channel_getptr, sbchan_getptr), KOBJMETHOD(channel_getcaps, sbchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(sbchan); /************************************************************/ static int sb_probe(device_t dev) { char buf[64]; uintptr_t func, ver, r, f; /* The parent device has already been probed. */ r = BUS_READ_IVAR(device_get_parent(dev), dev, 0, &func); if (func != SCF_PCM) return (ENXIO); r = BUS_READ_IVAR(device_get_parent(dev), dev, 1, &ver); f = (ver & 0xffff0000) >> 16; ver &= 0x0000ffff; if ((f & BD_F_ESS) || (ver >= 0x400)) return (ENXIO); snprintf(buf, sizeof buf, "SB DSP %d.%02d", (int) ver >> 8, (int) ver & 0xff); device_set_desc_copy(dev, buf); return 0; } static int sb_attach(device_t dev) { struct sb_info *sb; char status[SND_STATUSLEN]; uintptr_t ver; sb = malloc(sizeof(*sb), M_DEVBUF, M_WAITOK | M_ZERO); sb->parent_dev = device_get_parent(dev); BUS_READ_IVAR(device_get_parent(dev), dev, 1, &ver); sb->bd_id = ver & 0x0000ffff; sb->bd_flags = (ver & 0xffff0000) >> 16; sb->bufsize = pcm_getbuffersize(dev, 4096, SB_DEFAULT_BUFSZ, 65536); if (sb_alloc_resources(sb, dev)) goto no; if (sb_reset_dsp(sb)) goto no; if (mixer_init(dev, (sb->bd_id < 0x300)? 
&sbmix_mixer_class : &sbpromix_mixer_class, sb)) goto no; if (snd_setup_intr(dev, sb->irq, 0, sb_intr, sb, &sb->ih)) goto no; pcm_setflags(dev, pcm_getflags(dev) | SD_F_SIMPLEX); if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_24BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/sb->bufsize, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &sb->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto no; } snprintf(status, SND_STATUSLEN, "at io 0x%jx irq %jd drq %jd bufsz %u %s", rman_get_start(sb->io_base), rman_get_start(sb->irq), rman_get_start(sb->drq), sb->bufsize, PCM_KLDSTRING(snd_sb8)); if (pcm_register(dev, sb, 1, 1)) goto no; pcm_addchan(dev, PCMDIR_REC, &sbchan_class, sb); pcm_addchan(dev, PCMDIR_PLAY, &sbchan_class, sb); pcm_setstatus(dev, status); return 0; no: sb_release_resources(sb, dev); return ENXIO; } static int sb_detach(device_t dev) { int r; struct sb_info *sb; r = pcm_unregister(dev); if (r) return r; sb = pcm_getdevinfo(dev); sb_release_resources(sb, dev); return 0; } static device_method_t sb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sb_probe), DEVMETHOD(device_attach, sb_attach), DEVMETHOD(device_detach, sb_detach), { 0, 0 } }; static driver_t sb_driver = { "pcm", sb_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_sb8, sbc, sb_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_sb8, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_DEPEND(snd_sb8, snd_sbc, 1, 1, 1); MODULE_VERSION(snd_sb8, 1); Index: head/sys/dev/sound/midi/midi.c =================================================================== --- head/sys/dev/sound/midi/midi.c (revision 297861) +++ head/sys/dev/sound/midi/midi.c (revision 297862) @@ -1,1540 +1,1540 @@ /*- * Copyright (c) 2003 Mathew Kanner * Copyright (c) 1998 The NetBSD Foundation, Inc. * All rights reserved. 
* * This code is derived from software contributed to The NetBSD Foundation * by Lennart Augustsson (augustss@netbsd.org). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parts of this file started out as NetBSD: midi.c 1.31 * They are mostly gone. 
Still the most obvious will be the state * machine midi_in */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include "mpu_if.h" #include #include "synth_if.h" MALLOC_DEFINE(M_MIDI, "midi buffers", "Midi data allocation area"); #ifndef KOBJMETHOD_END #define KOBJMETHOD_END { NULL, NULL } #endif #define PCMMKMINOR(u, d, c) ((((c) & 0xff) << 16) | (((u) & 0x0f) << 4) | ((d) & 0x0f)) #define MIDIMKMINOR(u, d, c) PCMMKMINOR(u, d, c) #define MIDI_DEV_RAW 2 #define MIDI_DEV_MIDICTL 12 enum midi_states { MIDI_IN_START, MIDI_IN_SYSEX, MIDI_IN_DATA }; /* * The MPU interface current has init() uninit() inqsize() outqsize() * callback() : fiddle with the tx|rx status. */ #include "mpu_if.h" /* * /dev/rmidi Structure definitions */ #define MIDI_NAMELEN 16 struct snd_midi { KOBJ_FIELDS; struct mtx lock; /* Protects all but queues */ void *cookie; int unit; /* Should only be used in midistat */ int channel; /* Should only be used in midistat */ int busy; int flags; /* File flags */ char name[MIDI_NAMELEN]; struct mtx qlock; /* Protects inq, outq and flags */ MIDIQ_HEAD(, char) inq, outq; int rchan, wchan; struct selinfo rsel, wsel; int hiwat; /* QLEN(outq)>High-water -> disable * writes from userland */ enum midi_states inq_state; int inq_status, inq_left; /* Variables for the state machine in * Midi_in, this is to provide that * signals only get issued only * complete command packets. 
*/ struct proc *async; struct cdev *dev; struct synth_midi *synth; int synth_flags; TAILQ_ENTRY(snd_midi) link; }; struct synth_midi { KOBJ_FIELDS; struct snd_midi *m; }; static synth_open_t midisynth_open; static synth_close_t midisynth_close; static synth_writeraw_t midisynth_writeraw; static synth_killnote_t midisynth_killnote; static synth_startnote_t midisynth_startnote; static synth_setinstr_t midisynth_setinstr; static synth_alloc_t midisynth_alloc; static synth_controller_t midisynth_controller; static synth_bender_t midisynth_bender; static kobj_method_t midisynth_methods[] = { KOBJMETHOD(synth_open, midisynth_open), KOBJMETHOD(synth_close, midisynth_close), KOBJMETHOD(synth_writeraw, midisynth_writeraw), KOBJMETHOD(synth_setinstr, midisynth_setinstr), KOBJMETHOD(synth_startnote, midisynth_startnote), KOBJMETHOD(synth_killnote, midisynth_killnote), KOBJMETHOD(synth_alloc, midisynth_alloc), KOBJMETHOD(synth_controller, midisynth_controller), KOBJMETHOD(synth_bender, midisynth_bender), KOBJMETHOD_END }; DEFINE_CLASS(midisynth, midisynth_methods, 0); /* * Module Exports & Interface * * struct midi_chan *midi_init(MPU_CLASS cls, int unit, int chan, * void *cookie) * int midi_uninit(struct snd_midi *) * * 0 == no error * EBUSY or other error * * int midi_in(struct snd_midi *, char *buf, int count) * int midi_out(struct snd_midi *, char *buf, int count) * * midi_{in,out} return actual size transfered * */ /* * midi_devs tailq, holder of all rmidi instances protected by midistat_lock */ TAILQ_HEAD(, snd_midi) midi_devs; /* * /dev/midistat variables and declarations, protected by midistat_lock */ static struct mtx midistat_lock; static int midistat_isopen = 0; static struct sbuf midistat_sbuf; static int midistat_bufptr; static struct cdev *midistat_dev; /* * /dev/midistat dev_t declarations */ static d_open_t midistat_open; static d_close_t midistat_close; static d_read_t midistat_read; static struct cdevsw midistat_cdevsw = { .d_version = D_VERSION, .d_open = 
midistat_open, .d_close = midistat_close, .d_read = midistat_read, .d_name = "midistat", }; /* * /dev/rmidi dev_t declarations, struct variable access is protected by * locks contained within the structure. */ static d_open_t midi_open; static d_close_t midi_close; static d_ioctl_t midi_ioctl; static d_read_t midi_read; static d_write_t midi_write; static d_poll_t midi_poll; static struct cdevsw midi_cdevsw = { .d_version = D_VERSION, .d_open = midi_open, .d_close = midi_close, .d_read = midi_read, .d_write = midi_write, .d_ioctl = midi_ioctl, .d_poll = midi_poll, .d_name = "rmidi", }; /* * Prototypes of library functions */ static int midi_destroy(struct snd_midi *, int); static int midistat_prepare(struct sbuf * s); static int midi_load(void); static int midi_unload(void); /* * Misc declr. */ SYSCTL_NODE(_hw, OID_AUTO, midi, CTLFLAG_RD, 0, "Midi driver"); static SYSCTL_NODE(_hw_midi, OID_AUTO, stat, CTLFLAG_RD, 0, "Status device"); int midi_debug; /* XXX: should this be moved into debug.midi? */ SYSCTL_INT(_hw_midi, OID_AUTO, debug, CTLFLAG_RW, &midi_debug, 0, ""); int midi_dumpraw; SYSCTL_INT(_hw_midi, OID_AUTO, dumpraw, CTLFLAG_RW, &midi_dumpraw, 0, ""); int midi_instroff; SYSCTL_INT(_hw_midi, OID_AUTO, instroff, CTLFLAG_RW, &midi_instroff, 0, ""); int midistat_verbose; SYSCTL_INT(_hw_midi_stat, OID_AUTO, verbose, CTLFLAG_RW, &midistat_verbose, 0, ""); #define MIDI_DEBUG(l,a) if(midi_debug>=l) a /* * CODE START */ /* * Register a new rmidi device. cls midi_if interface unit == 0 means * auto-assign new unit number unit != 0 already assigned a unit number, eg. * not the first channel provided by this device. channel, sub-unit * cookie is passed back on MPU calls Typical device drivers will call with * unit=0, channel=1..(number of channels) and cookie=soft_c and won't care * what unit number is used. * * It is an error to call midi_init with an already used unit/channel combo. 
 *
 * Returns NULL on error (duplicate unit/channel, allocation failure, or
 * hardware-init failure).
 *
 */
struct snd_midi *
midi_init(kobj_class_t cls, int unit, int channel, void *cookie)
{
	struct snd_midi *m;
	int i;
	int inqsize, outqsize;
	MIDI_TYPE *buf;

	MIDI_DEBUG(1, printf("midiinit: unit %d/%d.\n", unit, channel));
	mtx_lock(&midistat_lock);
	/*
	 * Protect against call with existing unit/channel or auto-allocate a
	 * new unit number.
	 */
	i = -1;
	TAILQ_FOREACH(m, &midi_devs, link) {
		mtx_lock(&m->lock);
		if (unit != 0) {
			/* Explicit unit requested: refuse duplicates. */
			if (m->unit == unit && m->channel == channel) {
				mtx_unlock(&m->lock);
				goto err0;
			}
		} else {
			/*
			 * Find a better unit number: track the highest in
			 * use so we can allocate highest+1 below.
			 */
			if (m->unit > i)
				i = m->unit;
		}
		mtx_unlock(&m->lock);
	}

	if (unit == 0)
		unit = i + 1;

	MIDI_DEBUG(1, printf("midiinit #2: unit %d/%d.\n", unit, channel));
	/* M_NOWAIT: we hold midistat_lock, so we must not sleep here. */
	m = malloc(sizeof(*m), M_MIDI, M_NOWAIT | M_ZERO);
	if (m == NULL)
		goto err0;

	m->synth = malloc(sizeof(*m->synth), M_MIDI, M_NOWAIT | M_ZERO);
	if (m->synth == NULL)
		goto err1;
	kobj_init((kobj_t)m->synth, &midisynth_class);
	m->synth->m = m;
	kobj_init((kobj_t)m, cls);
	/* Queue sizes come from the hardware driver's methods. */
	inqsize = MPU_INQSIZE(m, cookie);
	outqsize = MPU_OUTQSIZE(m, cookie);

	MIDI_DEBUG(1, printf("midiinit queues %d/%d.\n", inqsize, outqsize));
	if (!inqsize && !outqsize)
		goto err2;

	mtx_init(&m->lock, "raw midi", NULL, 0);
	mtx_init(&m->qlock, "q raw midi", NULL, 0);

	mtx_lock(&m->lock);
	mtx_lock(&m->qlock);

	if (inqsize)
		buf = malloc(sizeof(MIDI_TYPE) * inqsize, M_MIDI, M_NOWAIT);
	else
		buf = NULL;

	MIDIQ_INIT(m->inq, buf, inqsize);

	if (outqsize)
		buf = malloc(sizeof(MIDI_TYPE) * outqsize, M_MIDI, M_NOWAIT);
	else
		buf = NULL;
	/* Writers are woken once the output queue drains below hiwat. */
	m->hiwat = outqsize / 2;

	MIDIQ_INIT(m->outq, buf, outqsize);

	/* Catch a failed queue-buffer allocation above. */
	if ((inqsize && !MIDIQ_BUF(m->inq)) ||
	    (outqsize && !MIDIQ_BUF(m->outq)))
		goto err3;

	m->busy = 0;
	m->flags = 0;
	m->unit = unit;
	m->channel = channel;
	m->cookie = cookie;

	if (MPU_INIT(m, cookie))
		goto err3;

	mtx_unlock(&m->lock);
	mtx_unlock(&m->qlock);

	TAILQ_INSERT_TAIL(&midi_devs, m, link);

	mtx_unlock(&midistat_lock);

	m->dev = make_dev(&midi_cdevsw,
	    MIDIMKMINOR(unit, MIDI_DEV_RAW, channel), UID_ROOT, GID_WHEEL,
	    0666, "midi%d.%d", unit, channel);
	m->dev->si_drv1 = m;

	return m;

	/*
	 * Error unwinding, innermost failure first.  NOTE(review):
	 * err3 destroys m->lock/m->qlock while they are still held by
	 * curthread — verify this is acceptable for mtx_destroy(9) in
	 * the target branch.
	 */
err3:	mtx_destroy(&m->qlock);
	mtx_destroy(&m->lock);

	if (MIDIQ_BUF(m->inq))
		free(MIDIQ_BUF(m->inq), M_MIDI);
	if (MIDIQ_BUF(m->outq))
		free(MIDIQ_BUF(m->outq), M_MIDI);
err2:	free(m->synth, M_MIDI);
err1:	free(m, M_MIDI);
err0:	mtx_unlock(&midistat_lock);
	MIDI_DEBUG(1, printf("midi_init ended in error\n"));
	return NULL;
}

/*
 * midi_uninit does not call MIDI_UNINIT, as since this is the implementors
 * entry point.  midi_uninit if fact, does not send any methods.  A call to
 * midi_uninit is a defacto promise that you won't manipulate ch anymore.
 *
 * Returns 0 on success, EBUSY if the device is open with no sleeping
 * readers/writers to evict.
 */
int
midi_uninit(struct snd_midi *m)
{
	int err;

	err = EBUSY;
	mtx_lock(&midistat_lock);
	mtx_lock(&m->lock);
	if (m->busy) {
		/* Open but nobody asleep to kick out: refuse. */
		if (!(m->rchan || m->wchan))
			goto err;

		/* Wake any sleeping reader/writer so they can bail out. */
		if (m->rchan) {
			wakeup(&m->rchan);
			m->rchan = 0;
		}
		if (m->wchan) {
			wakeup(&m->wchan);
			m->wchan = 0;
		}
	}
	/* midi_destroy() consumes m->lock (it unlocks internally). */
	err = midi_destroy(m, 0);
	if (!err)
		goto exit;

err:	mtx_unlock(&m->lock);
exit:	mtx_unlock(&midistat_lock);
	return err;
}

/*
 * midi_in: process all data until the queue is full, then discards the rest.
 * Since midi_in is a state machine, data discards can cause it to get out of
 * whack.  Process as much as possible.  It calls, wakeup, selnotify and
 * psignal at most once.
 */

#ifdef notdef
static int midi_lengths[] = {2, 2, 2, 2, 1, 1, 2, 0};
#endif					/* notdef */
/* Number of bytes in a MIDI command */
#define MIDI_LENGTH(d) (midi_lengths[((d) >> 4) & 7])
#define MIDI_ACK	0xfe
#define MIDI_IS_STATUS(d) ((d) >= 0x80)
#define MIDI_IS_COMMON(d) ((d) >= 0xf0)

#define MIDI_SYSEX_START	0xF0
#define MIDI_SYSEX_END	0xF7

/*
 * Called by the hardware driver with freshly received bytes.  Enqueues the
 * whole buffer onto m->inq when it fits (all-or-nothing), then wakes one
 * sleeping reader, notifies select/poll waiters and posts SIGIO to an async
 * owner.  Returns the number of bytes consumed (0 when the queue was too
 * full, `size` when M_RX is not set — caller should not retry).
 */
int
midi_in(struct snd_midi *m, MIDI_TYPE *buf, int size)
{
	/* int i, sig, enq; */
	int used;

	/* MIDI_TYPE data; */
	MIDI_DEBUG(5, printf("midi_in: m=%p size=%d\n", m, size));

	/*
	 * XXX: locking flub — M_RX is tested before m->qlock is taken.
	 */
	if (!(m->flags & M_RX))
		return size;

	used = 0;

	mtx_lock(&m->qlock);
#if 0
	/*
	 * The disabled code below is a per-byte MIDI protocol state machine
	 * (status/data/sysex framing).  The live code further down does a
	 * simple bulk enqueue instead.
	 *
	 * Don't bother queuing if not in read mode.  Discard everything and
	 * return size so the caller doesn't freak out.
	 */
	if (!(m->flags & M_RX))
		return size;

	for (i = sig = 0; i < size; i++) {

		data = buf[i];
		enq = 0;
		if (data == MIDI_ACK)
			continue;

		switch (m->inq_state) {
		case MIDI_IN_START:
			if (MIDI_IS_STATUS(data)) {
				switch (data) {
				case 0xf0:	/* Sysex */
					m->inq_state = MIDI_IN_SYSEX;
					break;
				case 0xf1:	/* MTC quarter frame */
				case 0xf3:	/* Song select */
					m->inq_state = MIDI_IN_DATA;
					enq = 1;
					m->inq_left = 1;
					break;
				case 0xf2:	/* Song position pointer */
					m->inq_state = MIDI_IN_DATA;
					enq = 1;
					m->inq_left = 2;
					break;
				default:
					if (MIDI_IS_COMMON(data)) {
						enq = 1;
						sig = 1;
					} else {
						m->inq_state = MIDI_IN_DATA;
						enq = 1;
						m->inq_status = data;
						m->inq_left = MIDI_LENGTH(data);
					}
					break;
				}
			} else if (MIDI_IS_STATUS(m->inq_status)) {
				/* Running status: re-emit the last status. */
				m->inq_state = MIDI_IN_DATA;
				if (!MIDIQ_FULL(m->inq)) {
					used++;
					MIDIQ_ENQ(m->inq, &m->inq_status, 1);
				}
				enq = 1;
				m->inq_left = MIDI_LENGTH(m->inq_status) - 1;
			}
			break;
			/*
			 * End of case MIDI_IN_START:
			 */

		case MIDI_IN_DATA:
			enq = 1;
			if (--m->inq_left <= 0)
				sig = 1;/* deliver data */
			break;
		case MIDI_IN_SYSEX:
			if (data == MIDI_SYSEX_END)
				m->inq_state = MIDI_IN_START;
			break;
		}

		if (enq)
			if (!MIDIQ_FULL(m->inq)) {
				MIDIQ_ENQ(m->inq, &data, 1);
				used++;
			}
		/*
		 * End of the state machines main "for loop"
		 */
	}
	if (sig) {
#endif
		MIDI_DEBUG(6, printf("midi_in: len %jd avail %jd\n",
		    (intmax_t)MIDIQ_LEN(m->inq),
		    (intmax_t)MIDIQ_AVAIL(m->inq)));
		/* All-or-nothing: only enqueue when the whole buffer fits. */
		if (MIDIQ_AVAIL(m->inq) > size) {
			used = size;
			MIDIQ_ENQ(m->inq, buf, size);
		} else {
			MIDI_DEBUG(4, printf("midi_in: Discarding data qu\n"));
			mtx_unlock(&m->qlock);
			return 0;
		}
		/* Wake a blocked reader, poll/select waiters, async owner. */
		if (m->rchan) {
			wakeup(&m->rchan);
			m->rchan = 0;
		}
		selwakeup(&m->rsel);
		if (m->async) {
			PROC_LOCK(m->async);
			kern_psignal(m->async, SIGIO);
			PROC_UNLOCK(m->async);
		}
#if 0
	}
#endif
	mtx_unlock(&m->qlock);
	return used;
}

/*
 * midi_out: The only clearer of the M_TXEN flag.
*/ int midi_out(struct snd_midi *m, MIDI_TYPE *buf, int size) { int used; /* * XXX: locking flub */ if (!(m->flags & M_TXEN)) return 0; MIDI_DEBUG(2, printf("midi_out: %p\n", m)); mtx_lock(&m->qlock); used = MIN(size, MIDIQ_LEN(m->outq)); MIDI_DEBUG(3, printf("midi_out: used %d\n", used)); if (used) MIDIQ_DEQ(m->outq, buf, used); if (MIDIQ_EMPTY(m->outq)) { m->flags &= ~M_TXEN; MPU_CALLBACKP(m, m->cookie, m->flags); } if (used && MIDIQ_AVAIL(m->outq) > m->hiwat) { if (m->wchan) { wakeup(&m->wchan); m->wchan = 0; } selwakeup(&m->wsel); if (m->async) { PROC_LOCK(m->async); kern_psignal(m->async, SIGIO); PROC_UNLOCK(m->async); } } mtx_unlock(&m->qlock); return used; } /* * /dev/rmidi#.# device access functions */ int midi_open(struct cdev *i_dev, int flags, int mode, struct thread *td) { struct snd_midi *m = i_dev->si_drv1; int retval; MIDI_DEBUG(1, printf("midiopen %p %s %s\n", td, flags & FREAD ? "M_RX" : "", flags & FWRITE ? "M_TX" : "")); if (m == NULL) return ENXIO; mtx_lock(&m->lock); mtx_lock(&m->qlock); retval = 0; if (flags & FREAD) { if (MIDIQ_SIZE(m->inq) == 0) retval = ENXIO; else if (m->flags & M_RX) retval = EBUSY; if (retval) goto err; } if (flags & FWRITE) { if (MIDIQ_SIZE(m->outq) == 0) retval = ENXIO; else if (m->flags & M_TX) retval = EBUSY; if (retval) goto err; } m->busy++; m->rchan = 0; m->wchan = 0; m->async = 0; if (flags & FREAD) { m->flags |= M_RX | M_RXEN; /* * Only clear the inq, the outq might still have data to drain * from a previous session */ MIDIQ_CLEAR(m->inq); } if (flags & FWRITE) m->flags |= M_TX; MPU_CALLBACK(m, m->cookie, m->flags); MIDI_DEBUG(2, printf("midi_open: opened.\n")); err: mtx_unlock(&m->qlock); mtx_unlock(&m->lock); return retval; } int midi_close(struct cdev *i_dev, int flags, int mode, struct thread *td) { struct snd_midi *m = i_dev->si_drv1; int retval; int oldflags; MIDI_DEBUG(1, printf("midi_close %p %s %s\n", td, flags & FREAD ? "M_RX" : "", flags & FWRITE ? 
"M_TX" : "")); if (m == NULL) return ENXIO; mtx_lock(&m->lock); mtx_lock(&m->qlock); if ((flags & FREAD && !(m->flags & M_RX)) || (flags & FWRITE && !(m->flags & M_TX))) { retval = ENXIO; goto err; } m->busy--; oldflags = m->flags; if (flags & FREAD) m->flags &= ~(M_RX | M_RXEN); if (flags & FWRITE) m->flags &= ~M_TX; if ((m->flags & (M_TXEN | M_RXEN)) != (oldflags & (M_RXEN | M_TXEN))) MPU_CALLBACK(m, m->cookie, m->flags); MIDI_DEBUG(1, printf("midi_close: closed, busy = %d.\n", m->busy)); mtx_unlock(&m->qlock); mtx_unlock(&m->lock); retval = 0; err: return retval; } /* * TODO: midi_read, per oss programmer's guide pg. 42 should return as soon * as data is available. */ int midi_read(struct cdev *i_dev, struct uio *uio, int ioflag) { #define MIDI_RSIZE 32 struct snd_midi *m = i_dev->si_drv1; int retval; int used; char buf[MIDI_RSIZE]; MIDI_DEBUG(5, printf("midiread: count=%lu\n", (unsigned long)uio->uio_resid)); retval = EIO; if (m == NULL) goto err0; mtx_lock(&m->lock); mtx_lock(&m->qlock); if (!(m->flags & M_RX)) goto err1; while (uio->uio_resid > 0) { while (MIDIQ_EMPTY(m->inq)) { retval = EWOULDBLOCK; if (ioflag & O_NONBLOCK) goto err1; mtx_unlock(&m->lock); m->rchan = 1; retval = msleep(&m->rchan, &m->qlock, PCATCH | PDROP, "midi RX", 0); /* * We slept, maybe things have changed since last * dying check */ if (retval == EINTR) goto err0; if (m != i_dev->si_drv1) retval = ENXIO; /* if (retval && retval != ERESTART) */ if (retval) goto err0; mtx_lock(&m->lock); mtx_lock(&m->qlock); m->rchan = 0; if (!m->busy) goto err1; } MIDI_DEBUG(6, printf("midi_read start\n")); /* * At this point, it is certain that m->inq has data */ used = MIN(MIDIQ_LEN(m->inq), uio->uio_resid); used = MIN(used, MIDI_RSIZE); MIDI_DEBUG(6, printf("midiread: uiomove cc=%d\n", used)); MIDIQ_DEQ(m->inq, buf, used); retval = uiomove(buf, used, uio); if (retval) goto err1; } /* * If we Made it here then transfer is good */ retval = 0; err1: mtx_unlock(&m->qlock); mtx_unlock(&m->lock); err0: 
MIDI_DEBUG(4, printf("midi_read: ret %d\n", retval)); return retval; } /* * midi_write: The only setter of M_TXEN */ int midi_write(struct cdev *i_dev, struct uio *uio, int ioflag) { #define MIDI_WSIZE 32 struct snd_midi *m = i_dev->si_drv1; int retval; int used; char buf[MIDI_WSIZE]; MIDI_DEBUG(4, printf("midi_write\n")); retval = 0; if (m == NULL) goto err0; mtx_lock(&m->lock); mtx_lock(&m->qlock); if (!(m->flags & M_TX)) goto err1; while (uio->uio_resid > 0) { while (MIDIQ_AVAIL(m->outq) == 0) { retval = EWOULDBLOCK; if (ioflag & O_NONBLOCK) goto err1; mtx_unlock(&m->lock); m->wchan = 1; MIDI_DEBUG(3, printf("midi_write msleep\n")); retval = msleep(&m->wchan, &m->qlock, PCATCH | PDROP, "midi TX", 0); /* * We slept, maybe things have changed since last * dying check */ if (retval == EINTR) goto err0; if (m != i_dev->si_drv1) retval = ENXIO; if (retval) goto err0; mtx_lock(&m->lock); mtx_lock(&m->qlock); m->wchan = 0; if (!m->busy) goto err1; } /* * We are certain than data can be placed on the queue */ used = MIN(MIDIQ_AVAIL(m->outq), uio->uio_resid); used = MIN(used, MIDI_WSIZE); MIDI_DEBUG(5, printf("midiout: resid %zd len %jd avail %jd\n", uio->uio_resid, (intmax_t)MIDIQ_LEN(m->outq), (intmax_t)MIDIQ_AVAIL(m->outq))); MIDI_DEBUG(5, printf("midi_write: uiomove cc=%d\n", used)); retval = uiomove(buf, used, uio); if (retval) goto err1; MIDIQ_ENQ(m->outq, buf, used); /* * Inform the bottom half that data can be written */ if (!(m->flags & M_TXEN)) { m->flags |= M_TXEN; MPU_CALLBACK(m, m->cookie, m->flags); } } /* * If we Made it here then transfer is good */ retval = 0; err1: mtx_unlock(&m->qlock); mtx_unlock(&m->lock); err0: return retval; } int midi_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode, struct thread *td) { return ENXIO; } int midi_poll(struct cdev *i_dev, int events, struct thread *td) { struct snd_midi *m = i_dev->si_drv1; int revents; if (m == NULL) return 0; revents = 0; mtx_lock(&m->lock); mtx_lock(&m->qlock); if (events & (POLLIN | 
POLLRDNORM)) if (!MIDIQ_EMPTY(m->inq)) events |= events & (POLLIN | POLLRDNORM); if (events & (POLLOUT | POLLWRNORM)) if (MIDIQ_AVAIL(m->outq) < m->hiwat) events |= events & (POLLOUT | POLLWRNORM); if (revents == 0) { if (events & (POLLIN | POLLRDNORM)) selrecord(td, &m->rsel); if (events & (POLLOUT | POLLWRNORM)) selrecord(td, &m->wsel); } mtx_unlock(&m->lock); mtx_unlock(&m->qlock); return (revents); } /* * /dev/midistat device functions * */ static int midistat_open(struct cdev *i_dev, int flags, int mode, struct thread *td) { int error; MIDI_DEBUG(1, printf("midistat_open\n")); mtx_lock(&midistat_lock); if (midistat_isopen) { mtx_unlock(&midistat_lock); return EBUSY; } midistat_isopen = 1; mtx_unlock(&midistat_lock); if (sbuf_new(&midistat_sbuf, NULL, 4096, SBUF_AUTOEXTEND) == NULL) { error = ENXIO; mtx_lock(&midistat_lock); goto out; } mtx_lock(&midistat_lock); midistat_bufptr = 0; error = (midistat_prepare(&midistat_sbuf) > 0) ? 0 : ENOMEM; out: if (error) midistat_isopen = 0; mtx_unlock(&midistat_lock); return error; } static int midistat_close(struct cdev *i_dev, int flags, int mode, struct thread *td) { MIDI_DEBUG(1, printf("midistat_close\n")); mtx_lock(&midistat_lock); if (!midistat_isopen) { mtx_unlock(&midistat_lock); return EBADF; } sbuf_delete(&midistat_sbuf); midistat_isopen = 0; mtx_unlock(&midistat_lock); return 0; } static int midistat_read(struct cdev *i_dev, struct uio *buf, int flag) { int l, err; MIDI_DEBUG(4, printf("midistat_read\n")); mtx_lock(&midistat_lock); if (!midistat_isopen) { mtx_unlock(&midistat_lock); return EBADF; } l = min(buf->uio_resid, sbuf_len(&midistat_sbuf) - midistat_bufptr); err = 0; if (l > 0) { mtx_unlock(&midistat_lock); err = uiomove(sbuf_data(&midistat_sbuf) + midistat_bufptr, l, buf); mtx_lock(&midistat_lock); } else l = 0; midistat_bufptr += l; mtx_unlock(&midistat_lock); return err; } /* * Module library functions */ static int midistat_prepare(struct sbuf *s) { struct snd_midi *m; mtx_assert(&midistat_lock, 
MA_OWNED); sbuf_printf(s, "FreeBSD Midi Driver (midi2)\n"); if (TAILQ_EMPTY(&midi_devs)) { sbuf_printf(s, "No devices installed.\n"); sbuf_finish(s); return sbuf_len(s); } sbuf_printf(s, "Installed devices:\n"); TAILQ_FOREACH(m, &midi_devs, link) { mtx_lock(&m->lock); sbuf_printf(s, "%s [%d/%d:%s]", m->name, m->unit, m->channel, MPU_PROVIDER(m, m->cookie)); sbuf_printf(s, "%s", MPU_DESCR(m, m->cookie, midistat_verbose)); sbuf_printf(s, "\n"); mtx_unlock(&m->lock); } sbuf_finish(s); return sbuf_len(s); } #ifdef notdef /* * Convert IOCTL command to string for debugging */ static char * midi_cmdname(int cmd) { static struct { int cmd; char *name; } *tab, cmdtab_midiioctl[] = { #define A(x) {x, ## x} /* * Once we have some real IOCTLs define, the following will * be relavant. * * A(SNDCTL_MIDI_PRETIME), A(SNDCTL_MIDI_MPUMODE), * A(SNDCTL_MIDI_MPUCMD), A(SNDCTL_SYNTH_INFO), * A(SNDCTL_MIDI_INFO), A(SNDCTL_SYNTH_MEMAVL), * A(SNDCTL_FM_LOAD_INSTR), A(SNDCTL_FM_4OP_ENABLE), * A(MIOSPASSTHRU), A(MIOGPASSTHRU), A(AIONWRITE), * A(AIOGSIZE), A(AIOSSIZE), A(AIOGFMT), A(AIOSFMT), * A(AIOGMIX), A(AIOSMIX), A(AIOSTOP), A(AIOSYNC), * A(AIOGCAP), */ #undef A { -1, "unknown" }, }; for (tab = cmdtab_midiioctl; tab->cmd != cmd && tab->cmd != -1; tab++); return tab->name; } #endif /* notdef */ /* * midisynth */ int midisynth_open(void *n, void *arg, int flags) { struct snd_midi *m = ((struct synth_midi *)n)->m; int retval; MIDI_DEBUG(1, printf("midisynth_open %s %s\n", flags & FREAD ? "M_RX" : "", flags & FWRITE ? 
"M_TX" : "")); if (m == NULL) return ENXIO; mtx_lock(&m->lock); mtx_lock(&m->qlock); retval = 0; if (flags & FREAD) { if (MIDIQ_SIZE(m->inq) == 0) retval = ENXIO; else if (m->flags & M_RX) retval = EBUSY; if (retval) goto err; } if (flags & FWRITE) { if (MIDIQ_SIZE(m->outq) == 0) retval = ENXIO; else if (m->flags & M_TX) retval = EBUSY; if (retval) goto err; } m->busy++; /* * TODO: Consider m->async = 0; */ if (flags & FREAD) { m->flags |= M_RX | M_RXEN; /* * Only clear the inq, the outq might still have data to drain * from a previous session */ MIDIQ_CLEAR(m->inq); m->rchan = 0; } if (flags & FWRITE) { m->flags |= M_TX; m->wchan = 0; } m->synth_flags = flags & (FREAD | FWRITE); MPU_CALLBACK(m, m->cookie, m->flags); err: mtx_unlock(&m->qlock); mtx_unlock(&m->lock); MIDI_DEBUG(2, printf("midisynth_open: return %d.\n", retval)); return retval; } int midisynth_close(void *n) { struct snd_midi *m = ((struct synth_midi *)n)->m; int retval; int oldflags; MIDI_DEBUG(1, printf("midisynth_close %s %s\n", m->synth_flags & FREAD ? "M_RX" : "", m->synth_flags & FWRITE ? "M_TX" : "")); if (m == NULL) return ENXIO; mtx_lock(&m->lock); mtx_lock(&m->qlock); if ((m->synth_flags & FREAD && !(m->flags & M_RX)) || (m->synth_flags & FWRITE && !(m->flags & M_TX))) { retval = ENXIO; goto err; } m->busy--; oldflags = m->flags; if (m->synth_flags & FREAD) m->flags &= ~(M_RX | M_RXEN); if (m->synth_flags & FWRITE) m->flags &= ~M_TX; if ((m->flags & (M_TXEN | M_RXEN)) != (oldflags & (M_RXEN | M_TXEN))) MPU_CALLBACK(m, m->cookie, m->flags); MIDI_DEBUG(1, printf("midi_close: closed, busy = %d.\n", m->busy)); mtx_unlock(&m->qlock); mtx_unlock(&m->lock); retval = 0; err: return retval; } /* * Always blocking. 
*/ int midisynth_writeraw(void *n, uint8_t *buf, size_t len) { struct snd_midi *m = ((struct synth_midi *)n)->m; int retval; int used; int i; MIDI_DEBUG(4, printf("midisynth_writeraw\n")); retval = 0; if (m == NULL) return ENXIO; mtx_lock(&m->lock); mtx_lock(&m->qlock); if (!(m->flags & M_TX)) goto err1; if (midi_dumpraw) printf("midi dump: "); while (len > 0) { while (MIDIQ_AVAIL(m->outq) == 0) { if (!(m->flags & M_TXEN)) { m->flags |= M_TXEN; MPU_CALLBACK(m, m->cookie, m->flags); } mtx_unlock(&m->lock); m->wchan = 1; MIDI_DEBUG(3, printf("midisynth_writeraw msleep\n")); retval = msleep(&m->wchan, &m->qlock, PCATCH | PDROP, "midi TX", 0); /* * We slept, maybe things have changed since last * dying check */ if (retval == EINTR) goto err0; if (retval) goto err0; mtx_lock(&m->lock); mtx_lock(&m->qlock); m->wchan = 0; if (!m->busy) goto err1; } /* * We are certain than data can be placed on the queue */ used = MIN(MIDIQ_AVAIL(m->outq), len); used = MIN(used, MIDI_WSIZE); MIDI_DEBUG(5, printf("midi_synth: resid %zu len %jd avail %jd\n", len, (intmax_t)MIDIQ_LEN(m->outq), (intmax_t)MIDIQ_AVAIL(m->outq))); if (midi_dumpraw) for (i = 0; i < used; i++) printf("%x ", buf[i]); MIDIQ_ENQ(m->outq, buf, used); len -= used; /* * Inform the bottom half that data can be written */ if (!(m->flags & M_TXEN)) { m->flags |= M_TXEN; MPU_CALLBACK(m, m->cookie, m->flags); } } /* * If we Made it here then transfer is good */ if (midi_dumpraw) printf("\n"); retval = 0; err1: mtx_unlock(&m->qlock); mtx_unlock(&m->lock); err0: return retval; } static int midisynth_killnote(void *n, uint8_t chn, uint8_t note, uint8_t vel) { u_char c[3]; if (note > 127 || chn > 15) return (EINVAL); if (vel > 127) vel = 127; if (vel == 64) { c[0] = 0x90 | (chn & 0x0f); /* Note on. */ c[1] = (u_char)note; c[2] = 0; } else { c[0] = 0x80 | (chn & 0x0f); /* Note off. 
*/ c[1] = (u_char)note; c[2] = (u_char)vel; } return midisynth_writeraw(n, c, 3); } static int midisynth_setinstr(void *n, uint8_t chn, uint16_t instr) { u_char c[2]; if (instr > 127 || chn > 15) return EINVAL; c[0] = 0xc0 | (chn & 0x0f); /* Progamme change. */ c[1] = instr + midi_instroff; return midisynth_writeraw(n, c, 2); } static int midisynth_startnote(void *n, uint8_t chn, uint8_t note, uint8_t vel) { u_char c[3]; if (note > 127 || chn > 15) return EINVAL; if (vel > 127) vel = 127; c[0] = 0x90 | (chn & 0x0f); /* Note on. */ c[1] = (u_char)note; c[2] = (u_char)vel; return midisynth_writeraw(n, c, 3); } static int midisynth_alloc(void *n, uint8_t chan, uint8_t note) { return chan; } static int midisynth_controller(void *n, uint8_t chn, uint8_t ctrlnum, uint16_t val) { u_char c[3]; if (ctrlnum > 127 || chn > 15) return EINVAL; c[0] = 0xb0 | (chn & 0x0f); /* Control Message. */ c[1] = ctrlnum; c[2] = val; return midisynth_writeraw(n, c, 3); } static int midisynth_bender(void *n, uint8_t chn, uint16_t val) { u_char c[3]; if (val > 16383 || chn > 15) return EINVAL; c[0] = 0xe0 | (chn & 0x0f); /* Pitch bend. */ c[1] = (u_char)val & 0x7f; c[2] = (u_char)(val >> 7) & 0x7f; return midisynth_writeraw(n, c, 3); } /* * Single point of midi destructions. */ static int midi_destroy(struct snd_midi *m, int midiuninit) { mtx_assert(&midistat_lock, MA_OWNED); mtx_assert(&m->lock, MA_OWNED); MIDI_DEBUG(3, printf("midi_destroy\n")); m->dev->si_drv1 = NULL; mtx_unlock(&m->lock); /* XXX */ destroy_dev(m->dev); TAILQ_REMOVE(&midi_devs, m, link); if (midiuninit) MPU_UNINIT(m, m->cookie); free(MIDIQ_BUF(m->inq), M_MIDI); free(MIDIQ_BUF(m->outq), M_MIDI); mtx_destroy(&m->qlock); mtx_destroy(&m->lock); free(m->synth, M_MIDI); free(m, M_MIDI); return 0; } /* * Load and unload functions, creates the /dev/midistat device */ static int midi_load() { mtx_init(&midistat_lock, "midistat lock", NULL, 0); TAILQ_INIT(&midi_devs); /* Initialize the queue. 
*/ midistat_dev = make_dev(&midistat_cdevsw, MIDIMKMINOR(0, MIDI_DEV_MIDICTL, 0), UID_ROOT, GID_WHEEL, 0666, "midistat"); return 0; } static int midi_unload() { struct snd_midi *m; int retval; MIDI_DEBUG(1, printf("midi_unload()\n")); retval = EBUSY; mtx_lock(&midistat_lock); if (midistat_isopen) goto exit0; TAILQ_FOREACH(m, &midi_devs, link) { mtx_lock(&m->lock); if (m->busy) retval = EBUSY; else retval = midi_destroy(m, 1); if (retval) goto exit1; } mtx_unlock(&midistat_lock); /* XXX */ destroy_dev(midistat_dev); /* * Made it here then unload is complete */ mtx_destroy(&midistat_lock); return 0; exit1: mtx_unlock(&m->lock); exit0: mtx_unlock(&midistat_lock); if (retval) MIDI_DEBUG(2, printf("midi_unload: failed\n")); return retval; } extern int seq_modevent(module_t mod, int type, void *data); static int midi_modevent(module_t mod, int type, void *data) { int retval; retval = 0; switch (type) { case MOD_LOAD: retval = midi_load(); #if 0 if (retval == 0) retval = seq_modevent(mod, type, data); #endif break; case MOD_UNLOAD: retval = midi_unload(); #if 0 if (retval == 0) retval = seq_modevent(mod, type, data); #endif break; default: break; } return retval; } kobj_t midimapper_addseq(void *arg1, int *unit, void **cookie) { - unit = 0; + unit = NULL; return (kobj_t)arg1; } int midimapper_open(void *arg1, void **cookie) { int retval = 0; struct snd_midi *m; mtx_lock(&midistat_lock); TAILQ_FOREACH(m, &midi_devs, link) { retval++; } mtx_unlock(&midistat_lock); return retval; } int midimapper_close(void *arg1, void *cookie) { return 0; } kobj_t midimapper_fetch_synth(void *arg, void *cookie, int unit) { struct snd_midi *m; int retval = 0; mtx_lock(&midistat_lock); TAILQ_FOREACH(m, &midi_devs, link) { if (unit == retval) { mtx_unlock(&midistat_lock); return (kobj_t)m->synth; } retval++; } mtx_unlock(&midistat_lock); return NULL; } DEV_MODULE(midi, midi_modevent, NULL); MODULE_VERSION(midi, 1); Index: head/sys/dev/sound/pci/als4000.c 
=================================================================== --- head/sys/dev/sound/pci/als4000.c (revision 297861) +++ head/sys/dev/sound/pci/als4000.c (revision 297862) @@ -1,941 +1,941 @@ /*- * Copyright (c) 2001 Orion Hodson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHERIN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THEPOSSIBILITY OF * SUCH DAMAGE. */ /* * als4000.c - driver for the Avance Logic ALS 4000 chipset. * * The ALS4000 is effectively an SB16 with a PCI interface. * * This driver derives from ALS4000a.PDF, Bart Hartgers alsa driver, and * SB16 register descriptions. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include "mixer_if.h" SND_DECLARE_FILE("$FreeBSD$"); /* Debugging macro's */ #undef DEB #ifndef DEB #define DEB(x) /* x */ #endif /* DEB */ #define ALS_DEFAULT_BUFSZ 16384 /* ------------------------------------------------------------------------- */ /* Structures */ struct sc_info; struct sc_chinfo { struct sc_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; u_int32_t format, speed, phys_buf, bps; u_int32_t dma_active:1, dma_was_active:1; u_int8_t gcr_fifo_status; int dir; }; struct sc_info { device_t dev; bus_space_tag_t st; bus_space_handle_t sh; bus_dma_tag_t parent_dmat; struct resource *reg, *irq; int regid, irqid; void *ih; struct mtx *lock; unsigned int bufsz; struct sc_chinfo pch, rch; }; /* Channel caps */ static u_int32_t als_format[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; /* * I don't believe this rotten soundcard can do 48k, really, * trust me. 
*/ static struct pcmchan_caps als_caps = { 4000, 44100, als_format, 0 }; /* ------------------------------------------------------------------------- */ /* Register Utilities */ static u_int32_t als_gcr_rd(struct sc_info *sc, int index) { bus_space_write_1(sc->st, sc->sh, ALS_GCR_INDEX, index); return bus_space_read_4(sc->st, sc->sh, ALS_GCR_DATA); } static void als_gcr_wr(struct sc_info *sc, int index, int data) { bus_space_write_1(sc->st, sc->sh, ALS_GCR_INDEX, index); bus_space_write_4(sc->st, sc->sh, ALS_GCR_DATA, data); } static u_int8_t als_intr_rd(struct sc_info *sc) { return bus_space_read_1(sc->st, sc->sh, ALS_SB_MPU_IRQ); } static void als_intr_wr(struct sc_info *sc, u_int8_t data) { bus_space_write_1(sc->st, sc->sh, ALS_SB_MPU_IRQ, data); } static u_int8_t als_mix_rd(struct sc_info *sc, u_int8_t index) { bus_space_write_1(sc->st, sc->sh, ALS_MIXER_INDEX, index); return bus_space_read_1(sc->st, sc->sh, ALS_MIXER_DATA); } static void als_mix_wr(struct sc_info *sc, u_int8_t index, u_int8_t data) { bus_space_write_1(sc->st, sc->sh, ALS_MIXER_INDEX, index); bus_space_write_1(sc->st, sc->sh, ALS_MIXER_DATA, data); } static void als_esp_wr(struct sc_info *sc, u_int8_t data) { u_int32_t tries, v; tries = 1000; do { v = bus_space_read_1(sc->st, sc->sh, ALS_ESP_WR_STATUS); if (~v & 0x80) break; DELAY(20); } while (--tries != 0); if (tries == 0) device_printf(sc->dev, "als_esp_wr timeout"); bus_space_write_1(sc->st, sc->sh, ALS_ESP_WR_DATA, data); } static int als_esp_reset(struct sc_info *sc) { u_int32_t tries, u, v; bus_space_write_1(sc->st, sc->sh, ALS_ESP_RST, 1); DELAY(10); bus_space_write_1(sc->st, sc->sh, ALS_ESP_RST, 0); DELAY(30); tries = 1000; do { u = bus_space_read_1(sc->st, sc->sh, ALS_ESP_RD_STATUS8); if (u & 0x80) { v = bus_space_read_1(sc->st, sc->sh, ALS_ESP_RD_DATA); if (v == 0xaa) return 0; else break; } DELAY(20); } while (--tries != 0); if (tries == 0) device_printf(sc->dev, "als_esp_reset timeout"); return 1; } static u_int8_t 
als_ack_read(struct sc_info *sc, u_int8_t addr) { u_int8_t r = bus_space_read_1(sc->st, sc->sh, addr); return r; } /* ------------------------------------------------------------------------- */ /* Common pcm channel implementation */ static void * alschan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = devinfo; struct sc_chinfo *ch; snd_mtxlock(sc->lock); if (dir == PCMDIR_PLAY) { ch = &sc->pch; ch->gcr_fifo_status = ALS_GCR_FIFO0_STATUS; } else { ch = &sc->rch; ch->gcr_fifo_status = ALS_GCR_FIFO1_STATUS; } ch->dir = dir; ch->parent = sc; ch->channel = c; ch->bps = 1; ch->format = SND_FORMAT(AFMT_U8, 1, 0); ch->speed = DSP_DEFAULT_SPEED; ch->buffer = b; snd_mtxunlock(sc->lock); if (sndbuf_alloc(ch->buffer, sc->parent_dmat, 0, sc->bufsz) != 0) return NULL; return ch; } static int alschan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sc_chinfo *ch = data; ch->format = format; return 0; } static u_int32_t alschan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct sc_chinfo *ch = data, *other; struct sc_info *sc = ch->parent; other = (ch->dir == PCMDIR_PLAY) ? 
&sc->rch : &sc->pch; /* Deny request if other dma channel is active */ if (other->dma_active) { ch->speed = other->speed; return other->speed; } ch->speed = speed; return speed; } static u_int32_t alschan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; if (blocksize > sc->bufsz / 2) { blocksize = sc->bufsz / 2; } sndbuf_resize(ch->buffer, 2, blocksize); return blocksize; } static u_int32_t alschan_getptr(kobj_t obj, void *data) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; int32_t pos, sz; snd_mtxlock(sc->lock); pos = als_gcr_rd(ch->parent, ch->gcr_fifo_status) & 0xffff; snd_mtxunlock(sc->lock); sz = sndbuf_getsize(ch->buffer); return (2 * sz - pos - 1) % sz; } static struct pcmchan_caps* alschan_getcaps(kobj_t obj, void *data) { return &als_caps; } static void als_set_speed(struct sc_chinfo *ch) { struct sc_info *sc = ch->parent; struct sc_chinfo *other; other = (ch->dir == PCMDIR_PLAY) ? &sc->rch : &sc->pch; if (other->dma_active == 0) { als_esp_wr(sc, ALS_ESP_SAMPLE_RATE); als_esp_wr(sc, ch->speed >> 8); als_esp_wr(sc, ch->speed & 0xff); } else { DEB(printf("speed locked at %d (tried %d)\n", other->speed, ch->speed)); } } /* ------------------------------------------------------------------------- */ /* Playback channel implementation */ #define ALS_8BIT_CMD(x, y) { (x), (y), DSP_DMA8, DSP_CMD_DMAPAUSE_8 } #define ALS_16BIT_CMD(x, y) { (x), (y), DSP_DMA16, DSP_CMD_DMAPAUSE_16 } struct playback_command { u_int32_t pcm_format; /* newpcm format */ u_int8_t format_val; /* sb16 format value */ u_int8_t dma_prog; /* sb16 dma program */ u_int8_t dma_stop; /* sb16 stop register */ } static const playback_cmds[] = { ALS_8BIT_CMD(SND_FORMAT(AFMT_U8, 1, 0), DSP_MODE_U8MONO), ALS_8BIT_CMD(SND_FORMAT(AFMT_U8, 2, 0), DSP_MODE_U8STEREO), ALS_16BIT_CMD(SND_FORMAT(AFMT_S16_LE, 1, 0), DSP_MODE_S16MONO), ALS_16BIT_CMD(SND_FORMAT(AFMT_S16_LE, 2, 0), DSP_MODE_S16STEREO), }; static const 
struct playback_command* als_get_playback_command(u_int32_t format) { u_int32_t i, n; n = sizeof(playback_cmds) / sizeof(playback_cmds[0]); for (i = 0; i < n; i++) { if (playback_cmds[i].pcm_format == format) { return &playback_cmds[i]; } } DEB(printf("als_get_playback_command: invalid format 0x%08x\n", format)); return &playback_cmds[0]; } static void als_playback_start(struct sc_chinfo *ch) { const struct playback_command *p; struct sc_info *sc = ch->parent; u_int32_t buf, bufsz, count, dma_prog; buf = sndbuf_getbufaddr(ch->buffer); bufsz = sndbuf_getsize(ch->buffer); count = bufsz / 2; if (ch->format & AFMT_16BIT) count /= 2; count--; als_esp_wr(sc, DSP_CMD_SPKON); als_set_speed(ch); als_gcr_wr(sc, ALS_GCR_DMA0_START, buf); als_gcr_wr(sc, ALS_GCR_DMA0_MODE, (bufsz - 1) | 0x180000); p = als_get_playback_command(ch->format); dma_prog = p->dma_prog | DSP_F16_DAC | DSP_F16_AUTO | DSP_F16_FIFO_ON; als_esp_wr(sc, dma_prog); als_esp_wr(sc, p->format_val); als_esp_wr(sc, count & 0xff); als_esp_wr(sc, count >> 8); ch->dma_active = 1; } static int als_playback_stop(struct sc_chinfo *ch) { const struct playback_command *p; struct sc_info *sc = ch->parent; u_int32_t active; active = ch->dma_active; if (active) { p = als_get_playback_command(ch->format); als_esp_wr(sc, p->dma_stop); } ch->dma_active = 0; return active; } static int alspchan_trigger(kobj_t obj, void *data, int go) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; if (!PCMTRIG_COMMON(go)) return 0; snd_mtxlock(sc->lock); switch(go) { case PCMTRIG_START: als_playback_start(ch); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: als_playback_stop(ch); break; default: break; } snd_mtxunlock(sc->lock); return 0; } static kobj_method_t alspchan_methods[] = { KOBJMETHOD(channel_init, alschan_init), KOBJMETHOD(channel_setformat, alschan_setformat), KOBJMETHOD(channel_setspeed, alschan_setspeed), KOBJMETHOD(channel_setblocksize, alschan_setblocksize), KOBJMETHOD(channel_trigger, alspchan_trigger), 
KOBJMETHOD(channel_getptr, alschan_getptr), KOBJMETHOD(channel_getcaps, alschan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(alspchan); /* ------------------------------------------------------------------------- */ /* Capture channel implementation */ static u_int8_t als_get_fifo_format(struct sc_info *sc, u_int32_t format) { switch (format) { case SND_FORMAT(AFMT_U8, 1, 0): return ALS_FIFO1_8BIT; case SND_FORMAT(AFMT_U8, 2, 0): return ALS_FIFO1_8BIT | ALS_FIFO1_STEREO; case SND_FORMAT(AFMT_S16_LE, 1, 0): return ALS_FIFO1_SIGNED; case SND_FORMAT(AFMT_S16_LE, 2, 0): return ALS_FIFO1_SIGNED | ALS_FIFO1_STEREO; } device_printf(sc->dev, "format not found: 0x%08x\n", format); return ALS_FIFO1_8BIT; } static void als_capture_start(struct sc_chinfo *ch) { struct sc_info *sc = ch->parent; u_int32_t buf, bufsz, count, dma_prog; buf = sndbuf_getbufaddr(ch->buffer); bufsz = sndbuf_getsize(ch->buffer); count = bufsz / 2; if (ch->format & AFMT_16BIT) count /= 2; count--; als_esp_wr(sc, DSP_CMD_SPKON); als_set_speed(ch); als_gcr_wr(sc, ALS_GCR_FIFO1_START, buf); als_gcr_wr(sc, ALS_GCR_FIFO1_COUNT, (bufsz - 1)); als_mix_wr(sc, ALS_FIFO1_LENGTH_LO, count & 0xff); als_mix_wr(sc, ALS_FIFO1_LENGTH_HI, count >> 8); dma_prog = ALS_FIFO1_RUN | als_get_fifo_format(sc, ch->format); als_mix_wr(sc, ALS_FIFO1_CONTROL, dma_prog); ch->dma_active = 1; } static int als_capture_stop(struct sc_chinfo *ch) { struct sc_info *sc = ch->parent; u_int32_t active; active = ch->dma_active; if (active) { als_mix_wr(sc, ALS_FIFO1_CONTROL, ALS_FIFO1_STOP); } ch->dma_active = 0; return active; } static int alsrchan_trigger(kobj_t obj, void *data, int go) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; snd_mtxlock(sc->lock); switch(go) { case PCMTRIG_START: als_capture_start(ch); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: als_capture_stop(ch); break; } snd_mtxunlock(sc->lock); return 0; } static kobj_method_t alsrchan_methods[] = { KOBJMETHOD(channel_init, alschan_init), 
KOBJMETHOD(channel_setformat, alschan_setformat), KOBJMETHOD(channel_setspeed, alschan_setspeed), KOBJMETHOD(channel_setblocksize, alschan_setblocksize), KOBJMETHOD(channel_trigger, alsrchan_trigger), KOBJMETHOD(channel_getptr, alschan_getptr), KOBJMETHOD(channel_getcaps, alschan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(alsrchan); /* ------------------------------------------------------------------------- */ /* Mixer related */ /* * ALS4000 has an sb16 mixer, with some additional controls that we do * not yet have a means to support. */ struct sb16props { u_int8_t lreg; u_int8_t rreg; u_int8_t bits; u_int8_t oselect; u_int8_t iselect; /* left input mask */ } static const amt[SOUND_MIXER_NRDEVICES] = { [SOUND_MIXER_VOLUME] = { 0x30, 0x31, 5, 0x00, 0x00 }, [SOUND_MIXER_PCM] = { 0x32, 0x33, 5, 0x00, 0x00 }, [SOUND_MIXER_SYNTH] = { 0x34, 0x35, 5, 0x60, 0x40 }, [SOUND_MIXER_CD] = { 0x36, 0x37, 5, 0x06, 0x04 }, [SOUND_MIXER_LINE] = { 0x38, 0x39, 5, 0x18, 0x10 }, [SOUND_MIXER_MIC] = { 0x3a, 0x00, 5, 0x01, 0x01 }, [SOUND_MIXER_SPEAKER] = { 0x3b, 0x00, 2, 0x00, 0x00 }, [SOUND_MIXER_IGAIN] = { 0x3f, 0x40, 2, 0x00, 0x00 }, [SOUND_MIXER_OGAIN] = { 0x41, 0x42, 2, 0x00, 0x00 }, /* The following have register values but no h/w implementation */ [SOUND_MIXER_TREBLE] = { 0x44, 0x45, 4, 0x00, 0x00 }, [SOUND_MIXER_BASS] = { 0x46, 0x47, 4, 0x00, 0x00 } }; static int alsmix_init(struct snd_mixer *m) { u_int32_t i, v; for (i = v = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (amt[i].bits) v |= 1 << i; } mix_setdevs(m, v); for (i = v = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (amt[i].iselect) v |= 1 << i; } mix_setrecdevs(m, v); return 0; } static int alsmix_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct sc_info *sc = mix_getdevinfo(m); u_int32_t r, l, v, mask; /* Fill upper n bits in mask with 1's */ mask = ((1 << amt[dev].bits) - 1) << (8 - amt[dev].bits); l = (left * mask / 100) & mask; v = als_mix_rd(sc, amt[dev].lreg) & ~mask; als_mix_wr(sc, amt[dev].lreg, l
| v); if (amt[dev].rreg) { r = (right * mask / 100) & mask; v = als_mix_rd(sc, amt[dev].rreg) & ~mask; als_mix_wr(sc, amt[dev].rreg, r | v); } else { r = 0; } /* Zero gain does not mute channel from output, but this does. */ v = als_mix_rd(sc, SB16_OMASK); if (l == 0 && r == 0) { v &= ~amt[dev].oselect; } else { v |= amt[dev].oselect; } als_mix_wr(sc, SB16_OMASK, v); return 0; } static u_int32_t alsmix_setrecsrc(struct snd_mixer *m, u_int32_t src) { struct sc_info *sc = mix_getdevinfo(m); u_int32_t i, l, r; for (i = l = r = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (src & (1 << i)) { if (amt[i].iselect == 1) { /* microphone */ l |= amt[i].iselect; r |= amt[i].iselect; } else { l |= amt[i].iselect; r |= amt[i].iselect >> 1; } } } als_mix_wr(sc, SB16_IMASK_L, l); als_mix_wr(sc, SB16_IMASK_R, r); return src; } static kobj_method_t als_mixer_methods[] = { KOBJMETHOD(mixer_init, alsmix_init), KOBJMETHOD(mixer_set, alsmix_set), KOBJMETHOD(mixer_setrecsrc, alsmix_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(als_mixer); /* ------------------------------------------------------------------------- */ /* Interrupt Handler */ static void als_intr(void *p) { struct sc_info *sc = (struct sc_info *)p; u_int8_t intr, sb_status; snd_mtxlock(sc->lock); intr = als_intr_rd(sc); if (intr & 0x80) { snd_mtxunlock(sc->lock); chn_intr(sc->pch.channel); snd_mtxlock(sc->lock); } if (intr & 0x40) { snd_mtxunlock(sc->lock); chn_intr(sc->rch.channel); snd_mtxlock(sc->lock); } /* ACK interrupt in PCI core */ als_intr_wr(sc, intr); /* ACK interrupt in SB core */ sb_status = als_mix_rd(sc, IRQ_STAT); if (sb_status & ALS_IRQ_STATUS8) als_ack_read(sc, ALS_ESP_RD_STATUS8); if (sb_status & ALS_IRQ_STATUS16) als_ack_read(sc, ALS_ESP_RD_STATUS16); if (sb_status & ALS_IRQ_MPUIN) als_ack_read(sc, ALS_MIDI_DATA); if (sb_status & ALS_IRQ_CR1E) als_ack_read(sc, ALS_CR1E_ACK_PORT); snd_mtxunlock(sc->lock); return; } /* ------------------------------------------------------------------------- */ /* H/W 
initialization */ static int als_init(struct sc_info *sc) { u_int32_t i, v; /* Reset Chip */ if (als_esp_reset(sc)) { return 1; } /* Enable write on DMA_SETUP register */ v = als_mix_rd(sc, ALS_SB16_CONFIG); als_mix_wr(sc, ALS_SB16_CONFIG, v | 0x80); /* Select DMA0 */ als_mix_wr(sc, ALS_SB16_DMA_SETUP, 0x01); /* Disable write on DMA_SETUP register */ als_mix_wr(sc, ALS_SB16_CONFIG, v & 0x7f); /* Enable interrupts */ v = als_gcr_rd(sc, ALS_GCR_MISC); als_gcr_wr(sc, ALS_GCR_MISC, v | 0x28000); /* Black out GCR DMA registers */ for (i = 0x91; i <= 0x96; i++) { als_gcr_wr(sc, i, 0); } /* Emulation mode */ v = als_gcr_rd(sc, ALS_GCR_DMA_EMULATION); als_gcr_wr(sc, ALS_GCR_DMA_EMULATION, v); DEB(printf("GCR_DMA_EMULATION 0x%08x\n", v)); return 0; } static void als_uninit(struct sc_info *sc) { /* Disable interrupts */ als_gcr_wr(sc, ALS_GCR_MISC, 0); } /* ------------------------------------------------------------------------- */ /* Probe and attach card */ static int als_pci_probe(device_t dev) { if (pci_get_devid(dev) == ALS_PCI_ID0) { device_set_desc(dev, "Avance Logic ALS4000"); return BUS_PROBE_DEFAULT; } return ENXIO; } static void als_resource_free(device_t dev, struct sc_info *sc) { if (sc->reg) { bus_release_resource(dev, SYS_RES_IOPORT, sc->regid, sc->reg); - sc->reg = 0; + sc->reg = NULL; } if (sc->ih) { bus_teardown_intr(dev, sc->irq, sc->ih); - sc->ih = 0; + sc->ih = NULL; } if (sc->irq) { bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); - sc->irq = 0; + sc->irq = NULL; } if (sc->parent_dmat) { bus_dma_tag_destroy(sc->parent_dmat); sc->parent_dmat = 0; } if (sc->lock) { snd_mtxfree(sc->lock); sc->lock = NULL; } } static int als_resource_grab(device_t dev, struct sc_info *sc) { sc->regid = PCIR_BAR(0); sc->reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->regid, RF_ACTIVE); - if (sc->reg == 0) { + if (sc->reg == NULL) { device_printf(dev, "unable to allocate register space\n"); goto bad; } sc->st = rman_get_bustag(sc->reg); sc->sh = 
rman_get_bushandle(sc->reg); sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid, RF_ACTIVE | RF_SHAREABLE); - if (sc->irq == 0) { + if (sc->irq == NULL) { device_printf(dev, "unable to allocate interrupt\n"); goto bad; } if (snd_setup_intr(dev, sc->irq, INTR_MPSAFE, als_intr, sc, &sc->ih)) { device_printf(dev, "unable to setup interrupt\n"); goto bad; } sc->bufsz = pcm_getbuffersize(dev, 4096, ALS_DEFAULT_BUFSZ, 65536); if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_24BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/NULL, /*lockarg*/NULL, &sc->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto bad; } return 0; bad: als_resource_free(dev, sc); return ENXIO; } static int als_pci_attach(device_t dev) { struct sc_info *sc; char status[SND_STATUSLEN]; sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO); sc->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_als4000 softc"); sc->dev = dev; pci_enable_busmaster(dev); /* * By default the power to the various components on the * ALS4000 is entirely controlled by the pci powerstate. We * could attempt finer grained control by setting GCR6.31. */ if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) { /* Reset the power state. 
*/ device_printf(dev, "chip is in D%d power mode " "-- setting to D0\n", pci_get_powerstate(dev)); pci_set_powerstate(dev, PCI_POWERSTATE_D0); } if (als_resource_grab(dev, sc)) { device_printf(dev, "failed to allocate resources\n"); goto bad_attach; } if (als_init(sc)) { device_printf(dev, "failed to initialize hardware\n"); goto bad_attach; } if (mixer_init(dev, &als_mixer_class, sc)) { device_printf(dev, "failed to initialize mixer\n"); goto bad_attach; } if (pcm_register(dev, sc, 1, 1)) { device_printf(dev, "failed to register pcm entries\n"); goto bad_attach; } pcm_addchan(dev, PCMDIR_PLAY, &alspchan_class, sc); pcm_addchan(dev, PCMDIR_REC, &alsrchan_class, sc); snprintf(status, SND_STATUSLEN, "at io 0x%jx irq %jd %s", rman_get_start(sc->reg), rman_get_start(sc->irq),PCM_KLDSTRING(snd_als4000)); pcm_setstatus(dev, status); return 0; bad_attach: als_resource_free(dev, sc); free(sc, M_DEVBUF); return ENXIO; } static int als_pci_detach(device_t dev) { struct sc_info *sc; int r; r = pcm_unregister(dev); if (r) return r; sc = pcm_getdevinfo(dev); als_uninit(sc); als_resource_free(dev, sc); free(sc, M_DEVBUF); return 0; } static int als_pci_suspend(device_t dev) { struct sc_info *sc = pcm_getdevinfo(dev); snd_mtxlock(sc->lock); sc->pch.dma_was_active = als_playback_stop(&sc->pch); sc->rch.dma_was_active = als_capture_stop(&sc->rch); als_uninit(sc); snd_mtxunlock(sc->lock); return 0; } static int als_pci_resume(device_t dev) { struct sc_info *sc = pcm_getdevinfo(dev); snd_mtxlock(sc->lock); if (als_init(sc) != 0) { device_printf(dev, "unable to reinitialize the card\n"); snd_mtxunlock(sc->lock); return ENXIO; } if (mixer_reinit(dev) != 0) { device_printf(dev, "unable to reinitialize the mixer\n"); snd_mtxunlock(sc->lock); return ENXIO; } if (sc->pch.dma_was_active) { als_playback_start(&sc->pch); } if (sc->rch.dma_was_active) { als_capture_start(&sc->rch); } snd_mtxunlock(sc->lock); return 0; } static device_method_t als_methods[] = { /* Device interface */ 
DEVMETHOD(device_probe, als_pci_probe), DEVMETHOD(device_attach, als_pci_attach), DEVMETHOD(device_detach, als_pci_detach), DEVMETHOD(device_suspend, als_pci_suspend), DEVMETHOD(device_resume, als_pci_resume), { 0, 0 } }; static driver_t als_driver = { "pcm", als_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_als4000, pci, als_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_als4000, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_als4000, 1); Index: head/sys/dev/sound/pci/aureal.c =================================================================== --- head/sys/dev/sound/pci/aureal.c (revision 297861) +++ head/sys/dev/sound/pci/aureal.c (revision 297862) @@ -1,684 +1,686 @@ /*- * Copyright (c) 1999 Cameron Grant * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include SND_DECLARE_FILE("$FreeBSD$"); /* PCI IDs of supported chips */ #define AU8820_PCI_ID 0x000112eb /* channel interface */ static u_int32_t au_playfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps au_playcaps = {4000, 48000, au_playfmt, 0}; static u_int32_t au_recfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps au_reccaps = {4000, 48000, au_recfmt, 0}; /* -------------------------------------------------------------------- */ struct au_info; struct au_chinfo { struct au_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; int dir; }; struct au_info { int unit; bus_space_tag_t st[3]; bus_space_handle_t sh[3]; bus_dma_tag_t parent_dmat; struct mtx *lock; u_int32_t x[32], y[128]; char z[128]; u_int32_t routes[4], interrupts; struct au_chinfo pch; }; static int au_init(device_t dev, struct au_info *au); static void au_intr(void *); /* -------------------------------------------------------------------- */ static u_int32_t au_rd(struct au_info *au, int mapno, int regno, int size) { switch(size) { case 1: return bus_space_read_1(au->st[mapno], au->sh[mapno], regno); case 2: return bus_space_read_2(au->st[mapno], au->sh[mapno], 
regno); case 4: return bus_space_read_4(au->st[mapno], au->sh[mapno], regno); default: return 0xffffffff; } } static void au_wr(struct au_info *au, int mapno, int regno, u_int32_t data, int size) { switch(size) { case 1: bus_space_write_1(au->st[mapno], au->sh[mapno], regno, data); break; case 2: bus_space_write_2(au->st[mapno], au->sh[mapno], regno, data); break; case 4: bus_space_write_4(au->st[mapno], au->sh[mapno], regno, data); break; } } /* -------------------------------------------------------------------- */ static int au_rdcd(kobj_t obj, void *arg, int regno) { struct au_info *au = (struct au_info *)arg; int i=0, j=0; regno<<=16; au_wr(au, 0, AU_REG_CODECIO, regno, 4); while (j<50) { i=au_rd(au, 0, AU_REG_CODECIO, 4); if ((i & 0x00ff0000) == (regno | 0x00800000)) break; DELAY(j * 200 + 2000); j++; } if (j==50) printf("pcm%d: codec timeout reading register %x (%x)\n", au->unit, (regno & AU_CDC_REGMASK)>>16, i); return i & AU_CDC_DATAMASK; } static int au_wrcd(kobj_t obj, void *arg, int regno, u_int32_t data) { struct au_info *au = (struct au_info *)arg; int i, j, tries; i=j=tries=0; do { while (j<50 && (i & AU_CDC_WROK) == 0) { i=au_rd(au, 0, AU_REG_CODECST, 4); DELAY(2000); j++; } if (j==50) printf("codec timeout during write of register %x, data %x\n", regno, data); au_wr(au, 0, AU_REG_CODECIO, (regno<<16) | AU_CDC_REGSET | data, 4); /* DELAY(20000); i=au_rdcd(au, regno); */ tries++; } while (0); /* (i != data && tries < 3); */ /* if (tries == 3) printf("giving up writing 0x%4x to codec reg %2x\n", data, regno); */ return 0; } static kobj_method_t au_ac97_methods[] = { KOBJMETHOD(ac97_read, au_rdcd), KOBJMETHOD(ac97_write, au_wrcd), KOBJMETHOD_END }; AC97_DECLARE(au_ac97); /* -------------------------------------------------------------------- */ static void au_setbit(u_int32_t *p, char bit, u_int32_t value) { p += bit >> 5; bit &= 0x1f; *p &= ~ (1 << bit); *p |= (value << bit); } static void au_addroute(struct au_info *au, int a, int b, int route) { int 
j = 0x1099c+(a<<2); if (au->x[a] != a+0x67) j = AU_REG_RTBASE+(au->x[a]<<2); au_wr(au, 0, AU_REG_RTBASE+(route<<2), 0xffffffff, 4); au_wr(au, 0, j, route | (b<<7), 4); au->y[route]=au->x[a]; au->x[a]=route; au->z[route]=a & 0x000000ff; au_setbit(au->routes, route, 1); } static void au_delroute(struct au_info *au, int route) { int i; int j=au->z[route]; au_setbit(au->routes, route, 0); au->z[route]=0x1f; i=au_rd(au, 0, AU_REG_RTBASE+(route<<2), 4); au_wr(au, 0, AU_REG_RTBASE+(au->y[route]<<2), i, 4); au->y[i & 0x7f]=au->y[route]; au_wr(au, 0, AU_REG_RTBASE+(route<<2), 0xfffffffe, 4); if (au->x[j] == route) au->x[j]=au->y[route]; au->y[route]=0x7f; } static void au_encodec(struct au_info *au, char channel) { au_wr(au, 0, AU_REG_CODECEN, au_rd(au, 0, AU_REG_CODECEN, 4) | (1 << (channel + 8)), 4); } static void au_clrfifo(struct au_info *au, u_int32_t c) { u_int32_t i; for (i=0; i<32; i++) au_wr(au, 0, AU_REG_FIFOBASE+(c<<7)+(i<<2), 0, 4); } static void au_setadb(struct au_info *au, u_int32_t c, u_int32_t enable) { int x; x = au_rd(au, 0, AU_REG_ADB, 4); x &= ~(1 << c); x |= (enable << c); au_wr(au, 0, AU_REG_ADB, x, 4); } static void au_prepareoutput(struct au_chinfo *ch, u_int32_t format) { struct au_info *au = ch->parent; int i, stereo = (AFMT_CHANNEL(format) > 1)? 
1 : 0; u_int32_t baseaddr = sndbuf_getbufaddr(ch->buffer); au_wr(au, 0, 0x1061c, 0, 4); au_wr(au, 0, 0x10620, 0, 4); au_wr(au, 0, 0x10624, 0, 4); switch(AFMT_ENCODING(format)) { case 1: i=0xb000; break; case 2: i=0xf000; break; case 8: i=0x7000; break; case 16: i=0x23000; break; default: i=0x3000; } au_wr(au, 0, 0x10200, baseaddr, 4); au_wr(au, 0, 0x10204, baseaddr+0x1000, 4); au_wr(au, 0, 0x10208, baseaddr+0x2000, 4); au_wr(au, 0, 0x1020c, baseaddr+0x3000, 4); au_wr(au, 0, 0x10400, 0xdeffffff, 4); au_wr(au, 0, 0x10404, 0xfcffffff, 4); au_wr(au, 0, 0x10580, i, 4); au_wr(au, 0, 0x10210, baseaddr, 4); au_wr(au, 0, 0x10214, baseaddr+0x1000, 4); au_wr(au, 0, 0x10218, baseaddr+0x2000, 4); au_wr(au, 0, 0x1021c, baseaddr+0x3000, 4); au_wr(au, 0, 0x10408, 0x00fff000 | 0x56000000 | 0x00000fff, 4); au_wr(au, 0, 0x1040c, 0x00fff000 | 0x74000000 | 0x00000fff, 4); au_wr(au, 0, 0x10584, i, 4); au_wr(au, 0, 0x0f800, stereo? 0x00030032 : 0x00030030, 4); au_wr(au, 0, 0x0f804, stereo? 0x00030032 : 0x00030030, 4); au_addroute(au, 0x11, 0, 0x58); au_addroute(au, 0x11, stereo? 0 : 1, 0x59); } /* -------------------------------------------------------------------- */ /* channel interface */ static void * auchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct au_info *au = devinfo; struct au_chinfo *ch = (dir == PCMDIR_PLAY)? 
&au->pch : NULL; ch->parent = au; ch->channel = c; ch->buffer = b; ch->dir = dir; if (sndbuf_alloc(ch->buffer, au->parent_dmat, 0, AU_BUFFSIZE) != 0) return NULL; return ch; } static int auchan_setformat(kobj_t obj, void *data, u_int32_t format) { struct au_chinfo *ch = data; if (ch->dir == PCMDIR_PLAY) au_prepareoutput(ch, format); return 0; } static int auchan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct au_chinfo *ch = data; if (ch->dir == PCMDIR_PLAY) { } else { } return speed; } static int auchan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { return blocksize; } static int auchan_trigger(kobj_t obj, void *data, int go) { struct au_chinfo *ch = data; struct au_info *au = ch->parent; if (!PCMTRIG_COMMON(go)) return 0; if (ch->dir == PCMDIR_PLAY) { au_setadb(au, 0x11, (go)? 1 : 0); if (go != PCMTRIG_START) { au_wr(au, 0, 0xf800, 0, 4); au_wr(au, 0, 0xf804, 0, 4); au_delroute(au, 0x58); au_delroute(au, 0x59); } } else { } return 0; } static int auchan_getptr(kobj_t obj, void *data) { struct au_chinfo *ch = data; struct au_info *au = ch->parent; if (ch->dir == PCMDIR_PLAY) { return au_rd(au, 0, AU_REG_UNK2, 4) & (AU_BUFFSIZE-1); } else { return 0; } } static struct pcmchan_caps * auchan_getcaps(kobj_t obj, void *data) { struct au_chinfo *ch = data; return (ch->dir == PCMDIR_PLAY)? 
&au_playcaps : &au_reccaps; } static kobj_method_t auchan_methods[] = { KOBJMETHOD(channel_init, auchan_init), KOBJMETHOD(channel_setformat, auchan_setformat), KOBJMETHOD(channel_setspeed, auchan_setspeed), KOBJMETHOD(channel_setblocksize, auchan_setblocksize), KOBJMETHOD(channel_trigger, auchan_trigger), KOBJMETHOD(channel_getptr, auchan_getptr), KOBJMETHOD(channel_getcaps, auchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(auchan); /* -------------------------------------------------------------------- */ /* The interrupt handler */ static void au_intr (void *p) { struct au_info *au = p; u_int32_t intsrc, i; au->interrupts++; intsrc=au_rd(au, 0, AU_REG_IRQSRC, 4); printf("pcm%d: interrupt with src %x\n", au->unit, intsrc); if (intsrc & AU_IRQ_FATAL) printf("pcm%d: fatal error irq\n", au->unit); if (intsrc & AU_IRQ_PARITY) printf("pcm%d: parity error irq\n", au->unit); if (intsrc & AU_IRQ_UNKNOWN) { (void)au_rd(au, 0, AU_REG_UNK1, 4); au_wr(au, 0, AU_REG_UNK1, 0, 4); au_wr(au, 0, AU_REG_UNK1, 0x10000, 4); } if (intsrc & AU_IRQ_PCMOUT) { i=au_rd(au, 0, AU_REG_UNK2, 4) & (AU_BUFFSIZE-1); chn_intr(au->pch.channel); (void)au_rd(au, 0, AU_REG_UNK3, 4); (void)au_rd(au, 0, AU_REG_UNK4, 4); (void)au_rd(au, 0, AU_REG_UNK5, 4); } /* don't support midi if (intsrc & AU_IRQ_MIDI) { i=au_rd(au, 0, 0x11004, 4); j=10; while (i & 0xff) { if (j-- <= 0) break; i=au_rd(au, 0, 0x11000, 4); if ((au->midi_stat & 1) && (au->midi_out)) au->midi_out(au->midi_devno, i); i=au_rd(au, 0, 0x11004); } } */ au_wr(au, 0, AU_REG_IRQSRC, intsrc & 0x7ff, 4); au_rd(au, 0, AU_REG_IRQSRC, 4); } /* -------------------------------------------------------------------- */ /* Probe and attach the card */ static int au_init(device_t dev, struct au_info *au) { u_int32_t i, j; au_wr(au, 0, AU_REG_IRQGLOB, 0xffffffff, 4); DELAY(100000); /* init codec */ /* cold reset */ for (i=0; i<32; i++) { au_wr(au, 0, AU_REG_CODECCHN+(i<<2), 0, 4); DELAY(10000); } if (1) { au_wr(au, 0, AU_REG_CODECST, 0x8068, 4); 
DELAY(10000); au_wr(au, 0, AU_REG_CODECST, 0x00e8, 4); DELAY(10000); } else { au_wr(au, 0, AU_REG_CODECST, 0x00a8, 4); DELAY(100000); au_wr(au, 0, AU_REG_CODECST, 0x80a8, 4); DELAY(100000); au_wr(au, 0, AU_REG_CODECST, 0x80e8, 4); DELAY(100000); au_wr(au, 0, AU_REG_CODECST, 0x80a8, 4); DELAY(100000); au_wr(au, 0, AU_REG_CODECST, 0x00a8, 4); DELAY(100000); au_wr(au, 0, AU_REG_CODECST, 0x00e8, 4); DELAY(100000); } /* init */ for (i=0; i<32; i++) { au_wr(au, 0, AU_REG_CODECCHN+(i<<2), 0, 4); DELAY(10000); } au_wr(au, 0, AU_REG_CODECST, 0xe8, 4); DELAY(10000); au_wr(au, 0, AU_REG_CODECEN, 0, 4); /* setup codec */ i=j=0; while (j<100 && (i & AU_CDC_READY)==0) { i=au_rd(au, 0, AU_REG_CODECST, 4); DELAY(1000); j++; } if (j==100) device_printf(dev, "codec not ready, status 0x%x\n", i); /* init adb */ /*au->x5c=0;*/ for (i=0; i<32; i++) au->x[i]=i+0x67; for (i=0; i<128; i++) au->y[i]=0x7f; for (i=0; i<128; i++) au->z[i]=0x1f; au_wr(au, 0, AU_REG_ADB, 0, 4); for (i=0; i<124; i++) au_wr(au, 0, AU_REG_RTBASE+(i<<2), 0xffffffff, 4); /* test */ i=au_rd(au, 0, 0x107c0, 4); if (i!=0xdeadbeef) device_printf(dev, "dma check failed: 0x%x\n", i); /* install mixer */ au_wr(au, 0, AU_REG_IRQGLOB, au_rd(au, 0, AU_REG_IRQGLOB, 4) | AU_IRQ_ENABLE, 4); /* braindead but it's what the oss/linux driver does * for (i=0; i<0x80000000; i++) au_wr(au, 0, i<<2, 0, 4); */ au->routes[0]=au->routes[1]=au->routes[2]=au->routes[3]=0; /*au->x1e4=0;*/ /* attach channel */ au_addroute(au, 0x11, 0x48, 0x02); au_addroute(au, 0x11, 0x49, 0x03); au_encodec(au, 0); au_encodec(au, 1); for (i=0; i<48; i++) au_wr(au, 0, 0xf800+(i<<2), 0x20, 4); for (i=2; i<6; i++) au_wr(au, 0, 0xf800+(i<<2), 0, 4); au_wr(au, 0, 0xf8c0, 0x0843, 4); for (i=0; i<4; i++) au_clrfifo(au, i); return (0); } static int au_testirq(struct au_info *au) { au_wr(au, 0, AU_REG_UNK1, 0x80001000, 4); au_wr(au, 0, AU_REG_IRQEN, 0x00001030, 4); au_wr(au, 0, AU_REG_IRQSRC, 0x000007ff, 4); DELAY(1000000); if (au->interrupts==0) printf("pcm%d: irq test 
failed\n", au->unit); /* this apparently generates an irq */ return 0; } static int au_pci_probe(device_t dev) { if (pci_get_devid(dev) == AU8820_PCI_ID) { device_set_desc(dev, "Aureal Vortex 8820"); return BUS_PROBE_DEFAULT; } return ENXIO; } static int au_pci_attach(device_t dev) { struct au_info *au; int type[10]; int regid[10]; struct resource *reg[10]; int i, j, mapped = 0; int irqid; - struct resource *irq = 0; - void *ih = 0; + struct resource *irq; + void *ih; struct ac97_info *codec; char status[SND_STATUSLEN]; au = malloc(sizeof(*au), M_DEVBUF, M_WAITOK | M_ZERO); au->unit = device_get_unit(dev); pci_enable_busmaster(dev); + irq = NULL; + ih = NULL; j=0; /* XXX dfr: is this strictly necessary? */ for (i=0; imap[i].ln2size); printf("%s space ", (config_id->map[i].type & PCI_MAPPORT)? "io" : "memory"); printf("at 0x%x...", config_id->map[i].base); } #endif regid[j] = PCIR_BAR(i); type[j] = SYS_RES_MEMORY; reg[j] = bus_alloc_resource_any(dev, type[j], ®id[j], RF_ACTIVE); if (!reg[j]) { type[j] = SYS_RES_IOPORT; reg[j] = bus_alloc_resource_any(dev, type[j], ®id[j], RF_ACTIVE); } if (reg[j]) { au->st[i] = rman_get_bustag(reg[j]); au->sh[i] = rman_get_bushandle(reg[j]); mapped++; } #if 0 if (bootverbose) printf("%s\n", mapped? 
"ok" : "failed"); #endif if (mapped) j++; if (j == 10) { /* XXX */ device_printf(dev, "too many resources"); goto bad; } } #if 0 if (j < config_id->nummaps) { printf("pcm%d: unable to map a required resource\n", unit); free(au, M_DEVBUF); return; } #endif au_wr(au, 0, AU_REG_IRQEN, 0, 4); irqid = 0; irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irqid, RF_ACTIVE | RF_SHAREABLE); if (!irq || snd_setup_intr(dev, irq, 0, au_intr, au, &ih)) { device_printf(dev, "unable to map interrupt\n"); goto bad; } if (au_testirq(au)) device_printf(dev, "irq test failed\n"); if (au_init(dev, au) == -1) { device_printf(dev, "unable to initialize the card\n"); goto bad; } codec = AC97_CREATE(dev, au, au_ac97); if (codec == NULL) goto bad; if (mixer_init(dev, ac97_getmixerclass(), codec) == -1) goto bad; if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/AU_BUFFSIZE, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &au->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto bad; } snprintf(status, SND_STATUSLEN, "at %s 0x%jx irq %jd %s", (type[0] == SYS_RES_IOPORT)? 
"io" : "memory", rman_get_start(reg[0]), rman_get_start(irq),PCM_KLDSTRING(snd_aureal)); if (pcm_register(dev, au, 1, 1)) goto bad; /* pcm_addchan(dev, PCMDIR_REC, &au_chantemplate, au); */ pcm_addchan(dev, PCMDIR_PLAY, &auchan_class, au); pcm_setstatus(dev, status); return 0; bad: if (au) free(au, M_DEVBUF); for (i = 0; i < j; i++) bus_release_resource(dev, type[i], regid[i], reg[i]); if (ih) bus_teardown_intr(dev, irq, ih); if (irq) bus_release_resource(dev, SYS_RES_IRQ, irqid, irq); return ENXIO; } static device_method_t au_methods[] = { /* Device interface */ DEVMETHOD(device_probe, au_pci_probe), DEVMETHOD(device_attach, au_pci_attach), { 0, 0 } }; static driver_t au_driver = { "pcm", au_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_aureal, pci, au_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_aureal, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_aureal, 1); Index: head/sys/dev/sound/pci/cmi.c =================================================================== --- head/sys/dev/sound/pci/cmi.c (revision 297861) +++ head/sys/dev/sound/pci/cmi.c (revision 297862) @@ -1,1115 +1,1115 @@ /*- * Copyright (c) 2000 Orion Hodson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * This driver exists largely as a result of other people's efforts. * Much of register handling is based on NetBSD CMI8x38 audio driver * by Takuya Shiozaki . Chen-Li Tien * clarified points regarding the DMA related * registers and the 8738 mixer devices. His Linux driver was also a * useful reference point. * * TODO: MIDI * * SPDIF contributed by Gerhard Gonter . * * This card/code does not always manage to sample at 44100 - actual * rate drifts slightly between recordings (usually 0-3%). No * differences visible in register dumps between times that work and * those that don't.
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include #include #include "mixer_if.h" #include "mpufoi_if.h" SND_DECLARE_FILE("$FreeBSD$"); /* Supported chip ID's */ #define CMI8338A_PCI_ID 0x010013f6 #define CMI8338B_PCI_ID 0x010113f6 #define CMI8738_PCI_ID 0x011113f6 #define CMI8738B_PCI_ID 0x011213f6 #define CMI120_USB_ID 0x01030d8c /* Buffer size max is 64k for permitted DMA boundaries */ #define CMI_DEFAULT_BUFSZ 16384 /* Interrupts per length of buffer */ #define CMI_INTR_PER_BUFFER 2 /* Clarify meaning of named defines in cmireg.h */ #define CMPCI_REG_DMA0_MAX_SAMPLES CMPCI_REG_DMA0_BYTES #define CMPCI_REG_DMA0_INTR_SAMPLES CMPCI_REG_DMA0_SAMPLES #define CMPCI_REG_DMA1_MAX_SAMPLES CMPCI_REG_DMA1_BYTES #define CMPCI_REG_DMA1_INTR_SAMPLES CMPCI_REG_DMA1_SAMPLES /* Our indication of custom mixer control */ #define CMPCI_NON_SB16_CONTROL 0xff /* Debugging macro's */ #undef DEB #ifndef DEB #define DEB(x) /* x */ #endif /* DEB */ #ifndef DEBMIX #define DEBMIX(x) /* x */ #endif /* DEBMIX */ /* ------------------------------------------------------------------------- */ /* Structures */ struct sc_info; struct sc_chinfo { struct sc_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; u_int32_t fmt, spd, phys_buf, bps; u_int32_t dma_active:1, dma_was_active:1; int dir; }; struct sc_info { device_t dev; bus_space_tag_t st; bus_space_handle_t sh; bus_dma_tag_t parent_dmat; struct resource *reg, *irq; int regid, irqid; void *ih; struct mtx *lock; int spdif_enabled; unsigned int bufsz; struct sc_chinfo pch, rch; struct mpu401 *mpu; mpu401_intr_t *mpu_intr; struct resource *mpu_reg; int mpu_regid; bus_space_tag_t mpu_bt; bus_space_handle_t mpu_bh; }; /* Channel caps */ static u_int32_t cmi_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps cmi_caps = {5512, 48000, cmi_fmt, 0}; /* 
------------------------------------------------------------------------- */ /* Register Utilities */ static u_int32_t cmi_rd(struct sc_info *sc, int regno, int size) { switch (size) { case 1: return bus_space_read_1(sc->st, sc->sh, regno); case 2: return bus_space_read_2(sc->st, sc->sh, regno); case 4: return bus_space_read_4(sc->st, sc->sh, regno); default: DEB(printf("cmi_rd: failed 0x%04x %d\n", regno, size)); return 0xFFFFFFFF; } } static void cmi_wr(struct sc_info *sc, int regno, u_int32_t data, int size) { switch (size) { case 1: bus_space_write_1(sc->st, sc->sh, regno, data); break; case 2: bus_space_write_2(sc->st, sc->sh, regno, data); break; case 4: bus_space_write_4(sc->st, sc->sh, regno, data); break; } } static void cmi_partial_wr4(struct sc_info *sc, int reg, int shift, u_int32_t mask, u_int32_t val) { u_int32_t r; r = cmi_rd(sc, reg, 4); r &= ~(mask << shift); r |= val << shift; cmi_wr(sc, reg, r, 4); } static void cmi_clr4(struct sc_info *sc, int reg, u_int32_t mask) { u_int32_t r; r = cmi_rd(sc, reg, 4); r &= ~mask; cmi_wr(sc, reg, r, 4); } static void cmi_set4(struct sc_info *sc, int reg, u_int32_t mask) { u_int32_t r; r = cmi_rd(sc, reg, 4); r |= mask; cmi_wr(sc, reg, r, 4); } /* ------------------------------------------------------------------------- */ /* Rate Mapping */ static int cmi_rates[] = {5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000}; #define NUM_CMI_RATES (sizeof(cmi_rates)/sizeof(cmi_rates[0])) /* cmpci_rate_to_regvalue returns sampling freq selector for FCR1 * register - reg order is 5k,11k,22k,44k,8k,16k,32k,48k */ static u_int32_t cmpci_rate_to_regvalue(int rate) { int i, r; for(i = 0; i < NUM_CMI_RATES - 1; i++) { if (rate < ((cmi_rates[i] + cmi_rates[i + 1]) / 2)) { break; } } DEB(printf("cmpci_rate_to_regvalue: %d -> %d\n", rate, cmi_rates[i])); r = ((i >> 1) | (i << 2)) & 0x07; return r; } static int cmpci_regvalue_to_rate(u_int32_t r) { int i; i = ((r << 1) | (r >> 2)) & 0x07; DEB(printf("cmpci_regvalue_to_rate: %d 
-> %d\n", r, i)); return cmi_rates[i]; } /* ------------------------------------------------------------------------- */ /* ADC/DAC control - there are 2 dma channels on 8738, either can be * playback or capture. We use ch0 for playback and ch1 for capture. */ static void cmi_dma_prog(struct sc_info *sc, struct sc_chinfo *ch, u_int32_t base) { u_int32_t s, i, sz; ch->phys_buf = sndbuf_getbufaddr(ch->buffer); cmi_wr(sc, base, ch->phys_buf, 4); sz = (u_int32_t)sndbuf_getsize(ch->buffer); s = sz / ch->bps - 1; cmi_wr(sc, base + 4, s, 2); i = sz / (ch->bps * CMI_INTR_PER_BUFFER) - 1; cmi_wr(sc, base + 6, i, 2); } static void cmi_ch0_start(struct sc_info *sc, struct sc_chinfo *ch) { cmi_dma_prog(sc, ch, CMPCI_REG_DMA0_BASE); cmi_set4(sc, CMPCI_REG_FUNC_0, CMPCI_REG_CH0_ENABLE); cmi_set4(sc, CMPCI_REG_INTR_CTRL, CMPCI_REG_CH0_INTR_ENABLE); ch->dma_active = 1; } static u_int32_t cmi_ch0_stop(struct sc_info *sc, struct sc_chinfo *ch) { u_int32_t r = ch->dma_active; cmi_clr4(sc, CMPCI_REG_INTR_CTRL, CMPCI_REG_CH0_INTR_ENABLE); cmi_clr4(sc, CMPCI_REG_FUNC_0, CMPCI_REG_CH0_ENABLE); cmi_set4(sc, CMPCI_REG_FUNC_0, CMPCI_REG_CH0_RESET); cmi_clr4(sc, CMPCI_REG_FUNC_0, CMPCI_REG_CH0_RESET); ch->dma_active = 0; return r; } static void cmi_ch1_start(struct sc_info *sc, struct sc_chinfo *ch) { cmi_dma_prog(sc, ch, CMPCI_REG_DMA1_BASE); cmi_set4(sc, CMPCI_REG_FUNC_0, CMPCI_REG_CH1_ENABLE); /* Enable Interrupts */ cmi_set4(sc, CMPCI_REG_INTR_CTRL, CMPCI_REG_CH1_INTR_ENABLE); DEB(printf("cmi_ch1_start: dma prog\n")); ch->dma_active = 1; } static u_int32_t cmi_ch1_stop(struct sc_info *sc, struct sc_chinfo *ch) { u_int32_t r = ch->dma_active; cmi_clr4(sc, CMPCI_REG_INTR_CTRL, CMPCI_REG_CH1_INTR_ENABLE); cmi_clr4(sc, CMPCI_REG_FUNC_0, CMPCI_REG_CH1_ENABLE); cmi_set4(sc, CMPCI_REG_FUNC_0, CMPCI_REG_CH1_RESET); cmi_clr4(sc, CMPCI_REG_FUNC_0, CMPCI_REG_CH1_RESET); ch->dma_active = 0; return r; } static void cmi_spdif_speed(struct sc_info *sc, int speed) { u_int32_t fcr1, lcr, mcr; if (speed 
>= 44100) { fcr1 = CMPCI_REG_SPDIF0_ENABLE; lcr = CMPCI_REG_XSPDIF_ENABLE; mcr = (speed == 48000) ? CMPCI_REG_W_SPDIF_48L | CMPCI_REG_SPDIF_48K : 0; } else { fcr1 = mcr = lcr = 0; } cmi_partial_wr4(sc, CMPCI_REG_MISC, 0, CMPCI_REG_W_SPDIF_48L | CMPCI_REG_SPDIF_48K, mcr); cmi_partial_wr4(sc, CMPCI_REG_FUNC_1, 0, CMPCI_REG_SPDIF0_ENABLE, fcr1); cmi_partial_wr4(sc, CMPCI_REG_LEGACY_CTRL, 0, CMPCI_REG_XSPDIF_ENABLE, lcr); } /* ------------------------------------------------------------------------- */ /* Channel Interface implementation */ static void * cmichan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = devinfo; struct sc_chinfo *ch = (dir == PCMDIR_PLAY) ? &sc->pch : &sc->rch; ch->parent = sc; ch->channel = c; ch->bps = 1; ch->fmt = SND_FORMAT(AFMT_U8, 1, 0); ch->spd = DSP_DEFAULT_SPEED; ch->buffer = b; ch->dma_active = 0; if (sndbuf_alloc(ch->buffer, sc->parent_dmat, 0, sc->bufsz) != 0) { DEB(printf("cmichan_init failed\n")); return NULL; } ch->dir = dir; snd_mtxlock(sc->lock); if (ch->dir == PCMDIR_PLAY) { cmi_dma_prog(sc, ch, CMPCI_REG_DMA0_BASE); } else { cmi_dma_prog(sc, ch, CMPCI_REG_DMA1_BASE); } snd_mtxunlock(sc->lock); return ch; } static int cmichan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t f; if (format & AFMT_S16_LE) { f = CMPCI_REG_FORMAT_16BIT; ch->bps = 2; } else { f = CMPCI_REG_FORMAT_8BIT; ch->bps = 1; } if (AFMT_CHANNEL(format) > 1) { f |= CMPCI_REG_FORMAT_STEREO; ch->bps *= 2; } else { f |= CMPCI_REG_FORMAT_MONO; } snd_mtxlock(sc->lock); if (ch->dir == PCMDIR_PLAY) { cmi_partial_wr4(ch->parent, CMPCI_REG_CHANNEL_FORMAT, CMPCI_REG_CH0_FORMAT_SHIFT, CMPCI_REG_CH0_FORMAT_MASK, f); } else { cmi_partial_wr4(ch->parent, CMPCI_REG_CHANNEL_FORMAT, CMPCI_REG_CH1_FORMAT_SHIFT, CMPCI_REG_CH1_FORMAT_MASK, f); } snd_mtxunlock(sc->lock); ch->fmt = format; return 0; } static u_int32_t cmichan_setspeed(kobj_t obj, void 
*data, u_int32_t speed) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t r, rsp; r = cmpci_rate_to_regvalue(speed); snd_mtxlock(sc->lock); if (ch->dir == PCMDIR_PLAY) { if (speed < 44100) { /* disable if req before rate change */ cmi_spdif_speed(ch->parent, speed); } cmi_partial_wr4(ch->parent, CMPCI_REG_FUNC_1, CMPCI_REG_DAC_FS_SHIFT, CMPCI_REG_DAC_FS_MASK, r); if (speed >= 44100 && ch->parent->spdif_enabled) { /* enable if req after rate change */ cmi_spdif_speed(ch->parent, speed); } rsp = cmi_rd(ch->parent, CMPCI_REG_FUNC_1, 4); rsp >>= CMPCI_REG_DAC_FS_SHIFT; rsp &= CMPCI_REG_DAC_FS_MASK; } else { cmi_partial_wr4(ch->parent, CMPCI_REG_FUNC_1, CMPCI_REG_ADC_FS_SHIFT, CMPCI_REG_ADC_FS_MASK, r); rsp = cmi_rd(ch->parent, CMPCI_REG_FUNC_1, 4); rsp >>= CMPCI_REG_ADC_FS_SHIFT; rsp &= CMPCI_REG_ADC_FS_MASK; } snd_mtxunlock(sc->lock); ch->spd = cmpci_regvalue_to_rate(r); DEB(printf("cmichan_setspeed (%s) %d -> %d (%d)\n", (ch->dir == PCMDIR_PLAY) ? "play" : "rec", speed, ch->spd, cmpci_regvalue_to_rate(rsp))); return ch->spd; } static u_int32_t cmichan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; /* user has requested interrupts every blocksize bytes */ if (blocksize > sc->bufsz / CMI_INTR_PER_BUFFER) { blocksize = sc->bufsz / CMI_INTR_PER_BUFFER; } sndbuf_resize(ch->buffer, CMI_INTR_PER_BUFFER, blocksize); return blocksize; } static int cmichan_trigger(kobj_t obj, void *data, int go) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; if (!PCMTRIG_COMMON(go)) return 0; snd_mtxlock(sc->lock); if (ch->dir == PCMDIR_PLAY) { switch(go) { case PCMTRIG_START: cmi_ch0_start(sc, ch); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: cmi_ch0_stop(sc, ch); break; } } else { switch(go) { case PCMTRIG_START: cmi_ch1_start(sc, ch); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: cmi_ch1_stop(sc, ch); break; } } snd_mtxunlock(sc->lock); return 0; } static u_int32_t 
cmichan_getptr(kobj_t obj, void *data) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t physptr, bufptr, sz; snd_mtxlock(sc->lock); if (ch->dir == PCMDIR_PLAY) { physptr = cmi_rd(sc, CMPCI_REG_DMA0_BASE, 4); } else { physptr = cmi_rd(sc, CMPCI_REG_DMA1_BASE, 4); } snd_mtxunlock(sc->lock); sz = sndbuf_getsize(ch->buffer); bufptr = (physptr - ch->phys_buf + sz - ch->bps) % sz; return bufptr; } static void cmi_intr(void *data) { struct sc_info *sc = data; u_int32_t intrstat; u_int32_t toclear; snd_mtxlock(sc->lock); intrstat = cmi_rd(sc, CMPCI_REG_INTR_STATUS, 4); if ((intrstat & CMPCI_REG_ANY_INTR) != 0) { toclear = 0; if (intrstat & CMPCI_REG_CH0_INTR) { toclear |= CMPCI_REG_CH0_INTR_ENABLE; //cmi_clr4(sc, CMPCI_REG_INTR_CTRL, CMPCI_REG_CH0_INTR_ENABLE); } if (intrstat & CMPCI_REG_CH1_INTR) { toclear |= CMPCI_REG_CH1_INTR_ENABLE; //cmi_clr4(sc, CMPCI_REG_INTR_CTRL, CMPCI_REG_CH1_INTR_ENABLE); } if (toclear) { cmi_clr4(sc, CMPCI_REG_INTR_CTRL, toclear); snd_mtxunlock(sc->lock); /* Signal interrupts to channel */ if (intrstat & CMPCI_REG_CH0_INTR) { chn_intr(sc->pch.channel); } if (intrstat & CMPCI_REG_CH1_INTR) { chn_intr(sc->rch.channel); } snd_mtxlock(sc->lock); cmi_set4(sc, CMPCI_REG_INTR_CTRL, toclear); } } if(sc->mpu_intr) { (sc->mpu_intr)(sc->mpu); } snd_mtxunlock(sc->lock); return; } static struct pcmchan_caps * cmichan_getcaps(kobj_t obj, void *data) { return &cmi_caps; } static kobj_method_t cmichan_methods[] = { KOBJMETHOD(channel_init, cmichan_init), KOBJMETHOD(channel_setformat, cmichan_setformat), KOBJMETHOD(channel_setspeed, cmichan_setspeed), KOBJMETHOD(channel_setblocksize, cmichan_setblocksize), KOBJMETHOD(channel_trigger, cmichan_trigger), KOBJMETHOD(channel_getptr, cmichan_getptr), KOBJMETHOD(channel_getcaps, cmichan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(cmichan); /* ------------------------------------------------------------------------- */ /* Mixer - sb16 with kinks */ static void cmimix_wr(struct sc_info *sc, 
u_int8_t port, u_int8_t val) { cmi_wr(sc, CMPCI_REG_SBADDR, port, 1); cmi_wr(sc, CMPCI_REG_SBDATA, val, 1); } static u_int8_t cmimix_rd(struct sc_info *sc, u_int8_t port) { cmi_wr(sc, CMPCI_REG_SBADDR, port, 1); return (u_int8_t)cmi_rd(sc, CMPCI_REG_SBDATA, 1); } struct sb16props { u_int8_t rreg; /* right reg chan register */ u_int8_t stereo:1; /* (no explanation needed, honest) */ u_int8_t rec:1; /* recording source */ u_int8_t bits:3; /* num bits to represent maximum gain rep */ u_int8_t oselect; /* output select mask */ u_int8_t iselect; /* right input select mask */ } static const cmt[SOUND_MIXER_NRDEVICES] = { [SOUND_MIXER_SYNTH] = {CMPCI_SB16_MIXER_FM_R, 1, 1, 5, CMPCI_SB16_SW_FM, CMPCI_SB16_MIXER_FM_SRC_R}, [SOUND_MIXER_CD] = {CMPCI_SB16_MIXER_CDDA_R, 1, 1, 5, CMPCI_SB16_SW_CD, CMPCI_SB16_MIXER_CD_SRC_R}, [SOUND_MIXER_LINE] = {CMPCI_SB16_MIXER_LINE_R, 1, 1, 5, CMPCI_SB16_SW_LINE, CMPCI_SB16_MIXER_LINE_SRC_R}, [SOUND_MIXER_MIC] = {CMPCI_SB16_MIXER_MIC, 0, 1, 5, CMPCI_SB16_SW_MIC, CMPCI_SB16_MIXER_MIC_SRC}, [SOUND_MIXER_SPEAKER] = {CMPCI_SB16_MIXER_SPEAKER, 0, 0, 2, 0, 0}, [SOUND_MIXER_PCM] = {CMPCI_SB16_MIXER_VOICE_R, 1, 0, 5, 0, 0}, [SOUND_MIXER_VOLUME] = {CMPCI_SB16_MIXER_MASTER_R, 1, 0, 5, 0, 0}, /* These controls are not implemented in CMI8738, but maybe at a future date. They are not documented in C-Media documentation, though appear in other drivers for future h/w (ALSA, Linux, NetBSD). */ [SOUND_MIXER_IGAIN] = {CMPCI_SB16_MIXER_INGAIN_R, 1, 0, 2, 0, 0}, [SOUND_MIXER_OGAIN] = {CMPCI_SB16_MIXER_OUTGAIN_R, 1, 0, 2, 0, 0}, [SOUND_MIXER_BASS] = {CMPCI_SB16_MIXER_BASS_R, 1, 0, 4, 0, 0}, [SOUND_MIXER_TREBLE] = {CMPCI_SB16_MIXER_TREBLE_R, 1, 0, 4, 0, 0}, /* The mic pre-amp is implemented with non-SB16 compatible registers. 
*/ [SOUND_MIXER_MONITOR] = {CMPCI_NON_SB16_CONTROL, 0, 1, 4, 0}, }; #define MIXER_GAIN_REG_RTOL(r) (r - 1) static int cmimix_init(struct snd_mixer *m) { struct sc_info *sc = mix_getdevinfo(m); u_int32_t i,v; for(i = v = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (cmt[i].bits) v |= 1 << i; } mix_setdevs(m, v); for(i = v = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (cmt[i].rec) v |= 1 << i; } mix_setrecdevs(m, v); cmimix_wr(sc, CMPCI_SB16_MIXER_RESET, 0); cmimix_wr(sc, CMPCI_SB16_MIXER_ADCMIX_L, 0); cmimix_wr(sc, CMPCI_SB16_MIXER_ADCMIX_R, 0); cmimix_wr(sc, CMPCI_SB16_MIXER_OUTMIX, CMPCI_SB16_SW_CD | CMPCI_SB16_SW_MIC | CMPCI_SB16_SW_LINE); return 0; } static int cmimix_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct sc_info *sc = mix_getdevinfo(m); u_int32_t r, l, max; u_int8_t v; max = (1 << cmt[dev].bits) - 1; if (cmt[dev].rreg == CMPCI_NON_SB16_CONTROL) { /* For time being this can only be one thing (mic in * mic/aux reg) */ v = cmi_rd(sc, CMPCI_REG_AUX_MIC, 1) & 0xf0; l = left * max / 100; /* 3 bit gain with LSB MICGAIN off(1),on(1) -> 4 bit value */ v |= ((l << 1) | (~l >> 3)) & 0x0f; cmi_wr(sc, CMPCI_REG_AUX_MIC, v, 1); return 0; } l = (left * max / 100) << (8 - cmt[dev].bits); if (cmt[dev].stereo) { r = (right * max / 100) << (8 - cmt[dev].bits); cmimix_wr(sc, MIXER_GAIN_REG_RTOL(cmt[dev].rreg), l); cmimix_wr(sc, cmt[dev].rreg, r); DEBMIX(printf("Mixer stereo write dev %d reg 0x%02x "\ "value 0x%02x:0x%02x\n", dev, MIXER_GAIN_REG_RTOL(cmt[dev].rreg), l, r)); } else { r = l; cmimix_wr(sc, cmt[dev].rreg, l); DEBMIX(printf("Mixer mono write dev %d reg 0x%02x " \ "value 0x%02x:0x%02x\n", dev, cmt[dev].rreg, l, l)); } /* Zero gain does not mute channel from output, but this does... 
*/ v = cmimix_rd(sc, CMPCI_SB16_MIXER_OUTMIX); if (l == 0 && r == 0) { v &= ~cmt[dev].oselect; } else { v |= cmt[dev].oselect; } cmimix_wr(sc, CMPCI_SB16_MIXER_OUTMIX, v); return 0; } static u_int32_t cmimix_setrecsrc(struct snd_mixer *m, u_int32_t src) { struct sc_info *sc = mix_getdevinfo(m); u_int32_t i, ml, sl; ml = sl = 0; for(i = 0; i < SOUND_MIXER_NRDEVICES; i++) { if ((1< */ SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "spdif_enabled", CTLFLAG_RW, &sc->spdif_enabled, 0, "enable SPDIF output at 44.1 kHz and above"); return 0; } /* ------------------------------------------------------------------------- */ static kobj_method_t cmi_mixer_methods[] = { KOBJMETHOD(mixer_init, cmimix_init), KOBJMETHOD(mixer_set, cmimix_set), KOBJMETHOD(mixer_setrecsrc, cmimix_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(cmi_mixer); /* * mpu401 functions */ static unsigned char cmi_mread(struct mpu401 *arg, void *sc, int reg) { unsigned int d; d = bus_space_read_1(0,0, 0x330 + reg); /* printf("cmi_mread: reg %x %x\n",reg, d); */ return d; } static void cmi_mwrite(struct mpu401 *arg, void *sc, int reg, unsigned char b) { bus_space_write_1(0,0,0x330 + reg , b); } static int cmi_muninit(struct mpu401 *arg, void *cookie) { struct sc_info *sc = cookie; snd_mtxlock(sc->lock); - sc->mpu_intr = 0; - sc->mpu = 0; + sc->mpu_intr = NULL; + sc->mpu = NULL; snd_mtxunlock(sc->lock); return 0; } static kobj_method_t cmi_mpu_methods[] = { KOBJMETHOD(mpufoi_read, cmi_mread), KOBJMETHOD(mpufoi_write, cmi_mwrite), KOBJMETHOD(mpufoi_uninit, cmi_muninit), KOBJMETHOD_END }; static DEFINE_CLASS(cmi_mpu, cmi_mpu_methods, 0); static void cmi_midiattach(struct sc_info *sc) { /* const struct { int port,bits; } *p, ports[] = { {0x330,0}, {0x320,1}, {0x310,2}, {0x300,3}, {0,0} } ; Notes, CMPCI_REG_VMPUSEL sets the io port for the mpu. Does anyone know how to bus_space tag? 
*/ cmi_clr4(sc, CMPCI_REG_FUNC_1, CMPCI_REG_UART_ENABLE); cmi_clr4(sc, CMPCI_REG_LEGACY_CTRL, CMPCI_REG_VMPUSEL_MASK << CMPCI_REG_VMPUSEL_SHIFT); cmi_set4(sc, CMPCI_REG_LEGACY_CTRL, 0 << CMPCI_REG_VMPUSEL_SHIFT ); cmi_set4(sc, CMPCI_REG_FUNC_1, CMPCI_REG_UART_ENABLE); sc->mpu = mpu401_init(&cmi_mpu_class, sc, cmi_intr, &sc->mpu_intr); } /* ------------------------------------------------------------------------- */ /* Power and reset */ static void cmi_power(struct sc_info *sc, int state) { switch (state) { case 0: /* full power */ cmi_clr4(sc, CMPCI_REG_MISC, CMPCI_REG_POWER_DOWN); break; default: /* power off */ cmi_set4(sc, CMPCI_REG_MISC, CMPCI_REG_POWER_DOWN); break; } } static int cmi_init(struct sc_info *sc) { /* Effect reset */ cmi_set4(sc, CMPCI_REG_MISC, CMPCI_REG_BUS_AND_DSP_RESET); DELAY(100); cmi_clr4(sc, CMPCI_REG_MISC, CMPCI_REG_BUS_AND_DSP_RESET); /* Disable interrupts and channels */ cmi_clr4(sc, CMPCI_REG_FUNC_0, CMPCI_REG_CH0_ENABLE | CMPCI_REG_CH1_ENABLE); cmi_clr4(sc, CMPCI_REG_INTR_CTRL, CMPCI_REG_CH0_INTR_ENABLE | CMPCI_REG_CH1_INTR_ENABLE); /* Configure DMA channels, ch0 = play, ch1 = capture */ cmi_clr4(sc, CMPCI_REG_FUNC_0, CMPCI_REG_CH0_DIR); cmi_set4(sc, CMPCI_REG_FUNC_0, CMPCI_REG_CH1_DIR); /* Attempt to enable 4 Channel output */ cmi_set4(sc, CMPCI_REG_MISC, CMPCI_REG_N4SPK3D); /* Disable SPDIF1 - not compatible with config */ cmi_clr4(sc, CMPCI_REG_FUNC_1, CMPCI_REG_SPDIF1_ENABLE); cmi_clr4(sc, CMPCI_REG_FUNC_1, CMPCI_REG_SPDIF_LOOP); return 0; } static void cmi_uninit(struct sc_info *sc) { /* Disable interrupts and channels */ cmi_clr4(sc, CMPCI_REG_INTR_CTRL, CMPCI_REG_CH0_INTR_ENABLE | CMPCI_REG_CH1_INTR_ENABLE | CMPCI_REG_TDMA_INTR_ENABLE); cmi_clr4(sc, CMPCI_REG_FUNC_0, CMPCI_REG_CH0_ENABLE | CMPCI_REG_CH1_ENABLE); cmi_clr4(sc, CMPCI_REG_FUNC_1, CMPCI_REG_UART_ENABLE); if( sc->mpu ) - sc->mpu_intr = 0; + sc->mpu_intr = NULL; } /* ------------------------------------------------------------------------- */ /* Bus and device 
registration */ static int cmi_probe(device_t dev) { switch(pci_get_devid(dev)) { case CMI8338A_PCI_ID: device_set_desc(dev, "CMedia CMI8338A"); return BUS_PROBE_DEFAULT; case CMI8338B_PCI_ID: device_set_desc(dev, "CMedia CMI8338B"); return BUS_PROBE_DEFAULT; case CMI8738_PCI_ID: device_set_desc(dev, "CMedia CMI8738"); return BUS_PROBE_DEFAULT; case CMI8738B_PCI_ID: device_set_desc(dev, "CMedia CMI8738B"); return BUS_PROBE_DEFAULT; case CMI120_USB_ID: device_set_desc(dev, "CMedia CMI120"); return BUS_PROBE_DEFAULT; default: return ENXIO; } } static int cmi_attach(device_t dev) { struct sc_info *sc; char status[SND_STATUSLEN]; sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO); sc->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_cmi softc"); pci_enable_busmaster(dev); sc->dev = dev; sc->regid = PCIR_BAR(0); sc->reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->regid, RF_ACTIVE); if (!sc->reg) { device_printf(dev, "cmi_attach: Cannot allocate bus resource\n"); goto bad; } sc->st = rman_get_bustag(sc->reg); sc->sh = rman_get_bushandle(sc->reg); if (0) cmi_midiattach(sc); sc->irqid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid, RF_ACTIVE | RF_SHAREABLE); if (!sc->irq || snd_setup_intr(dev, sc->irq, INTR_MPSAFE, cmi_intr, sc, &sc->ih)) { device_printf(dev, "cmi_attach: Unable to map interrupt\n"); goto bad; } sc->bufsz = pcm_getbuffersize(dev, 4096, CMI_DEFAULT_BUFSZ, 65536); if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/NULL, /*lockfunc*/NULL, &sc->parent_dmat) != 0) { device_printf(dev, "cmi_attach: Unable to create dma tag\n"); goto bad; } cmi_power(sc, 0); if (cmi_init(sc)) goto bad; if (mixer_init(dev, &cmi_mixer_class, sc)) goto bad; if (pcm_register(dev, sc, 1, 1)) goto bad; cmi_initsys(sc); pcm_addchan(dev, 
PCMDIR_PLAY, &cmichan_class, sc); pcm_addchan(dev, PCMDIR_REC, &cmichan_class, sc); snprintf(status, SND_STATUSLEN, "at io 0x%jx irq %jd %s", rman_get_start(sc->reg), rman_get_start(sc->irq),PCM_KLDSTRING(snd_cmi)); pcm_setstatus(dev, status); DEB(printf("cmi_attach: succeeded\n")); return 0; bad: if (sc->parent_dmat) bus_dma_tag_destroy(sc->parent_dmat); if (sc->ih) bus_teardown_intr(dev, sc->irq, sc->ih); if (sc->irq) bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); if (sc->reg) bus_release_resource(dev, SYS_RES_IOPORT, sc->regid, sc->reg); if (sc->lock) snd_mtxfree(sc->lock); if (sc) free(sc, M_DEVBUF); return ENXIO; } static int cmi_detach(device_t dev) { struct sc_info *sc; int r; r = pcm_unregister(dev); if (r) return r; sc = pcm_getdevinfo(dev); cmi_uninit(sc); cmi_power(sc, 3); bus_dma_tag_destroy(sc->parent_dmat); bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); if(sc->mpu) mpu401_uninit(sc->mpu); bus_release_resource(dev, SYS_RES_IOPORT, sc->regid, sc->reg); if (sc->mpu_reg) bus_release_resource(dev, SYS_RES_IOPORT, sc->mpu_regid, sc->mpu_reg); snd_mtxfree(sc->lock); free(sc, M_DEVBUF); return 0; } static int cmi_suspend(device_t dev) { struct sc_info *sc = pcm_getdevinfo(dev); snd_mtxlock(sc->lock); sc->pch.dma_was_active = cmi_ch0_stop(sc, &sc->pch); sc->rch.dma_was_active = cmi_ch1_stop(sc, &sc->rch); cmi_power(sc, 3); snd_mtxunlock(sc->lock); return 0; } static int cmi_resume(device_t dev) { struct sc_info *sc = pcm_getdevinfo(dev); snd_mtxlock(sc->lock); cmi_power(sc, 0); if (cmi_init(sc) != 0) { device_printf(dev, "unable to reinitialize the card\n"); snd_mtxunlock(sc->lock); return ENXIO; } if (mixer_reinit(dev) == -1) { device_printf(dev, "unable to reinitialize the mixer\n"); snd_mtxunlock(sc->lock); return ENXIO; } if (sc->pch.dma_was_active) { cmichan_setspeed(NULL, &sc->pch, sc->pch.spd); cmichan_setformat(NULL, &sc->pch, sc->pch.fmt); cmi_ch0_start(sc, &sc->pch); } if 
(sc->rch.dma_was_active) { cmichan_setspeed(NULL, &sc->rch, sc->rch.spd); cmichan_setformat(NULL, &sc->rch, sc->rch.fmt); cmi_ch1_start(sc, &sc->rch); } snd_mtxunlock(sc->lock); return 0; } static device_method_t cmi_methods[] = { DEVMETHOD(device_probe, cmi_probe), DEVMETHOD(device_attach, cmi_attach), DEVMETHOD(device_detach, cmi_detach), DEVMETHOD(device_resume, cmi_resume), DEVMETHOD(device_suspend, cmi_suspend), { 0, 0 } }; static driver_t cmi_driver = { "pcm", cmi_methods, PCM_SOFTC_SIZE }; DRIVER_MODULE(snd_cmi, pci, cmi_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_cmi, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_DEPEND(snd_cmi, midi, 1,1,1); MODULE_VERSION(snd_cmi, 1); Index: head/sys/dev/sound/pci/emu10k1.c =================================================================== --- head/sys/dev/sound/pci/emu10k1.c (revision 297861) +++ head/sys/dev/sound/pci/emu10k1.c (revision 297862) @@ -1,2256 +1,2256 @@ /*- * Copyright (c) 2004 David O'Brien * Copyright (c) 2003 Orlando Bassotto * Copyright (c) 1999 Cameron Grant * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE.
*/ #define EMU_A_IOCFG_GPOUT_A 0x40 #define EMU_A_IOCFG_GPOUT_D 0x04 #define EMU_A_IOCFG_GPOUT_AD (EMU_A_IOCFG_GPOUT_A|EMU_A_IOCFG_GPOUT_D) /* EMU_A_IOCFG_GPOUT0 */ #define EMU_HCFG_GPOUT1 0x00000800 /* instruction set */ #define iACC3 0x06 #define iMACINT0 0x04 #define iINTERP 0x0e #define C_00000000 0x40 #define C_00000001 0x41 #define C_00000004 0x44 #define C_40000000 0x4d /* Audigy constants */ #define A_C_00000000 0xc0 #define A_C_40000000 0xcd /* GPRs */ #define FXBUS(x) (0x00 + (x)) #define EXTIN(x) (0x10 + (x)) #define EXTOUT(x) (0x20 + (x)) #define GPR(x) (EMU_FXGPREGBASE + (x)) #define A_EXTIN(x) (0x40 + (x)) #define A_FXBUS(x) (0x00 + (x)) #define A_EXTOUT(x) (0x60 + (x)) #define A_GPR(x) (EMU_A_FXGPREGBASE + (x)) /* FX buses */ #define FXBUS_PCM_LEFT 0x00 #define FXBUS_PCM_RIGHT 0x01 #define FXBUS_MIDI_LEFT 0x04 #define FXBUS_MIDI_RIGHT 0x05 #define FXBUS_MIDI_REVERB 0x0c #define FXBUS_MIDI_CHORUS 0x0d /* Inputs */ #define EXTIN_AC97_L 0x00 #define EXTIN_AC97_R 0x01 #define EXTIN_SPDIF_CD_L 0x02 #define EXTIN_SPDIF_CD_R 0x03 #define EXTIN_TOSLINK_L 0x06 #define EXTIN_TOSLINK_R 0x07 #define EXTIN_COAX_SPDIF_L 0x0a #define EXTIN_COAX_SPDIF_R 0x0b /* Audigy Inputs */ #define A_EXTIN_AC97_L 0x00 #define A_EXTIN_AC97_R 0x01 /* Outputs */ #define EXTOUT_AC97_L 0x00 #define EXTOUT_AC97_R 0x01 #define EXTOUT_TOSLINK_L 0x02 #define EXTOUT_TOSLINK_R 0x03 #define EXTOUT_AC97_CENTER 0x04 #define EXTOUT_AC97_LFE 0x05 #define EXTOUT_HEADPHONE_L 0x06 #define EXTOUT_HEADPHONE_R 0x07 #define EXTOUT_REAR_L 0x08 #define EXTOUT_REAR_R 0x09 #define EXTOUT_ADC_CAP_L 0x0a #define EXTOUT_ADC_CAP_R 0x0b #define EXTOUT_ACENTER 0x11 #define EXTOUT_ALFE 0x12 /* Audigy Outputs */ #define A_EXTOUT_FRONT_L 0x00 #define A_EXTOUT_FRONT_R 0x01 #define A_EXTOUT_CENTER 0x02 #define A_EXTOUT_LFE 0x03 #define A_EXTOUT_HEADPHONE_L 0x04 #define A_EXTOUT_HEADPHONE_R 0x05 #define A_EXTOUT_REAR_L 0x06 #define A_EXTOUT_REAR_R 0x07 #define A_EXTOUT_AFRONT_L 0x08 #define A_EXTOUT_AFRONT_R 0x09 
#define A_EXTOUT_ACENTER 0x0a #define A_EXTOUT_ALFE 0x0b #define A_EXTOUT_AREAR_L 0x0e #define A_EXTOUT_AREAR_R 0x0f #define A_EXTOUT_AC97_L 0x10 #define A_EXTOUT_AC97_R 0x11 #define A_EXTOUT_ADC_CAP_L 0x16 #define A_EXTOUT_ADC_CAP_R 0x17 struct emu_memblk { SLIST_ENTRY(emu_memblk) link; void *buf; bus_addr_t buf_addr; u_int32_t pte_start, pte_size; bus_dmamap_t buf_map; }; struct emu_mem { u_int8_t bmap[EMUMAXPAGES / 8]; u_int32_t *ptb_pages; void *silent_page; bus_addr_t silent_page_addr; bus_addr_t ptb_pages_addr; bus_dmamap_t ptb_map; bus_dmamap_t silent_map; SLIST_HEAD(, emu_memblk) blocks; }; struct emu_voice { int vnum; unsigned int b16:1, stereo:1, busy:1, running:1, ismaster:1; int speed; int start, end, vol; int fxrt1; /* FX routing */ int fxrt2; /* FX routing (only for audigy) */ u_int32_t buf; struct emu_voice *slave; struct pcm_channel *channel; }; struct sc_info; /* channel registers */ struct sc_pchinfo { int spd, fmt, blksz, run; struct emu_voice *master, *slave; struct snd_dbuf *buffer; struct pcm_channel *channel; struct sc_info *parent; }; struct sc_rchinfo { int spd, fmt, run, blksz, num; u_int32_t idxreg, basereg, sizereg, setupreg, irqmask; struct snd_dbuf *buffer; struct pcm_channel *channel; struct sc_info *parent; }; /* device private data */ struct sc_info { device_t dev; u_int32_t type, rev; u_int32_t tos_link:1, APS:1, audigy:1, audigy2:1; u_int32_t addrmask; /* wider if audigy */ bus_space_tag_t st; bus_space_handle_t sh; bus_dma_tag_t parent_dmat; struct resource *reg, *irq; void *ih; struct mtx *lock; unsigned int bufsz; int timer, timerinterval; int pnum, rnum; int nchans; struct emu_mem mem; struct emu_voice voice[64]; struct sc_pchinfo pch[EMU_MAX_CHANS]; struct sc_rchinfo rch[3]; struct mpu401 *mpu; mpu401_intr_t *mpu_intr; int mputx; }; /* -------------------------------------------------------------------- */ /* * prototypes */ /* stuff */ static int emu_init(struct sc_info *); static void emu_intr(void *); static void 
*emu_malloc(struct sc_info *sc, u_int32_t sz, bus_addr_t *addr, bus_dmamap_t *map); static void *emu_memalloc(struct sc_info *sc, u_int32_t sz, bus_addr_t *addr); static int emu_memfree(struct sc_info *sc, void *buf); static int emu_memstart(struct sc_info *sc, void *buf); #ifdef EMUDEBUG static void emu_vdump(struct sc_info *sc, struct emu_voice *v); #endif /* talk to the card */ static u_int32_t emu_rd(struct sc_info *, int, int); static void emu_wr(struct sc_info *, int, u_int32_t, int); /* -------------------------------------------------------------------- */ static u_int32_t emu_rfmt_ac97[] = { SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static u_int32_t emu_rfmt_mic[] = { SND_FORMAT(AFMT_U8, 1, 0), 0 }; static u_int32_t emu_rfmt_efx[] = { SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps emu_reccaps[3] = { {8000, 48000, emu_rfmt_ac97, 0}, {8000, 8000, emu_rfmt_mic, 0}, {48000, 48000, emu_rfmt_efx, 0}, }; static u_int32_t emu_pfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps emu_playcaps = {4000, 48000, emu_pfmt, 0}; static int adcspeed[8] = {48000, 44100, 32000, 24000, 22050, 16000, 11025, 8000}; /* audigy supports 12kHz. 
*/
static int audigy_adcspeed[9] = {
	48000, 44100, 32000, 24000, 22050, 16000, 12000, 11025, 8000
};

/* -------------------------------------------------------------------- */
/* Hardware */

/*
 * emu_rd() / emu_wr(): raw bus-space access to the card's PCI register
 * window.  'size' selects an 8/16/32-bit access; emu_rd() returns
 * 0xffffffff for any unsupported size.
 */
static u_int32_t
emu_rd(struct sc_info *sc, int regno, int size)
{
	switch (size) {
	case 1:
		return bus_space_read_1(sc->st, sc->sh, regno);
	case 2:
		return bus_space_read_2(sc->st, sc->sh, regno);
	case 4:
		return bus_space_read_4(sc->st, sc->sh, regno);
	default:
		return 0xffffffff;
	}
}

static void
emu_wr(struct sc_info *sc, int regno, u_int32_t data, int size)
{
	switch (size) {
	case 1:
		bus_space_write_1(sc->st, sc->sh, regno, data);
		break;
	case 2:
		bus_space_write_2(sc->st, sc->sh, regno, data);
		break;
	case 4:
		bus_space_write_4(sc->st, sc->sh, regno, data);
		break;
	}
}

/*
 * emu_rdptr(): read an indirect ("pointer") register.  The register
 * index goes into EMU_PTR (masked by sc->addrmask, which is wider on
 * Audigy), then the value is read back through EMU_DATA.  The upper
 * bits of 'reg' may encode a sub-field: bits 24..29 give the field
 * width and bits 16..20 its offset, in which case only that bit-field
 * is extracted and returned right-aligned.
 */
static u_int32_t
emu_rdptr(struct sc_info *sc, int chn, int reg)
{
	u_int32_t ptr, val, mask, size, offset;

	ptr = ((reg << 16) & sc->addrmask) | (chn & EMU_PTR_CHNO_MASK);
	emu_wr(sc, EMU_PTR, ptr, 4);
	val = emu_rd(sc, EMU_DATA, 4);
	if (reg & 0xff000000) {
		size = (reg >> 24) & 0x3f;
		offset = (reg >> 16) & 0x1f;
		mask = ((1 << size) - 1) << offset;
		val &= mask;
		val >>= offset;
	}
	return val;
}

/*
 * emu_wrptr(): write an indirect register through EMU_PTR/EMU_DATA.
 * When 'reg' encodes a bit-field (see emu_rdptr), the current register
 * contents are read back first and only the addressed field is
 * replaced (read-modify-write).
 */
static void
emu_wrptr(struct sc_info *sc, int chn, int reg, u_int32_t data)
{
	u_int32_t ptr, mask, size, offset;

	ptr = ((reg << 16) & sc->addrmask) | (chn & EMU_PTR_CHNO_MASK);
	emu_wr(sc, EMU_PTR, ptr, 4);
	if (reg & 0xff000000) {
		size = (reg >> 24) & 0x3f;
		offset = (reg >> 16) & 0x1f;
		mask = ((1 << size) - 1) << offset;
		data <<= offset;
		data &= mask;
		data |= emu_rd(sc, EMU_DATA, 4) & ~mask;
	}
	emu_wr(sc, EMU_DATA, data, 4);
}

/*
 * emu_wrefx(): store one word of DSP (FX) microcode; the instruction
 * store lives at a different base address on Audigy vs. EMU10K1.
 */
static void
emu_wrefx(struct sc_info *sc, unsigned int pc, unsigned int data)
{
	pc += sc->audigy ?
EMU_A_MICROCODEBASE : EMU_MICROCODEBASE;
	emu_wrptr(sc, 0, pc, data);
}

/* -------------------------------------------------------------------- */
/* ac97 codec */
/* no locking needed */

/*
 * emu_rdcd(): AC97 codec register read.  The register index is
 * written to EMU_AC97ADDR, the 16-bit value read from EMU_AC97DATA.
 */
static int
emu_rdcd(kobj_t obj, void *devinfo, int regno)
{
	struct sc_info *sc = (struct sc_info *)devinfo;

	emu_wr(sc, EMU_AC97ADDR, regno, 1);
	return emu_rd(sc, EMU_AC97DATA, 2);
}

/* emu_wrcd(): AC97 codec register write; always reports success. */
static int
emu_wrcd(kobj_t obj, void *devinfo, int regno, u_int32_t data)
{
	struct sc_info *sc = (struct sc_info *)devinfo;

	emu_wr(sc, EMU_AC97ADDR, regno, 1);
	emu_wr(sc, EMU_AC97DATA, data, 2);
	return 0;
}

static kobj_method_t emu_ac97_methods[] = {
	KOBJMETHOD(ac97_read, emu_rdcd),
	KOBJMETHOD(ac97_write, emu_wrcd),
	KOBJMETHOD_END
};
AC97_DECLARE(emu_ac97);

/* -------------------------------------------------------------------- */
/* stuff */

/*
 * emu_settimer(): pick the interval-timer rate.  Scans every active
 * playback and record channel for the highest interrupt rate implied
 * by its speed/align/blocksize, clamps the result to 48..9600 Hz, and
 * programs the 10-bit EMU_TIMER divider relative to the 48 kHz master
 * clock.  Returns the programmed timer interval.
 */
static int
emu_settimer(struct sc_info *sc)
{
	struct sc_pchinfo *pch;
	struct sc_rchinfo *rch;
	int i, tmp, rate;

	rate = 0;
	for (i = 0; i < sc->nchans; i++) {
		pch = &sc->pch[i];
		if (pch->buffer) {
			tmp = (pch->spd * sndbuf_getalign(pch->buffer))
			    / pch->blksz;
			if (tmp > rate)
				rate = tmp;
		}
	}
	for (i = 0; i < 3; i++) {
		rch = &sc->rch[i];
		if (rch->buffer) {
			tmp = (rch->spd * sndbuf_getalign(rch->buffer))
			    / rch->blksz;
			if (tmp > rate)
				rate = tmp;
		}
	}
	RANGE(rate, 48, 9600);
	sc->timerinterval = 48000 / rate;
	emu_wr(sc, EMU_TIMER, sc->timerinterval & 0x03ff, 2);
	return sc->timerinterval;
}

/*
 * emu_enatimer(): enable/disable the interval-timer interrupt.
 * Enables the EMU_INTE bit only on the first enable (sc->timer counts
 * enable requests); disabling clears the count and the bit.
 */
static int
emu_enatimer(struct sc_info *sc, int go)
{
	u_int32_t x;

	if (go) {
		if (sc->timer++ == 0) {
			x = emu_rd(sc, EMU_INTE, 4);
			x |= EMU_INTE_INTERTIMERENB;
			emu_wr(sc, EMU_INTE, x, 4);
		}
	} else {
		sc->timer = 0;
		x = emu_rd(sc, EMU_INTE, 4);
		x &= ~EMU_INTE_INTERTIMERENB;
		emu_wr(sc, EMU_INTE, x, 4);
	}
	return 0;
}

/*
 * emu_enastop(): set a voice's stop-on-loop bit.  Voices 0..31 live in
 * EMU_SOLEL, 32..63 in EMU_SOLEH; the bit-field encoding (width 1,
 * offset = channel) is handed to emu_wrptr().
 */
static void
emu_enastop(struct sc_info *sc, char channel, int enable)
{
	int reg = (channel & 0x20) ?
EMU_SOLEH : EMU_SOLEL; channel &= 0x1f; reg |= 1 << 24; reg |= channel << 16; emu_wrptr(sc, 0, reg, enable); } static int emu_recval(int speed) { int val; val = 0; while (val < 7 && speed < adcspeed[val]) val++; return val; } static int audigy_recval(int speed) { int val; val = 0; while (val < 8 && speed < audigy_adcspeed[val]) val++; return val; } static u_int32_t emu_rate_to_pitch(u_int32_t rate) { static u_int32_t logMagTable[128] = { 0x00000, 0x02dfc, 0x05b9e, 0x088e6, 0x0b5d6, 0x0e26f, 0x10eb3, 0x13aa2, 0x1663f, 0x1918a, 0x1bc84, 0x1e72e, 0x2118b, 0x23b9a, 0x2655d, 0x28ed5, 0x2b803, 0x2e0e8, 0x30985, 0x331db, 0x359eb, 0x381b6, 0x3a93d, 0x3d081, 0x3f782, 0x41e42, 0x444c1, 0x46b01, 0x49101, 0x4b6c4, 0x4dc49, 0x50191, 0x5269e, 0x54b6f, 0x57006, 0x59463, 0x5b888, 0x5dc74, 0x60029, 0x623a7, 0x646ee, 0x66a00, 0x68cdd, 0x6af86, 0x6d1fa, 0x6f43c, 0x7164b, 0x73829, 0x759d4, 0x77b4f, 0x79c9a, 0x7bdb5, 0x7dea1, 0x7ff5e, 0x81fed, 0x8404e, 0x86082, 0x88089, 0x8a064, 0x8c014, 0x8df98, 0x8fef1, 0x91e20, 0x93d26, 0x95c01, 0x97ab4, 0x9993e, 0x9b79f, 0x9d5d9, 0x9f3ec, 0xa11d8, 0xa2f9d, 0xa4d3c, 0xa6ab5, 0xa8808, 0xaa537, 0xac241, 0xadf26, 0xafbe7, 0xb1885, 0xb3500, 0xb5157, 0xb6d8c, 0xb899f, 0xba58f, 0xbc15e, 0xbdd0c, 0xbf899, 0xc1404, 0xc2f50, 0xc4a7b, 0xc6587, 0xc8073, 0xc9b3f, 0xcb5ed, 0xcd07c, 0xceaec, 0xd053f, 0xd1f73, 0xd398a, 0xd5384, 0xd6d60, 0xd8720, 0xda0c3, 0xdba4a, 0xdd3b4, 0xded03, 0xe0636, 0xe1f4e, 0xe384a, 0xe512c, 0xe69f3, 0xe829f, 0xe9b31, 0xeb3a9, 0xecc08, 0xee44c, 0xefc78, 0xf148a, 0xf2c83, 0xf4463, 0xf5c2a, 0xf73da, 0xf8b71, 0xfa2f0, 0xfba57, 0xfd1a7, 0xfe8df }; static char logSlopeTable[128] = { 0x5c, 0x5c, 0x5b, 0x5a, 0x5a, 0x59, 0x58, 0x58, 0x57, 0x56, 0x56, 0x55, 0x55, 0x54, 0x53, 0x53, 0x52, 0x52, 0x51, 0x51, 0x50, 0x50, 0x4f, 0x4f, 0x4e, 0x4d, 0x4d, 0x4d, 0x4c, 0x4c, 0x4b, 0x4b, 0x4a, 0x4a, 0x49, 0x49, 0x48, 0x48, 0x47, 0x47, 0x47, 0x46, 0x46, 0x45, 0x45, 0x45, 0x44, 0x44, 0x43, 0x43, 0x43, 0x42, 0x42, 0x42, 0x41, 0x41, 0x41, 0x40, 0x40, 0x40, 0x3f, 
0x3f, 0x3f, 0x3e, 0x3e, 0x3e, 0x3d, 0x3d, 0x3d, 0x3c, 0x3c, 0x3c, 0x3b, 0x3b, 0x3b, 0x3b, 0x3a, 0x3a, 0x3a, 0x39, 0x39, 0x39, 0x39, 0x38, 0x38, 0x38, 0x38, 0x37, 0x37, 0x37, 0x37, 0x36, 0x36, 0x36, 0x36, 0x35, 0x35, 0x35, 0x35, 0x34, 0x34, 0x34, 0x34, 0x34, 0x33, 0x33, 0x33, 0x33, 0x32, 0x32, 0x32, 0x32, 0x32, 0x31, 0x31, 0x31, 0x31, 0x31, 0x30, 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f }; int i; if (rate == 0) return 0; /* Bail out if no leading "1" */ rate *= 11185; /* Scale 48000 to 0x20002380 */ for (i = 31; i > 0; i--) { if (rate & 0x80000000) { /* Detect leading "1" */ return (((u_int32_t) (i - 15) << 20) + logMagTable[0x7f & (rate >> 24)] + (0x7f & (rate >> 17)) * logSlopeTable[0x7f & (rate >> 24)]); } rate <<= 1; } return 0; /* Should never reach this point */ } static u_int32_t emu_rate_to_linearpitch(u_int32_t rate) { rate = (rate << 8) / 375; return (rate >> 1) + (rate & 1); } static struct emu_voice * emu_valloc(struct sc_info *sc) { struct emu_voice *v; int i; v = NULL; for (i = 0; i < 64 && sc->voice[i].busy; i++); if (i < 64) { v = &sc->voice[i]; v->busy = 1; } return v; } static int emu_vinit(struct sc_info *sc, struct emu_voice *m, struct emu_voice *s, u_int32_t sz, struct snd_dbuf *b) { void *buf; bus_addr_t tmp_addr; buf = emu_memalloc(sc, sz, &tmp_addr); if (buf == NULL) return -1; if (b != NULL) sndbuf_setup(b, buf, sz); m->start = emu_memstart(sc, buf) * EMUPAGESIZE; m->end = m->start + sz; m->channel = NULL; m->speed = 0; m->b16 = 0; m->stereo = 0; m->running = 0; m->ismaster = 1; m->vol = 0xff; m->buf = tmp_addr; m->slave = s; if (sc->audigy) { m->fxrt1 = FXBUS_MIDI_CHORUS | FXBUS_PCM_RIGHT << 8 | FXBUS_PCM_LEFT << 16 | FXBUS_MIDI_REVERB << 24; m->fxrt2 = 0x3f3f3f3f; /* No effects on second route */ } else { m->fxrt1 = FXBUS_MIDI_CHORUS | FXBUS_PCM_RIGHT << 4 | FXBUS_PCM_LEFT << 8 | FXBUS_MIDI_REVERB << 12; m->fxrt2 = 0; } if (s != NULL) { s->start = m->start; s->end = m->end; s->channel = NULL; s->speed = 0; s->b16 = 0; s->stereo 
= 0; s->running = 0; s->ismaster = 0; s->vol = m->vol; s->buf = m->buf; s->fxrt1 = m->fxrt1; s->fxrt2 = m->fxrt2; s->slave = NULL; } return 0; } static void emu_vsetup(struct sc_pchinfo *ch) { struct emu_voice *v = ch->master; if (ch->fmt) { v->b16 = (ch->fmt & AFMT_16BIT) ? 1 : 0; v->stereo = (AFMT_CHANNEL(ch->fmt) > 1) ? 1 : 0; if (v->slave != NULL) { v->slave->b16 = v->b16; v->slave->stereo = v->stereo; } } if (ch->spd) { v->speed = ch->spd; if (v->slave != NULL) v->slave->speed = v->speed; } } static void emu_vwrite(struct sc_info *sc, struct emu_voice *v) { int s; int l, r, x, y; u_int32_t sa, ea, start, val, silent_page; s = (v->stereo ? 1 : 0) + (v->b16 ? 1 : 0); sa = v->start >> s; ea = v->end >> s; l = r = x = y = v->vol; if (v->stereo) { l = v->ismaster ? l : 0; r = v->ismaster ? 0 : r; } emu_wrptr(sc, v->vnum, EMU_CHAN_CPF, v->stereo ? EMU_CHAN_CPF_STEREO_MASK : 0); val = v->stereo ? 28 : 30; val *= v->b16 ? 1 : 2; start = sa + val; if (sc->audigy) { emu_wrptr(sc, v->vnum, EMU_A_CHAN_FXRT1, v->fxrt1); emu_wrptr(sc, v->vnum, EMU_A_CHAN_FXRT2, v->fxrt2); emu_wrptr(sc, v->vnum, EMU_A_CHAN_SENDAMOUNTS, 0); } else emu_wrptr(sc, v->vnum, EMU_CHAN_FXRT, v->fxrt1 << 16); emu_wrptr(sc, v->vnum, EMU_CHAN_PTRX, (x << 8) | r); emu_wrptr(sc, v->vnum, EMU_CHAN_DSL, ea | (y << 24)); emu_wrptr(sc, v->vnum, EMU_CHAN_PSST, sa | (l << 24)); emu_wrptr(sc, v->vnum, EMU_CHAN_CCCA, start | (v->b16 ? 
0 : EMU_CHAN_CCCA_8BITSELECT)); emu_wrptr(sc, v->vnum, EMU_CHAN_Z1, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_Z2, 0); silent_page = ((u_int32_t)(sc->mem.silent_page_addr) << 1) | EMU_CHAN_MAP_PTI_MASK; emu_wrptr(sc, v->vnum, EMU_CHAN_MAPA, silent_page); emu_wrptr(sc, v->vnum, EMU_CHAN_MAPB, silent_page); emu_wrptr(sc, v->vnum, EMU_CHAN_CVCF, EMU_CHAN_CVCF_CURRFILTER_MASK); emu_wrptr(sc, v->vnum, EMU_CHAN_VTFT, EMU_CHAN_VTFT_FILTERTARGET_MASK); emu_wrptr(sc, v->vnum, EMU_CHAN_ATKHLDM, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_DCYSUSM, EMU_CHAN_DCYSUSM_DECAYTIME_MASK); emu_wrptr(sc, v->vnum, EMU_CHAN_LFOVAL1, 0x8000); emu_wrptr(sc, v->vnum, EMU_CHAN_LFOVAL2, 0x8000); emu_wrptr(sc, v->vnum, EMU_CHAN_FMMOD, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_TREMFRQ, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_FM2FRQ2, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_ENVVAL, 0x8000); emu_wrptr(sc, v->vnum, EMU_CHAN_ATKHLDV, EMU_CHAN_ATKHLDV_HOLDTIME_MASK | EMU_CHAN_ATKHLDV_ATTACKTIME_MASK); emu_wrptr(sc, v->vnum, EMU_CHAN_ENVVOL, 0x8000); emu_wrptr(sc, v->vnum, EMU_CHAN_PEFE_FILTERAMOUNT, 0x7f); emu_wrptr(sc, v->vnum, EMU_CHAN_PEFE_PITCHAMOUNT, 0); if (v->slave != NULL) emu_vwrite(sc, v->slave); } static void emu_vtrigger(struct sc_info *sc, struct emu_voice *v, int go) { u_int32_t pitch_target, initial_pitch; u_int32_t cra, cs, ccis; u_int32_t sample, i; if (go) { cra = 64; cs = v->stereo ? 4 : 2; ccis = v->stereo ? 28 : 30; ccis *= v->b16 ? 1 : 2; sample = v->b16 ? 
0x00000000 : 0x80808080; for (i = 0; i < cs; i++) emu_wrptr(sc, v->vnum, EMU_CHAN_CD0 + i, sample); emu_wrptr(sc, v->vnum, EMU_CHAN_CCR_CACHEINVALIDSIZE, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_CCR_READADDRESS, cra); emu_wrptr(sc, v->vnum, EMU_CHAN_CCR_CACHEINVALIDSIZE, ccis); emu_wrptr(sc, v->vnum, EMU_CHAN_IFATN, 0xff00); emu_wrptr(sc, v->vnum, EMU_CHAN_VTFT, 0xffffffff); emu_wrptr(sc, v->vnum, EMU_CHAN_CVCF, 0xffffffff); emu_wrptr(sc, v->vnum, EMU_CHAN_DCYSUSV, 0x00007f7f); emu_enastop(sc, v->vnum, 0); pitch_target = emu_rate_to_linearpitch(v->speed); initial_pitch = emu_rate_to_pitch(v->speed) >> 8; emu_wrptr(sc, v->vnum, EMU_CHAN_PTRX_PITCHTARGET, pitch_target); emu_wrptr(sc, v->vnum, EMU_CHAN_CPF_PITCH, pitch_target); emu_wrptr(sc, v->vnum, EMU_CHAN_IP, initial_pitch); } else { emu_wrptr(sc, v->vnum, EMU_CHAN_PTRX_PITCHTARGET, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_CPF_PITCH, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_IFATN, 0xffff); emu_wrptr(sc, v->vnum, EMU_CHAN_VTFT, 0x0000ffff); emu_wrptr(sc, v->vnum, EMU_CHAN_CVCF, 0x0000ffff); emu_wrptr(sc, v->vnum, EMU_CHAN_IP, 0); emu_enastop(sc, v->vnum, 1); } if (v->slave != NULL) emu_vtrigger(sc, v->slave, go); } static int emu_vpos(struct sc_info *sc, struct emu_voice *v) { int s, ptr; s = (v->b16 ? 1 : 0) + (v->stereo ? 
1 : 0); ptr = (emu_rdptr(sc, v->vnum, EMU_CHAN_CCCA_CURRADDR) - (v->start >> s)) << s; return ptr & ~0x0000001f; } #ifdef EMUDEBUG static void emu_vdump(struct sc_info *sc, struct emu_voice *v) { char *regname[] = { "cpf", "ptrx", "cvcf", "vtft", "z2", "z1", "psst", "dsl", "ccca", "ccr", "clp", "fxrt", "mapa", "mapb", NULL, NULL, "envvol", "atkhldv", "dcysusv", "lfoval1", "envval", "atkhldm", "dcysusm", "lfoval2", "ip", "ifatn", "pefe", "fmmod", "tremfrq", "fmfrq2", "tempenv" }; char *regname2[] = { "mudata1", "mustat1", "mudata2", "mustat2", "fxwc1", "fxwc2", "spdrate", NULL, NULL, NULL, NULL, NULL, "fxrt2", "sndamnt", "fxrt1", NULL, NULL }; int i, x; printf("voice number %d\n", v->vnum); for (i = 0, x = 0; i <= 0x1e; i++) { if (regname[i] == NULL) continue; printf("%s\t[%08x]", regname[i], emu_rdptr(sc, v->vnum, i)); printf("%s", (x == 2) ? "\n" : "\t"); x++; if (x > 2) x = 0; } /* Print out audigy extra registers */ if (sc->audigy) { for (i = 0; i <= 0xe; i++) { if (regname2[i] == NULL) continue; printf("%s\t[%08x]", regname2[i], emu_rdptr(sc, v->vnum, i + 0x70)); printf("%s", (x == 2)? "\n" : "\t"); x++; if (x > 2) x = 0; } } printf("\n\n"); } #endif /* channel interface */ static void * emupchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = devinfo; struct sc_pchinfo *ch; void *r; KASSERT(dir == PCMDIR_PLAY, ("emupchan_init: bad direction")); ch = &sc->pch[sc->pnum++]; ch->buffer = b; ch->parent = sc; ch->channel = c; ch->blksz = sc->bufsz / 2; ch->fmt = SND_FORMAT(AFMT_U8, 1, 0); ch->spd = 8000; snd_mtxlock(sc->lock); ch->master = emu_valloc(sc); ch->slave = emu_valloc(sc); snd_mtxunlock(sc->lock); r = (emu_vinit(sc, ch->master, ch->slave, sc->bufsz, ch->buffer)) ? 
NULL : ch; return r; } static int emupchan_free(kobj_t obj, void *data) { struct sc_pchinfo *ch = data; struct sc_info *sc = ch->parent; int r; snd_mtxlock(sc->lock); r = emu_memfree(sc, sndbuf_getbuf(ch->buffer)); snd_mtxunlock(sc->lock); return r; } static int emupchan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sc_pchinfo *ch = data; ch->fmt = format; return 0; } static u_int32_t emupchan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct sc_pchinfo *ch = data; ch->spd = speed; return ch->spd; } static u_int32_t emupchan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sc_pchinfo *ch = data; struct sc_info *sc = ch->parent; int irqrate, blksz; ch->blksz = blocksize; snd_mtxlock(sc->lock); emu_settimer(sc); irqrate = 48000 / sc->timerinterval; snd_mtxunlock(sc->lock); blksz = (ch->spd * sndbuf_getalign(ch->buffer)) / irqrate; return blocksize; } static int emupchan_trigger(kobj_t obj, void *data, int go) { struct sc_pchinfo *ch = data; struct sc_info *sc = ch->parent; if (!PCMTRIG_COMMON(go)) return 0; snd_mtxlock(sc->lock); if (go == PCMTRIG_START) { emu_vsetup(ch); emu_vwrite(sc, ch->master); emu_settimer(sc); emu_enatimer(sc, 1); #ifdef EMUDEBUG printf("start [%d bit, %s, %d hz]\n", ch->master->b16 ? 16 : 8, ch->master->stereo ? "stereo" : "mono", ch->master->speed); emu_vdump(sc, ch->master); emu_vdump(sc, ch->slave); #endif } ch->run = (go == PCMTRIG_START) ? 
1 : 0; emu_vtrigger(sc, ch->master, ch->run); snd_mtxunlock(sc->lock); return 0; } static u_int32_t emupchan_getptr(kobj_t obj, void *data) { struct sc_pchinfo *ch = data; struct sc_info *sc = ch->parent; int r; snd_mtxlock(sc->lock); r = emu_vpos(sc, ch->master); snd_mtxunlock(sc->lock); return r; } static struct pcmchan_caps * emupchan_getcaps(kobj_t obj, void *data) { return &emu_playcaps; } static kobj_method_t emupchan_methods[] = { KOBJMETHOD(channel_init, emupchan_init), KOBJMETHOD(channel_free, emupchan_free), KOBJMETHOD(channel_setformat, emupchan_setformat), KOBJMETHOD(channel_setspeed, emupchan_setspeed), KOBJMETHOD(channel_setblocksize, emupchan_setblocksize), KOBJMETHOD(channel_trigger, emupchan_trigger), KOBJMETHOD(channel_getptr, emupchan_getptr), KOBJMETHOD(channel_getcaps, emupchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(emupchan); /* channel interface */ static void * emurchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = devinfo; struct sc_rchinfo *ch; KASSERT(dir == PCMDIR_REC, ("emurchan_init: bad direction")); ch = &sc->rch[sc->rnum]; ch->buffer = b; ch->parent = sc; ch->channel = c; ch->blksz = sc->bufsz / 2; ch->fmt = SND_FORMAT(AFMT_U8, 1, 0); ch->spd = 8000; ch->num = sc->rnum; switch(sc->rnum) { case 0: ch->idxreg = sc->audigy ? 
EMU_A_ADCIDX : EMU_ADCIDX; ch->basereg = EMU_ADCBA; ch->sizereg = EMU_ADCBS; ch->setupreg = EMU_ADCCR; ch->irqmask = EMU_INTE_ADCBUFENABLE; break; case 1: ch->idxreg = EMU_FXIDX; ch->basereg = EMU_FXBA; ch->sizereg = EMU_FXBS; ch->setupreg = EMU_FXWC; ch->irqmask = EMU_INTE_EFXBUFENABLE; break; case 2: ch->idxreg = EMU_MICIDX; ch->basereg = EMU_MICBA; ch->sizereg = EMU_MICBS; ch->setupreg = 0; ch->irqmask = EMU_INTE_MICBUFENABLE; break; } sc->rnum++; if (sndbuf_alloc(ch->buffer, sc->parent_dmat, 0, sc->bufsz) != 0) return NULL; else { snd_mtxlock(sc->lock); emu_wrptr(sc, 0, ch->basereg, sndbuf_getbufaddr(ch->buffer)); emu_wrptr(sc, 0, ch->sizereg, 0); /* off */ snd_mtxunlock(sc->lock); return ch; } } static int emurchan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sc_rchinfo *ch = data; ch->fmt = format; return 0; } static u_int32_t emurchan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct sc_rchinfo *ch = data; if (ch->num == 0) { if (ch->parent->audigy) speed = audigy_adcspeed[audigy_recval(speed)]; else speed = adcspeed[emu_recval(speed)]; } if (ch->num == 1) speed = 48000; if (ch->num == 2) speed = 8000; ch->spd = speed; return ch->spd; } static u_int32_t emurchan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sc_rchinfo *ch = data; struct sc_info *sc = ch->parent; int irqrate, blksz; ch->blksz = blocksize; snd_mtxlock(sc->lock); emu_settimer(sc); irqrate = 48000 / sc->timerinterval; snd_mtxunlock(sc->lock); blksz = (ch->spd * sndbuf_getalign(ch->buffer)) / irqrate; return blocksize; } /* semantic note: must start at beginning of buffer */ static int emurchan_trigger(kobj_t obj, void *data, int go) { struct sc_rchinfo *ch = data; struct sc_info *sc = ch->parent; u_int32_t val, sz; if (!PCMTRIG_COMMON(go)) return 0; switch(sc->bufsz) { case 4096: sz = EMU_RECBS_BUFSIZE_4096; break; case 8192: sz = EMU_RECBS_BUFSIZE_8192; break; case 16384: sz = EMU_RECBS_BUFSIZE_16384; break; case 32768: sz = EMU_RECBS_BUFSIZE_32768; 
break; case 65536: sz = EMU_RECBS_BUFSIZE_65536; break; default: sz = EMU_RECBS_BUFSIZE_4096; } snd_mtxlock(sc->lock); switch(go) { case PCMTRIG_START: ch->run = 1; emu_wrptr(sc, 0, ch->sizereg, sz); if (ch->num == 0) { if (sc->audigy) { val = EMU_A_ADCCR_LCHANENABLE; if (AFMT_CHANNEL(ch->fmt) > 1) val |= EMU_A_ADCCR_RCHANENABLE; val |= audigy_recval(ch->spd); } else { val = EMU_ADCCR_LCHANENABLE; if (AFMT_CHANNEL(ch->fmt) > 1) val |= EMU_ADCCR_RCHANENABLE; val |= emu_recval(ch->spd); } emu_wrptr(sc, 0, ch->setupreg, 0); emu_wrptr(sc, 0, ch->setupreg, val); } val = emu_rd(sc, EMU_INTE, 4); val |= ch->irqmask; emu_wr(sc, EMU_INTE, val, 4); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: ch->run = 0; emu_wrptr(sc, 0, ch->sizereg, 0); if (ch->setupreg) emu_wrptr(sc, 0, ch->setupreg, 0); val = emu_rd(sc, EMU_INTE, 4); val &= ~ch->irqmask; emu_wr(sc, EMU_INTE, val, 4); break; case PCMTRIG_EMLDMAWR: case PCMTRIG_EMLDMARD: default: break; } snd_mtxunlock(sc->lock); return 0; } static u_int32_t emurchan_getptr(kobj_t obj, void *data) { struct sc_rchinfo *ch = data; struct sc_info *sc = ch->parent; int r; snd_mtxlock(sc->lock); r = emu_rdptr(sc, 0, ch->idxreg) & 0x0000ffff; snd_mtxunlock(sc->lock); return r; } static struct pcmchan_caps * emurchan_getcaps(kobj_t obj, void *data) { struct sc_rchinfo *ch = data; return &emu_reccaps[ch->num]; } static kobj_method_t emurchan_methods[] = { KOBJMETHOD(channel_init, emurchan_init), KOBJMETHOD(channel_setformat, emurchan_setformat), KOBJMETHOD(channel_setspeed, emurchan_setspeed), KOBJMETHOD(channel_setblocksize, emurchan_setblocksize), KOBJMETHOD(channel_trigger, emurchan_trigger), KOBJMETHOD(channel_getptr, emurchan_getptr), KOBJMETHOD(channel_getcaps, emurchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(emurchan); static unsigned char emu_mread(struct mpu401 *arg, void *sc, int reg) { unsigned int d; d = emu_rd((struct sc_info *)sc, 0x18 + reg, 1); return d; } static void emu_mwrite(struct mpu401 *arg, void *sc, int reg, unsigned 
char b) { emu_wr((struct sc_info *)sc, 0x18 + reg, b, 1); } static int emu_muninit(struct mpu401 *arg, void *cookie) { struct sc_info *sc = cookie; snd_mtxlock(sc->lock); - sc->mpu_intr = 0; + sc->mpu_intr = NULL; snd_mtxunlock(sc->lock); return 0; } static kobj_method_t emu_mpu_methods[] = { KOBJMETHOD(mpufoi_read, emu_mread), KOBJMETHOD(mpufoi_write, emu_mwrite), KOBJMETHOD(mpufoi_uninit, emu_muninit), KOBJMETHOD_END }; static DEFINE_CLASS(emu_mpu, emu_mpu_methods, 0); static void emu_intr2(void *p) { struct sc_info *sc = (struct sc_info *)p; if (sc->mpu_intr) (sc->mpu_intr)(sc->mpu); } static void emu_midiattach(struct sc_info *sc) { int i; i = emu_rd(sc, EMU_INTE, 4); i |= EMU_INTE_MIDIRXENABLE; emu_wr(sc, EMU_INTE, i, 4); sc->mpu = mpu401_init(&emu_mpu_class, sc, emu_intr2, &sc->mpu_intr); } /* -------------------------------------------------------------------- */ /* The interrupt handler */ static void emu_intr(void *data) { struct sc_info *sc = data; u_int32_t stat, ack, i, x; snd_mtxlock(sc->lock); while (1) { stat = emu_rd(sc, EMU_IPR, 4); if (stat == 0) break; ack = 0; /* process irq */ if (stat & EMU_IPR_INTERVALTIMER) ack |= EMU_IPR_INTERVALTIMER; if (stat & (EMU_IPR_ADCBUFFULL | EMU_IPR_ADCBUFHALFFULL)) ack |= stat & (EMU_IPR_ADCBUFFULL | EMU_IPR_ADCBUFHALFFULL); if (stat & (EMU_IPR_EFXBUFFULL | EMU_IPR_EFXBUFHALFFULL)) ack |= stat & (EMU_IPR_EFXBUFFULL | EMU_IPR_EFXBUFHALFFULL); if (stat & (EMU_IPR_MICBUFFULL | EMU_IPR_MICBUFHALFFULL)) ack |= stat & (EMU_IPR_MICBUFFULL | EMU_IPR_MICBUFHALFFULL); if (stat & EMU_PCIERROR) { ack |= EMU_PCIERROR; device_printf(sc->dev, "pci error\n"); /* we still get an nmi with ecc ram even if we ack this */ } if (stat & EMU_IPR_RATETRCHANGE) { ack |= EMU_IPR_RATETRCHANGE; #ifdef EMUDEBUG device_printf(sc->dev, "sample rate tracker lock status change\n"); #endif } if (stat & EMU_IPR_MIDIRECVBUFE) if (sc->mpu_intr) { (sc->mpu_intr)(sc->mpu); ack |= EMU_IPR_MIDIRECVBUFE | EMU_IPR_MIDITRANSBUFE; } if (stat & ~ack) 
device_printf(sc->dev, "dodgy irq: %x (harmless)\n", stat & ~ack); emu_wr(sc, EMU_IPR, stat, 4); if (ack) { snd_mtxunlock(sc->lock); if (ack & EMU_IPR_INTERVALTIMER) { x = 0; for (i = 0; i < sc->nchans; i++) { if (sc->pch[i].run) { x = 1; chn_intr(sc->pch[i].channel); } } if (x == 0) emu_enatimer(sc, 0); } if (ack & (EMU_IPR_ADCBUFFULL | EMU_IPR_ADCBUFHALFFULL)) { if (sc->rch[0].channel) chn_intr(sc->rch[0].channel); } if (ack & (EMU_IPR_EFXBUFFULL | EMU_IPR_EFXBUFHALFFULL)) { if (sc->rch[1].channel) chn_intr(sc->rch[1].channel); } if (ack & (EMU_IPR_MICBUFFULL | EMU_IPR_MICBUFHALFFULL)) { if (sc->rch[2].channel) chn_intr(sc->rch[2].channel); } snd_mtxlock(sc->lock); } } snd_mtxunlock(sc->lock); } /* -------------------------------------------------------------------- */ static void emu_setmap(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *phys = arg; *phys = error ? 0 : (bus_addr_t)segs->ds_addr; if (bootverbose) { printf("emu: setmap (%lx, %lx), nseg=%d, error=%d\n", (unsigned long)segs->ds_addr, (unsigned long)segs->ds_len, nseg, error); } } static void * emu_malloc(struct sc_info *sc, u_int32_t sz, bus_addr_t *addr, bus_dmamap_t *map) { void *buf; *addr = 0; if (bus_dmamem_alloc(sc->parent_dmat, &buf, BUS_DMA_NOWAIT, map)) return NULL; if (bus_dmamap_load(sc->parent_dmat, *map, buf, sz, emu_setmap, addr, 0) || !*addr) { bus_dmamem_free(sc->parent_dmat, buf, *map); return NULL; } return buf; } static void emu_free(struct sc_info *sc, void *buf, bus_dmamap_t map) { bus_dmamap_unload(sc->parent_dmat, map); bus_dmamem_free(sc->parent_dmat, buf, map); } static void * emu_memalloc(struct sc_info *sc, u_int32_t sz, bus_addr_t *addr) { u_int32_t blksz, start, idx, ofs, tmp, found; struct emu_mem *mem = &sc->mem; struct emu_memblk *blk; void *buf; blksz = sz / EMUPAGESIZE; if (sz > (blksz * EMUPAGESIZE)) blksz++; /* find a free block in the bitmap */ found = 0; start = 1; while (!found && start + blksz < EMUMAXPAGES) { found = 1; for (idx = 
start; idx < start + blksz; idx++) if (mem->bmap[idx >> 3] & (1 << (idx & 7))) found = 0; if (!found) start++; } if (!found) return NULL; blk = malloc(sizeof(*blk), M_DEVBUF, M_NOWAIT); if (blk == NULL) return NULL; buf = emu_malloc(sc, sz, &blk->buf_addr, &blk->buf_map); *addr = blk->buf_addr; if (buf == NULL) { free(blk, M_DEVBUF); return NULL; } blk->buf = buf; blk->pte_start = start; blk->pte_size = blksz; #ifdef EMUDEBUG printf("buf %p, pte_start %d, pte_size %d\n", blk->buf, blk->pte_start, blk->pte_size); #endif ofs = 0; for (idx = start; idx < start + blksz; idx++) { mem->bmap[idx >> 3] |= 1 << (idx & 7); tmp = (uint32_t)(blk->buf_addr + ofs); #ifdef EMUDEBUG printf("pte[%d] -> %x phys, %x virt\n", idx, tmp, ((u_int32_t)buf) + ofs); #endif mem->ptb_pages[idx] = (tmp << 1) | idx; ofs += EMUPAGESIZE; } SLIST_INSERT_HEAD(&mem->blocks, blk, link); return buf; } static int emu_memfree(struct sc_info *sc, void *buf) { u_int32_t idx, tmp; struct emu_mem *mem = &sc->mem; struct emu_memblk *blk, *i; blk = NULL; SLIST_FOREACH(i, &mem->blocks, link) { if (i->buf == buf) blk = i; } if (blk == NULL) return EINVAL; SLIST_REMOVE(&mem->blocks, blk, emu_memblk, link); emu_free(sc, buf, blk->buf_map); tmp = (u_int32_t)(sc->mem.silent_page_addr) << 1; for (idx = blk->pte_start; idx < blk->pte_start + blk->pte_size; idx++) { mem->bmap[idx >> 3] &= ~(1 << (idx & 7)); mem->ptb_pages[idx] = tmp | idx; } free(blk, M_DEVBUF); return 0; } static int emu_memstart(struct sc_info *sc, void *buf) { struct emu_mem *mem = &sc->mem; struct emu_memblk *blk, *i; blk = NULL; SLIST_FOREACH(i, &mem->blocks, link) { if (i->buf == buf) blk = i; } if (blk == NULL) return -EINVAL; return blk->pte_start; } static void emu_addefxop(struct sc_info *sc, int op, int z, int w, int x, int y, u_int32_t *pc) { emu_wrefx(sc, (*pc) * 2, (x << 10) | y); emu_wrefx(sc, (*pc) * 2 + 1, (op << 20) | (z << 10) | w); (*pc)++; } static void audigy_addefxop(struct sc_info *sc, int op, int z, int w, int x, int y, 
u_int32_t *pc) { emu_wrefx(sc, (*pc) * 2, (x << 12) | y); emu_wrefx(sc, (*pc) * 2 + 1, (op << 24) | (z << 12) | w); (*pc)++; } static void audigy_initefx(struct sc_info *sc) { int i; u_int32_t pc = 0; /* skip 0, 0, -1, 0 - NOPs */ for (i = 0; i < 512; i++) audigy_addefxop(sc, 0x0f, 0x0c0, 0x0c0, 0x0cf, 0x0c0, &pc); for (i = 0; i < 512; i++) emu_wrptr(sc, 0, EMU_A_FXGPREGBASE + i, 0x0); pc = 16; /* stop fx processor */ emu_wrptr(sc, 0, EMU_A_DBG, EMU_A_DBG_SINGLE_STEP); /* Audigy 2 (EMU10K2) DSP Registers: FX Bus 0x000-0x00f : 16 registers (?) Input 0x040/0x041 : AC97 Codec (l/r) 0x042/0x043 : ADC, S/PDIF (l/r) 0x044/0x045 : Optical S/PDIF in (l/r) 0x046/0x047 : ? 0x048/0x049 : Line/Mic 2 (l/r) 0x04a/0x04b : RCA S/PDIF (l/r) 0x04c/0x04d : Aux 2 (l/r) Output 0x060/0x061 : Digital Front (l/r) 0x062/0x063 : Digital Center/LFE 0x064/0x065 : AudigyDrive Heaphone (l/r) 0x066/0x067 : Digital Rear (l/r) 0x068/0x069 : Analog Front (l/r) 0x06a/0x06b : Analog Center/LFE 0x06c/0x06d : ? 0x06e/0x06f : Analog Rear (l/r) 0x070/0x071 : AC97 Output (l/r) 0x072/0x073 : ? 0x074/0x075 : ? 0x076/0x077 : ADC Recording Buffer (l/r) Constants 0x0c0 - 0x0c4 = 0 - 4 0x0c5 = 0x8, 0x0c6 = 0x10, 0x0c7 = 0x20 0x0c8 = 0x100, 0x0c9 = 0x10000, 0x0ca = 0x80000 0x0cb = 0x10000000, 0x0cc = 0x20000000, 0x0cd = 0x40000000 0x0ce = 0x80000000, 0x0cf = 0x7fffffff, 0x0d0 = 0xffffffff 0x0d1 = 0xfffffffe, 0x0d2 = 0xc0000000, 0x0d3 = 0x41fbbcdc 0x0d4 = 0x5a7ef9db, 0x0d5 = 0x00100000, 0x0dc = 0x00000001 (?) Temporary Values 0x0d6 : Accumulator (?) 
	   0x0d7 : Condition Register
	   0x0d8 : Noise source
	   0x0d9 : Noise source

	   Tank Memory Data Registers 0x200 - 0x2ff
	   Tank Memory Address Registers 0x300 - 0x3ff
	   General Purpose Registers 0x400 - 0x5ff
	*/

	/*
	 * Each audigy_addefxop() below emits one Audigy DSP instruction and
	 * advances the shared program counter "pc".
	 */

	/* AC97Output[l/r] = FXBus PCM[l/r] */
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_AC97_L),
	    A_C_00000000, A_C_00000000, A_FXBUS(FXBUS_PCM_LEFT), &pc);
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_AC97_R),
	    A_C_00000000, A_C_00000000, A_FXBUS(FXBUS_PCM_RIGHT), &pc);

	/* GPR[0/1] = RCA S/PDIF[l/r] -- Master volume */
	audigy_addefxop(sc, iACC3, A_GPR(0),
	    A_C_00000000, A_C_00000000, A_EXTIN(EXTIN_COAX_SPDIF_L), &pc);
	audigy_addefxop(sc, iACC3, A_GPR(1),
	    A_C_00000000, A_C_00000000, A_EXTIN(EXTIN_COAX_SPDIF_R), &pc);

	/* GPR[2] = GPR[0] (Left) / 2 + GPR[1] (Right) / 2 -- Central volume */
	audigy_addefxop(sc, iINTERP, A_GPR(2),
	    A_GPR(1), A_C_40000000, A_GPR(0), &pc);

	/* Headphones[l/r] = GPR[0/1] */
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_HEADPHONE_L),
	    A_C_00000000, A_C_00000000, A_GPR(0), &pc);
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_HEADPHONE_R),
	    A_C_00000000, A_C_00000000, A_GPR(1), &pc);

	/* Analog Front[l/r] = GPR[0/1] */
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_AFRONT_L),
	    A_C_00000000, A_C_00000000, A_GPR(0), &pc);
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_AFRONT_R),
	    A_C_00000000, A_C_00000000, A_GPR(1), &pc);

	/* Digital Front[l/r] = GPR[0/1] */
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_FRONT_L),
	    A_C_00000000, A_C_00000000, A_GPR(0), &pc);
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_FRONT_R),
	    A_C_00000000, A_C_00000000, A_GPR(1), &pc);

	/* Center and Subwoofer configuration */
	/* Analog Center = GPR[0] + GPR[2] */
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_ACENTER),
	    A_C_00000000, A_GPR(0), A_GPR(2), &pc);
	/* Analog Sub = GPR[1] + GPR[2] */
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_ALFE),
	    A_C_00000000, A_GPR(1), A_GPR(2), &pc);
	/* Digital Center = GPR[0] + GPR[2] */
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_CENTER),
	    A_C_00000000, A_GPR(0), A_GPR(2), &pc);
	/* Digital Sub = GPR[1] + GPR[2] */
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_LFE),
	    A_C_00000000, A_GPR(1), A_GPR(2), &pc);

#if 0
	/* Analog Rear[l/r] = (GPR[0/1] * RearVolume[l/r]) >> 31 */
	/* RearVolume = GPR[0x10/0x11] (Will this ever be implemented?) */
	audigy_addefxop(sc, iMAC0, A_EXTOUT(A_EXTOUT_AREAR_L),
	    A_C_00000000, A_GPR(16), A_GPR(0), &pc);
	audigy_addefxop(sc, iMAC0, A_EXTOUT(A_EXTOUT_AREAR_R),
	    A_C_00000000, A_GPR(17), A_GPR(1), &pc);
	/* Digital Rear[l/r] = (GPR[0/1] * RearVolume[l/r]) >> 31 */
	/* RearVolume = GPR[0x10/0x11] (Will this ever be implemented?) */
	audigy_addefxop(sc, iMAC0, A_EXTOUT(A_EXTOUT_REAR_L),
	    A_C_00000000, A_GPR(16), A_GPR(0), &pc);
	audigy_addefxop(sc, iMAC0, A_EXTOUT(A_EXTOUT_REAR_R),
	    A_C_00000000, A_GPR(17), A_GPR(1), &pc);
#else
	/* XXX This is just a copy to the channel, since we do not have
	 * a patch manager, it is useful to have another output enabled.
	 */
	/* Analog Rear[l/r] = GPR[0/1] */
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_AREAR_L),
	    A_C_00000000, A_C_00000000, A_GPR(0), &pc);
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_AREAR_R),
	    A_C_00000000, A_C_00000000, A_GPR(1), &pc);
	/* Digital Rear[l/r] = GPR[0/1] */
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_REAR_L),
	    A_C_00000000, A_C_00000000, A_GPR(0), &pc);
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_REAR_R),
	    A_C_00000000, A_C_00000000, A_GPR(1), &pc);
#endif

	/* ADC Recording buffer[l/r] = AC97Input[l/r] */
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_ADC_CAP_L),
	    A_C_00000000, A_C_00000000, A_EXTIN(A_EXTIN_AC97_L), &pc);
	audigy_addefxop(sc, iACC3, A_EXTOUT(A_EXTOUT_ADC_CAP_R),
	    A_C_00000000, A_C_00000000, A_EXTIN(A_EXTIN_AC97_R), &pc);

	/* resume normal operations */
	emu_wrptr(sc, 0, EMU_A_DBG, 0);
}

/*
 * emu_initefx: load the default FX-8010 DSP routing program on EMU10K1
 * (SB Live!) cards.  All 512 instruction slots are first filled with
 * "acc3 0,0,0,0" NOPs and the 256 general-purpose registers are zeroed;
 * the routing program proper is then emitted starting at instruction
 * slot 16 (pc = 16).
 */
static void
emu_initefx(struct sc_info *sc)
{
	int i;
	u_int32_t pc = 16;

	/* acc3 0,0,0,0 - NOPs */
	for (i = 0; i < 512; i++) {
		emu_wrefx(sc, i * 2, 0x10040);
		emu_wrefx(sc, i * 2 + 1, 0x610040);
	}

	for (i = 0; i < 256; i++)
		emu_wrptr(sc, 0, EMU_FXGPREGBASE + i, 0);

	/*
	   FX-8010 DSP Registers:
	   FX Bus
	     0x000-0x00f : 16 registers
	   Input
	     0x010/0x011 : AC97 Codec (l/r)
	     0x012/0x013 : ADC, S/PDIF (l/r)
	     0x014/0x015 : Mic(left), Zoom (l/r)
	     0x016/0x017 : TOS link in (l/r)
	     0x018/0x019 : Line/Mic 1 (l/r)
	     0x01a/0x01b : COAX S/PDIF (l/r)
	     0x01c/0x01d : Line/Mic 2 (l/r)
	   Output
	     0x020/0x021 : AC97 Output (l/r)
	     0x022/0x023 : TOS link out (l/r)
	     0x024/0x025 : Center/LFE
	     0x026/0x027 : LiveDrive Headphone (l/r)
	     0x028/0x029 : Rear Channel (l/r)
	     0x02a/0x02b : ADC Recording Buffer (l/r)
	     0x02c : Mic Recording Buffer
	     0x031/0x032 : Analog Center/LFE
	   Constants
	     0x040 - 0x044 = 0 - 4
	     0x045 = 0x8, 0x046 = 0x10, 0x047 = 0x20
	     0x048 = 0x100, 0x049 = 0x10000, 0x04a = 0x80000
	     0x04b = 0x10000000, 0x04c = 0x20000000, 0x04d = 0x40000000
	     0x04e = 0x80000000, 0x04f = 0x7fffffff, 0x050 = 0xffffffff
	     0x051 = 0xfffffffe, 0x052 = 0xc0000000, 0x053 = 0x41fbbcdc
	     0x054 = 0x5a7ef9db, 0x055 = 0x00100000
	   Temporary Values
	     0x056 : Accumulator
	     0x057 : Condition Register
	     0x058 : Noise source
	     0x059 : Noise source
	     0x05a : IRQ Register
	     0x05b : TRAM Delay Base Address Count
	   General Purpose Registers 0x100 - 0x1ff
	   Tank Memory Data Registers 0x200 - 0x2ff
	   Tank Memory Address Registers 0x300 - 0x3ff
	 */

	/* Routing - this will be configurable in later version */

	/* GPR[0/1] = FX * 4 + SPDIF-in */
	emu_addefxop(sc, iMACINT0, GPR(0), EXTIN(EXTIN_SPDIF_CD_L),
	    FXBUS(FXBUS_PCM_LEFT), C_00000004, &pc);
	emu_addefxop(sc, iMACINT0, GPR(1), EXTIN(EXTIN_SPDIF_CD_R),
	    FXBUS(FXBUS_PCM_RIGHT), C_00000004, &pc);

	/* GPR[0/1] += APS-input (only mixed in on APS cards) */
	emu_addefxop(sc, iACC3, GPR(0), GPR(0), C_00000000,
	    sc->APS ? EXTIN(EXTIN_TOSLINK_L) : C_00000000, &pc);
	emu_addefxop(sc, iACC3, GPR(1), GPR(1), C_00000000,
	    sc->APS ? EXTIN(EXTIN_TOSLINK_R) : C_00000000, &pc);

	/* FrontOut (AC97) = GPR[0/1] */
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_AC97_L),
	    C_00000000, C_00000000, GPR(0), &pc);
	/*
	 * NOTE(review): the right channel uses C_00000001 as the X operand
	 * while the left channel uses C_00000000, i.e. one LSB is added to
	 * the right output.  Possibly a typo rather than intentional dither
	 * -- verify against the hardware docs before changing.
	 */
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_AC97_R),
	    C_00000000, C_00000001, GPR(1), &pc);

	/* GPR[2] = GPR[0] (Left) / 2 + GPR[1] (Right) / 2 -- Central volume */
	emu_addefxop(sc, iINTERP, GPR(2), GPR(1), C_40000000, GPR(0), &pc);

#if 0
	/* RearOut = (GPR[0/1] * RearVolume) >> 31 */
	/* RearVolume = GPR[0x10/0x11] */
	emu_addefxop(sc, iMAC0, EXTOUT(EXTOUT_REAR_L),
	    C_00000000, GPR(16), GPR(0), &pc);
	emu_addefxop(sc, iMAC0, EXTOUT(EXTOUT_REAR_R),
	    C_00000000, GPR(17), GPR(1), &pc);
#else
	/* XXX This is just a copy to the channel, since we do not have
	 * a patch manager, it is useful to have another output enabled.
	 */
	/* Rear[l/r] = GPR[0/1] */
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_REAR_L),
	    C_00000000, C_00000000, GPR(0), &pc);
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_REAR_R),
	    C_00000000, C_00000000, GPR(1), &pc);
#endif

	/* TOS out[l/r] = GPR[0/1] */
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_TOSLINK_L),
	    C_00000000, C_00000000, GPR(0), &pc);
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_TOSLINK_R),
	    C_00000000, C_00000000, GPR(1), &pc);

	/* Center and Subwoofer configuration */
	/* Analog Center = GPR[0] + GPR[2] */
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_ACENTER),
	    C_00000000, GPR(0), GPR(2), &pc);
	/* Analog Sub = GPR[1] + GPR[2] */
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_ALFE),
	    C_00000000, GPR(1), GPR(2), &pc);
	/* Digital Center = GPR[0] + GPR[2] */
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_AC97_CENTER),
	    C_00000000, GPR(0), GPR(2), &pc);
	/* Digital Sub = GPR[1] + GPR[2] */
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_AC97_LFE),
	    C_00000000, GPR(1), GPR(2), &pc);

	/* Headphones[l/r] = GPR[0/1] */
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_HEADPHONE_L),
	    C_00000000, C_00000000, GPR(0), &pc);
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_HEADPHONE_R),
	    C_00000000, C_00000000, GPR(1), &pc);

	/* ADC Recording buffer[l/r] = AC97Input[l/r] */
	emu_addefxop(sc, iACC3,
	    EXTOUT(EXTOUT_ADC_CAP_L), C_00000000, C_00000000,
	    EXTIN(EXTIN_AC97_L), &pc);
	emu_addefxop(sc, iACC3, EXTOUT(EXTOUT_ADC_CAP_R),
	    C_00000000, C_00000000, EXTIN(EXTIN_AC97_R), &pc);

	/* resume normal operations */
	emu_wrptr(sc, 0, EMU_DBG, 0);
}

/* Probe and attach the card */

/*
 * emu_init: bring the chip into a known state: mute/lock it, reset the
 * recording buffers, mask channel interrupts, reset the 64 voice
 * channels, program the S/PDIF channel-status words, load the DSP
 * program, build the silent page table, and finally enable audio.
 * Returns 0 on success, -1 if a DMA buffer allocation fails.
 */
static int
emu_init(struct sc_info *sc)
{
	u_int32_t spcs, ch, tmp, i;

	if (sc->audigy) {
		/* enable additional AC97 slots */
		emu_wrptr(sc, 0, EMU_AC97SLOT,
		    EMU_AC97SLOT_CENTER | EMU_AC97SLOT_LFE);
	}

	/* disable audio and lock cache */
	emu_wr(sc, EMU_HCFG,
	    EMU_HCFG_LOCKSOUNDCACHE | EMU_HCFG_LOCKTANKCACHE_MASK |
	    EMU_HCFG_MUTEBUTTONENABLE, 4);

	/* reset recording buffers */
	emu_wrptr(sc, 0, EMU_MICBS, EMU_RECBS_BUFSIZE_NONE);
	emu_wrptr(sc, 0, EMU_MICBA, 0);
	emu_wrptr(sc, 0, EMU_FXBS, EMU_RECBS_BUFSIZE_NONE);
	emu_wrptr(sc, 0, EMU_FXBA, 0);
	emu_wrptr(sc, 0, EMU_ADCBS, EMU_RECBS_BUFSIZE_NONE);
	emu_wrptr(sc, 0, EMU_ADCBA, 0);

	/* disable channel interrupt */
	emu_wr(sc, EMU_INTE,
	    EMU_INTE_INTERTIMERENB | EMU_INTE_SAMPLERATER |
	    EMU_INTE_PCIERRENABLE, 4);
	emu_wrptr(sc, 0, EMU_CLIEL, 0);
	emu_wrptr(sc, 0, EMU_CLIEH, 0);
	emu_wrptr(sc, 0, EMU_SOLEL, 0);
	emu_wrptr(sc, 0, EMU_SOLEH, 0);

	/* wonder what these do */
	if (sc->audigy) {
		emu_wrptr(sc, 0, EMU_SPBYPASS, 0xf00);
		emu_wrptr(sc, 0, EMU_AC97SLOT, 0x3);
	}

	/* init envelope engine */
	for (ch = 0; ch < NUM_G; ch++) {
		emu_wrptr(sc, ch, EMU_CHAN_DCYSUSV, ENV_OFF);
		emu_wrptr(sc, ch, EMU_CHAN_IP, 0);
		emu_wrptr(sc, ch, EMU_CHAN_VTFT, 0xffff);
		emu_wrptr(sc, ch, EMU_CHAN_CVCF, 0xffff);
		emu_wrptr(sc, ch, EMU_CHAN_PTRX, 0);
		emu_wrptr(sc, ch, EMU_CHAN_CPF, 0);
		emu_wrptr(sc, ch, EMU_CHAN_CCR, 0);
		emu_wrptr(sc, ch, EMU_CHAN_PSST, 0);
		emu_wrptr(sc, ch, EMU_CHAN_DSL, 0x10);
		emu_wrptr(sc, ch, EMU_CHAN_CCCA, 0);
		emu_wrptr(sc, ch, EMU_CHAN_Z1, 0);
		emu_wrptr(sc, ch, EMU_CHAN_Z2, 0);
		emu_wrptr(sc, ch, EMU_CHAN_FXRT, 0xd01c0000);
		emu_wrptr(sc, ch, EMU_CHAN_ATKHLDM, 0);
		emu_wrptr(sc, ch, EMU_CHAN_DCYSUSM, 0);
		emu_wrptr(sc, ch, EMU_CHAN_IFATN, 0xffff);
		emu_wrptr(sc, ch, EMU_CHAN_PEFE, 0);
		emu_wrptr(sc, ch, EMU_CHAN_FMMOD, 0);
		emu_wrptr(sc, ch, EMU_CHAN_TREMFRQ, 24);	/* 1 Hz */
		emu_wrptr(sc, ch, EMU_CHAN_FM2FRQ2, 24);	/* 1 Hz */
		emu_wrptr(sc, ch, EMU_CHAN_TEMPENV, 0);

		/*** these are last so OFF prevents writing ***/
		emu_wrptr(sc, ch, EMU_CHAN_LFOVAL2, 0);
		emu_wrptr(sc, ch, EMU_CHAN_LFOVAL1, 0);
		emu_wrptr(sc, ch, EMU_CHAN_ATKHLDV, 0);
		emu_wrptr(sc, ch, EMU_CHAN_ENVVOL, 0);
		emu_wrptr(sc, ch, EMU_CHAN_ENVVAL, 0);

		if (sc->audigy) {
			/* audigy cards need this to initialize correctly */
			emu_wrptr(sc, ch, 0x4c, 0);
			emu_wrptr(sc, ch, 0x4d, 0);
			emu_wrptr(sc, ch, 0x4e, 0);
			emu_wrptr(sc, ch, 0x4f, 0);

			/* set default routing */
			emu_wrptr(sc, ch, EMU_A_CHAN_FXRT1, 0x03020100);
			emu_wrptr(sc, ch, EMU_A_CHAN_FXRT2, 0x3f3f3f3f);
			emu_wrptr(sc, ch, EMU_A_CHAN_SENDAMOUNTS, 0);
		}

		/* reset the software-side voice bookkeeping */
		sc->voice[ch].vnum = ch;
		sc->voice[ch].slave = NULL;
		sc->voice[ch].busy = 0;
		sc->voice[ch].ismaster = 0;
		sc->voice[ch].running = 0;
		sc->voice[ch].b16 = 0;
		sc->voice[ch].stereo = 0;
		sc->voice[ch].speed = 0;
		sc->voice[ch].start = 0;
		sc->voice[ch].end = 0;
		sc->voice[ch].channel = NULL;
	}
	sc->pnum = sc->rnum = 0;

	/*
	 * Init to 0x02109204 :
	 * Clock accuracy = 0 (1000ppm)
	 * Sample Rate = 2 (48kHz)
	 * Audio Channel = 1 (Left of 2)
	 * Source Number = 0 (Unspecified)
	 * Generation Status = 1 (Original for Cat Code 12)
	 * Cat Code = 12 (Digital Signal Mixer)
	 * Mode = 0 (Mode 0)
	 * Emphasis = 0 (None)
	 * CP = 1 (Copyright unasserted)
	 * AN = 0 (Audio data)
	 * P = 0 (Consumer)
	 */
	spcs = EMU_SPCS_CLKACCY_1000PPM | EMU_SPCS_SAMPLERATE_48 |
	    EMU_SPCS_CHANNELNUM_LEFT | EMU_SPCS_SOURCENUM_UNSPEC |
	    EMU_SPCS_GENERATIONSTATUS | 0x00001200 | 0x00000000 |
	    EMU_SPCS_EMPHASIS_NONE | EMU_SPCS_COPYRIGHT;
	emu_wrptr(sc, 0, EMU_SPCS0, spcs);
	emu_wrptr(sc, 0, EMU_SPCS1, spcs);
	emu_wrptr(sc, 0, EMU_SPCS2, spcs);

	if (!sc->audigy)
		emu_initefx(sc);
	else if (sc->audigy2) {	/* Audigy 2 */
		/* from ALSA initialization code: */
		/* Hack for Alice3 to work independent of haP16V driver */
		/* NOTE(review): this "tmp" shadows the function-scope "tmp" */
		u_int32_t tmp;

		/* Setup SRCMulti_I2S SamplingRate */
		tmp = emu_rdptr(sc, 0, EMU_A_SPDIF_SAMPLERATE) & 0xfffff1ff;
		emu_wrptr(sc, 0, EMU_A_SPDIF_SAMPLERATE, tmp | 0x400);

		/* Setup SRCSel (Enable SPDIF, I2S SRCMulti) */
		emu_wr(sc, 0x20, 0x00600000, 4);
		emu_wr(sc, 0x24, 0x00000014, 4);

		/* Setup SRCMulti Input Audio Enable */
		emu_wr(sc, 0x20, 0x006e0000, 4);
		emu_wr(sc, 0x24, 0xff00ff00, 4);
	}

	SLIST_INIT(&sc->mem.blocks);
	sc->mem.ptb_pages = emu_malloc(sc, EMUMAXPAGES * sizeof(u_int32_t),
	    &sc->mem.ptb_pages_addr, &sc->mem.ptb_map);
	if (sc->mem.ptb_pages == NULL)
		return -1;

	sc->mem.silent_page = emu_malloc(sc, EMUPAGESIZE,
	    &sc->mem.silent_page_addr, &sc->mem.silent_map);
	if (sc->mem.silent_page == NULL) {
		emu_free(sc, sc->mem.ptb_pages, sc->mem.ptb_map);
		return -1;
	}

	/* Clear page with silence & setup all pointers to this page */
	bzero(sc->mem.silent_page, EMUPAGESIZE);
	/*
	 * Each page-table entry is (physical address << 1) | page index;
	 * point every entry at the silent page for now.
	 * NOTE(review): the cast truncates silent_page_addr to 32 bits --
	 * presumably safe because the DMA tag restricts buffers to the
	 * low 2GB; confirm against the tag created in emu_pci_attach().
	 */
	tmp = (u_int32_t)(sc->mem.silent_page_addr) << 1;
	for (i = 0; i < EMUMAXPAGES; i++)
		sc->mem.ptb_pages[i] = tmp | i;

	emu_wrptr(sc, 0, EMU_PTB, (sc->mem.ptb_pages_addr));
	emu_wrptr(sc, 0, EMU_TCB, 0);	/* taken from original driver */
	emu_wrptr(sc, 0, EMU_TCBS, 0);	/* taken from original driver */

	for (ch = 0; ch < NUM_G; ch++) {
		emu_wrptr(sc, ch, EMU_CHAN_MAPA,
		    tmp | EMU_CHAN_MAP_PTI_MASK);
		emu_wrptr(sc, ch, EMU_CHAN_MAPB,
		    tmp | EMU_CHAN_MAP_PTI_MASK);
	}
	/* emu_memalloc(sc, EMUPAGESIZE); */

	/*
	 * Hokay, now enable the AUD bit
	 *
	 * Audigy
	 *   Enable Audio = 0 (enabled after fx processor initialization)
	 *   Mute Disable Audio = 0
	 *   Joystick = 1
	 *
	 * Audigy 2
	 *   Enable Audio = 1
	 *   Mute Disable Audio = 0
	 *   Joystick = 1
	 *   GP S/PDIF AC3 Enable = 1
	 *   CD S/PDIF AC3 Enable = 1
	 *
	 * EMU10K1
	 *   Enable Audio = 1
	 *   Mute Disable Audio = 0
	 *   Lock Tank Memory = 1
	 *   Lock Sound Memory = 0
	 *   Auto Mute = 1
	 */
	if (sc->audigy) {
		tmp = EMU_HCFG_AUTOMUTE | EMU_HCFG_JOYENABLE;
		if (sc->audigy2)	/* Audigy 2 */
			tmp = EMU_HCFG_AUDIOENABLE |
			    EMU_HCFG_AC3ENABLE_CDSPDIF |
			    EMU_HCFG_AC3ENABLE_GPSPDIF;
		emu_wr(sc, EMU_HCFG, tmp, 4);

		audigy_initefx(sc);

		/* from ALSA initialization code: */

		/* enable audio and disable both audio/digital outputs */
		emu_wr(sc, EMU_HCFG,
		    emu_rd(sc, EMU_HCFG, 4) | EMU_HCFG_AUDIOENABLE, 4);
		emu_wr(sc, EMU_A_IOCFG,
		    emu_rd(sc, EMU_A_IOCFG, 4) & ~EMU_A_IOCFG_GPOUT_AD, 4);
		if (sc->audigy2) {	/* Audigy 2 */
			/* Unmute Analog.
			 * Set GPO6 to 1 for Apollo. This has to be done after
			 * init Alice3 I2SOut beyond 48kHz.
			 * So, sequence is important.
			 */
			emu_wr(sc, EMU_A_IOCFG,
			    emu_rd(sc, EMU_A_IOCFG, 4) | EMU_A_IOCFG_GPOUT_A,
			    4);
		}
	} else {
		/* EMU10K1 initialization code */
		tmp = EMU_HCFG_AUDIOENABLE | EMU_HCFG_LOCKTANKCACHE_MASK |
		    EMU_HCFG_AUTOMUTE;
		if (sc->rev >= 6)
			tmp |= EMU_HCFG_JOYENABLE;
		emu_wr(sc, EMU_HCFG, tmp, 4);

		/* TOSLink detection: toggle GPOUT1 and see if the input
		 * pins follow */
		sc->tos_link = 0;
		tmp = emu_rd(sc, EMU_HCFG, 4);
		if (tmp & (EMU_HCFG_GPINPUT0 | EMU_HCFG_GPINPUT1)) {
			emu_wr(sc, EMU_HCFG, tmp | EMU_HCFG_GPOUT1, 4);
			DELAY(50);
			if (tmp !=
			    (emu_rd(sc, EMU_HCFG, 4) & ~EMU_HCFG_GPOUT1)) {
				sc->tos_link = 1;
				emu_wr(sc, EMU_HCFG, tmp, 4);
			}
		}
	}

	return 0;
}

/*
 * emu_uninit: undo emu_init().  Masks interrupts, shuts the voice
 * channels down, stops the FX processor (Audigy), mutes/locks the chip,
 * clears the buffer/page-table registers and frees the DMA memory
 * allocated in emu_init().  Always returns 0.
 */
static int
emu_uninit(struct sc_info *sc)
{
	u_int32_t ch;

	emu_wr(sc, EMU_INTE, 0, 4);
	for (ch = 0; ch < NUM_G; ch++)
		emu_wrptr(sc, ch, EMU_CHAN_DCYSUSV, ENV_OFF);
	for (ch = 0; ch < NUM_G; ch++) {
		emu_wrptr(sc, ch, EMU_CHAN_VTFT, 0);
		emu_wrptr(sc, ch, EMU_CHAN_CVCF, 0);
		emu_wrptr(sc, ch, EMU_CHAN_PTRX, 0);
		emu_wrptr(sc, ch, EMU_CHAN_CPF, 0);
	}

	if (sc->audigy) {	/* stop fx processor */
		emu_wrptr(sc, 0, EMU_A_DBG, EMU_A_DBG_SINGLE_STEP);
	}

	/* disable audio and lock cache */
	emu_wr(sc, EMU_HCFG,
	    EMU_HCFG_LOCKSOUNDCACHE | EMU_HCFG_LOCKTANKCACHE_MASK |
	    EMU_HCFG_MUTEBUTTONENABLE, 4);

	emu_wrptr(sc, 0, EMU_PTB, 0);
	/* reset recording buffers */
	emu_wrptr(sc, 0, EMU_MICBS, EMU_RECBS_BUFSIZE_NONE);
	emu_wrptr(sc, 0, EMU_MICBA, 0);
	emu_wrptr(sc, 0, EMU_FXBS, EMU_RECBS_BUFSIZE_NONE);
	emu_wrptr(sc, 0, EMU_FXBA, 0);
	emu_wrptr(sc, 0, EMU_FXWC, 0);
	emu_wrptr(sc, 0, EMU_ADCBS, EMU_RECBS_BUFSIZE_NONE);
	emu_wrptr(sc, 0, EMU_ADCBA, 0);
	emu_wrptr(sc, 0, EMU_TCB, 0);
	emu_wrptr(sc, 0, EMU_TCBS, 0);

	/* disable channel interrupt */
	emu_wrptr(sc, 0, EMU_CLIEL, 0);
	emu_wrptr(sc, 0, EMU_CLIEH, 0);
	emu_wrptr(sc, 0, EMU_SOLEL, 0);
	emu_wrptr(sc, 0, EMU_SOLEH, 0);

	/* sanity check: all emu_memalloc() blocks should be freed by now */
	if (!SLIST_EMPTY(&sc->mem.blocks))
		device_printf(sc->dev, "warning: memblock list not empty\n");
	emu_free(sc, sc->mem.ptb_pages, sc->mem.ptb_map);
	emu_free(sc, sc->mem.silent_page, sc->mem.silent_map);

	if (sc->mpu)
		mpu401_uninit(sc->mpu);
	return 0;
}

/*
 * emu_pci_probe: match the supported PCI device IDs and set the device
 * description.  An EMU10K2 with revision 0x04 is reported as Audigy 2.
 */
static int
emu_pci_probe(device_t dev)
{
	char *s = NULL;

	switch (pci_get_devid(dev)) {
	case EMU10K1_PCI_ID:
		s = "Creative EMU10K1";
		break;
	case EMU10K2_PCI_ID:
		if (pci_get_revid(dev) == 0x04)
			s = "Creative Audigy 2 (EMU10K2)";
		else
			s = "Creative Audigy (EMU10K2)";
		break;
	case EMU10K3_PCI_ID:
		s = "Creative Audigy 2 (EMU10K3)";
		break;
	default:
		return ENXIO;
	}

	device_set_desc(dev, s);
	return BUS_PROBE_LOW_PRIORITY;
}

/*
 * emu_pci_attach: allocate the softc, map the I/O register BAR, create
 * the DMA tag (buffers restricted to the low 2GB), initialize the chip,
 * attach the AC97 codec/mixer and MIDI, hook up the interrupt and
 * register the pcm channels.  On any failure everything acquired so far
 * is released and ENXIO is returned.
 */
static int
emu_pci_attach(device_t dev)
{
	struct ac97_info *codec = NULL;
	struct sc_info *sc;
	int i, gotmic;
	char status[SND_STATUSLEN];

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
	sc->lock = snd_mtxcreate(device_get_nameunit(dev),
	    "snd_emu10k1 softc");
	sc->dev = dev;
	sc->type = pci_get_devid(dev);
	sc->rev = pci_get_revid(dev);
	sc->audigy = sc->type == EMU10K2_PCI_ID || sc->type == EMU10K3_PCI_ID;
	sc->audigy2 = (sc->audigy && sc->rev == 0x04);
	sc->nchans = sc->audigy ? 8 : 4;
	sc->addrmask = sc->audigy ? EMU_A_PTR_ADDR_MASK : EMU_PTR_ADDR_MASK;

	pci_enable_busmaster(dev);

	i = PCIR_BAR(0);
	sc->reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &i, RF_ACTIVE);
	if (sc->reg == NULL) {
		device_printf(dev, "unable to map register space\n");
		goto bad;
	}
	sc->st = rman_get_bustag(sc->reg);
	sc->sh = rman_get_bushandle(sc->reg);

	sc->bufsz = pcm_getbuffersize(dev, 4096, EMU_DEFAULT_BUFSZ, 65536);

	if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev),
	    /*alignment*/2, /*boundary*/0,
	    /*lowaddr*/(1U << 31) - 1, /* can only access 0-2gb */
	    /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff,
	    /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant,
	    &sc->parent_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	if (emu_init(sc) == -1) {
		device_printf(dev, "unable to initialize the card\n");
		goto bad;
	}

	codec = AC97_CREATE(dev, sc, emu_ac97);
	if (codec == NULL)
		goto bad;
	/* a third record channel is added when the codec has a mic input */
	gotmic = (ac97_getcaps(codec) & AC97_CAP_MICCHANNEL) ?
	    1 : 0;
	if (mixer_init(dev, ac97_getmixerclass(), codec) == -1)
		goto bad;

	emu_midiattach(sc);

	i = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i,
	    RF_ACTIVE | RF_SHAREABLE);
	if (!sc->irq ||
	    snd_setup_intr(dev, sc->irq, INTR_MPSAFE, emu_intr, sc,
	    &sc->ih)) {
		device_printf(dev, "unable to map interrupt\n");
		goto bad;
	}

	snprintf(status, SND_STATUSLEN, "at io 0x%jx irq %jd %s",
	    rman_get_start(sc->reg), rman_get_start(sc->irq),
	    PCM_KLDSTRING(snd_emu10k1));

	if (pcm_register(dev, sc, sc->nchans, gotmic ? 3 : 2))
		goto bad;
	for (i = 0; i < sc->nchans; i++)
		pcm_addchan(dev, PCMDIR_PLAY, &emupchan_class, sc);
	for (i = 0; i < (gotmic ? 3 : 2); i++)
		pcm_addchan(dev, PCMDIR_REC, &emurchan_class, sc);
	pcm_setstatus(dev, status);

	return 0;

bad:
	/* release only what was actually acquired before the failure */
	if (codec)
		ac97_destroy(codec);
	if (sc->reg)
		bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0),
		    sc->reg);
	if (sc->ih)
		bus_teardown_intr(dev, sc->irq, sc->ih);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
	if (sc->parent_dmat)
		bus_dma_tag_destroy(sc->parent_dmat);
	if (sc->lock)
		snd_mtxfree(sc->lock);
	free(sc, M_DEVBUF);
	return ENXIO;
}

/*
 * emu_pci_detach: unregister the pcm device, shut the chip down and
 * release all resources acquired in emu_pci_attach().
 */
static int
emu_pci_detach(device_t dev)
{
	int r;
	struct sc_info *sc;

	r = pcm_unregister(dev);
	if (r)
		return r;

	sc = pcm_getdevinfo(dev);
	/* shutdown chip */
	emu_uninit(sc);

	bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), sc->reg);
	bus_teardown_intr(dev, sc->irq, sc->ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq);
	bus_dma_tag_destroy(sc->parent_dmat);
	snd_mtxfree(sc->lock);
	free(sc, M_DEVBUF);

	return 0;
}

/* add suspend, resume */

/* newbus glue for the pcm driver */
static device_method_t emu_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		emu_pci_probe),
	DEVMETHOD(device_attach,	emu_pci_attach),
	DEVMETHOD(device_detach,	emu_pci_detach),
	DEVMETHOD_END
};

static driver_t emu_driver = {
	"pcm",
	emu_methods,
	PCM_SOFTC_SIZE,
};

DRIVER_MODULE(snd_emu10k1, pci, emu_driver, pcm_devclass, NULL, NULL);
MODULE_DEPEND(snd_emu10k1, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER);
MODULE_VERSION(snd_emu10k1, 1);
MODULE_DEPEND(snd_emu10k1, midi, 1, 1, 1);

/* dummy driver to silence the joystick device */
static int
emujoy_pci_probe(device_t dev)
{
	char *s = NULL;

	switch (pci_get_devid(dev)) {
	case 0x70021102:
		s = "Creative EMU10K1 Joystick";
		device_quiet(dev);
		break;
	case 0x70031102:
		s = "Creative EMU10K2 Joystick";
		device_quiet(dev);
		break;
	}

	if (s)
		device_set_desc(dev, s);
	/* claim the device on match so no other driver reports it */
	return s ? -1000 : ENXIO;
}

/* attach is a no-op: the device is claimed only to keep it quiet */
static int
emujoy_pci_attach(device_t dev)
{
	return 0;
}

static int
emujoy_pci_detach(device_t dev)
{
	return 0;
}

static device_method_t emujoy_methods[] = {
	DEVMETHOD(device_probe,		emujoy_pci_probe),
	DEVMETHOD(device_attach,	emujoy_pci_attach),
	DEVMETHOD(device_detach,	emujoy_pci_detach),
	DEVMETHOD_END
};

static driver_t emujoy_driver = {
	"emujoy",
	emujoy_methods,
	1	/* no softc */
};

static devclass_t emujoy_devclass;

DRIVER_MODULE(emujoy, pci, emujoy_driver, emujoy_devclass, NULL, NULL);
Index: head/sys/dev/sound/pci/emu10kx.c
===================================================================
--- head/sys/dev/sound/pci/emu10kx.c (revision 297861)
+++ head/sys/dev/sound/pci/emu10kx.c (revision 297862)
@@ -1,3570 +1,3570 @@
/*-
 * Copyright (c) 1999 Cameron Grant
 * Copyright (c) 2003-2007 Yuriy Tsibizov
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHERIN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for DELAY */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include /* hw flags */ #define HAS_51 0x0001 #define HAS_71 0x0002 #define HAS_AC97 0x0004 #define IS_EMU10K1 0x0008 #define IS_EMU10K2 0x0010 #define IS_CA0102 0x0020 #define IS_CA0108 0x0040 #define IS_UNKNOWN 0x0080 #define BROKEN_DIGITAL 0x0100 #define DIGITAL_ONLY 0x0200 #define IS_CARDBUS 0x0400 #define MODE_ANALOG 1 #define MODE_DIGITAL 2 #define SPDIF_MODE_PCM 1 #define SPDIF_MODE_AC3 2 #define MACS 0x0 #define MACS1 0x1 #define MACW 0x2 #define MACW1 0x3 #define MACINTS 0x4 #define MACINTW 0x5 #define ACC3 0x6 #define MACMV 0x7 #define ANDXOR 0x8 #define TSTNEG 0x9 #define LIMIT 0xA #define LIMIT1 0xB #define LOG 0xC #define EXP 0xD #define INTERP 0xE #define SKIP 0xF #define GPR(i) (sc->gpr_base+(i)) #define INP(i) (sc->input_base+(i)) #define OUTP(i) (sc->output_base+(i)) #define FX(i) (i) #define FX2(i) (sc->efxc_base+(i)) #define DSP_CONST(i) (sc->dsp_zero+(i)) #define COND_NORMALIZED DSP_CONST(0x1) #define COND_BORROW DSP_CONST(0x2) #define 
COND_MINUS DSP_CONST(0x3) #define COND_LESS_ZERO DSP_CONST(0x4) #define COND_EQ_ZERO DSP_CONST(0x5) #define COND_SATURATION DSP_CONST(0x6) #define COND_NEQ_ZERO DSP_CONST(0x8) #define DSP_ACCUM DSP_CONST(0x16) #define DSP_CCR DSP_CONST(0x17) /* Live! Inputs */ #define IN_AC97_L 0x00 #define IN_AC97_R 0x01 #define IN_AC97 IN_AC97_L #define IN_SPDIF_CD_L 0x02 #define IN_SPDIF_CD_R 0x03 #define IN_SPDIF_CD IN_SPDIF_CD_L #define IN_ZOOM_L 0x04 #define IN_ZOOM_R 0x05 #define IN_ZOOM IN_ZOOM_L #define IN_TOSLINK_L 0x06 #define IN_TOSLINK_R 0x07 #define IN_TOSLINK IN_TOSLINK_L #define IN_LINE1_L 0x08 #define IN_LINE1_R 0x09 #define IN_LINE1 IN_LINE1_L #define IN_COAX_SPDIF_L 0x0a #define IN_COAX_SPDIF_R 0x0b #define IN_COAX_SPDIF IN_COAX_SPDIF_L #define IN_LINE2_L 0x0c #define IN_LINE2_R 0x0d #define IN_LINE2 IN_LINE2_L #define IN_0E 0x0e #define IN_0F 0x0f /* Outputs */ #define OUT_AC97_L 0x00 #define OUT_AC97_R 0x01 #define OUT_AC97 OUT_AC97_L #define OUT_A_FRONT OUT_AC97 #define OUT_TOSLINK_L 0x02 #define OUT_TOSLINK_R 0x03 #define OUT_TOSLINK OUT_TOSLINK_L #define OUT_D_CENTER 0x04 #define OUT_D_SUB 0x05 #define OUT_HEADPHONE_L 0x06 #define OUT_HEADPHONE_R 0x07 #define OUT_HEADPHONE OUT_HEADPHONE_L #define OUT_REAR_L 0x08 #define OUT_REAR_R 0x09 #define OUT_REAR OUT_REAR_L #define OUT_ADC_REC_L 0x0a #define OUT_ADC_REC_R 0x0b #define OUT_ADC_REC OUT_ADC_REC_L #define OUT_MIC_CAP 0x0c /* Live! 
5.1 Digital, non-standard 5.1 (center & sub) outputs */ #define OUT_A_CENTER 0x11 #define OUT_A_SUB 0x12 /* Audigy Inputs */ #define A_IN_AC97_L 0x00 #define A_IN_AC97_R 0x01 #define A_IN_AC97 A_IN_AC97_L #define A_IN_SPDIF_CD_L 0x02 #define A_IN_SPDIF_CD_R 0x03 #define A_IN_SPDIF_CD A_IN_SPDIF_CD_L #define A_IN_O_SPDIF_L 0x04 #define A_IN_O_SPDIF_R 0x05 #define A_IN_O_SPDIF A_IN_O_SPDIF_L #define A_IN_LINE2_L 0x08 #define A_IN_LINE2_R 0x09 #define A_IN_LINE2 A_IN_LINE2_L #define A_IN_R_SPDIF_L 0x0a #define A_IN_R_SPDIF_R 0x0b #define A_IN_R_SPDIF A_IN_R_SPDIF_L #define A_IN_AUX2_L 0x0c #define A_IN_AUX2_R 0x0d #define A_IN_AUX2 A_IN_AUX2_L /* Audigy Outputs */ #define A_OUT_D_FRONT_L 0x00 #define A_OUT_D_FRONT_R 0x01 #define A_OUT_D_FRONT A_OUT_D_FRONT_L #define A_OUT_D_CENTER 0x02 #define A_OUT_D_SUB 0x03 #define A_OUT_D_SIDE_L 0x04 #define A_OUT_D_SIDE_R 0x05 #define A_OUT_D_SIDE A_OUT_D_SIDE_L #define A_OUT_D_REAR_L 0x06 #define A_OUT_D_REAR_R 0x07 #define A_OUT_D_REAR A_OUT_D_REAR_L /* on Audigy Platinum only */ #define A_OUT_HPHONE_L 0x04 #define A_OUT_HPHONE_R 0x05 #define A_OUT_HPHONE A_OUT_HPHONE_L #define A_OUT_A_FRONT_L 0x08 #define A_OUT_A_FRONT_R 0x09 #define A_OUT_A_FRONT A_OUT_A_FRONT_L #define A_OUT_A_CENTER 0x0a #define A_OUT_A_SUB 0x0b #define A_OUT_A_SIDE_L 0x0c #define A_OUT_A_SIDE_R 0x0d #define A_OUT_A_SIDE A_OUT_A_SIDE_L #define A_OUT_A_REAR_L 0x0e #define A_OUT_A_REAR_R 0x0f #define A_OUT_A_REAR A_OUT_A_REAR_L #define A_OUT_AC97_L 0x10 #define A_OUT_AC97_R 0x11 #define A_OUT_AC97 A_OUT_AC97_L #define A_OUT_ADC_REC_L 0x16 #define A_OUT_ADC_REC_R 0x17 #define A_OUT_ADC_REC A_OUT_ADC_REC_L #define EMU_DATA2 0x24 #define EMU_IPR2 0x28 #define EMU_INTE2 0x2c #define EMU_IPR3 0x38 #define EMU_INTE3 0x3c #define EMU_A2_SRCSel 0x60 #define EMU_A2_SRCMULTI_ENABLE 0x6e #define EMU_A_I2S_CAPTURE_96000 0x00000400 #define EMU_A2_MIXER_I2S_ENABLE 0x7B #define EMU_A2_MIXER_SPDIF_ENABLE 0x7A #define C_FRONT_L 0 #define C_FRONT_R 1 #define C_REC_L 2 #define 
C_REC_R 3 #define C_REAR_L 4 #define C_REAR_R 5 #define C_CENTER 6 #define C_SUB 7 #define C_SIDE_L 8 #define C_SIDE_R 9 #define NUM_CACHES 10 #define CDSPDIFMUTE 0 #define ANALOGMUTE 1 #define NUM_MUTE 2 #define EMU_MAX_GPR 512 #define EMU_MAX_IRQ_CONSUMERS 32 struct emu_voice { int vnum; unsigned int b16:1, stereo:1, busy:1, running:1, ismaster:1; int speed; int start; int end; int vol; uint32_t buf; void *vbuf; struct emu_voice *slave; uint32_t sa; uint32_t ea; uint32_t routing[8]; uint32_t amounts[8]; }; struct emu_memblk { SLIST_ENTRY(emu_memblk) link; void *buf; char owner[16]; bus_addr_t buf_addr; uint32_t pte_start, pte_size; bus_dmamap_t buf_map; }; struct emu_mem { uint8_t bmap[EMU_MAXPAGES / 8]; uint32_t *ptb_pages; void *silent_page; bus_addr_t ptb_pages_addr; bus_addr_t silent_page_addr; bus_dmamap_t ptb_map; bus_dmamap_t silent_map; bus_dma_tag_t dmat; struct emu_sc_info *card; SLIST_HEAD(, emu_memblk) blocks; }; /* rm */ struct emu_rm { struct emu_sc_info *card; struct mtx gpr_lock; signed int allocmap[EMU_MAX_GPR]; int num_gprs; int last_free_gpr; int num_used; }; struct emu_intr_handler { void* softc; uint32_t intr_mask; uint32_t inte_mask; uint32_t(*irq_func) (void *softc, uint32_t irq); }; struct emu_sc_info { struct mtx lock; struct mtx rw; /* Hardware exclusive access lock */ /* Hardware and subdevices */ device_t dev; device_t pcm[RT_COUNT]; device_t midi[2]; uint32_t type; uint32_t rev; bus_space_tag_t st; bus_space_handle_t sh; struct cdev *cdev; /* /dev/emu10k character device */ struct mtx emu10kx_lock; int emu10kx_isopen; struct sbuf emu10kx_sbuf; int emu10kx_bufptr; /* Resources */ struct resource *reg; struct resource *irq; void *ih; /* IRQ handlers */ struct emu_intr_handler ihandler[EMU_MAX_IRQ_CONSUMERS]; /* Card HW configuration */ unsigned int mode; /* analog / digital */ unsigned int mchannel_fx; unsigned int dsp_zero; unsigned int code_base; unsigned int code_size; unsigned int gpr_base; unsigned int num_gprs; unsigned int 
input_base; unsigned int output_base; unsigned int efxc_base; unsigned int opcode_shift; unsigned int high_operand_shift; unsigned int address_mask; uint32_t is_emu10k1:1, is_emu10k2, is_ca0102, is_ca0108:1, has_ac97:1, has_51:1, has_71:1, enable_ir:1, broken_digital:1, is_cardbus:1; signed int mch_disabled, mch_rec, dbg_level; signed int num_inputs; unsigned int num_outputs; unsigned int num_fxbuses; unsigned int routing_code_start; unsigned int routing_code_end; /* HW resources */ struct emu_voice voice[NUM_G]; /* Hardware voices */ uint32_t irq_mask[EMU_MAX_IRQ_CONSUMERS]; /* IRQ manager data */ int timer[EMU_MAX_IRQ_CONSUMERS]; /* timer */ int timerinterval; struct emu_rm *rm; struct emu_mem mem; /* memory */ /* Mixer */ int mixer_gpr[NUM_MIXERS]; int mixer_volcache[NUM_MIXERS]; int cache_gpr[NUM_CACHES]; int dummy_gpr; int mute_gpr[NUM_MUTE]; struct sysctl_ctx_list *ctx; struct sysctl_oid *root; }; static void emu_setmap(void *arg, bus_dma_segment_t * segs, int nseg, int error); static void* emu_malloc(struct emu_mem *mem, uint32_t sz, bus_addr_t * addr, bus_dmamap_t *map); static void emu_free(struct emu_mem *mem, void *dmabuf, bus_dmamap_t map); static void* emu_memalloc(struct emu_mem *mem, uint32_t sz, bus_addr_t * addr, const char * owner); static int emu_memfree(struct emu_mem *mem, void *membuf); static int emu_memstart(struct emu_mem *mem, void *membuf); /* /dev */ static int emu10kx_dev_init(struct emu_sc_info *sc); static int emu10kx_dev_uninit(struct emu_sc_info *sc); static int emu10kx_prepare(struct emu_sc_info *sc, struct sbuf *s); static void emumix_set_mode(struct emu_sc_info *sc, int mode); static void emumix_set_spdif_mode(struct emu_sc_info *sc, int mode); static void emumix_set_fxvol(struct emu_sc_info *sc, unsigned gpr, int32_t vol); static void emumix_set_gpr(struct emu_sc_info *sc, unsigned gpr, int32_t val); static int sysctl_emu_mixer_control(SYSCTL_HANDLER_ARGS); static int emu_rm_init(struct emu_sc_info *sc); static int 
emu_rm_uninit(struct emu_sc_info *sc); static int emu_rm_gpr_alloc(struct emu_rm *rm, int count); static unsigned int emu_getcard(device_t dev); static uint32_t emu_rd_nolock(struct emu_sc_info *sc, unsigned int regno, unsigned int size); static void emu_wr_nolock(struct emu_sc_info *sc, unsigned int regno, uint32_t data, unsigned int size); static void emu_wr_cbptr(struct emu_sc_info *sc, uint32_t data); static void emu_vstop(struct emu_sc_info *sc, char channel, int enable); static void emu_intr(void *p); static void emu_wrefx(struct emu_sc_info *sc, unsigned int pc, unsigned int data); static void emu_addefxop(struct emu_sc_info *sc, unsigned int op, unsigned int z, unsigned int w, unsigned int x, unsigned int y, uint32_t * pc); static void emu_initefx(struct emu_sc_info *sc); static int emu_cardbus_init(struct emu_sc_info *sc); static int emu_init(struct emu_sc_info *sc); static int emu_uninit(struct emu_sc_info *sc); static int emu_read_ivar(device_t bus __unused, device_t dev, int ivar_index, uintptr_t * result); static int emu_write_ivar(device_t bus __unused, device_t dev __unused, int ivar_index, uintptr_t value __unused); static int emu_pci_probe(device_t dev); static int emu_pci_attach(device_t dev); static int emu_pci_detach(device_t dev); static int emu_modevent(module_t mod __unused, int cmd, void *data __unused); #ifdef SND_EMU10KX_DEBUG #define EMU_MTX_DEBUG() do { \ if (mtx_owned(&sc->rw)) { \ printf("RW owned in %s line %d for %s\n", __func__, \ __LINE__ , device_get_nameunit(sc->dev)); \ printf("rw lock owned: %d\n", mtx_owned(&sc->rw)); \ printf("rw lock: value %x thread %x\n", \ ((&sc->rw)->mtx_lock & ~MTX_FLAGMASK), \ (uintptr_t)curthread); \ printf("rw lock: recursed %d\n", mtx_recursed(&sc->rw));\ db_show_mtx(&sc->rw); \ } \ } while (0) #else #define EMU_MTX_DEBUG() do { \ } while (0) #endif #define EMU_RWLOCK() do { \ EMU_MTX_DEBUG(); \ mtx_lock(&(sc->rw)); \ } while (0) #define EMU_RWUNLOCK() do { \ mtx_unlock(&(sc->rw)); \ 
EMU_MTX_DEBUG(); \ } while (0) /* Supported cards */ struct emu_hwinfo { uint16_t vendor; uint16_t device; uint16_t subvendor; uint16_t subdevice; char SBcode[8]; char desc[32]; int flags; }; static struct emu_hwinfo emu_cards[] = { {0xffff, 0xffff, 0xffff, 0xffff, "BADCRD", "Not a compatible card", 0}, /* 0x0020..0x002f 4.0 EMU10K1 cards */ {0x1102, 0x0002, 0x1102, 0x0020, "CT4850", "SBLive! Value", HAS_AC97 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x0021, "CT4620", "SBLive!", HAS_AC97 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x002f, "CT????", "SBLive! mainboard implementation", HAS_AC97 | IS_EMU10K1}, /* (range unknown) 5.1 EMU10K1 cards */ {0x1102, 0x0002, 0x1102, 0x100a, "CT????", "SBLive! 5.1", HAS_AC97 | HAS_51 | IS_EMU10K1}, /* 0x80??..0x805? 4.0 EMU10K1 cards */ {0x1102, 0x0002, 0x1102, 0x8022, "CT4780", "SBLive! Value", HAS_AC97 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8023, "CT4790", "SB PCI512", HAS_AC97 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8024, "CT4760", "SBLive!", HAS_AC97 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8025, "CT????", "SBLive! Mainboard Implementation", HAS_AC97 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8026, "CT4830", "SBLive! Value", HAS_AC97 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8027, "CT4832", "SBLive! Value", HAS_AC97 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8028, "CT4760", "SBLive! OEM version", HAS_AC97 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8031, "CT4831", "SBLive! Value", HAS_AC97 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8040, "CT4760", "SBLive!", HAS_AC97 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8051, "CT4850", "SBLive! Value", HAS_AC97 | IS_EMU10K1}, /* 0x8061..0x???? 5.1 EMU10K1 cards */ {0x1102, 0x0002, 0x1102, 0x8061, "SB????", "SBLive! Player 5.1", HAS_AC97 | HAS_51 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8062, "CT4830", "SBLive! 1024", HAS_AC97 | HAS_51 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8064, "SB????", "SBLive! 5.1", HAS_AC97 | HAS_51 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8065, "SB0220", "SBLive! 
5.1 Digital", HAS_AC97 | HAS_51 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8066, "CT4780", "SBLive! 5.1 Digital", HAS_AC97 | HAS_51 | IS_EMU10K1}, {0x1102, 0x0002, 0x1102, 0x8067, "SB????", "SBLive!", HAS_AC97 | HAS_51 | IS_EMU10K1}, /* Generic SB Live! */ {0x1102, 0x0002, 0x1102, 0x0000, "SB????", "SBLive! (Unknown model)", HAS_AC97 | IS_EMU10K1}, /* 0x0041..0x0043 EMU10K2 (some kind of Audigy) cards */ /* 0x0051..0x0051 5.1 CA0100-IAF cards */ {0x1102, 0x0004, 0x1102, 0x0051, "SB0090", "Audigy", HAS_AC97 | HAS_51 | IS_EMU10K2}, /* ES is CA0100-IDF chip that don't work in digital mode */ {0x1102, 0x0004, 0x1102, 0x0052, "SB0160", "Audigy ES", HAS_AC97 | HAS_71 | IS_EMU10K2 | BROKEN_DIGITAL}, /* 0x0053..0x005C 5.1 CA0101-NAF cards */ {0x1102, 0x0004, 0x1102, 0x0053, "SB0090", "Audigy Player/OEM", HAS_AC97 | HAS_51 | IS_EMU10K2}, {0x1102, 0x0004, 0x1102, 0x0058, "SB0090", "Audigy Player/OEM", HAS_AC97 | HAS_51 | IS_EMU10K2}, /* 0x1002..0x1009 5.1 CA0102-IAT cards */ {0x1102, 0x0004, 0x1102, 0x1002, "SB????", "Audigy 2 Platinum", HAS_51 | IS_CA0102}, {0x1102, 0x0004, 0x1102, 0x1005, "SB????", "Audigy 2 Platinum EX", HAS_51 | IS_CA0102}, {0x1102, 0x0004, 0x1102, 0x1007, "SB0240", "Audigy 2", HAS_AC97 | HAS_51 | IS_CA0102}, /* 0x2001..0x2003 7.1 CA0102-ICT cards */ {0x1102, 0x0004, 0x1102, 0x2001, "SB0350", "Audigy 2 ZS", HAS_AC97 | HAS_71 | IS_CA0102}, {0x1102, 0x0004, 0x1102, 0x2002, "SB0350", "Audigy 2 ZS", HAS_AC97 | HAS_71 | IS_CA0102}, /* XXX No reports about 0x2003 & 0x2004 cards */ {0x1102, 0x0004, 0x1102, 0x2003, "SB0350", "Audigy 2 ZS", HAS_AC97 | HAS_71 | IS_CA0102}, {0x1102, 0x0004, 0x1102, 0x2004, "SB0350", "Audigy 2 ZS", HAS_AC97 | HAS_71 | IS_CA0102}, {0x1102, 0x0004, 0x1102, 0x2005, "SB0350", "Audigy 2 ZS", HAS_AC97 | HAS_71 | IS_CA0102}, /* (range unknown) 7.1 CA0102-xxx Audigy 4 cards */ {0x1102, 0x0004, 0x1102, 0x2007, "SB0380", "Audigy 4 Pro", HAS_AC97 | HAS_71 | IS_CA0102}, /* Generic Audigy or Audigy 2 */ {0x1102, 0x0004, 0x1102, 0x0000, "SB????", 
"Audigy (Unknown model)", HAS_AC97 | HAS_51 | IS_EMU10K2}, /* We don't support CA0103-DAT (Audigy LS) cards */ /* There is NO CA0104-xxx cards */ /* There is NO CA0105-xxx cards */ /* We don't support CA0106-DAT (SB Live! 24 bit) cards */ /* There is NO CA0107-xxx cards */ /* 0x1000..0x1001 7.1 CA0108-IAT cards */ {0x1102, 0x0008, 0x1102, 0x1000, "SB????", "Audigy 2 LS", HAS_AC97 | HAS_51 | IS_CA0108 | DIGITAL_ONLY}, {0x1102, 0x0008, 0x1102, 0x1001, "SB0400", "Audigy 2 Value", HAS_AC97 | HAS_71 | IS_CA0108 | DIGITAL_ONLY}, {0x1102, 0x0008, 0x1102, 0x1021, "SB0610", "Audigy 4", HAS_AC97 | HAS_71 | IS_CA0108 | DIGITAL_ONLY}, {0x1102, 0x0008, 0x1102, 0x2001, "SB0530", "Audigy 2 ZS CardBus", HAS_AC97 | HAS_71 | IS_CA0108 | IS_CARDBUS}, {0x1102, 0x0008, 0x0000, 0x0000, "SB????", "Audigy 2 Value (Unknown model)", HAS_AC97 | HAS_51 | IS_CA0108}, }; /* Unsupported cards */ static struct emu_hwinfo emu_bad_cards[] = { /* APS cards should be possible to support */ {0x1102, 0x0002, 0x1102, 0x4001, "EMUAPS", "E-mu APS", 0}, {0x1102, 0x0002, 0x1102, 0x4002, "EMUAPS", "E-mu APS", 0}, {0x1102, 0x0004, 0x1102, 0x4001, "EMU???", "E-mu 1212m [4001]", 0}, /* Similar-named ("Live!" or "Audigy") cards on different chipsets */ {0x1102, 0x8064, 0x0000, 0x0000, "SB0100", "SBLive! 5.1 OEM", 0}, {0x1102, 0x0006, 0x0000, 0x0000, "SB0200", "DELL OEM SBLive! Value", 0}, {0x1102, 0x0007, 0x0000, 0x0000, "SB0310", "Audigy LS", 0}, }; /* * Get best known information about device. 
*/ static unsigned int emu_getcard(device_t dev) { uint16_t device; uint16_t subdevice; int n_cards; unsigned int thiscard; int i; device = pci_read_config(dev, PCIR_DEVICE, /* bytes */ 2); subdevice = pci_read_config(dev, PCIR_SUBDEV_0, /* bytes */ 2); n_cards = sizeof(emu_cards) / sizeof(struct emu_hwinfo); thiscard = 0; for (i = 1; i < n_cards; i++) { if (device == emu_cards[i].device) { if (subdevice == emu_cards[i].subdevice) { thiscard = i; break; } if (0x0000 == emu_cards[i].subdevice) { thiscard = i; /* * don't break, we can get more specific card * later in the list. */ } } } n_cards = sizeof(emu_bad_cards) / sizeof(struct emu_hwinfo); for (i = 0; i < n_cards; i++) { if (device == emu_bad_cards[i].device) { if (subdevice == emu_bad_cards[i].subdevice) { thiscard = 0; break; } if (0x0000 == emu_bad_cards[i].subdevice) { thiscard = 0; break; /* we avoid all this cards */ } } } return (thiscard); } /* * Base hardware interface are 32 (Audigy) or 64 (Audigy2) registers. * Some of them are used directly, some of them provide pointer / data pairs. */ static uint32_t emu_rd_nolock(struct emu_sc_info *sc, unsigned int regno, unsigned int size) { KASSERT(sc != NULL, ("emu_rd: NULL sc")); switch (size) { case 1: return (bus_space_read_1(sc->st, sc->sh, regno)); case 2: return (bus_space_read_2(sc->st, sc->sh, regno)); case 4: return (bus_space_read_4(sc->st, sc->sh, regno)); } return (0xffffffff); } static void emu_wr_nolock(struct emu_sc_info *sc, unsigned int regno, uint32_t data, unsigned int size) { KASSERT(sc != NULL, ("emu_rd: NULL sc")); switch (size) { case 1: bus_space_write_1(sc->st, sc->sh, regno, data); break; case 2: bus_space_write_2(sc->st, sc->sh, regno, data); break; case 4: bus_space_write_4(sc->st, sc->sh, regno, data); break; } } /* * EMU_PTR / EMU_DATA interface. Access to EMU10Kx is made * via (channel, register) pair. Some registers are channel-specific, * some not. 
*/
/*
 * Read channel 'chn', register 'reg' through the EMU_PTR/EMU_DATA
 * window.  The pointer write and data read are performed under
 * EMU_RWLOCK so the two-step transaction is not interleaved with
 * another CPU's access.
 */
uint32_t
emu_rdptr(struct emu_sc_info *sc, unsigned int chn, unsigned int reg)
{
	uint32_t ptr, val, mask, size, offset;

	ptr = ((reg << 16) & sc->address_mask) | (chn & EMU_PTR_CHNO_MASK);
	EMU_RWLOCK();
	emu_wr_nolock(sc, EMU_PTR, ptr, 4);
	val = emu_rd_nolock(sc, EMU_DATA, 4);
	EMU_RWUNLOCK();

	/*
	 * XXX Some register numbers has data size and offset encoded in
	 * it to get only part of 32bit register. This use is not described
	 * in register name, be careful!
	 */
	/* Top byte non-zero => extract a sub-field: bits 29..24 give the
	 * field width, bits 20..16 the bit offset within the register. */
	if (reg & 0xff000000) {
		size = (reg >> 24) & 0x3f;
		offset = (reg >> 16) & 0x1f;
		mask = ((1 << size) - 1) << offset;
		val &= mask;
		val >>= offset;
	}
	return (val);
}

/*
 * Write channel 'chn', register 'reg' through the EMU_PTR/EMU_DATA
 * window, under EMU_RWLOCK.
 */
void
emu_wrptr(struct emu_sc_info *sc, unsigned int chn, unsigned int reg, uint32_t data)
{
	uint32_t ptr, mask, size, offset;

	ptr = ((reg << 16) & sc->address_mask) | (chn & EMU_PTR_CHNO_MASK);
	EMU_RWLOCK();
	emu_wr_nolock(sc, EMU_PTR, ptr, 4);
	/*
	 * XXX Another kind of magic encoding in register number. This can
	 * give you side effect - it will read previous data from register
	 * and change only required bits.
	 */
	/* Sub-field write: read-modify-write so bits outside the encoded
	 * field keep their previous value.  Must stay inside the lock. */
	if (reg & 0xff000000) {
		size = (reg >> 24) & 0x3f;
		offset = (reg >> 16) & 0x1f;
		mask = ((1 << size) - 1) << offset;
		data <<= offset;
		data &= mask;
		data |= emu_rd_nolock(sc, EMU_DATA, 4) & ~mask;
	}
	emu_wr_nolock(sc, EMU_DATA, data, 4);
	EMU_RWUNLOCK();
}
/*
 * EMU_A2_PTR / EMU_DATA2 interface. Access to P16v is made
 * via (channel, register) pair. Some registers are channel-specific,
 * some not. This interface is supported by CA0102 and CA0108 chips only.
 */
uint32_t
emu_rd_p16vptr(struct emu_sc_info *sc, uint16_t chn, uint16_t reg)
{
	uint32_t val;

	/* XXX separate lock? */
	EMU_RWLOCK();
	emu_wr_nolock(sc, EMU_A2_PTR, (reg << 16) | chn, 4);
	val = emu_rd_nolock(sc, EMU_DATA2, 4);

	EMU_RWUNLOCK();

	return (val);
}

void
emu_wr_p16vptr(struct emu_sc_info *sc, uint16_t chn, uint16_t reg, uint32_t data)
{

	EMU_RWLOCK();
	emu_wr_nolock(sc, EMU_A2_PTR, (reg << 16) | chn, 4);
	emu_wr_nolock(sc, EMU_DATA2, data, 4);
	EMU_RWUNLOCK();
}
/*
 * XXX CardBus interface.
 * Not tested on any real hardware.
 */
static void
emu_wr_cbptr(struct emu_sc_info *sc, uint32_t data)
{
	uint32_t val;

	/*
	 * 0x38 is IPE3 (CD S/PDIF interrupt pending register) on CA0102. Seems
	 * to be some reg/value accessible kind of config register on CardBus
	 * CA0108, with value(?) in top 16 bit, address(?) in low 16
	 */
	/*
	 * NOTE(review): 'val' is assigned twice and never consumed; the
	 * reads bracketing the write look like deliberate bus transactions
	 * (hardware handshake) — confirm before "cleaning up" this code.
	 */
	val = emu_rd_nolock(sc, 0x38, 4);
	emu_wr_nolock(sc, 0x38, data, 4);
	val = emu_rd_nolock(sc, 0x38, 4);
}

/*
 * Direct hardware register access
 * Assume that it is never used to access EMU_PTR-based registers and can run unlocked.
 */
/* Guarded write: refuses the pointer registers, which require the
 * locked EMU_PTR/EMU_DATA protocol above. */
void
emu_wr(struct emu_sc_info *sc, unsigned int regno, uint32_t data, unsigned int size)
{
	KASSERT(regno != EMU_PTR, ("emu_wr: attempt to write to EMU_PTR"));
	KASSERT(regno != EMU_A2_PTR, ("emu_wr: attempt to write to EMU_A2_PTR"));

	emu_wr_nolock(sc, regno, data, size);
}

/* Guarded read: refuses the data-window registers, which are only
 * meaningful inside the locked pointer/data protocol. */
uint32_t
emu_rd(struct emu_sc_info *sc, unsigned int regno, unsigned int size)
{
	uint32_t rd;

	KASSERT(regno != EMU_DATA, ("emu_rd: attempt to read DATA"));
	KASSERT(regno != EMU_DATA2, ("emu_rd: attempt to read DATA2"));

	rd = emu_rd_nolock(sc, regno, size);
	return (rd);
}

/*
 * Enabling IR MIDI messages is another kind of black magic. It just
 * has to be made this way. It really does work.
*/
/*
 * Toggle the GPOUT pins in the documented magic sequence to switch the
 * card's IR receiver into MIDI-event mode.  Audigy-class cards
 * (emu10k2 / ca0102) use EMU_A_IOCFG; SB Live! (emu10k1) uses EMU_HCFG.
 * The DELAY()s between writes are part of the sequence — do not reorder.
 */
void
emu_enable_ir(struct emu_sc_info *sc)
{
	uint32_t iocfg;

	if (sc->is_emu10k2 || sc->is_ca0102) {
		iocfg = emu_rd_nolock(sc, EMU_A_IOCFG, 2);
		emu_wr_nolock(sc, EMU_A_IOCFG, iocfg | EMU_A_IOCFG_GPOUT2, 2);
		DELAY(500);
		emu_wr_nolock(sc, EMU_A_IOCFG, iocfg | EMU_A_IOCFG_GPOUT1 | EMU_A_IOCFG_GPOUT2, 2);
		DELAY(500);
		emu_wr_nolock(sc, EMU_A_IOCFG, iocfg | EMU_A_IOCFG_GPOUT1, 2);
		DELAY(100);
		emu_wr_nolock(sc, EMU_A_IOCFG, iocfg, 2);
		device_printf(sc->dev, "Audigy IR MIDI events enabled.\n");
		sc->enable_ir = 1;
	}
	if (sc->is_emu10k1) {
		iocfg = emu_rd_nolock(sc, EMU_HCFG, 4);
		emu_wr_nolock(sc, EMU_HCFG, iocfg | EMU_HCFG_GPOUT2, 4);
		DELAY(500);
		emu_wr_nolock(sc, EMU_HCFG, iocfg | EMU_HCFG_GPOUT1 | EMU_HCFG_GPOUT2, 4);
		DELAY(100);
		emu_wr_nolock(sc, EMU_HCFG, iocfg, 4);
		device_printf(sc->dev, "SB Live! IR MIDI events enabled.\n");
		sc->enable_ir = 1;
	}
}


/*
 * emu_timer_ - HW timer management
 */
/*
 * Allocate a timer slot.  A slot holding 0 is free; the new slot is
 * marked -1 (allocated but disabled).  Returns the slot index, or -1
 * if all EMU_MAX_IRQ_CONSUMERS slots are taken.
 */
int
emu_timer_create(struct emu_sc_info *sc)
{
	int i, timer;

	timer = -1;

	mtx_lock(&sc->lock);
	for (i = 0; i < EMU_MAX_IRQ_CONSUMERS; i++)
		if (sc->timer[i] == 0) {
			sc->timer[i] = -1;	/* disable it */
			timer = i;
			mtx_unlock(&sc->lock);
			return (timer);
		}
	mtx_unlock(&sc->lock);

	return (-1);
}

/*
 * Set the delay (clamped to 16..1024) for a timer slot and push the
 * minimum of all slots into the shared hardware interval register.
 */
int
emu_timer_set(struct emu_sc_info *sc, int timer, int delay)
{
	int i;

	if (timer < 0)
		return (-1);

	RANGE(delay, 16, 1024);
	RANGE(timer, 0, EMU_MAX_IRQ_CONSUMERS-1);
	mtx_lock(&sc->lock);
	sc->timer[timer] = delay;
	/*
	 * XXX: timerinterval is only ever lowered here (and in
	 * emu_timer_enable); negative (disabled) slots also pull it down,
	 * and nothing ever recomputes it upward.
	 */
	for (i = 0; i < EMU_MAX_IRQ_CONSUMERS; i++)
		if (sc->timerinterval > sc->timer[i])
			sc->timerinterval = sc->timer[i];

	/* XXX */
	emu_wr(sc, EMU_TIMER, sc->timerinterval & 0x03ff, 2);
	mtx_unlock(&sc->lock);

	return (timer);
}

/*
 * Enable (go=1) or disable (go=0) a timer slot by flipping the sign of
 * its stored delay (positive = running, negative = stopped), then
 * enable/disable the hardware timer interrupt depending on whether any
 * slot is still running.
 */
int
emu_timer_enable(struct emu_sc_info *sc, int timer, int go)
{
	uint32_t x;
	int ena_int;
	int i;

	if (timer < 0)
		return (-1);

	RANGE(timer, 0, EMU_MAX_IRQ_CONSUMERS-1);
	mtx_lock(&sc->lock);

	if ((go == 1) && (sc->timer[timer] < 0))
		sc->timer[timer] = -sc->timer[timer];
	if ((go == 0) && (sc->timer[timer] > 0))
		sc->timer[timer] = -sc->timer[timer];

	ena_int = 0;
	for (i = 0; i < EMU_MAX_IRQ_CONSUMERS; i++) {
		if (sc->timerinterval > sc->timer[i])
			sc->timerinterval = sc->timer[i];
		if (sc->timer[i] > 0)
			ena_int = 1;
	}

	emu_wr(sc, EMU_TIMER, sc->timerinterval & 0x03ff, 2);

	if (ena_int == 1) {
		x = emu_rd(sc, EMU_INTE, 4);
		x |= EMU_INTE_INTERTIMERENB;
		emu_wr(sc, EMU_INTE, x, 4);
	} else {
		x = emu_rd(sc, EMU_INTE, 4);
		x &= ~EMU_INTE_INTERTIMERENB;
		emu_wr(sc, EMU_INTE, x, 4);
	}
	mtx_unlock(&sc->lock);
	return (0);
}

/*
 * Release a timer slot: disable it, then mark it free (0).
 * Returns the slot index, or -1 for an invalid slot.
 */
int
emu_timer_clear(struct emu_sc_info *sc, int timer)
{
	if (timer < 0)
		return (-1);

	RANGE(timer, 0, EMU_MAX_IRQ_CONSUMERS-1);

	emu_timer_enable(sc, timer, 0);

	mtx_lock(&sc->lock);
	if (sc->timer[timer] != 0)
		sc->timer[timer] = 0;
	mtx_unlock(&sc->lock);

	return (timer);
}

/*
 * emu_intr_ - HW interrupt handler management
 */
/*
 * Register an interrupt consumer: claim the first free ihandler slot
 * (inte_mask == 0 marks free), record the masks/handler/softc, and OR
 * the consumer's enable bits into EMU_INTE.  Returns the slot index,
 * or -1 if no slot is available.
 */
int
emu_intr_register(struct emu_sc_info *sc, uint32_t inte_mask, uint32_t intr_mask, uint32_t(*func) (void *softc, uint32_t irq), void *isc)
{
	int i;
	uint32_t x;

	mtx_lock(&sc->lock);
	for (i = 0; i < EMU_MAX_IRQ_CONSUMERS; i++)
		if (sc->ihandler[i].inte_mask == 0) {
			sc->ihandler[i].inte_mask = inte_mask;
			sc->ihandler[i].intr_mask = intr_mask;
			sc->ihandler[i].softc = isc;
			sc->ihandler[i].irq_func = func;
			x = emu_rd(sc, EMU_INTE, 4);
			x |= inte_mask;
			emu_wr(sc, EMU_INTE, x, 4);
			mtx_unlock(&sc->lock);
			if (sc->dbg_level > 1)
				device_printf(sc->dev, "ihandle %d registered\n", i);

			return (i);
		}
	mtx_unlock(&sc->lock);
	if (sc->dbg_level > 1)
		device_printf(sc->dev, "ihandle not registered\n");

	return (-1);
}

/*
 * Unregister an interrupt consumer: clear its slot, drop its enable
 * bits from EMU_INTE, then re-OR the bits still needed by the
 * remaining consumers.  Returns the slot number, or -1 if the slot
 * was not in use.
 */
int
emu_intr_unregister(struct emu_sc_info *sc, int hnumber)
{
	uint32_t x;
	int i;

	mtx_lock(&sc->lock);

	if (sc->ihandler[hnumber].inte_mask == 0) {
		mtx_unlock(&sc->lock);
		return (-1);
	}

	x = emu_rd(sc, EMU_INTE, 4);
	x &= ~sc->ihandler[hnumber].inte_mask;

	sc->ihandler[hnumber].inte_mask = 0;
	sc->ihandler[hnumber].intr_mask = 0;
	sc->ihandler[hnumber].softc = NULL;
	sc->ihandler[hnumber].irq_func = NULL;

	/* other interrupt handlers may use this EMU_INTE value */
	for (i = 0; i < EMU_MAX_IRQ_CONSUMERS; i++)
		if (sc->ihandler[i].inte_mask !=
0)
			x |= sc->ihandler[i].inte_mask;

	emu_wr(sc, EMU_INTE, x, 4);
	mtx_unlock(&sc->lock);
	return (hnumber);
}

/*
 * Main interrupt handler.  Drains EMU_IPR: for each pending status
 * word, acknowledge it back to the hardware, then dispatch the bits to
 * every registered consumer whose intr_mask matches.  Consumers return
 * the bits they handled; anything left over is reported when debugging
 * is enabled.  EMU_IPR2 (CA0102/CA0108) and EMU_IPR3 (CA0102) are
 * acknowledged but not yet dispatched — see the in-line notes about
 * the unfinished P16V and S/PDIF interfaces.
 */
static void
emu_intr(void *p)
{
	struct emu_sc_info *sc = (struct emu_sc_info *)p;
	uint32_t stat, ack;
	int i;

	for (;;) {
		stat = emu_rd(sc, EMU_IPR, 4);
		ack = 0;
		if (stat == 0)
			break;
		emu_wr(sc, EMU_IPR, stat, 4);	/* ack interrupt */
		for (i = 0; i < EMU_MAX_IRQ_CONSUMERS; i++) {
			if ((((sc->ihandler[i].intr_mask) & stat) != 0) &&
			    (((void *)sc->ihandler[i].irq_func) != NULL)) {
				ack |= sc->ihandler[i].irq_func(sc->ihandler[i].softc,
				    (sc->ihandler[i].intr_mask) & stat);
			}
		}
		if (sc->dbg_level > 1)
			if (stat & (~ack))
				device_printf(sc->dev, "Unhandled interrupt: %08x\n", stat & (~ack));

	}

	if ((sc->is_ca0102) || (sc->is_ca0108))
		for (;;) {
			stat = emu_rd(sc, EMU_IPR2, 4);
			ack = 0;
			if (stat == 0)
				break;
			emu_wr(sc, EMU_IPR2, stat, 4);	/* ack interrupt */
			if (sc->dbg_level > 1)
				device_printf(sc->dev, "EMU_IPR2: %08x\n", stat);

			break;	/* to avoid infinite loop. should be removed
				 * after completion of P16V interface. */
		}

	if (sc->is_ca0102)
		for (;;) {
			stat = emu_rd(sc, EMU_IPR3, 4);
			ack = 0;
			if (stat == 0)
				break;
			emu_wr(sc, EMU_IPR3, stat, 4);	/* ack interrupt */
			if (sc->dbg_level > 1)
				device_printf(sc->dev, "EMU_IPR3: %08x\n", stat);

			break;	/* to avoid infinite loop. should be removed
				 * after completion of S/PDIF interface */
		}
}

/*
 * Get data from private emu10kx structure for PCM buffer allocation.
 * Used by PCM code only.
 */
bus_dma_tag_t
emu_gettag(struct emu_sc_info *sc)
{
	return (sc->mem.dmat);
}

/* DMA load callback: publish the mapped physical address (or 0 on error)
 * through the bus_addr_t pointer passed as 'arg'. */
static void
emu_setmap(void *arg, bus_dma_segment_t * segs, int nseg, int error)
{
	bus_addr_t *phys = (bus_addr_t *) arg;

	*phys = error ?
0 : (bus_addr_t) segs->ds_addr; if (bootverbose) { printf("emu10kx: setmap (%lx, %lx), nseg=%d, error=%d\n", (unsigned long)segs->ds_addr, (unsigned long)segs->ds_len, nseg, error); } } static void * emu_malloc(struct emu_mem *mem, uint32_t sz, bus_addr_t * addr, bus_dmamap_t *map) { void *dmabuf; int error; *addr = 0; if ((error = bus_dmamem_alloc(mem->dmat, &dmabuf, BUS_DMA_NOWAIT, map))) { if (mem->card->dbg_level > 2) device_printf(mem->card->dev, "emu_malloc: failed to alloc DMA map: %d\n", error); return (NULL); } if ((error = bus_dmamap_load(mem->dmat, *map, dmabuf, sz, emu_setmap, addr, 0)) || !*addr) { if (mem->card->dbg_level > 2) device_printf(mem->card->dev, "emu_malloc: failed to load DMA memory: %d\n", error); bus_dmamem_free(mem->dmat, dmabuf, *map); return (NULL); } return (dmabuf); } static void emu_free(struct emu_mem *mem, void *dmabuf, bus_dmamap_t map) { bus_dmamap_unload(mem->dmat, map); bus_dmamem_free(mem->dmat, dmabuf, map); } static void * emu_memalloc(struct emu_mem *mem, uint32_t sz, bus_addr_t * addr, const char *owner) { uint32_t blksz, start, idx, ofs, tmp, found; struct emu_memblk *blk; void *membuf; blksz = sz / EMUPAGESIZE; if (sz > (blksz * EMUPAGESIZE)) blksz++; if (blksz > EMU_MAX_BUFSZ / EMUPAGESIZE) { if (mem->card->dbg_level > 2) device_printf(mem->card->dev, "emu_memalloc: memory request tool large\n"); return (NULL); } /* find a free block in the bitmap */ found = 0; start = 1; while (!found && start + blksz < EMU_MAXPAGES) { found = 1; for (idx = start; idx < start + blksz; idx++) if (mem->bmap[idx >> 3] & (1 << (idx & 7))) found = 0; if (!found) start++; } if (!found) { if (mem->card->dbg_level > 2) device_printf(mem->card->dev, "emu_memalloc: no free space in bitmap\n"); return (NULL); } blk = malloc(sizeof(*blk), M_DEVBUF, M_NOWAIT); if (blk == NULL) { if (mem->card->dbg_level > 2) device_printf(mem->card->dev, "emu_memalloc: buffer allocation failed\n"); return (NULL); } bzero(blk, sizeof(*blk)); membuf = 
emu_malloc(mem, sz, &blk->buf_addr, &blk->buf_map); *addr = blk->buf_addr; if (membuf == NULL) { if (mem->card->dbg_level > 2) device_printf(mem->card->dev, "emu_memalloc: can't setup HW memory\n"); free(blk, M_DEVBUF); return (NULL); } blk->buf = membuf; blk->pte_start = start; blk->pte_size = blksz; strncpy(blk->owner, owner, 15); blk->owner[15] = '\0'; ofs = 0; for (idx = start; idx < start + blksz; idx++) { mem->bmap[idx >> 3] |= 1 << (idx & 7); tmp = (uint32_t) (blk->buf_addr + ofs); mem->ptb_pages[idx] = (tmp << 1) | idx; ofs += EMUPAGESIZE; } SLIST_INSERT_HEAD(&mem->blocks, blk, link); return (membuf); } static int emu_memfree(struct emu_mem *mem, void *membuf) { uint32_t idx, tmp; struct emu_memblk *blk, *i; blk = NULL; SLIST_FOREACH(i, &mem->blocks, link) { if (i->buf == membuf) blk = i; } if (blk == NULL) return (EINVAL); SLIST_REMOVE(&mem->blocks, blk, emu_memblk, link); emu_free(mem, membuf, blk->buf_map); tmp = (uint32_t) (mem->silent_page_addr) << 1; for (idx = blk->pte_start; idx < blk->pte_start + blk->pte_size; idx++) { mem->bmap[idx >> 3] &= ~(1 << (idx & 7)); mem->ptb_pages[idx] = tmp | idx; } free(blk, M_DEVBUF); return (0); } static int emu_memstart(struct emu_mem *mem, void *membuf) { struct emu_memblk *blk, *i; blk = NULL; SLIST_FOREACH(i, &mem->blocks, link) { if (i->buf == membuf) blk = i; } if (blk == NULL) return (-1); return (blk->pte_start); } static uint32_t emu_rate_to_pitch(uint32_t rate) { static uint32_t logMagTable[128] = { 0x00000, 0x02dfc, 0x05b9e, 0x088e6, 0x0b5d6, 0x0e26f, 0x10eb3, 0x13aa2, 0x1663f, 0x1918a, 0x1bc84, 0x1e72e, 0x2118b, 0x23b9a, 0x2655d, 0x28ed5, 0x2b803, 0x2e0e8, 0x30985, 0x331db, 0x359eb, 0x381b6, 0x3a93d, 0x3d081, 0x3f782, 0x41e42, 0x444c1, 0x46b01, 0x49101, 0x4b6c4, 0x4dc49, 0x50191, 0x5269e, 0x54b6f, 0x57006, 0x59463, 0x5b888, 0x5dc74, 0x60029, 0x623a7, 0x646ee, 0x66a00, 0x68cdd, 0x6af86, 0x6d1fa, 0x6f43c, 0x7164b, 0x73829, 0x759d4, 0x77b4f, 0x79c9a, 0x7bdb5, 0x7dea1, 0x7ff5e, 0x81fed, 0x8404e, 0x86082, 
0x88089, 0x8a064, 0x8c014, 0x8df98, 0x8fef1, 0x91e20, 0x93d26, 0x95c01, 0x97ab4, 0x9993e, 0x9b79f, 0x9d5d9, 0x9f3ec, 0xa11d8, 0xa2f9d, 0xa4d3c, 0xa6ab5, 0xa8808, 0xaa537, 0xac241, 0xadf26, 0xafbe7, 0xb1885, 0xb3500, 0xb5157, 0xb6d8c, 0xb899f, 0xba58f, 0xbc15e, 0xbdd0c, 0xbf899, 0xc1404, 0xc2f50, 0xc4a7b, 0xc6587, 0xc8073, 0xc9b3f, 0xcb5ed, 0xcd07c, 0xceaec, 0xd053f, 0xd1f73, 0xd398a, 0xd5384, 0xd6d60, 0xd8720, 0xda0c3, 0xdba4a, 0xdd3b4, 0xded03, 0xe0636, 0xe1f4e, 0xe384a, 0xe512c, 0xe69f3, 0xe829f, 0xe9b31, 0xeb3a9, 0xecc08, 0xee44c, 0xefc78, 0xf148a, 0xf2c83, 0xf4463, 0xf5c2a, 0xf73da, 0xf8b71, 0xfa2f0, 0xfba57, 0xfd1a7, 0xfe8df }; static char logSlopeTable[128] = { 0x5c, 0x5c, 0x5b, 0x5a, 0x5a, 0x59, 0x58, 0x58, 0x57, 0x56, 0x56, 0x55, 0x55, 0x54, 0x53, 0x53, 0x52, 0x52, 0x51, 0x51, 0x50, 0x50, 0x4f, 0x4f, 0x4e, 0x4d, 0x4d, 0x4d, 0x4c, 0x4c, 0x4b, 0x4b, 0x4a, 0x4a, 0x49, 0x49, 0x48, 0x48, 0x47, 0x47, 0x47, 0x46, 0x46, 0x45, 0x45, 0x45, 0x44, 0x44, 0x43, 0x43, 0x43, 0x42, 0x42, 0x42, 0x41, 0x41, 0x41, 0x40, 0x40, 0x40, 0x3f, 0x3f, 0x3f, 0x3e, 0x3e, 0x3e, 0x3d, 0x3d, 0x3d, 0x3c, 0x3c, 0x3c, 0x3b, 0x3b, 0x3b, 0x3b, 0x3a, 0x3a, 0x3a, 0x39, 0x39, 0x39, 0x39, 0x38, 0x38, 0x38, 0x38, 0x37, 0x37, 0x37, 0x37, 0x36, 0x36, 0x36, 0x36, 0x35, 0x35, 0x35, 0x35, 0x34, 0x34, 0x34, 0x34, 0x34, 0x33, 0x33, 0x33, 0x33, 0x32, 0x32, 0x32, 0x32, 0x32, 0x31, 0x31, 0x31, 0x31, 0x31, 0x30, 0x30, 0x30, 0x30, 0x30, 0x2f, 0x2f, 0x2f, 0x2f, 0x2f }; int i; if (rate == 0) return (0); rate *= 11185; /* Scale 48000 to 0x20002380 */ for (i = 31; i > 0; i--) { if (rate & 0x80000000) { /* Detect leading "1" */ return (((uint32_t) (i - 15) << 20) + logMagTable[0x7f & (rate >> 24)] + (0x7f & (rate >> 17)) * logSlopeTable[0x7f & (rate >> 24)]); } rate <<= 1; } /* NOTREACHED */ return (0); } static uint32_t emu_rate_to_linearpitch(uint32_t rate) { rate = (rate << 8) / 375; return ((rate >> 1) + (rate & 1)); } struct emu_voice * emu_valloc(struct emu_sc_info *sc) { struct emu_voice *v; int i; v = NULL; 
mtx_lock(&sc->lock); for (i = 0; i < NUM_G && sc->voice[i].busy; i++); if (i < NUM_G) { v = &sc->voice[i]; v->busy = 1; } mtx_unlock(&sc->lock); return (v); } void emu_vfree(struct emu_sc_info *sc, struct emu_voice *v) { int i, r; mtx_lock(&sc->lock); for (i = 0; i < NUM_G; i++) { if (v == &sc->voice[i] && sc->voice[i].busy) { v->busy = 0; /* * XXX What we should do with mono channels? * See -pcm.c emupchan_init for other side of * this problem */ if (v->slave != NULL) r = emu_memfree(&sc->mem, v->vbuf); } } mtx_unlock(&sc->lock); } int emu_vinit(struct emu_sc_info *sc, struct emu_voice *m, struct emu_voice *s, uint32_t sz, struct snd_dbuf *b) { void *vbuf; bus_addr_t tmp_addr; vbuf = emu_memalloc(&sc->mem, sz, &tmp_addr, "vinit"); if (vbuf == NULL) { if(sc->dbg_level > 2) device_printf(sc->dev, "emu_memalloc returns NULL in enu_vinit\n"); return (ENOMEM); } if (b != NULL) sndbuf_setup(b, vbuf, sz); m->start = emu_memstart(&sc->mem, vbuf) * EMUPAGESIZE; if (m->start < 0) { if(sc->dbg_level > 2) device_printf(sc->dev, "emu_memstart returns (-1) in enu_vinit\n"); emu_memfree(&sc->mem, vbuf); return (ENOMEM); } m->end = m->start + sz; m->speed = 0; m->b16 = 0; m->stereo = 0; m->running = 0; m->ismaster = 1; m->vol = 0xff; m->buf = tmp_addr; m->vbuf = vbuf; m->slave = s; if (s != NULL) { s->start = m->start; s->end = m->end; s->speed = 0; s->b16 = 0; s->stereo = 0; s->running = 0; s->ismaster = 0; s->vol = m->vol; s->buf = m->buf; s->vbuf = NULL; s->slave = NULL; } return (0); } void emu_vsetup(struct emu_voice *v, int fmt, int spd) { if (fmt) { v->b16 = (fmt & AFMT_16BIT) ? 1 : 0; v->stereo = (AFMT_CHANNEL(fmt) > 1) ? 
1 : 0; if (v->slave != NULL) { v->slave->b16 = v->b16; v->slave->stereo = v->stereo; } } if (spd) { v->speed = spd; if (v->slave != NULL) v->slave->speed = v->speed; } } void emu_vroute(struct emu_sc_info *sc, struct emu_route *rt, struct emu_voice *v) { int i; for (i = 0; i < 8; i++) { v->routing[i] = rt->routing_left[i]; v->amounts[i] = rt->amounts_left[i]; } if ((v->stereo) && (v->ismaster == 0)) for (i = 0; i < 8; i++) { v->routing[i] = rt->routing_right[i]; v->amounts[i] = rt->amounts_right[i]; } if ((v->stereo) && (v->slave != NULL)) emu_vroute(sc, rt, v->slave); } void emu_vwrite(struct emu_sc_info *sc, struct emu_voice *v) { int s; uint32_t start, val, silent_page; s = (v->stereo ? 1 : 0) + (v->b16 ? 1 : 0); v->sa = v->start >> s; v->ea = v->end >> s; if (v->stereo) { emu_wrptr(sc, v->vnum, EMU_CHAN_CPF, EMU_CHAN_CPF_STEREO_MASK); } else { emu_wrptr(sc, v->vnum, EMU_CHAN_CPF, 0); } val = v->stereo ? 28 : 30; val *= v->b16 ? 1 : 2; start = v->sa + val; if (sc->is_emu10k1) { emu_wrptr(sc, v->vnum, EMU_CHAN_FXRT, ((v->routing[3] << 12) | (v->routing[2] << 8) | (v->routing[1] << 4) | (v->routing[0] << 0)) << 16); } else { emu_wrptr(sc, v->vnum, EMU_A_CHAN_FXRT1, (v->routing[3] << 24) | (v->routing[2] << 16) | (v->routing[1] << 8) | (v->routing[0] << 0)); emu_wrptr(sc, v->vnum, EMU_A_CHAN_FXRT2, (v->routing[7] << 24) | (v->routing[6] << 16) | (v->routing[5] << 8) | (v->routing[4] << 0)); emu_wrptr(sc, v->vnum, EMU_A_CHAN_SENDAMOUNTS, (v->amounts[7] << 24) | (v->amounts[6] << 26) | (v->amounts[5] << 8) | (v->amounts[4] << 0)); } emu_wrptr(sc, v->vnum, EMU_CHAN_PTRX, (v->amounts[0] << 8) | (v->amounts[1] << 0)); emu_wrptr(sc, v->vnum, EMU_CHAN_DSL, v->ea | (v->amounts[3] << 24)); emu_wrptr(sc, v->vnum, EMU_CHAN_PSST, v->sa | (v->amounts[2] << 24)); emu_wrptr(sc, v->vnum, EMU_CHAN_CCCA, start | (v->b16 ? 
0 : EMU_CHAN_CCCA_8BITSELECT)); emu_wrptr(sc, v->vnum, EMU_CHAN_Z1, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_Z2, 0); silent_page = ((uint32_t) (sc->mem.silent_page_addr) << 1) | EMU_CHAN_MAP_PTI_MASK; emu_wrptr(sc, v->vnum, EMU_CHAN_MAPA, silent_page); emu_wrptr(sc, v->vnum, EMU_CHAN_MAPB, silent_page); emu_wrptr(sc, v->vnum, EMU_CHAN_CVCF, EMU_CHAN_CVCF_CURRFILTER_MASK); emu_wrptr(sc, v->vnum, EMU_CHAN_VTFT, EMU_CHAN_VTFT_FILTERTARGET_MASK); emu_wrptr(sc, v->vnum, EMU_CHAN_ATKHLDM, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_DCYSUSM, EMU_CHAN_DCYSUSM_DECAYTIME_MASK); emu_wrptr(sc, v->vnum, EMU_CHAN_LFOVAL1, 0x8000); emu_wrptr(sc, v->vnum, EMU_CHAN_LFOVAL2, 0x8000); emu_wrptr(sc, v->vnum, EMU_CHAN_FMMOD, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_TREMFRQ, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_FM2FRQ2, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_ENVVAL, 0x8000); emu_wrptr(sc, v->vnum, EMU_CHAN_ATKHLDV, EMU_CHAN_ATKHLDV_HOLDTIME_MASK | EMU_CHAN_ATKHLDV_ATTACKTIME_MASK); emu_wrptr(sc, v->vnum, EMU_CHAN_ENVVOL, 0x8000); emu_wrptr(sc, v->vnum, EMU_CHAN_PEFE_FILTERAMOUNT, 0x7f); emu_wrptr(sc, v->vnum, EMU_CHAN_PEFE_PITCHAMOUNT, 0); if ((v->stereo) && (v->slave != NULL)) emu_vwrite(sc, v->slave); } static void emu_vstop(struct emu_sc_info *sc, char channel, int enable) { int reg; reg = (channel & 0x20) ? EMU_SOLEH : EMU_SOLEL; channel &= 0x1f; reg |= 1 << 24; reg |= channel << 16; emu_wrptr(sc, 0, reg, enable); } void emu_vtrigger(struct emu_sc_info *sc, struct emu_voice *v, int go) { uint32_t pitch_target, initial_pitch; uint32_t cra, cs, ccis; uint32_t sample, i; if (go) { cra = 64; cs = v->stereo ? 4 : 2; ccis = v->stereo ? 28 : 30; ccis *= v->b16 ? 1 : 2; sample = v->b16 ? 
0x00000000 : 0x80808080; for (i = 0; i < cs; i++) emu_wrptr(sc, v->vnum, EMU_CHAN_CD0 + i, sample); emu_wrptr(sc, v->vnum, EMU_CHAN_CCR_CACHEINVALIDSIZE, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_CCR_READADDRESS, cra); emu_wrptr(sc, v->vnum, EMU_CHAN_CCR_CACHEINVALIDSIZE, ccis); emu_wrptr(sc, v->vnum, EMU_CHAN_IFATN, 0xff00); emu_wrptr(sc, v->vnum, EMU_CHAN_VTFT, 0xffffffff); emu_wrptr(sc, v->vnum, EMU_CHAN_CVCF, 0xffffffff); emu_wrptr(sc, v->vnum, EMU_CHAN_DCYSUSV, 0x00007f7f); emu_vstop(sc, v->vnum, 0); pitch_target = emu_rate_to_linearpitch(v->speed); initial_pitch = emu_rate_to_pitch(v->speed) >> 8; emu_wrptr(sc, v->vnum, EMU_CHAN_PTRX_PITCHTARGET, pitch_target); emu_wrptr(sc, v->vnum, EMU_CHAN_CPF_PITCH, pitch_target); emu_wrptr(sc, v->vnum, EMU_CHAN_IP, initial_pitch); } else { emu_wrptr(sc, v->vnum, EMU_CHAN_PTRX_PITCHTARGET, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_CPF_PITCH, 0); emu_wrptr(sc, v->vnum, EMU_CHAN_IFATN, 0xffff); emu_wrptr(sc, v->vnum, EMU_CHAN_VTFT, 0x0000ffff); emu_wrptr(sc, v->vnum, EMU_CHAN_CVCF, 0x0000ffff); emu_wrptr(sc, v->vnum, EMU_CHAN_IP, 0); emu_vstop(sc, v->vnum, 1); } if ((v->stereo) && (v->slave != NULL)) emu_vtrigger(sc, v->slave, go); } int emu_vpos(struct emu_sc_info *sc, struct emu_voice *v) { int s, ptr; s = (v->b16 ? 1 : 0) + (v->stereo ? 
1 : 0); ptr = (emu_rdptr(sc, v->vnum, EMU_CHAN_CCCA_CURRADDR) - (v->start >> s)) << s; return (ptr & ~0x0000001f); } /* fx */ static void emu_wrefx(struct emu_sc_info *sc, unsigned int pc, unsigned int data) { emu_wrptr(sc, 0, sc->code_base + pc, data); } static void emu_addefxop(struct emu_sc_info *sc, unsigned int op, unsigned int z, unsigned int w, unsigned int x, unsigned int y, uint32_t * pc) { if ((*pc) + 1 > sc->code_size) { device_printf(sc->dev, "DSP CODE OVERRUN: attept to write past code_size (pc=%d)\n", (*pc)); return; } emu_wrefx(sc, (*pc) * 2, (x << sc->high_operand_shift) | y); emu_wrefx(sc, (*pc) * 2 + 1, (op << sc->opcode_shift) | (z << sc->high_operand_shift) | w); (*pc)++; } static int sysctl_emu_mixer_control(SYSCTL_HANDLER_ARGS) { struct emu_sc_info *sc; int mixer_id; int new_vol; int err; sc = arg1; mixer_id = arg2; new_vol = emumix_get_volume(sc, mixer_id); err = sysctl_handle_int(oidp, &new_vol, 0, req); if (err || req->newptr == NULL) return (err); if (new_vol < 0 || new_vol > 100) return (EINVAL); emumix_set_volume(sc, mixer_id, new_vol); return (0); } static int emu_addefxmixer(struct emu_sc_info *sc, const char *mix_name, const int mix_id, uint32_t defvolume) { int volgpr; char sysctl_name[32]; volgpr = emu_rm_gpr_alloc(sc->rm, 1); emumix_set_fxvol(sc, volgpr, defvolume); /* * Mixer controls with NULL mix_name are handled * by AC97 emulation code or PCM mixer. */ if (mix_name != NULL) { /* * Temporary sysctls should start with underscore, * see freebsd-current mailing list, emu10kx driver * discussion around 2006-05-24. */ snprintf(sysctl_name, 32, "_%s", mix_name); SYSCTL_ADD_PROC(sc->ctx, SYSCTL_CHILDREN(sc->root), OID_AUTO, sysctl_name, CTLTYPE_INT | CTLFLAG_RW, sc, mix_id, sysctl_emu_mixer_control, "I", ""); } return (volgpr); } static int sysctl_emu_digitalswitch_control(SYSCTL_HANDLER_ARGS) { struct emu_sc_info *sc; int new_val; int err; sc = arg1; new_val = (sc->mode == MODE_DIGITAL) ? 
1 : 0; err = sysctl_handle_int(oidp, &new_val, 0, req); if (err || req->newptr == NULL) return (err); if (new_val < 0 || new_val > 1) return (EINVAL); switch (new_val) { case 0: emumix_set_mode(sc, MODE_ANALOG); break; case 1: emumix_set_mode(sc, MODE_DIGITAL); break; } return (0); } static void emu_digitalswitch(struct emu_sc_info *sc) { /* XXX temporary? */ SYSCTL_ADD_PROC(sc->ctx, SYSCTL_CHILDREN(sc->root), OID_AUTO, "_digital", CTLTYPE_INT | CTLFLAG_RW, sc, 0, sysctl_emu_digitalswitch_control, "I", "Enable digital output"); return; } /* * Allocate cache GPRs that will hold mixed output channels * and clear it on every DSP run. */ #define EFX_CACHE(CACHE_IDX) do { \ sc->cache_gpr[CACHE_IDX] = emu_rm_gpr_alloc(sc->rm, 1); \ emu_addefxop(sc, ACC3, \ GPR(sc->cache_gpr[CACHE_IDX]), \ DSP_CONST(0), \ DSP_CONST(0), \ DSP_CONST(0), \ &pc); \ } while (0) /* Allocate GPR for volume control and route sound: OUT = OUT + IN * VOL */ #define EFX_ROUTE(TITLE, INP_NR, IN_GPR_IDX, OUT_CACHE_IDX, DEF) do { \ sc->mixer_gpr[IN_GPR_IDX] = emu_addefxmixer(sc, TITLE, IN_GPR_IDX, DEF); \ sc->mixer_volcache[IN_GPR_IDX] = DEF; \ emu_addefxop(sc, MACS, \ GPR(sc->cache_gpr[OUT_CACHE_IDX]), \ GPR(sc->cache_gpr[OUT_CACHE_IDX]), \ INP_NR, \ GPR(sc->mixer_gpr[IN_GPR_IDX]), \ &pc); \ } while (0) /* allocate GPR, OUT = IN * VOL */ #define EFX_OUTPUT(TITLE, OUT_CACHE_IDX, OUT_GPR_IDX, OUTP_NR, DEF) do { \ sc->mixer_gpr[OUT_GPR_IDX] = emu_addefxmixer(sc, TITLE, OUT_GPR_IDX, DEF); \ sc->mixer_volcache[OUT_GPR_IDX] = DEF; \ emu_addefxop(sc, MACS, \ OUTP(OUTP_NR), \ DSP_CONST(0), \ GPR(sc->cache_gpr[OUT_CACHE_IDX]), \ GPR(sc->mixer_gpr[OUT_GPR_IDX]), \ &pc); \ } while (0) /* like EFX_OUTPUT, but don't allocate mixer gpr */ #define EFX_OUTPUTD(OUT_CACHE_IDX, OUT_GPR_IDX, OUTP_NR) do { \ emu_addefxop(sc, MACS, \ OUTP(OUTP_NR), \ DSP_CONST(0), \ GPR(sc->cache_gpr[OUT_CACHE_IDX]), \ GPR(sc->mixer_gpr[OUT_GPR_IDX]), \ &pc); \ } while (0) /* skip next OPCOUNT instructions if FLAG != 0 */ #define 
EFX_SKIP(OPCOUNT, FLAG_GPR) do { \ emu_addefxop(sc, MACS, \ DSP_CONST(0), \ GPR(sc->mute_gpr[FLAG_GPR]), \ DSP_CONST(0), \ DSP_CONST(0), \ &pc); \ emu_addefxop(sc, SKIP, \ DSP_CCR, \ DSP_CCR, \ COND_NEQ_ZERO, \ OPCOUNT, \ &pc); \ } while (0) #define EFX_COPY(TO, FROM) do { \ emu_addefxop(sc, ACC3, \ TO, \ DSP_CONST(0), \ DSP_CONST(0), \ FROM, \ &pc); \ } while (0) static void emu_initefx(struct emu_sc_info *sc) { unsigned int i; uint32_t pc; /* stop DSP */ if (sc->is_emu10k1) { emu_wrptr(sc, 0, EMU_DBG, EMU_DBG_SINGLE_STEP); } else { emu_wrptr(sc, 0, EMU_A_DBG, EMU_A_DBG_SINGLE_STEP); } /* code size is in instructions */ pc = 0; for (i = 0; i < sc->code_size; i++) { if (sc->is_emu10k1) { emu_addefxop(sc, ACC3, DSP_CONST(0x0), DSP_CONST(0x0), DSP_CONST(0x0), DSP_CONST(0x0), &pc); } else { emu_addefxop(sc, SKIP, DSP_CONST(0x0), DSP_CONST(0x0), DSP_CONST(0xf), DSP_CONST(0x0), &pc); } } /* allocate GPRs for mute switches (EFX_SKIP). Mute by default */ for (i = 0; i < NUM_MUTE; i++) { sc->mute_gpr[i] = emu_rm_gpr_alloc(sc->rm, 1); emumix_set_gpr(sc, sc->mute_gpr[i], 1); } emu_digitalswitch(sc); pc = 0; /* * DSP code below is not good, because: * 1. It can be written smaller, if it can use DSP accumulator register * instead of cache_gpr[]. * 2. It can be more careful when volume is 100%, because in DSP * x*0x7fffffff may not be equal to x ! 
*/ /* clean outputs */ for (i = 0; i < 16 ; i++) { emu_addefxop(sc, ACC3, OUTP(i), DSP_CONST(0), DSP_CONST(0), DSP_CONST(0), &pc); } if (sc->is_emu10k1) { EFX_CACHE(C_FRONT_L); EFX_CACHE(C_FRONT_R); EFX_CACHE(C_REC_L); EFX_CACHE(C_REC_R); /* fx0 to front/record, 100%/muted by default */ EFX_ROUTE("pcm_front_l", FX(0), M_FX0_FRONT_L, C_FRONT_L, 100); EFX_ROUTE("pcm_front_r", FX(1), M_FX1_FRONT_R, C_FRONT_R, 100); EFX_ROUTE(NULL, FX(0), M_FX0_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, FX(1), M_FX1_REC_R, C_REC_R, 0); /* in0, from AC97 codec output */ EFX_ROUTE("ac97_front_l", INP(IN_AC97_L), M_IN0_FRONT_L, C_FRONT_L, 0); EFX_ROUTE("ac97_front_r", INP(IN_AC97_R), M_IN0_FRONT_R, C_FRONT_R, 0); EFX_ROUTE("ac97_rec_l", INP(IN_AC97_L), M_IN0_REC_L, C_REC_L, 0); EFX_ROUTE("ac97_rec_r", INP(IN_AC97_R), M_IN0_REC_R, C_REC_R, 0); /* in1, from CD S/PDIF */ /* XXX EFX_SKIP 4 assumes that each EFX_ROUTE is one DSP op */ EFX_SKIP(4, CDSPDIFMUTE); EFX_ROUTE(NULL, INP(IN_SPDIF_CD_L), M_IN1_FRONT_L, C_FRONT_L, 0); EFX_ROUTE(NULL, INP(IN_SPDIF_CD_R), M_IN1_FRONT_R, C_FRONT_R, 0); EFX_ROUTE(NULL, INP(IN_SPDIF_CD_L), M_IN1_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, INP(IN_SPDIF_CD_R), M_IN1_REC_R, C_REC_R, 0); if (sc->dbg_level > 0) { /* in2, ZoomVide (???) 
*/ EFX_ROUTE("zoom_front_l", INP(IN_ZOOM_L), M_IN2_FRONT_L, C_FRONT_L, 0); EFX_ROUTE("zoom_front_r", INP(IN_ZOOM_R), M_IN2_FRONT_R, C_FRONT_R, 0); EFX_ROUTE("zoom_rec_l", INP(IN_ZOOM_L), M_IN2_REC_L, C_REC_L, 0); EFX_ROUTE("zoom_rec_r", INP(IN_ZOOM_R), M_IN2_REC_R, C_REC_R, 0); } /* in3, TOSLink */ EFX_ROUTE(NULL, INP(IN_TOSLINK_L), M_IN3_FRONT_L, C_FRONT_L, 0); EFX_ROUTE(NULL, INP(IN_TOSLINK_R), M_IN3_FRONT_R, C_FRONT_R, 0); EFX_ROUTE(NULL, INP(IN_TOSLINK_L), M_IN3_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, INP(IN_TOSLINK_R), M_IN3_REC_R, C_REC_R, 0); /* in4, LineIn */ EFX_ROUTE(NULL, INP(IN_LINE1_L), M_IN4_FRONT_L, C_FRONT_L, 0); EFX_ROUTE(NULL, INP(IN_LINE1_R), M_IN4_FRONT_R, C_FRONT_R, 0); EFX_ROUTE(NULL, INP(IN_LINE1_L), M_IN4_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, INP(IN_LINE1_R), M_IN4_REC_R, C_REC_R, 0); /* in5, on-card S/PDIF */ EFX_ROUTE(NULL, INP(IN_COAX_SPDIF_L), M_IN5_FRONT_L, C_FRONT_L, 0); EFX_ROUTE(NULL, INP(IN_COAX_SPDIF_R), M_IN5_FRONT_R, C_FRONT_R, 0); EFX_ROUTE(NULL, INP(IN_COAX_SPDIF_L), M_IN5_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, INP(IN_COAX_SPDIF_R), M_IN5_REC_R, C_REC_R, 0); /* in6, Line2 on Live!Drive */ EFX_ROUTE(NULL, INP(IN_LINE2_L), M_IN6_FRONT_L, C_FRONT_L, 0); EFX_ROUTE(NULL, INP(IN_LINE2_R), M_IN6_FRONT_R, C_FRONT_R, 0); EFX_ROUTE(NULL, INP(IN_LINE2_L), M_IN6_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, INP(IN_LINE2_R), M_IN6_REC_R, C_REC_R, 0); if (sc->dbg_level > 0) { /* in7, unknown */ EFX_ROUTE("in7_front_l", INP(0xE), M_IN7_FRONT_L, C_FRONT_L, 0); EFX_ROUTE("in7_front_r", INP(0xF), M_IN7_FRONT_R, C_FRONT_R, 0); EFX_ROUTE("in7_rec_l", INP(0xE), M_IN7_REC_L, C_REC_L, 0); EFX_ROUTE("in7_rec_r", INP(0xF), M_IN7_REC_R, C_REC_R, 0); } /* analog and digital */ EFX_OUTPUT("master_front_l", C_FRONT_L, M_MASTER_FRONT_L, OUT_AC97_L, 100); EFX_OUTPUT("master_front_r", C_FRONT_R, M_MASTER_FRONT_R, OUT_AC97_R, 100); /* S/PDIF */ EFX_OUTPUTD(C_FRONT_L, M_MASTER_FRONT_L, OUT_TOSLINK_L); EFX_OUTPUTD(C_FRONT_R, M_MASTER_FRONT_R, OUT_TOSLINK_R); /* Headphones */ 
EFX_OUTPUTD(C_FRONT_L, M_MASTER_FRONT_L, OUT_HEADPHONE_L); EFX_OUTPUTD(C_FRONT_R, M_MASTER_FRONT_R, OUT_HEADPHONE_R); /* rec output to "ADC" */ EFX_OUTPUT("master_rec_l", C_REC_L, M_MASTER_REC_L, OUT_ADC_REC_L, 100); EFX_OUTPUT("master_rec_r", C_REC_R, M_MASTER_REC_R, OUT_ADC_REC_R, 100); if (!(sc->mch_disabled)) { /* * Additional channel volume is controlled by mixer in * emu_dspmixer_set() in -pcm.c */ /* fx2/3 (pcm1) to rear */ EFX_CACHE(C_REAR_L); EFX_CACHE(C_REAR_R); EFX_ROUTE(NULL, FX(2), M_FX2_REAR_L, C_REAR_L, 100); EFX_ROUTE(NULL, FX(3), M_FX3_REAR_R, C_REAR_R, 100); EFX_OUTPUT(NULL, C_REAR_L, M_MASTER_REAR_L, OUT_REAR_L, 100); EFX_OUTPUT(NULL, C_REAR_R, M_MASTER_REAR_R, OUT_REAR_R, 100); if (sc->has_51) { /* fx4 (pcm2) to center */ EFX_CACHE(C_CENTER); EFX_ROUTE(NULL, FX(4), M_FX4_CENTER, C_CENTER, 100); EFX_OUTPUT(NULL, C_CENTER, M_MASTER_CENTER, OUT_D_CENTER, 100); /* XXX in digital mode (default) this should be muted because this output is shared with digital out */ EFX_SKIP(1, ANALOGMUTE); EFX_OUTPUTD(C_CENTER, M_MASTER_CENTER, OUT_A_CENTER); /* fx5 (pcm3) to sub */ EFX_CACHE(C_SUB); EFX_ROUTE(NULL, FX(5), M_FX5_SUBWOOFER, C_SUB, 100); EFX_OUTPUT(NULL, C_SUB, M_MASTER_SUBWOOFER, OUT_D_SUB, 100); /* XXX in digital mode (default) this should be muted because this output is shared with digital out */ EFX_SKIP(1, ANALOGMUTE); EFX_OUTPUTD(C_SUB, M_MASTER_SUBWOOFER, OUT_A_SUB); } } else { /* SND_EMU10KX_MULTICHANNEL_DISABLED */ EFX_OUTPUT(NULL, C_FRONT_L, M_MASTER_REAR_L, OUT_REAR_L, 57); /* 75%*75% */ EFX_OUTPUT(NULL, C_FRONT_R, M_MASTER_REAR_R, OUT_REAR_R, 57); /* 75%*75% */ #if 0 /* XXX 5.1 does not work */ if (sc->has_51) { /* (fx0+fx1)/2 to center */ EFX_CACHE(C_CENTER); emu_addefxop(sc, MACS, GPR(sc->cache_gpr[C_CENTER]), GPR(sc->cache_gpr[C_CENTER]), DSP_CONST(0xd), /* = 1/2 */ GPR(sc->cache_gpr[C_FRONT_L]), &pc); emu_addefxop(sc, MACS, GPR(sc->cache_gpr[C_CENTER]), GPR(sc->cache_gpr[C_CENTER]), DSP_CONST(0xd), /* = 1/2 */ 
GPR(sc->cache_gpr[C_FRONT_R]), &pc); EFX_OUTPUT(NULL, C_CENTER, M_MASTER_CENTER, OUT_D_CENTER, 100); /* XXX in digital mode (default) this should be muted because this output is shared with digital out */ EFX_SKIP(1, ANALOGMUTE); EFX_OUTPUTD(C_CENTER, M_MASTER_CENTER, OUT_A_CENTER); /* (fx0+fx1)/2 to sub */ EFX_CACHE(C_SUB); emu_addefxop(sc, MACS, GPR(sc->cache_gpr[C_CENTER]), GPR(sc->cache_gpr[C_CENTER]), DSP_CONST(0xd), /* = 1/2 */ GPR(sc->cache_gpr[C_FRONT_L]), &pc); emu_addefxop(sc, MACS, GPR(sc->cache_gpr[C_CENTER]), GPR(sc->cache_gpr[C_CENTER]), DSP_CONST(0xd), /* = 1/2 */ GPR(sc->cache_gpr[C_FRONT_R]), &pc); /* XXX add lowpass filter here */ EFX_OUTPUT(NULL, C_SUB, M_MASTER_SUBWOOFER, OUT_D_SUB, 100); /* XXX in digital mode (default) this should be muted because this output is shared with digital out */ EFX_SKIP(1, ANALOGMUTE); EFX_OUTPUTD(C_SUB, M_MASTER_SUBWOOFER, OUT_A_SUB); } #endif } /* !mch_disabled */ if (sc->mch_rec) { /* * MCH RECORDING , hight 16 slots. On 5.1 cards first 4 slots * are used as outputs and already filled with data */ /* * XXX On Live! cards stream does not begin at zero offset. * It can be HW, driver or sound buffering problem. * Use sync substream (offset 0x3E) to let userland find * correct data. */ /* * Substream map (in byte offsets, each substream is 2 bytes): * 0x00..0x1E - outputs * 0x20..0x3E - FX, inputs and sync stream */ /* First 2 channels (offset 0x20,0x22) are empty */ for(i = (sc->has_51 ? 
2 : 0); i < 2; i++) EFX_COPY(FX2(i), DSP_CONST(0)); /* PCM Playback monitoring, offset 0x24..0x2A */ for(i = 0; i < 4; i++) EFX_COPY(FX2(i+2), FX(i)); /* Copy of some inputs, offset 0x2C..0x3C */ for(i = 0; i < 9; i++) EFX_COPY(FX2(i+8), INP(i)); /* sync data (0xc0de, offset 0x3E) */ sc->dummy_gpr = emu_rm_gpr_alloc(sc->rm, 1); emumix_set_gpr(sc, sc->dummy_gpr, 0xc0de0000); EFX_COPY(FX2(15), GPR(sc->dummy_gpr)); } /* mch_rec */ } else /* emu10k2 and later */ { EFX_CACHE(C_FRONT_L); EFX_CACHE(C_FRONT_R); EFX_CACHE(C_REC_L); EFX_CACHE(C_REC_R); /* fx0 to front/record, 100%/muted by default */ /* * FRONT_[L|R] is controlled by AC97 emulation in * emu_ac97_[read|write]_emulation in -pcm.c */ EFX_ROUTE(NULL, FX(0), M_FX0_FRONT_L, C_FRONT_L, 100); EFX_ROUTE(NULL, FX(1), M_FX1_FRONT_R, C_FRONT_R, 100); EFX_ROUTE(NULL, FX(0), M_FX0_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, FX(1), M_FX1_REC_R, C_REC_R, 0); /* in0, from AC97 codec output */ EFX_ROUTE(NULL, INP(A_IN_AC97_L), M_IN0_FRONT_L, C_FRONT_L, 100); EFX_ROUTE(NULL, INP(A_IN_AC97_R), M_IN0_FRONT_R, C_FRONT_R, 100); EFX_ROUTE(NULL, INP(A_IN_AC97_L), M_IN0_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, INP(A_IN_AC97_R), M_IN0_REC_R, C_REC_R, 0); /* in1, from CD S/PDIF */ EFX_ROUTE(NULL, INP(A_IN_SPDIF_CD_L), M_IN1_FRONT_L, C_FRONT_L, 0); EFX_ROUTE(NULL, INP(A_IN_SPDIF_CD_R), M_IN1_FRONT_R, C_FRONT_R, 0); EFX_ROUTE(NULL, INP(A_IN_SPDIF_CD_L), M_IN1_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, INP(A_IN_SPDIF_CD_R), M_IN1_REC_R, C_REC_R, 0); /* in2, optical & coax S/PDIF on AudigyDrive*/ /* XXX Should be muted when GPRSCS valid stream == 0 */ EFX_ROUTE(NULL, INP(A_IN_O_SPDIF_L), M_IN2_FRONT_L, C_FRONT_L, 0); EFX_ROUTE(NULL, INP(A_IN_O_SPDIF_R), M_IN2_FRONT_R, C_FRONT_R, 0); EFX_ROUTE(NULL, INP(A_IN_O_SPDIF_L), M_IN2_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, INP(A_IN_O_SPDIF_R), M_IN2_REC_R, C_REC_R, 0); if (sc->dbg_level > 0) { /* in3, unknown */ EFX_ROUTE("in3_front_l", INP(0x6), M_IN3_FRONT_L, C_FRONT_L, 0); EFX_ROUTE("in3_front_r", INP(0x7), 
M_IN3_FRONT_R, C_FRONT_R, 0); EFX_ROUTE("in3_rec_l", INP(0x6), M_IN3_REC_L, C_REC_L, 0); EFX_ROUTE("in3_rec_r", INP(0x7), M_IN3_REC_R, C_REC_R, 0); } /* in4, LineIn 2 on AudigyDrive */ EFX_ROUTE(NULL, INP(A_IN_LINE2_L), M_IN4_FRONT_L, C_FRONT_L, 0); EFX_ROUTE(NULL, INP(A_IN_LINE2_R), M_IN4_FRONT_R, C_FRONT_R, 0); EFX_ROUTE(NULL, INP(A_IN_LINE2_L), M_IN4_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, INP(A_IN_LINE2_R), M_IN4_REC_R, C_REC_R, 0); /* in5, on-card S/PDIF */ EFX_ROUTE(NULL, INP(A_IN_R_SPDIF_L), M_IN5_FRONT_L, C_FRONT_L, 0); EFX_ROUTE(NULL, INP(A_IN_R_SPDIF_R), M_IN5_FRONT_R, C_FRONT_R, 0); EFX_ROUTE(NULL, INP(A_IN_R_SPDIF_L), M_IN5_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, INP(A_IN_R_SPDIF_R), M_IN5_REC_R, C_REC_R, 0); /* in6, AUX2 on AudigyDrive */ EFX_ROUTE(NULL, INP(A_IN_AUX2_L), M_IN6_FRONT_L, C_FRONT_L, 0); EFX_ROUTE(NULL, INP(A_IN_AUX2_R), M_IN6_FRONT_R, C_FRONT_R, 0); EFX_ROUTE(NULL, INP(A_IN_AUX2_L), M_IN6_REC_L, C_REC_L, 0); EFX_ROUTE(NULL, INP(A_IN_AUX2_R), M_IN6_REC_R, C_REC_R, 0); if (sc->dbg_level > 0) { /* in7, unknown */ EFX_ROUTE("in7_front_l", INP(0xE), M_IN7_FRONT_L, C_FRONT_L, 0); EFX_ROUTE("in7_front_r", INP(0xF), M_IN7_FRONT_R, C_FRONT_R, 0); EFX_ROUTE("in7_rec_l", INP(0xE), M_IN7_REC_L, C_REC_L, 0); EFX_ROUTE("in7_rec_r", INP(0xF), M_IN7_REC_R, C_REC_R, 0); } /* front output to headphones and alog and digital *front */ /* volume controlled by AC97 emulation */ EFX_OUTPUT(NULL, C_FRONT_L, M_MASTER_FRONT_L, A_OUT_A_FRONT_L, 100); EFX_OUTPUT(NULL, C_FRONT_R, M_MASTER_FRONT_R, A_OUT_A_FRONT_R, 100); EFX_OUTPUTD(C_FRONT_L, M_MASTER_FRONT_L, A_OUT_D_FRONT_L); EFX_OUTPUTD(C_FRONT_R, M_MASTER_FRONT_R, A_OUT_D_FRONT_R); EFX_OUTPUTD(C_FRONT_L, M_MASTER_FRONT_L, A_OUT_HPHONE_L); EFX_OUTPUTD(C_FRONT_R, M_MASTER_FRONT_R, A_OUT_HPHONE_R); /* rec output to "ADC" */ /* volume controlled by AC97 emulation */ EFX_OUTPUT(NULL, C_REC_L, M_MASTER_REC_L, A_OUT_ADC_REC_L, 100); EFX_OUTPUT(NULL, C_REC_R, M_MASTER_REC_R, A_OUT_ADC_REC_R, 100); if (!(sc->mch_disabled)) { /* 
* Additional channel volume is controlled by mixer in * emu_dspmixer_set() in -pcm.c */ /* fx2/3 (pcm1) to rear */ EFX_CACHE(C_REAR_L); EFX_CACHE(C_REAR_R); EFX_ROUTE(NULL, FX(2), M_FX2_REAR_L, C_REAR_L, 100); EFX_ROUTE(NULL, FX(3), M_FX3_REAR_R, C_REAR_R, 100); EFX_OUTPUT(NULL, C_REAR_L, M_MASTER_REAR_L, A_OUT_A_REAR_L, 100); EFX_OUTPUT(NULL, C_REAR_R, M_MASTER_REAR_R, A_OUT_A_REAR_R, 100); EFX_OUTPUTD(C_REAR_L, M_MASTER_REAR_L, A_OUT_D_REAR_L); EFX_OUTPUTD(C_REAR_R, M_MASTER_REAR_R, A_OUT_D_REAR_R); /* fx4 (pcm2) to center */ EFX_CACHE(C_CENTER); EFX_ROUTE(NULL, FX(4), M_FX4_CENTER, C_CENTER, 100); EFX_OUTPUT(NULL, C_CENTER, M_MASTER_CENTER, A_OUT_D_CENTER, 100); #if 0 /* * XXX in digital mode (default) this should be muted * because this output is shared with digital out */ EFX_OUTPUTD(C_CENTER, M_MASTER_CENTER, A_OUT_A_CENTER); #endif /* fx5 (pcm3) to sub */ EFX_CACHE(C_SUB); EFX_ROUTE(NULL, FX(5), M_FX5_SUBWOOFER, C_SUB, 100); EFX_OUTPUT(NULL, C_SUB, M_MASTER_SUBWOOFER, A_OUT_D_SUB, 100); #if 0 /* * XXX in digital mode (default) this should be muted * because this output is shared with digital out */ EFX_OUTPUTD(C_SUB, M_MASTER_SUBWOOFER, A_OUT_A_SUB); #endif if (sc->has_71) { /* XXX this will broke headphones on AudigyDrive */ /* fx6/7 (pcm4) to side */ EFX_CACHE(C_SIDE_L); EFX_CACHE(C_SIDE_R); EFX_ROUTE(NULL, FX(6), M_FX6_SIDE_L, C_SIDE_L, 100); EFX_ROUTE(NULL, FX(7), M_FX7_SIDE_R, C_SIDE_R, 100); EFX_OUTPUT(NULL, C_SIDE_L, M_MASTER_SIDE_L, A_OUT_A_SIDE_L, 100); EFX_OUTPUT(NULL, C_SIDE_R, M_MASTER_SIDE_R, A_OUT_A_SIDE_R, 100); EFX_OUTPUTD(C_SIDE_L, M_MASTER_SIDE_L, A_OUT_D_SIDE_L); EFX_OUTPUTD(C_SIDE_R, M_MASTER_SIDE_R, A_OUT_D_SIDE_R); } } else { /* mch_disabled */ EFX_OUTPUTD(C_FRONT_L, M_MASTER_FRONT_L, A_OUT_A_REAR_L); EFX_OUTPUTD(C_FRONT_R, M_MASTER_FRONT_R, A_OUT_A_REAR_R); EFX_OUTPUTD(C_FRONT_L, M_MASTER_FRONT_L, A_OUT_D_REAR_L); EFX_OUTPUTD(C_FRONT_R, M_MASTER_FRONT_R, A_OUT_D_REAR_R); if (sc->has_51) { /* (fx0+fx1)/2 to center */ 
EFX_CACHE(C_CENTER); emu_addefxop(sc, MACS, GPR(sc->cache_gpr[C_CENTER]), GPR(sc->cache_gpr[C_CENTER]), DSP_CONST(0xd), /* = 1/2 */ GPR(sc->cache_gpr[C_FRONT_L]), &pc); emu_addefxop(sc, MACS, GPR(sc->cache_gpr[C_CENTER]), GPR(sc->cache_gpr[C_CENTER]), DSP_CONST(0xd), /* = 1/2 */ GPR(sc->cache_gpr[C_FRONT_R]), &pc); EFX_OUTPUT(NULL, C_CENTER, M_MASTER_CENTER, A_OUT_D_CENTER, 100); /* XXX in digital mode (default) this should be muted because this output is shared with digital out */ EFX_SKIP(1, ANALOGMUTE); EFX_OUTPUTD(C_CENTER, M_MASTER_CENTER, A_OUT_A_CENTER); /* (fx0+fx1)/2 to sub */ EFX_CACHE(C_SUB); emu_addefxop(sc, MACS, GPR(sc->cache_gpr[C_SUB]), GPR(sc->cache_gpr[C_SUB]), DSP_CONST(0xd), /* = 1/2 */ GPR(sc->cache_gpr[C_FRONT_L]), &pc); emu_addefxop(sc, MACS, GPR(sc->cache_gpr[C_SUB]), GPR(sc->cache_gpr[C_SUB]), DSP_CONST(0xd), /* = 1/2 */ GPR(sc->cache_gpr[C_FRONT_R]), &pc); /* XXX add lowpass filter here */ EFX_OUTPUT(NULL, C_SUB, M_MASTER_SUBWOOFER, A_OUT_D_SUB, 100); /* XXX in digital mode (default) this should be muted because this output is shared with digital out */ EFX_SKIP(1, ANALOGMUTE); EFX_OUTPUTD(C_SUB, M_MASTER_SUBWOOFER, A_OUT_A_SUB); } } /* mch_disabled */ if (sc->mch_rec) { /* MCH RECORDING, high 32 slots */ /* * Stream map (in byte offsets): * 0x00..0x3E - outputs * 0x40..0x7E - FX, inputs * each substream is 2 bytes. */ /* * XXX Audigy 2 Value cards (and, possibly, * Audigy 4) write some unknown data in place of * some outputs (offsets 0x20..0x3F) and one * input (offset 0x7E). 
*/ /* PCM Playback monitoring, offsets 0x40..0x5E */ for(i = 0; i < 16; i++) EFX_COPY(FX2(i), FX(i)); /* Copy of all inputs, offsets 0x60..0x7E */ for(i = 0; i < 16; i++) EFX_COPY(FX2(i+16), INP(i)); #if 0 /* XXX Audigy seems to work correct and does not need this */ /* sync data (0xc0de), offset 0x7E */ sc->dummy_gpr = emu_rm_gpr_alloc(sc->rm, 1); emumix_set_gpr(sc, sc->dummy_gpr, 0xc0de0000); EFX_COPY(FX2(31), GPR(sc->dummy_gpr)); #endif } /* mch_rec */ } sc->routing_code_end = pc; /* start DSP */ if (sc->is_emu10k1) { emu_wrptr(sc, 0, EMU_DBG, 0); } else { emu_wrptr(sc, 0, EMU_A_DBG, 0); } } /* /dev/em10kx */ static d_open_t emu10kx_open; static d_close_t emu10kx_close; static d_read_t emu10kx_read; static struct cdevsw emu10kx_cdevsw = { .d_open = emu10kx_open, .d_close = emu10kx_close, .d_read = emu10kx_read, .d_name = "emu10kx", .d_version = D_VERSION, }; static int emu10kx_open(struct cdev *i_dev, int flags __unused, int mode __unused, struct thread *td __unused) { int error; struct emu_sc_info *sc; sc = i_dev->si_drv1; mtx_lock(&sc->emu10kx_lock); if (sc->emu10kx_isopen) { mtx_unlock(&sc->emu10kx_lock); return (EBUSY); } sc->emu10kx_isopen = 1; mtx_unlock(&sc->emu10kx_lock); if (sbuf_new(&sc->emu10kx_sbuf, NULL, 4096, 0) == NULL) { error = ENXIO; goto out; } sc->emu10kx_bufptr = 0; error = (emu10kx_prepare(sc, &sc->emu10kx_sbuf) > 0) ? 
	    0 : ENOMEM;
out:
	/* On any failure, drop the open flag so a later open can retry. */
	if (error) {
		mtx_lock(&sc->emu10kx_lock);
		sc->emu10kx_isopen = 0;
		mtx_unlock(&sc->emu10kx_lock);
	}
	return (error);
}

/*
 * Close /dev/emu10kxN: release the status sbuf and clear the
 * single-open flag.  Returns EBADF if the device was not open.
 */
static int
emu10kx_close(struct cdev *i_dev, int flags __unused, int mode __unused, struct thread *td __unused)
{
	struct emu_sc_info *sc;

	sc = i_dev->si_drv1;

	mtx_lock(&sc->emu10kx_lock);
	if (!(sc->emu10kx_isopen)) {
		mtx_unlock(&sc->emu10kx_lock);
		return (EBADF);
	}
	sbuf_delete(&sc->emu10kx_sbuf);
	sc->emu10kx_isopen = 0;
	mtx_unlock(&sc->emu10kx_lock);

	return (0);
}

/*
 * Read from /dev/emu10kxN: copy out the next chunk of the status text
 * prepared at open time, advancing emu10kx_bufptr past what was read.
 * NOTE(review): emu10kx_bufptr is updated after the lock is dropped;
 * concurrent reads of the same open could race — confirm whether the
 * single-open policy is relied on to make this safe.
 */
static int
emu10kx_read(struct cdev *i_dev, struct uio *buf, int flag __unused)
{
	int l, err;
	struct emu_sc_info *sc;

	sc = i_dev->si_drv1;
	mtx_lock(&sc->emu10kx_lock);
	if (!(sc->emu10kx_isopen)) {
		mtx_unlock(&sc->emu10kx_lock);
		return (EBADF);
	}
	mtx_unlock(&sc->emu10kx_lock);

	/* Remaining bytes in the sbuf, clamped to the caller's request. */
	l = min(buf->uio_resid, sbuf_len(&sc->emu10kx_sbuf) - sc->emu10kx_bufptr);
	err = (l > 0) ? uiomove(sbuf_data(&sc->emu10kx_sbuf) + sc->emu10kx_bufptr, l, buf) : 0;
	sc->emu10kx_bufptr += l;

	return (err);
}

/*
 * Fill the sbuf with a human-readable summary of the card: DSP
 * resource usage, capabilities, attached child devices and the
 * current analog/digital mode.  Returns the resulting sbuf length.
 */
static int
emu10kx_prepare(struct emu_sc_info *sc, struct sbuf *s)
{
	int i;

	sbuf_printf(s, "FreeBSD EMU10Kx Audio Driver\n");
	sbuf_printf(s, "\nHardware resource usage:\n");
	sbuf_printf(s, "DSP General Purpose Registers: %d used, %d total\n", sc->rm->num_used, sc->rm->num_gprs);
	sbuf_printf(s, "DSP Instruction Registers: %d used, %d total\n", sc->routing_code_end, sc->code_size);
	sbuf_printf(s, "Card supports");
	if (sc->has_ac97) {
		sbuf_printf(s, " AC97 codec");
	} else {
		sbuf_printf(s, " NO AC97 codec");
	}
	if (sc->has_51) {
		if (sc->has_71)
			sbuf_printf(s, " and 7.1 output");
		else
			sbuf_printf(s, " and 5.1 output");
	}
	/* Exactly one of the four chip flags is expected to be set. */
	if (sc->is_emu10k1)
		sbuf_printf(s, ", SBLive! DSP code");
	if (sc->is_emu10k2)
		sbuf_printf(s, ", Audigy DSP code");
	if (sc->is_ca0102)
		sbuf_printf(s, ", Audigy DSP code with Audigy2 hacks");
	if (sc->is_ca0108)
		sbuf_printf(s, ", Audigy DSP code with Audigy2Value hacks");
	sbuf_printf(s, "\n");
	if (sc->broken_digital)
		sbuf_printf(s, "Digital mode unsupported\n");
	sbuf_printf(s, "\nInstalled devices:\n");
	for (i = 0; i < RT_COUNT; i++)
		if (sc->pcm[i] != NULL)
			if (device_is_attached(sc->pcm[i])) {
				sbuf_printf(s, "%s on %s\n", device_get_desc(sc->pcm[i]), device_get_nameunit(sc->pcm[i]));
			}
	if (sc->midi[0] != NULL)
		if (device_is_attached(sc->midi[0])) {
			sbuf_printf(s, "EMU10Kx MIDI Interface\n");
			sbuf_printf(s, "\tOn-card connector on %s\n", device_get_nameunit(sc->midi[0]));
		}
	if (sc->midi[1] != NULL)
		if (device_is_attached(sc->midi[1])) {
			sbuf_printf(s, "\tOn-Drive connector on %s\n", device_get_nameunit(sc->midi[1]));
		}
	if (sc->midi[0] != NULL)
		if (device_is_attached(sc->midi[0])) {
			sbuf_printf(s, "\tIR receiver MIDI events %s\n", sc->enable_ir ? "enabled" : "disabled");
		}
	sbuf_printf(s, "Card is in %s mode\n", (sc->mode == MODE_ANALOG) ?
"analog" : "digital"); sbuf_finish(s); return (sbuf_len(s)); } /* INIT & UNINIT */ static int emu10kx_dev_init(struct emu_sc_info *sc) { int unit; mtx_init(&sc->emu10kx_lock, device_get_nameunit(sc->dev), "kxdevlock", 0); unit = device_get_unit(sc->dev); sc->cdev = make_dev(&emu10kx_cdevsw, PCMMINOR(unit), UID_ROOT, GID_WHEEL, 0640, "emu10kx%d", unit); if (sc->cdev != NULL) { sc->cdev->si_drv1 = sc; return (0); } return (ENXIO); } static int emu10kx_dev_uninit(struct emu_sc_info *sc) { mtx_lock(&sc->emu10kx_lock); if (sc->emu10kx_isopen) { mtx_unlock(&sc->emu10kx_lock); return (EBUSY); } if (sc->cdev) destroy_dev(sc->cdev); - sc->cdev = 0; + sc->cdev = NULL; mtx_destroy(&sc->emu10kx_lock); return (0); } /* resource manager */ int emu_rm_init(struct emu_sc_info *sc) { int i; int maxcount; struct emu_rm *rm; rm = malloc(sizeof(struct emu_rm), M_DEVBUF, M_NOWAIT | M_ZERO); if (rm == NULL) { return (ENOMEM); } sc->rm = rm; rm->card = sc; maxcount = sc->num_gprs; rm->num_used = 0; mtx_init(&(rm->gpr_lock), device_get_nameunit(sc->dev), "gpr alloc", MTX_DEF); rm->num_gprs = (maxcount < EMU_MAX_GPR ? 
	    maxcount : EMU_MAX_GPR);
	for (i = 0; i < rm->num_gprs; i++)
		rm->allocmap[i] = 0;
	/* pre-allocate gpr[0] */
	rm->allocmap[0] = 1;
	rm->last_free_gpr = 1;

	return (0);
}

/*
 * Tear down the GPR resource manager.  With dbg_level > 1, warn about
 * every GPR still marked allocated before the manager is freed.
 */
int
emu_rm_uninit(struct emu_sc_info *sc)
{
	int i;

	if (sc->dbg_level > 1) {
		mtx_lock(&(sc->rm->gpr_lock));
		for (i = 1; i < sc->rm->last_free_gpr; i++)
			if (sc->rm->allocmap[i] > 0)
				device_printf(sc->dev, "rm: gpr %d not free before uninit\n", i);
		mtx_unlock(&(sc->rm->gpr_lock));
	}

	mtx_destroy(&(sc->rm->gpr_lock));
	free(sc->rm, M_DEVBUF);
	return (0);
}

/*
 * Allocate 'count' consecutive DSP GPRs.
 *
 * allocmap encoding: entry > 0 is the length of an allocation starting
 * there, entry < 0 marks a continuation slot, 0 is free.  The fast path
 * carves from the never-yet-used tail (last_free_gpr); otherwise the map
 * is scanned for a released run of sufficient length.
 *
 * Returns the first GPR index, or -1 if no run of 'count' is available.
 */
static int
emu_rm_gpr_alloc(struct emu_rm *rm, int count)
{
	int i, j;
	int allocated_gpr;

	allocated_gpr = rm->num_gprs;
	/* try fast way first */
	mtx_lock(&(rm->gpr_lock));
	if (rm->last_free_gpr + count <= rm->num_gprs) {
		allocated_gpr = rm->last_free_gpr;
		rm->last_free_gpr += count;
		rm->allocmap[allocated_gpr] = count;
		for (i = 1; i < count; i++)
			rm->allocmap[allocated_gpr + i] = -(count - i);
	} else {
		/* longer */
		i = 0;
		allocated_gpr = rm->num_gprs;
		/*
		 * NOTE(review): when allocmap[i] <= 0 and the probe of the
		 * next 'count' slots fails, 'i' is not advanced before the
		 * next iteration, which looks like it could spin forever on
		 * a fragmented map — verify against the upstream driver.
		 */
		while (i < rm->last_free_gpr - count) {
			if (rm->allocmap[i] > 0) {
				/* skip over an existing allocation */
				i += rm->allocmap[i];
			} else {
				/* candidate start; require count free slots */
				allocated_gpr = i;
				for (j = 1; j < count; j++) {
					if (rm->allocmap[i + j] != 0)
						allocated_gpr = rm->num_gprs;
				}
				if (allocated_gpr == i)
					break;
			}
		}
		if (allocated_gpr + count < rm->last_free_gpr) {
			rm->allocmap[allocated_gpr] = count;
			for (i = 1; i < count; i++)
				rm->allocmap[allocated_gpr + i] = -(count - i);
		}
	}
	/* num_gprs sentinel means "nothing found" — map to -1 for callers */
	if (allocated_gpr == rm->num_gprs)
		allocated_gpr = (-1);
	if (allocated_gpr >= 0)
		rm->num_used += count;
	mtx_unlock(&(rm->gpr_lock));
	return (allocated_gpr);
}

/* mixer */

/*
 * Switch the card between MODE_ANALOG and MODE_DIGITAL: program
 * EMU_HCFG (and EMU_A_IOCFG on Audigy-class cards) and mute/unmute the
 * analog center/subwoofer around the transition.  Any other 'mode'
 * value is ignored.
 */
void
emumix_set_mode(struct emu_sc_info *sc, int mode)
{
	uint32_t a_iocfg;
	uint32_t hcfg;
	uint32_t tmp;

	switch (mode) {
	case MODE_DIGITAL: /* FALLTHROUGH */
	case MODE_ANALOG:
		break;
	default:
		return;
	}

	hcfg = EMU_HCFG_AUDIOENABLE | EMU_HCFG_AUTOMUTE;
	a_iocfg = 0;

	if (sc->rev >= 6)
		hcfg |= EMU_HCFG_JOYENABLE;

	if (sc->is_emu10k1)
		hcfg |= EMU_HCFG_LOCKTANKCACHE_MASK;
	else
		hcfg |= EMU_HCFG_CODECFMT_I2S | EMU_HCFG_JOYENABLE;

	if (mode ==
	    MODE_DIGITAL) {
		if (sc->broken_digital) {
			device_printf(sc->dev, "Digital mode is reported as broken on this card.\n");
		}
		a_iocfg |= EMU_A_IOCFG_GPOUT1;
		hcfg |= EMU_HCFG_GPOUT0;
	}

	if (mode == MODE_ANALOG)
		emumix_set_spdif_mode(sc, SPDIF_MODE_PCM);

	if (sc->is_emu10k2)
		a_iocfg |= 0x80; /* XXX */

	if ((sc->is_ca0102) || (sc->is_ca0108))
		/*
		 * Setting EMU_A_IOCFG_DISABLE_ANALOG will do opposite things
		 * on different cards:
		 * "don't disable analog outs" on Audigy 2 (ca0102/ca0108)
		 * "disable analog outs" on Audigy (emu10k2)
		 */
		a_iocfg |= EMU_A_IOCFG_DISABLE_ANALOG;

	if (sc->is_ca0108)
		a_iocfg |= 0x20; /* XXX */

	/* Mute analog center & subwoofer before mode change */
	if (mode == MODE_DIGITAL)
		emumix_set_gpr(sc, sc->mute_gpr[ANALOGMUTE], 1);

	emu_wr(sc, EMU_HCFG, hcfg, 4);

	if ((sc->is_emu10k2) || (sc->is_ca0102) || (sc->is_ca0108)) {
		/*
		 * NOTE(review): the value read from EMU_A_IOCFG is
		 * immediately overwritten by a_iocfg, so the register
		 * contents are discarded — possibly the read exists only
		 * for its hardware side effect; confirm.
		 */
		tmp = emu_rd(sc, EMU_A_IOCFG, 2);
		tmp = a_iocfg;
		emu_wr(sc, EMU_A_IOCFG, tmp, 2);
	}

	/* Unmute if we have changed mode to analog. */
	if (mode == MODE_ANALOG)
		emumix_set_gpr(sc, sc->mute_gpr[ANALOGMUTE], 0);

	sc->mode = mode;
}

/*
 * Program all three S/PDIF channel-status registers.  Only
 * SPDIF_MODE_PCM is accepted; AC3 is rejected with a message and any
 * other value is silently ignored.
 */
void
emumix_set_spdif_mode(struct emu_sc_info *sc, int mode)
{
	uint32_t spcs;

	switch (mode) {
	case SPDIF_MODE_PCM:
		break;
	case SPDIF_MODE_AC3:
		device_printf(sc->dev, "AC3 mode does not work and disabled\n");
		return;
	default:
		return;
	}

	spcs = EMU_SPCS_CLKACCY_1000PPM | EMU_SPCS_SAMPLERATE_48 |
	    EMU_SPCS_CHANNELNUM_LEFT | EMU_SPCS_SOURCENUM_UNSPEC |
	    EMU_SPCS_GENERATIONSTATUS | 0x00001200 | 0x00000000 |
	    EMU_SPCS_EMPHASIS_NONE | EMU_SPCS_COPYRIGHT;

	/* only PCM reaches this point, so 'mode' is pinned to it */
	mode = SPDIF_MODE_PCM;
	emu_wrptr(sc, 0, EMU_SPCS0, spcs);
	emu_wrptr(sc, 0, EMU_SPCS1, spcs);
	emu_wrptr(sc, 0, EMU_SPCS2, spcs);
}

/*
 * Piecewise-linear approximation of a logarithmic (percent) volume to
 * the DSP's linear 31-bit scale: one slope/offset pair per 10% band.
 */
#define	L2L_POINTS	10

/* per-band slope (delta per 1%) */
static int l2l_df[L2L_POINTS] = {
	0x572C5CA,	/* 100..90 */
	0x3211625,	/* 90..80 */
	0x1CC1A76,	/* 80..70 */
	0x108428F,	/* 70..60 */
	0x097C70A,	/* 60..50 */
	0x0572C5C,	/* 50..40 */
	0x0321162,	/* 40..30 */
	0x01CC1A7,	/* 30..20 */
	0x0108428,	/* 20..10 */
	0x016493D	/* 10..0 */
};

/* per-band base value at the band's lower bound */
static int l2l_f[L2L_POINTS] = {
	0x4984461A,	/* 90 */
	0x2A3968A7,	/* 80 */
	0x18406003,	/* 70 */
	0x0DEDC66D,	/* 60 */
	0x07FFFFFF,	/* 50 */
	0x04984461,	/* 40 */
	0x02A3968A,	/* 30 */
	0x01840600,	/* 20 */
	0x00DEDC66,	/* 10 */
	0x00000000	/* 0 */
};

/*
 * Convert a 0..100 "log" volume to a linear DSP gain.  Values <= 0 map
 * to silence, >= 100 to full scale; in between, interpolate inside the
 * 10%-wide band.  (Dividing by L2L_POINTS works only because the scale
 * is 100 with 10 bands, i.e. band width == point count.)
 */
static int
log2lin(int log_t)
{
	int lin_t;
	int idx, lin;

	if (log_t <= 0) {
		lin_t = 0x00000000;
		return (lin_t);
	}

	if (log_t >= 100) {
		lin_t = 0x7fffffff;
		return (lin_t);
	}

	idx = (L2L_POINTS - 1) - log_t / (L2L_POINTS);
	lin = log_t % (L2L_POINTS);
	lin_t = l2l_df[idx] * lin + l2l_f[idx];
	return (lin_t);
}

/* Set an FX-volume GPR from a 0..100 volume value. */
void
emumix_set_fxvol(struct emu_sc_info *sc, unsigned gpr, int32_t vol)
{

	vol = log2lin(vol);
	emumix_set_gpr(sc, gpr, vol);
}

/*
 * Write a value into a DSP GPR.  With dbg_level > 1, a write to GPR 0
 * (which is pre-allocated and should never be a target) is logged and
 * dropped; at lower debug levels it goes through unchecked.
 */
void
emumix_set_gpr(struct emu_sc_info *sc, unsigned gpr, int32_t val)
{
	if (sc->dbg_level > 1)
		if (gpr == 0) {
			device_printf(sc->dev, "Zero gpr write access\n");
#ifdef KDB
			kdb_backtrace();
#endif
			return;
		}

	emu_wrptr(sc, 0, GPR(gpr), val);
}

/*
 * Set a mixer control: clamp to 0..100, cache the value and program
 * the corresponding GPR.  Out-of-range mixer indices are ignored.
 */
void
emumix_set_volume(struct emu_sc_info *sc, int mixer_idx, int volume)
{

	RANGE(volume, 0, 100);
	if (mixer_idx < NUM_MIXERS) {
		sc->mixer_volcache[mixer_idx] = volume;
		emumix_set_fxvol(sc, sc->mixer_gpr[mixer_idx], volume);
	}
}

/* Return the cached volume for a mixer control, or -1 if out of range. */
int
emumix_get_volume(struct emu_sc_info *sc, int mixer_idx)
{

	if ((mixer_idx < NUM_MIXERS) && (mixer_idx >= 0))
		return (sc->mixer_volcache[mixer_idx]);
	return (-1);
}

/* Init CardBus part */
static int
emu_cardbus_init(struct emu_sc_info *sc)
{

	/*
	 * XXX May not need this if we have EMU_IPR3 handler.
	 * Is it a real init calls, or EMU_IPR3 interrupt acknowledgments?
	 * Looks much like "(data << 16) | register".
*/ emu_wr_cbptr(sc, (0x00d0 << 16) | 0x0000); emu_wr_cbptr(sc, (0x00d0 << 16) | 0x0001); emu_wr_cbptr(sc, (0x00d0 << 16) | 0x005f); emu_wr_cbptr(sc, (0x00d0 << 16) | 0x007f); emu_wr_cbptr(sc, (0x0090 << 16) | 0x007f); return (0); } /* Probe and attach the card */ static int emu_init(struct emu_sc_info *sc) { uint32_t ch, tmp; uint32_t spdif_sr; uint32_t ac97slot; int def_mode; int i; /* disable audio and lock cache */ emu_wr(sc, EMU_HCFG, EMU_HCFG_LOCKSOUNDCACHE | EMU_HCFG_LOCKTANKCACHE_MASK | EMU_HCFG_MUTEBUTTONENABLE, 4); /* reset recording buffers */ emu_wrptr(sc, 0, EMU_MICBS, EMU_RECBS_BUFSIZE_NONE); emu_wrptr(sc, 0, EMU_MICBA, 0); emu_wrptr(sc, 0, EMU_FXBS, EMU_RECBS_BUFSIZE_NONE); emu_wrptr(sc, 0, EMU_FXBA, 0); emu_wrptr(sc, 0, EMU_ADCBS, EMU_RECBS_BUFSIZE_NONE); emu_wrptr(sc, 0, EMU_ADCBA, 0); /* disable channel interrupt */ emu_wr(sc, EMU_INTE, EMU_INTE_INTERTIMERENB | EMU_INTE_SAMPLERATER | EMU_INTE_PCIERRENABLE, 4); emu_wrptr(sc, 0, EMU_CLIEL, 0); emu_wrptr(sc, 0, EMU_CLIEH, 0); emu_wrptr(sc, 0, EMU_SOLEL, 0); emu_wrptr(sc, 0, EMU_SOLEH, 0); /* disable P16V and S/PDIF interrupts */ if ((sc->is_ca0102) || (sc->is_ca0108)) emu_wr(sc, EMU_INTE2, 0, 4); if (sc->is_ca0102) emu_wr(sc, EMU_INTE3, 0, 4); /* init phys inputs and outputs */ ac97slot = 0; if (sc->has_51) ac97slot = EMU_AC97SLOT_CENTER | EMU_AC97SLOT_LFE; if (sc->has_71) ac97slot = EMU_AC97SLOT_CENTER | EMU_AC97SLOT_LFE | EMU_AC97SLOT_REAR_LEFT | EMU_AC97SLOT_REAR_RIGHT; if (sc->is_emu10k2) ac97slot |= 0x40; emu_wrptr(sc, 0, EMU_AC97SLOT, ac97slot); if (sc->is_emu10k2) /* XXX for later cards? */ emu_wrptr(sc, 0, EMU_SPBYPASS, 0xf00); /* What will happen if * we write 1 here? 
*/ if (bus_dma_tag_create( /* parent */ bus_get_dma_tag(sc->dev), /* alignment */ 2, /* boundary */ 0, /* lowaddr */ (1U << 31) - 1, /* can only access 0-2gb */ /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ EMU_MAX_BUFSZ, /* nsegments */ 1, /* maxsegz */ 0x3ffff, /* flags */ 0, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &Giant, &(sc->mem.dmat)) != 0) { device_printf(sc->dev, "unable to create dma tag\n"); bus_dma_tag_destroy(sc->mem.dmat); return (ENOMEM); } sc->mem.card = sc; SLIST_INIT(&sc->mem.blocks); sc->mem.ptb_pages = emu_malloc(&sc->mem, EMU_MAXPAGES * sizeof(uint32_t), &sc->mem.ptb_pages_addr, &sc->mem.ptb_map); if (sc->mem.ptb_pages == NULL) return (ENOMEM); sc->mem.silent_page = emu_malloc(&sc->mem, EMUPAGESIZE, &sc->mem.silent_page_addr, &sc->mem.silent_map); if (sc->mem.silent_page == NULL) { emu_free(&sc->mem, sc->mem.ptb_pages, sc->mem.ptb_map); return (ENOMEM); } /* Clear page with silence & setup all pointers to this page */ bzero(sc->mem.silent_page, EMUPAGESIZE); tmp = (uint32_t) (sc->mem.silent_page_addr) << 1; for (i = 0; i < EMU_MAXPAGES; i++) sc->mem.ptb_pages[i] = tmp | i; for (ch = 0; ch < NUM_G; ch++) { emu_wrptr(sc, ch, EMU_CHAN_MAPA, tmp | EMU_CHAN_MAP_PTI_MASK); emu_wrptr(sc, ch, EMU_CHAN_MAPB, tmp | EMU_CHAN_MAP_PTI_MASK); } emu_wrptr(sc, 0, EMU_PTB, (sc->mem.ptb_pages_addr)); emu_wrptr(sc, 0, EMU_TCB, 0); /* taken from original driver */ emu_wrptr(sc, 0, EMU_TCBS, 0); /* taken from original driver */ /* init envelope engine */ for (ch = 0; ch < NUM_G; ch++) { emu_wrptr(sc, ch, EMU_CHAN_DCYSUSV, 0); emu_wrptr(sc, ch, EMU_CHAN_IP, 0); emu_wrptr(sc, ch, EMU_CHAN_VTFT, 0xffff); emu_wrptr(sc, ch, EMU_CHAN_CVCF, 0xffff); emu_wrptr(sc, ch, EMU_CHAN_PTRX, 0); emu_wrptr(sc, ch, EMU_CHAN_CPF, 0); emu_wrptr(sc, ch, EMU_CHAN_CCR, 0); emu_wrptr(sc, ch, EMU_CHAN_PSST, 0); emu_wrptr(sc, ch, EMU_CHAN_DSL, 0x10); emu_wrptr(sc, ch, EMU_CHAN_CCCA, 0); emu_wrptr(sc, ch, EMU_CHAN_Z1, 0); emu_wrptr(sc, ch, 
EMU_CHAN_Z2, 0); emu_wrptr(sc, ch, EMU_CHAN_FXRT, 0xd01c0000); emu_wrptr(sc, ch, EMU_CHAN_ATKHLDM, 0); emu_wrptr(sc, ch, EMU_CHAN_DCYSUSM, 0); emu_wrptr(sc, ch, EMU_CHAN_IFATN, 0xffff); emu_wrptr(sc, ch, EMU_CHAN_PEFE, 0); emu_wrptr(sc, ch, EMU_CHAN_FMMOD, 0); emu_wrptr(sc, ch, EMU_CHAN_TREMFRQ, 24); /* 1 Hz */ emu_wrptr(sc, ch, EMU_CHAN_FM2FRQ2, 24); /* 1 Hz */ emu_wrptr(sc, ch, EMU_CHAN_TEMPENV, 0); /*** these are last so OFF prevents writing ***/ emu_wrptr(sc, ch, EMU_CHAN_LFOVAL2, 0); emu_wrptr(sc, ch, EMU_CHAN_LFOVAL1, 0); emu_wrptr(sc, ch, EMU_CHAN_ATKHLDV, 0); emu_wrptr(sc, ch, EMU_CHAN_ENVVOL, 0); emu_wrptr(sc, ch, EMU_CHAN_ENVVAL, 0); if ((sc->is_emu10k2) || (sc->is_ca0102) || (sc->is_ca0108)) { emu_wrptr(sc, ch, 0x4c, 0x0); emu_wrptr(sc, ch, 0x4d, 0x0); emu_wrptr(sc, ch, 0x4e, 0x0); emu_wrptr(sc, ch, 0x4f, 0x0); emu_wrptr(sc, ch, EMU_A_CHAN_FXRT1, 0x3f3f3f3f); emu_wrptr(sc, ch, EMU_A_CHAN_FXRT2, 0x3f3f3f3f); emu_wrptr(sc, ch, EMU_A_CHAN_SENDAMOUNTS, 0x0); } } emumix_set_spdif_mode(sc, SPDIF_MODE_PCM); if ((sc->is_emu10k2) || (sc->is_ca0102) || (sc->is_ca0108)) emu_wrptr(sc, 0, EMU_A_SPDIF_SAMPLERATE, EMU_A_SPDIF_48000); /* * CAxxxx cards needs additional setup: * 1. Set I2S capture sample rate to 96000 * 2. Disable P16v / P17v proceesing * 3. 
Allow EMU10K DSP inputs */ if ((sc->is_ca0102) || (sc->is_ca0108)) { spdif_sr = emu_rdptr(sc, 0, EMU_A_SPDIF_SAMPLERATE); spdif_sr &= 0xfffff1ff; spdif_sr |= EMU_A_I2S_CAPTURE_96000; emu_wrptr(sc, 0, EMU_A_SPDIF_SAMPLERATE, spdif_sr); /* Disable P16v processing */ emu_wr_p16vptr(sc, 0, EMU_A2_SRCSel, 0x14); /* Setup P16v/P17v sound routing */ if (sc->is_ca0102) emu_wr_p16vptr(sc, 0, EMU_A2_SRCMULTI_ENABLE, 0xFF00FF00); else { emu_wr_p16vptr(sc, 0, EMU_A2_MIXER_I2S_ENABLE, 0xFF000000); emu_wr_p16vptr(sc, 0, EMU_A2_MIXER_SPDIF_ENABLE, 0xFF000000); tmp = emu_rd(sc, EMU_A_IOCFG, 2); emu_wr(sc, EMU_A_IOCFG, tmp & ~0x8, 2); } } emu_initefx(sc); def_mode = MODE_ANALOG; if ((sc->is_emu10k2) || (sc->is_ca0102) || (sc->is_ca0108)) def_mode = MODE_DIGITAL; if (((sc->is_emu10k2) || (sc->is_ca0102) || (sc->is_ca0108)) && (sc->broken_digital)) { device_printf(sc->dev, "Audigy card initialized in analog mode.\n"); def_mode = MODE_ANALOG; } emumix_set_mode(sc, def_mode); if (bootverbose) { tmp = emu_rd(sc, EMU_HCFG, 4); device_printf(sc->dev, "Card Configuration ( 0x%08x )\n", tmp); device_printf(sc->dev, "Card Configuration ( & 0xff000000 ) : %s%s%s%s%s%s%s%s\n", (tmp & 0x80000000 ? "[Legacy MPIC] " : ""), (tmp & 0x40000000 ? "[0x40] " : ""), (tmp & 0x20000000 ? "[0x20] " : ""), (tmp & 0x10000000 ? "[0x10] " : ""), (tmp & 0x08000000 ? "[0x08] " : ""), (tmp & 0x04000000 ? "[0x04] " : ""), (tmp & 0x02000000 ? "[0x02] " : ""), (tmp & 0x01000000 ? "[0x01]" : " ")); device_printf(sc->dev, "Card Configuration ( & 0x00ff0000 ) : %s%s%s%s%s%s%s%s\n", (tmp & 0x00800000 ? "[0x80] " : ""), (tmp & 0x00400000 ? "[0x40] " : ""), (tmp & 0x00200000 ? "[Legacy INT] " : ""), (tmp & 0x00100000 ? "[0x10] " : ""), (tmp & 0x00080000 ? "[0x08] " : ""), (tmp & 0x00040000 ? "[Codec4] " : ""), (tmp & 0x00020000 ? "[Codec2] " : ""), (tmp & 0x00010000 ? "[I2S Codec]" : " ")); device_printf(sc->dev, "Card Configuration ( & 0x0000ff00 ) : %s%s%s%s%s%s%s%s\n", (tmp & 0x00008000 ? 
"[0x80] " : ""), (tmp & 0x00004000 ? "[GPINPUT0] " : ""), (tmp & 0x00002000 ? "[GPINPUT1] " : ""), (tmp & 0x00001000 ? "[GPOUT0] " : ""), (tmp & 0x00000800 ? "[GPOUT1] " : ""), (tmp & 0x00000400 ? "[GPOUT2] " : ""), (tmp & 0x00000200 ? "[Joystick] " : ""), (tmp & 0x00000100 ? "[0x01]" : " ")); device_printf(sc->dev, "Card Configuration ( & 0x000000ff ) : %s%s%s%s%s%s%s%s\n", (tmp & 0x00000080 ? "[0x80] " : ""), (tmp & 0x00000040 ? "[0x40] " : ""), (tmp & 0x00000020 ? "[0x20] " : ""), (tmp & 0x00000010 ? "[AUTOMUTE] " : ""), (tmp & 0x00000008 ? "[LOCKSOUNDCACHE] " : ""), (tmp & 0x00000004 ? "[LOCKTANKCACHE] " : ""), (tmp & 0x00000002 ? "[MUTEBUTTONENABLE] " : ""), (tmp & 0x00000001 ? "[AUDIOENABLE]" : " ")); if ((sc->is_emu10k2) || (sc->is_ca0102) || (sc->is_ca0108)) { tmp = emu_rd(sc, EMU_A_IOCFG, 2); device_printf(sc->dev, "Audigy Card Configuration ( 0x%04x )\n", tmp); device_printf(sc->dev, "Audigy Card Configuration ( & 0xff00 )"); printf(" : %s%s%s%s%s%s%s%s\n", (tmp & 0x8000 ? "[Rear Speakers] " : ""), (tmp & 0x4000 ? "[Front Speakers] " : ""), (tmp & 0x2000 ? "[0x20] " : ""), (tmp & 0x1000 ? "[0x10] " : ""), (tmp & 0x0800 ? "[0x08] " : ""), (tmp & 0x0400 ? "[0x04] " : ""), (tmp & 0x0200 ? "[0x02] " : ""), (tmp & 0x0100 ? "[AudigyDrive Phones]" : " ")); device_printf(sc->dev, "Audigy Card Configuration ( & 0x00ff )"); printf(" : %s%s%s%s%s%s%s%s\n", (tmp & 0x0080 ? "[0x80] " : ""), (tmp & 0x0040 ? "[Mute AnalogOut] " : ""), (tmp & 0x0020 ? "[0x20] " : ""), (tmp & 0x0010 ? "[0x10] " : ""), (tmp & 0x0008 ? "[0x08] " : ""), (tmp & 0x0004 ? "[GPOUT0] " : ""), (tmp & 0x0002 ? "[GPOUT1] " : ""), (tmp & 0x0001 ? 
"[GPOUT2]" : " ")); } /* is_emu10k2 or ca* */ } /* bootverbose */ return (0); } static int emu_uninit(struct emu_sc_info *sc) { uint32_t ch; struct emu_memblk *blk; emu_wr(sc, EMU_INTE, 0, 4); for (ch = 0; ch < NUM_G; ch++) emu_wrptr(sc, ch, EMU_CHAN_DCYSUSV, 0); for (ch = 0; ch < NUM_G; ch++) { emu_wrptr(sc, ch, EMU_CHAN_VTFT, 0); emu_wrptr(sc, ch, EMU_CHAN_CVCF, 0); emu_wrptr(sc, ch, EMU_CHAN_PTRX, 0); emu_wrptr(sc, ch, EMU_CHAN_CPF, 0); } /* disable audio and lock cache */ emu_wr(sc, EMU_HCFG, EMU_HCFG_LOCKSOUNDCACHE | EMU_HCFG_LOCKTANKCACHE_MASK | EMU_HCFG_MUTEBUTTONENABLE, 4); emu_wrptr(sc, 0, EMU_PTB, 0); /* reset recording buffers */ emu_wrptr(sc, 0, EMU_MICBS, EMU_RECBS_BUFSIZE_NONE); emu_wrptr(sc, 0, EMU_MICBA, 0); emu_wrptr(sc, 0, EMU_FXBS, EMU_RECBS_BUFSIZE_NONE); emu_wrptr(sc, 0, EMU_FXBA, 0); emu_wrptr(sc, 0, EMU_FXWC, 0); emu_wrptr(sc, 0, EMU_ADCBS, EMU_RECBS_BUFSIZE_NONE); emu_wrptr(sc, 0, EMU_ADCBA, 0); emu_wrptr(sc, 0, EMU_TCB, 0); emu_wrptr(sc, 0, EMU_TCBS, 0); /* disable channel interrupt */ emu_wrptr(sc, 0, EMU_CLIEL, 0); emu_wrptr(sc, 0, EMU_CLIEH, 0); emu_wrptr(sc, 0, EMU_SOLEL, 0); emu_wrptr(sc, 0, EMU_SOLEH, 0); if (!SLIST_EMPTY(&sc->mem.blocks)) device_printf(sc->dev, "warning: memblock list not empty\n"); SLIST_FOREACH(blk, &sc->mem.blocks, link) if (blk != NULL) device_printf(sc->dev, "lost %d for %s\n", blk->pte_size, blk->owner); emu_free(&sc->mem, sc->mem.ptb_pages, sc->mem.ptb_map); emu_free(&sc->mem, sc->mem.silent_page, sc->mem.silent_map); return (0); } static int emu_read_ivar(device_t bus, device_t dev, int ivar_index, uintptr_t * result) { struct sndcard_func *func = device_get_ivars(dev); struct emu_sc_info *sc = device_get_softc(bus); if (func==NULL) return (ENOMEM); if (sc == NULL) return (ENOMEM); switch (ivar_index) { case EMU_VAR_FUNC: *result = func->func; break; case EMU_VAR_ROUTE: if (func->varinfo == NULL) return (ENOMEM); *result = ((struct emu_pcminfo *)func->varinfo)->route; break; case EMU_VAR_ISEMU10K1: *result = 
sc->is_emu10k1; break; case EMU_VAR_MCH_DISABLED: *result = sc->mch_disabled; break; case EMU_VAR_MCH_REC: *result = sc->mch_rec; break; default: return (ENOENT); } return (0); } static int emu_write_ivar(device_t bus __unused, device_t dev __unused, int ivar_index, uintptr_t value __unused) { switch (ivar_index) { case 0: return (EINVAL); default: return (ENOENT); } } static int emu_pci_probe(device_t dev) { struct sbuf *s; unsigned int thiscard = 0; uint16_t vendor; vendor = pci_read_config(dev, PCIR_DEVVENDOR, /* bytes */ 2); if (vendor != 0x1102) return (ENXIO); /* Not Creative */ thiscard = emu_getcard(dev); if (thiscard == 0) return (ENXIO); s = sbuf_new(NULL, NULL, 4096, 0); if (s == NULL) return (ENOMEM); sbuf_printf(s, "Creative %s [%s]", emu_cards[thiscard].desc, emu_cards[thiscard].SBcode); sbuf_finish(s); device_set_desc_copy(dev, sbuf_data(s)); sbuf_delete(s); return (BUS_PROBE_DEFAULT); } static int emu_pci_attach(device_t dev) { struct sndcard_func *func; struct emu_sc_info *sc; struct emu_pcminfo *pcminfo; #if 0 struct emu_midiinfo *midiinfo; #endif int i; int device_flags; char status[255]; int error = ENXIO; int unit; sc = device_get_softc(dev); unit = device_get_unit(dev); /* Get configuration */ sc->ctx = device_get_sysctl_ctx(dev); if (sc->ctx == NULL) goto bad; sc->root = device_get_sysctl_tree(dev); if (sc->root == NULL) goto bad; if (resource_int_value("emu10kx", unit, "multichannel_disabled", &(sc->mch_disabled))) RANGE(sc->mch_disabled, 0, 1); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "multichannel_disabled", CTLFLAG_RD, &(sc->mch_disabled), 0, "Multichannel playback setting"); if (resource_int_value("emu10kx", unit, "multichannel_recording", &(sc->mch_rec))) RANGE(sc->mch_rec, 0, 1); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "multichannel_recording", CTLFLAG_RD, &(sc->mch_rec), 0, "Multichannel recording setting"); if 
(resource_int_value("emu10kx", unit, "debug", &(sc->dbg_level))) RANGE(sc->mch_rec, 0, 2); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &(sc->dbg_level), 0, "Debug level"); /* Fill in the softc. */ mtx_init(&sc->lock, device_get_nameunit(dev), "bridge conf", MTX_DEF); mtx_init(&sc->rw, device_get_nameunit(dev), "exclusive io", MTX_DEF); sc->dev = dev; sc->type = pci_get_devid(dev); sc->rev = pci_get_revid(dev); sc->enable_ir = 0; sc->has_ac97 = 0; sc->has_51 = 0; sc->has_71 = 0; sc->broken_digital = 0; sc->is_emu10k1 = 0; sc->is_emu10k2 = 0; sc->is_ca0102 = 0; sc->is_ca0108 = 0; sc->is_cardbus = 0; device_flags = emu_cards[emu_getcard(dev)].flags; if (device_flags & HAS_51) sc->has_51 = 1; if (device_flags & HAS_71) { sc->has_51 = 1; sc->has_71 = 1; } if (device_flags & IS_EMU10K1) sc->is_emu10k1 = 1; if (device_flags & IS_EMU10K2) sc->is_emu10k2 = 1; if (device_flags & IS_CA0102) sc->is_ca0102 = 1; if (device_flags & IS_CA0108) sc->is_ca0108 = 1; if ((sc->is_emu10k2) && (sc->rev == 4)) { sc->is_emu10k2 = 0; sc->is_ca0102 = 1; /* for unknown Audigy 2 cards */ } if ((sc->is_ca0102 == 1) || (sc->is_ca0108 == 1)) if (device_flags & IS_CARDBUS) sc->is_cardbus = 1; if ((sc->is_emu10k1 + sc->is_emu10k2 + sc->is_ca0102 + sc->is_ca0108) != 1) { device_printf(sc->dev, "Unable to detect HW chipset\n"); goto bad; } if (device_flags & BROKEN_DIGITAL) sc->broken_digital = 1; if (device_flags & HAS_AC97) sc->has_ac97 = 1; sc->opcode_shift = 0; if ((sc->is_emu10k2) || (sc->is_ca0102) || (sc->is_ca0108)) { sc->opcode_shift = 24; sc->high_operand_shift = 12; /* DSP map */ /* sc->fx_base = 0x0 */ sc->input_base = 0x40; /* sc->p16vinput_base = 0x50; */ sc->output_base = 0x60; sc->efxc_base = 0x80; /* sc->output32h_base = 0xa0; */ /* sc->output32l_base = 0xb0; */ sc->dsp_zero = 0xc0; /* 0xe0...0x100 are unknown */ /* sc->tram_base = 0x200 */ /* sc->tram_addr_base = 0x300 */ sc->gpr_base = EMU_A_FXGPREGBASE; 
sc->num_gprs = 0x200; sc->code_base = EMU_A_MICROCODEBASE; sc->code_size = 0x800 / 2; /* 0x600-0xdff, 2048 words, * 1024 instructions */ sc->mchannel_fx = 8; sc->num_fxbuses = 16; sc->num_inputs = 8; sc->num_outputs = 16; sc->address_mask = EMU_A_PTR_ADDR_MASK; } if (sc->is_emu10k1) { sc->has_51 = 0; /* We don't support 5.1 sound on SB Live! 5.1 */ sc->opcode_shift = 20; sc->high_operand_shift = 10; sc->code_base = EMU_MICROCODEBASE; sc->code_size = 0x400 / 2; /* 0x400-0x7ff, 1024 words, * 512 instructions */ sc->gpr_base = EMU_FXGPREGBASE; sc->num_gprs = 0x100; sc->input_base = 0x10; sc->output_base = 0x20; /* * XXX 5.1 Analog outputs are inside efxc address space! * They use output+0x11/+0x12 (=efxc+1/+2). * Don't use this efx registers for recording on SB Live! 5.1! */ sc->efxc_base = 0x30; sc->dsp_zero = 0x40; sc->mchannel_fx = 0; sc->num_fxbuses = 8; sc->num_inputs = 8; sc->num_outputs = 16; sc->address_mask = EMU_PTR_ADDR_MASK; } if (sc->opcode_shift == 0) goto bad; pci_enable_busmaster(dev); i = PCIR_BAR(0); sc->reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &i, RF_ACTIVE); if (sc->reg == NULL) { device_printf(dev, "unable to map register space\n"); goto bad; } sc->st = rman_get_bustag(sc->reg); sc->sh = rman_get_bushandle(sc->reg); for (i = 0; i < EMU_MAX_IRQ_CONSUMERS; i++) sc->timer[i] = 0; /* disable it */ i = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, RF_ACTIVE | RF_SHAREABLE); if ((sc->irq == NULL) || bus_setup_intr(dev, sc->irq, INTR_MPSAFE | INTR_TYPE_AV, NULL, emu_intr, sc, &sc->ih)) { device_printf(dev, "unable to map interrupt\n"); goto bad; } if (emu_rm_init(sc) != 0) { device_printf(dev, "unable to create resource manager\n"); goto bad; } if (sc->is_cardbus) if (emu_cardbus_init(sc) != 0) { device_printf(dev, "unable to initialize CardBus interface\n"); goto bad; } if (emu_init(sc) != 0) { device_printf(dev, "unable to initialize the card\n"); goto bad; } if (emu10kx_dev_init(sc) != 0) { device_printf(dev, "unable to create 
control device\n"); goto bad; } snprintf(status, 255, "rev %d at io 0x%jx irq %jd", sc->rev, rman_get_start(sc->reg), rman_get_start(sc->irq)); /* Voices */ for (i = 0; i < NUM_G; i++) { sc->voice[i].vnum = i; sc->voice[i].slave = NULL; sc->voice[i].busy = 0; sc->voice[i].ismaster = 0; sc->voice[i].running = 0; sc->voice[i].b16 = 0; sc->voice[i].stereo = 0; sc->voice[i].speed = 0; sc->voice[i].start = 0; sc->voice[i].end = 0; } /* PCM Audio */ for (i = 0; i < RT_COUNT; i++) sc->pcm[i] = NULL; /* FRONT */ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO); if (func == NULL) { error = ENOMEM; goto bad; } pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (pcminfo == NULL) { error = ENOMEM; goto bad; } pcminfo->card = sc; pcminfo->route = RT_FRONT; func->func = SCF_PCM; func->varinfo = pcminfo; sc->pcm[RT_FRONT] = device_add_child(dev, "pcm", -1); device_set_ivars(sc->pcm[RT_FRONT], func); if (!(sc->mch_disabled)) { /* REAR */ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO); if (func == NULL) { error = ENOMEM; goto bad; } pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (pcminfo == NULL) { error = ENOMEM; goto bad; } pcminfo->card = sc; pcminfo->route = RT_REAR; func->func = SCF_PCM; func->varinfo = pcminfo; sc->pcm[RT_REAR] = device_add_child(dev, "pcm", -1); device_set_ivars(sc->pcm[RT_REAR], func); if (sc->has_51) { /* CENTER */ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO); if (func == NULL) { error = ENOMEM; goto bad; } pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (pcminfo == NULL) { error = ENOMEM; goto bad; } pcminfo->card = sc; pcminfo->route = RT_CENTER; func->func = SCF_PCM; func->varinfo = pcminfo; sc->pcm[RT_CENTER] = device_add_child(dev, "pcm", -1); device_set_ivars(sc->pcm[RT_CENTER], func); /* SUB */ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO); if (func == NULL) { 
error = ENOMEM; goto bad; } pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (pcminfo == NULL) { error = ENOMEM; goto bad; } pcminfo->card = sc; pcminfo->route = RT_SUB; func->func = SCF_PCM; func->varinfo = pcminfo; sc->pcm[RT_SUB] = device_add_child(dev, "pcm", -1); device_set_ivars(sc->pcm[RT_SUB], func); } if (sc->has_71) { /* SIDE */ func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO); if (func == NULL) { error = ENOMEM; goto bad; } pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (pcminfo == NULL) { error = ENOMEM; goto bad; } pcminfo->card = sc; pcminfo->route = RT_SIDE; func->func = SCF_PCM; func->varinfo = pcminfo; sc->pcm[RT_SIDE] = device_add_child(dev, "pcm", -1); device_set_ivars(sc->pcm[RT_SIDE], func); } } /* mch_disabled */ if (sc->mch_rec) { func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO); if (func == NULL) { error = ENOMEM; goto bad; } pcminfo = malloc(sizeof(struct emu_pcminfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (pcminfo == NULL) { error = ENOMEM; goto bad; } pcminfo->card = sc; pcminfo->route = RT_MCHRECORD; func->func = SCF_PCM; func->varinfo = pcminfo; sc->pcm[RT_MCHRECORD] = device_add_child(dev, "pcm", -1); device_set_ivars(sc->pcm[RT_MCHRECORD], func); } /*mch_rec */ for (i = 0; i < 2; i++) sc->midi[i] = NULL; /* MIDI has some memory mangament and (possible) locking problems */ #if 0 /* Midi Interface 1: Live!, Audigy, Audigy 2 */ if ((sc->is_emu10k1) || (sc->is_emu10k2) || (sc->is_ca0102)) { func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO); if (func == NULL) { error = ENOMEM; goto bad; } midiinfo = malloc(sizeof(struct emu_midiinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (midiinfo == NULL) { error = ENOMEM; goto bad; } midiinfo->card = sc; if (sc->is_emu10k2 || (sc->is_ca0102)) { midiinfo->port = EMU_A_MUDATA1; midiinfo->portnr = 1; } if (sc->is_emu10k1) { midiinfo->port = MUDATA; midiinfo->portnr = 1; } func->func = 
SCF_MIDI; func->varinfo = midiinfo; sc->midi[0] = device_add_child(dev, "midi", -1); device_set_ivars(sc->midi[0], func); } /* Midi Interface 2: Audigy, Audigy 2 (on AudigyDrive) */ if (sc->is_emu10k2 || (sc->is_ca0102)) { func = malloc(sizeof(struct sndcard_func), M_DEVBUF, M_NOWAIT | M_ZERO); if (func == NULL) { error = ENOMEM; goto bad; } midiinfo = malloc(sizeof(struct emu_midiinfo), M_DEVBUF, M_NOWAIT | M_ZERO); if (midiinfo == NULL) { error = ENOMEM; goto bad; } midiinfo->card = sc; midiinfo->port = EMU_A_MUDATA2; midiinfo->portnr = 2; func->func = SCF_MIDI; func->varinfo = midiinfo; sc->midi[1] = device_add_child(dev, "midi", -1); device_set_ivars(sc->midi[1], func); } #endif return (bus_generic_attach(dev)); bad: /* XXX can we just call emu_pci_detach here? */ if (sc->cdev) emu10kx_dev_uninit(sc); if (sc->rm != NULL) emu_rm_uninit(sc); if (sc->reg) bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), sc->reg); if (sc->ih) bus_teardown_intr(dev, sc->irq, sc->ih); if (sc->irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); mtx_destroy(&sc->rw); mtx_destroy(&sc->lock); return (error); } static int emu_pci_detach(device_t dev) { struct emu_sc_info *sc; struct sndcard_func *func; int devcount, i; device_t *childlist; int r = 0; sc = device_get_softc(dev); for (i = 0; i < RT_COUNT; i++) { if (sc->pcm[i] != NULL) { func = device_get_ivars(sc->pcm[i]); if (func != NULL && func->func == SCF_PCM) { device_set_ivars(sc->pcm[i], NULL); free(func->varinfo, M_DEVBUF); free(func, M_DEVBUF); } r = device_delete_child(dev, sc->pcm[i]); if (r) return (r); } } if (sc->midi[0] != NULL) { func = device_get_ivars(sc->midi[0]); if (func != NULL && func->func == SCF_MIDI) { device_set_ivars(sc->midi[0], NULL); free(func->varinfo, M_DEVBUF); free(func, M_DEVBUF); } r = device_delete_child(dev, sc->midi[0]); if (r) return (r); } if (sc->midi[1] != NULL) { func = device_get_ivars(sc->midi[1]); if (func != NULL && func->func == SCF_MIDI) { device_set_ivars(sc->midi[1], NULL); 
free(func->varinfo, M_DEVBUF); free(func, M_DEVBUF); } r = device_delete_child(dev, sc->midi[1]); if (r) return (r); } if (device_get_children(dev, &childlist, &devcount) == 0) for (i = 0; i < devcount - 1; i++) { device_printf(dev, "removing stale child %d (unit %d)\n", i, device_get_unit(childlist[i])); func = device_get_ivars(childlist[i]); if (func != NULL && (func->func == SCF_MIDI || func->func == SCF_PCM)) { device_set_ivars(childlist[i], NULL); free(func->varinfo, M_DEVBUF); free(func, M_DEVBUF); } device_delete_child(dev, childlist[i]); } if (childlist != NULL) free(childlist, M_TEMP); r = emu10kx_dev_uninit(sc); if (r) return (r); /* shutdown chip */ emu_uninit(sc); emu_rm_uninit(sc); if (sc->mem.dmat) bus_dma_tag_destroy(sc->mem.dmat); if (sc->reg) bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), sc->reg); bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); mtx_destroy(&sc->rw); mtx_destroy(&sc->lock); return (bus_generic_detach(dev)); } /* add suspend, resume */ static device_method_t emu_methods[] = { /* Device interface */ DEVMETHOD(device_probe, emu_pci_probe), DEVMETHOD(device_attach, emu_pci_attach), DEVMETHOD(device_detach, emu_pci_detach), /* Bus methods */ DEVMETHOD(bus_read_ivar, emu_read_ivar), DEVMETHOD(bus_write_ivar, emu_write_ivar), DEVMETHOD_END }; static driver_t emu_driver = { "emu10kx", emu_methods, sizeof(struct emu_sc_info), NULL, 0, NULL }; static int emu_modevent(module_t mod __unused, int cmd, void *data __unused) { int err = 0; switch (cmd) { case MOD_LOAD: break; /* Success */ case MOD_UNLOAD: case MOD_SHUTDOWN: /* XXX Should we check state of pcm & midi subdevices here? 
*/ break; /* Success */ default: err = EINVAL; break; } return (err); } static devclass_t emu_devclass; DRIVER_MODULE(snd_emu10kx, pci, emu_driver, emu_devclass, emu_modevent, NULL); MODULE_VERSION(snd_emu10kx, SND_EMU10KX_PREFVER); Index: head/sys/dev/sound/pci/fm801.c =================================================================== --- head/sys/dev/sound/pci/fm801.c (revision 297861) +++ head/sys/dev/sound/pci/fm801.c (revision 297862) @@ -1,765 +1,766 @@ /*- * Copyright (c) 2000 Dmitry Dicky diwil@dataart.com * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS `AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include SND_DECLARE_FILE("$FreeBSD$"); #define PCI_VENDOR_FORTEMEDIA 0x1319 #define PCI_DEVICE_FORTEMEDIA1 0x08011319 /* Audio controller */ #define PCI_DEVICE_FORTEMEDIA2 0x08021319 /* Joystick controller */ #define FM_PCM_VOLUME 0x00 #define FM_FM_VOLUME 0x02 #define FM_I2S_VOLUME 0x04 #define FM_RECORD_SOURCE 0x06 #define FM_PLAY_CTL 0x08 #define FM_PLAY_RATE_MASK 0x0f00 #define FM_PLAY_BUF1_LAST 0x0001 #define FM_PLAY_BUF2_LAST 0x0002 #define FM_PLAY_START 0x0020 #define FM_PLAY_PAUSE 0x0040 #define FM_PLAY_STOPNOW 0x0080 #define FM_PLAY_16BIT 0x4000 #define FM_PLAY_STEREO 0x8000 #define FM_PLAY_DMALEN 0x0a #define FM_PLAY_DMABUF1 0x0c #define FM_PLAY_DMABUF2 0x10 #define FM_REC_CTL 0x14 #define FM_REC_RATE_MASK 0x0f00 #define FM_REC_BUF1_LAST 0x0001 #define FM_REC_BUF2_LAST 0x0002 #define FM_REC_START 0x0020 #define FM_REC_PAUSE 0x0040 #define FM_REC_STOPNOW 0x0080 #define FM_REC_16BIT 0x4000 #define FM_REC_STEREO 0x8000 #define FM_REC_DMALEN 0x16 #define FM_REC_DMABUF1 0x18 #define FM_REC_DMABUF2 0x1c #define FM_CODEC_CTL 0x22 #define FM_VOLUME 0x26 #define FM_VOLUME_MUTE 0x8000 #define FM_CODEC_CMD 0x2a #define FM_CODEC_CMD_READ 0x0080 #define FM_CODEC_CMD_VALID 0x0100 #define FM_CODEC_CMD_BUSY 0x0200 #define FM_CODEC_DATA 0x2c #define FM_IO_CTL 0x52 #define FM_CARD_CTL 0x54 #define FM_INTMASK 0x56 #define FM_INTMASK_PLAY 0x0001 #define FM_INTMASK_REC 0x0002 #define FM_INTMASK_VOL 0x0040 #define FM_INTMASK_MPU 0x0080 #define FM_INTSTATUS 0x5a #define FM_INTSTATUS_PLAY 0x0100 #define FM_INTSTATUS_REC 0x0200 #define FM_INTSTATUS_VOL 0x4000 #define FM_INTSTATUS_MPU 0x8000 #define FM801_DEFAULT_BUFSZ 4096 /* Other values do not work!!! 
*/ /* debug purposes */ #define DPRINT if(0) printf /* static int fm801ch_setup(struct pcm_channel *c); */ static u_int32_t fmts[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps fm801ch_caps = { 5500, 48000, fmts, 0 }; struct fm801_info; struct fm801_chinfo { struct fm801_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; u_int32_t spd, dir, fmt; /* speed, direction, format */ u_int32_t shift; }; struct fm801_info { int type; bus_space_tag_t st; bus_space_handle_t sh; bus_dma_tag_t parent_dmat; device_t dev; int num; u_int32_t unit; struct resource *reg, *irq; int regtype, regid, irqid; void *ih; u_int32_t play_flip, play_nextblk, play_start, play_blksize, play_fmt, play_shift, play_size; u_int32_t rec_flip, rec_nextblk, rec_start, rec_blksize, rec_fmt, rec_shift, rec_size; unsigned int bufsz; struct fm801_chinfo pch, rch; device_t radio; }; /* Bus Read / Write routines */ static u_int32_t fm801_rd(struct fm801_info *fm801, int regno, int size) { switch(size) { case 1: return (bus_space_read_1(fm801->st, fm801->sh, regno)); case 2: return (bus_space_read_2(fm801->st, fm801->sh, regno)); case 4: return (bus_space_read_4(fm801->st, fm801->sh, regno)); default: return 0xffffffff; } } static void fm801_wr(struct fm801_info *fm801, int regno, u_int32_t data, int size) { switch(size) { case 1: bus_space_write_1(fm801->st, fm801->sh, regno, data); break; case 2: bus_space_write_2(fm801->st, fm801->sh, regno, data); break; case 4: bus_space_write_4(fm801->st, fm801->sh, regno, data); break; } } /* -------------------------------------------------------------------- */ /* * ac97 codec routines */ #define TIMO 50 static int fm801_rdcd(kobj_t obj, void *devinfo, int regno) { struct fm801_info *fm801 = (struct fm801_info *)devinfo; int i; for (i = 0; i < TIMO && fm801_rd(fm801,FM_CODEC_CMD,2) & FM_CODEC_CMD_BUSY; i++) { DELAY(10000); DPRINT("fm801 rdcd: 1 - 
DELAY\n"); } if (i >= TIMO) { printf("fm801 rdcd: codec busy\n"); return 0; } fm801_wr(fm801,FM_CODEC_CMD, regno|FM_CODEC_CMD_READ,2); for (i = 0; i < TIMO && !(fm801_rd(fm801,FM_CODEC_CMD,2) & FM_CODEC_CMD_VALID); i++) { DELAY(10000); DPRINT("fm801 rdcd: 2 - DELAY\n"); } if (i >= TIMO) { printf("fm801 rdcd: write codec invalid\n"); return 0; } return fm801_rd(fm801,FM_CODEC_DATA,2); } static int fm801_wrcd(kobj_t obj, void *devinfo, int regno, u_int32_t data) { struct fm801_info *fm801 = (struct fm801_info *)devinfo; int i; DPRINT("fm801_wrcd reg 0x%x val 0x%x\n",regno, data); /* if(regno == AC97_REG_RECSEL) return; */ /* Poll until codec is ready */ for (i = 0; i < TIMO && fm801_rd(fm801,FM_CODEC_CMD,2) & FM_CODEC_CMD_BUSY; i++) { DELAY(10000); DPRINT("fm801 rdcd: 1 - DELAY\n"); } if (i >= TIMO) { printf("fm801 wrcd: read codec busy\n"); return -1; } fm801_wr(fm801,FM_CODEC_DATA,data, 2); fm801_wr(fm801,FM_CODEC_CMD, regno,2); /* wait until codec is ready */ for (i = 0; i < TIMO && fm801_rd(fm801,FM_CODEC_CMD,2) & FM_CODEC_CMD_BUSY; i++) { DELAY(10000); DPRINT("fm801 wrcd: 2 - DELAY\n"); } if (i >= TIMO) { printf("fm801 wrcd: read codec busy\n"); return -1; } DPRINT("fm801 wrcd release reg 0x%x val 0x%x\n",regno, data); return 0; } static kobj_method_t fm801_ac97_methods[] = { KOBJMETHOD(ac97_read, fm801_rdcd), KOBJMETHOD(ac97_write, fm801_wrcd), DEVMETHOD_END }; AC97_DECLARE(fm801_ac97); /* -------------------------------------------------------------------- */ /* * The interrupt handler */ static void fm801_intr(void *p) { struct fm801_info *fm801 = (struct fm801_info *)p; u_int32_t intsrc = fm801_rd(fm801, FM_INTSTATUS, 2); DPRINT("\nfm801_intr intsrc 0x%x ", intsrc); if(intsrc & FM_INTSTATUS_PLAY) { fm801->play_flip++; if(fm801->play_flip & 1) { fm801_wr(fm801, FM_PLAY_DMABUF1, fm801->play_start,4); } else fm801_wr(fm801, FM_PLAY_DMABUF2, fm801->play_nextblk,4); chn_intr(fm801->pch.channel); } if(intsrc & FM_INTSTATUS_REC) { fm801->rec_flip++; 
if(fm801->rec_flip & 1) { fm801_wr(fm801, FM_REC_DMABUF1, fm801->rec_start,4); } else fm801_wr(fm801, FM_REC_DMABUF2, fm801->rec_nextblk,4); chn_intr(fm801->rch.channel); } if ( intsrc & FM_INTSTATUS_MPU ) { /* This is a TODOish thing... */ fm801_wr(fm801, FM_INTSTATUS, intsrc & FM_INTSTATUS_MPU,2); } if ( intsrc & FM_INTSTATUS_VOL ) { /* This is a TODOish thing... */ fm801_wr(fm801, FM_INTSTATUS, intsrc & FM_INTSTATUS_VOL,2); } DPRINT("fm801_intr clear\n\n"); fm801_wr(fm801, FM_INTSTATUS, intsrc & (FM_INTSTATUS_PLAY | FM_INTSTATUS_REC), 2); } /* -------------------------------------------------------------------- */ /* channel interface */ static void * fm801ch_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct fm801_info *fm801 = (struct fm801_info *)devinfo; struct fm801_chinfo *ch = (dir == PCMDIR_PLAY)? &fm801->pch : &fm801->rch; DPRINT("fm801ch_init, direction = %d\n", dir); ch->parent = fm801; ch->channel = c; ch->buffer = b; ch->dir = dir; if (sndbuf_alloc(ch->buffer, fm801->parent_dmat, 0, fm801->bufsz) != 0) return NULL; return (void *)ch; } static int fm801ch_setformat(kobj_t obj, void *data, u_int32_t format) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; DPRINT("fm801ch_setformat 0x%x : %s, %s, %s, %s\n", format, (AFMT_CHANNEL(format) > 1)?"stereo":"mono", (format & AFMT_16BIT) ? "16bit":"8bit", (format & AFMT_SIGNED)? "signed":"unsigned", (format & AFMT_BIGENDIAN)?"bigendiah":"littleendian" ); if(ch->dir == PCMDIR_PLAY) { fm801->play_fmt = (AFMT_CHANNEL(format) > 1)? FM_PLAY_STEREO : 0; fm801->play_fmt |= (format & AFMT_16BIT) ? FM_PLAY_16BIT : 0; return 0; } if(ch->dir == PCMDIR_REC ) { fm801->rec_fmt = (AFMT_CHANNEL(format) > 1)? FM_REC_STEREO:0; fm801->rec_fmt |= (format & AFMT_16BIT) ? 
FM_PLAY_16BIT : 0; return 0; } return 0; } struct { u_int32_t limit; u_int32_t rate; } fm801_rates[11] = { { 6600, 5500 }, { 8750, 8000 }, { 10250, 9600 }, { 13200, 11025 }, { 17500, 16000 }, { 20500, 19200 }, { 26500, 22050 }, { 35000, 32000 }, { 41000, 38400 }, { 46000, 44100 }, { 48000, 48000 }, /* anything above -> 48000 */ }; static u_int32_t fm801ch_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; register int i; for (i = 0; i < 10 && fm801_rates[i].limit <= speed; i++) ; if(ch->dir == PCMDIR_PLAY) { fm801->pch.spd = fm801_rates[i].rate; fm801->play_shift = (i<<8); fm801->play_shift &= FM_PLAY_RATE_MASK; } if(ch->dir == PCMDIR_REC ) { fm801->rch.spd = fm801_rates[i].rate; fm801->rec_shift = (i<<8); fm801->rec_shift &= FM_REC_RATE_MASK; } ch->spd = fm801_rates[i].rate; return fm801_rates[i].rate; } static u_int32_t fm801ch_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct fm801_chinfo *ch = data; struct fm801_info *fm801 = ch->parent; /* * Don't mind for play_flip, set the blocksize to the * desired values in any case - otherwise sound playback * breaks here. 
*/
	if(ch->dir == PCMDIR_PLAY)
		fm801->play_blksize = blocksize;
	if(ch->dir == PCMDIR_REC)
		fm801->rec_blksize = blocksize;
	DPRINT("fm801ch_setblocksize %d (dir %d)\n",blocksize, ch->dir);
	return blocksize;
}

/*
 * Start or stop DMA for a channel.  On PCMTRIG_START the two
 * double-buffer addresses and the block length are programmed and the
 * channel is kicked off; otherwise START/STOPNOW are cleared and both
 * BUF*_LAST bits are set so the hardware stops at a block boundary.
 * The interrupt handler re-arms DMABUF1/DMABUF2 alternately based on
 * the *_flip counters reset here.
 */
static int
fm801ch_trigger(kobj_t obj, void *data, int go)
{
	struct fm801_chinfo *ch = data;
	struct fm801_info *fm801 = ch->parent;
	u_int32_t baseaddr = sndbuf_getbufaddr(ch->buffer);
	u_int32_t k1;

	DPRINT("fm801ch_trigger go %d , ", go);

	/* Only real start/stop requests reach the hardware. */
	if (!PCMTRIG_COMMON(go))
		return 0;

	if (ch->dir == PCMDIR_PLAY) {
		if (go == PCMTRIG_START) {
			/* Two consecutive blocks form the hardware double buffer. */
			fm801->play_start = baseaddr;
			fm801->play_nextblk = fm801->play_start + fm801->play_blksize;
			fm801->play_flip = 0;
			fm801_wr(fm801, FM_PLAY_DMALEN, fm801->play_blksize - 1, 2);
			fm801_wr(fm801, FM_PLAY_DMABUF1,fm801->play_start,4);
			fm801_wr(fm801, FM_PLAY_DMABUF2,fm801->play_nextblk,4);
			fm801_wr(fm801, FM_PLAY_CTL,
				FM_PLAY_START | FM_PLAY_STOPNOW | fm801->play_fmt | fm801->play_shift,
				2 );
		} else {
			/* Stop after the current block completes. */
			fm801->play_flip = 0;
			k1 = fm801_rd(fm801, FM_PLAY_CTL,2);
			fm801_wr(fm801, FM_PLAY_CTL,
				(k1 & ~(FM_PLAY_STOPNOW | FM_PLAY_START)) |
				FM_PLAY_BUF1_LAST | FM_PLAY_BUF2_LAST,
				2 );
		}
	} else if(ch->dir == PCMDIR_REC) {
		if (go == PCMTRIG_START) {
			fm801->rec_start = baseaddr;
			fm801->rec_nextblk = fm801->rec_start + fm801->rec_blksize;
			fm801->rec_flip = 0;
			fm801_wr(fm801, FM_REC_DMALEN, fm801->rec_blksize - 1, 2);
			fm801_wr(fm801, FM_REC_DMABUF1,fm801->rec_start,4);
			fm801_wr(fm801, FM_REC_DMABUF2,fm801->rec_nextblk,4);
			fm801_wr(fm801, FM_REC_CTL,
				FM_REC_START | FM_REC_STOPNOW | fm801->rec_fmt | fm801->rec_shift,
				2 );
		} else {
			fm801->rec_flip = 0;
			k1 = fm801_rd(fm801, FM_REC_CTL,2);
			fm801_wr(fm801, FM_REC_CTL,
				(k1 & ~(FM_REC_STOPNOW | FM_REC_START)) |
				FM_REC_BUF1_LAST | FM_REC_BUF2_LAST, 2);
		}
	}
	return 0;
}

/* Almost ALSA copy */
/*
 * Current DMA position within the buffer: read the address register of
 * the buffer the hardware is presently filling/draining (selected by
 * the flip counter) and subtract the buffer base.
 */
static u_int32_t
fm801ch_getptr(kobj_t obj, void *data)
{
	struct fm801_chinfo *ch = data;
	struct fm801_info *fm801 = ch->parent;
	u_int32_t result = 0;

	if (ch->dir == PCMDIR_PLAY) {
		result = fm801_rd(fm801,
			(fm801->play_flip&1) ? 
FM_PLAY_DMABUF2:FM_PLAY_DMABUF1, 4) - fm801->play_start; } if (ch->dir == PCMDIR_REC) { result = fm801_rd(fm801, (fm801->rec_flip&1) ? FM_REC_DMABUF2:FM_REC_DMABUF1, 4) - fm801->rec_start; } return result; } static struct pcmchan_caps * fm801ch_getcaps(kobj_t obj, void *data) { return &fm801ch_caps; } static kobj_method_t fm801ch_methods[] = { KOBJMETHOD(channel_init, fm801ch_init), KOBJMETHOD(channel_setformat, fm801ch_setformat), KOBJMETHOD(channel_setspeed, fm801ch_setspeed), KOBJMETHOD(channel_setblocksize, fm801ch_setblocksize), KOBJMETHOD(channel_trigger, fm801ch_trigger), KOBJMETHOD(channel_getptr, fm801ch_getptr), KOBJMETHOD(channel_getcaps, fm801ch_getcaps), DEVMETHOD_END }; CHANNEL_DECLARE(fm801ch); /* -------------------------------------------------------------------- */ /* * Init routine is taken from an original NetBSD driver */ static int fm801_init(struct fm801_info *fm801) { u_int32_t k1; /* reset codec */ fm801_wr(fm801, FM_CODEC_CTL, 0x0020,2); DELAY(100000); fm801_wr(fm801, FM_CODEC_CTL, 0x0000,2); DELAY(100000); fm801_wr(fm801, FM_PCM_VOLUME, 0x0808,2); fm801_wr(fm801, FM_FM_VOLUME, 0x0808,2); fm801_wr(fm801, FM_I2S_VOLUME, 0x0808,2); fm801_wr(fm801, 0x40,0x107f,2); /* enable legacy audio */ fm801_wr((void *)fm801, FM_RECORD_SOURCE, 0x0000,2); /* Unmask playback, record and mpu interrupts, mask the rest */ k1 = fm801_rd((void *)fm801, FM_INTMASK,2); fm801_wr(fm801, FM_INTMASK, (k1 & ~(FM_INTMASK_PLAY | FM_INTMASK_REC | FM_INTMASK_MPU)) | FM_INTMASK_VOL,2); fm801_wr(fm801, FM_INTSTATUS, FM_INTSTATUS_PLAY | FM_INTSTATUS_REC | FM_INTSTATUS_MPU | FM_INTSTATUS_VOL,2); DPRINT("FM801 init Ok\n"); return 0; } static int fm801_pci_attach(device_t dev) { - struct ac97_info *codec = 0; + struct ac97_info *codec = NULL; struct fm801_info *fm801; int i; int mapped = 0; char status[SND_STATUSLEN]; fm801 = malloc(sizeof(*fm801), M_DEVBUF, M_WAITOK | M_ZERO); fm801->type = pci_get_devid(dev); pci_enable_busmaster(dev); for (i = 0; (mapped == 0) && (i < 
PCI_MAXMAPS_0); i++) { fm801->regid = PCIR_BAR(i); fm801->regtype = SYS_RES_MEMORY; fm801->reg = bus_alloc_resource_any(dev, fm801->regtype, &fm801->regid, RF_ACTIVE); if(!fm801->reg) { fm801->regtype = SYS_RES_IOPORT; fm801->reg = bus_alloc_resource_any(dev, fm801->regtype, &fm801->regid, RF_ACTIVE); } if(fm801->reg) { fm801->st = rman_get_bustag(fm801->reg); fm801->sh = rman_get_bushandle(fm801->reg); mapped++; } } if (mapped == 0) { device_printf(dev, "unable to map register space\n"); goto oops; } fm801->bufsz = pcm_getbuffersize(dev, 4096, FM801_DEFAULT_BUFSZ, 65536); fm801_init(fm801); codec = AC97_CREATE(dev, fm801, fm801_ac97); if (codec == NULL) goto oops; if (mixer_init(dev, ac97_getmixerclass(), codec) == -1) goto oops; fm801->irqid = 0; fm801->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &fm801->irqid, RF_ACTIVE | RF_SHAREABLE); - if (!fm801->irq || snd_setup_intr(dev, fm801->irq, 0, fm801_intr, fm801, &fm801->ih)) { + if (!fm801->irq || + snd_setup_intr(dev, fm801->irq, 0, fm801_intr, fm801, &fm801->ih)) { device_printf(dev, "unable to map interrupt\n"); goto oops; } if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/fm801->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &fm801->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto oops; } snprintf(status, 64, "at %s 0x%jx irq %jd %s", (fm801->regtype == SYS_RES_IOPORT)? 
"io" : "memory", rman_get_start(fm801->reg), rman_get_start(fm801->irq),PCM_KLDSTRING(snd_fm801)); #define FM801_MAXPLAYCH 1 if (pcm_register(dev, fm801, FM801_MAXPLAYCH, 1)) goto oops; pcm_addchan(dev, PCMDIR_PLAY, &fm801ch_class, fm801); pcm_addchan(dev, PCMDIR_REC, &fm801ch_class, fm801); pcm_setstatus(dev, status); fm801->radio = device_add_child(dev, "radio", -1); bus_generic_attach(dev); return 0; oops: if (codec) ac97_destroy(codec); if (fm801->reg) bus_release_resource(dev, fm801->regtype, fm801->regid, fm801->reg); if (fm801->ih) bus_teardown_intr(dev, fm801->irq, fm801->ih); if (fm801->irq) bus_release_resource(dev, SYS_RES_IRQ, fm801->irqid, fm801->irq); if (fm801->parent_dmat) bus_dma_tag_destroy(fm801->parent_dmat); free(fm801, M_DEVBUF); return ENXIO; } static int fm801_pci_detach(device_t dev) { int r; struct fm801_info *fm801; DPRINT("Forte Media FM801 detach\n"); fm801 = pcm_getdevinfo(dev); r = bus_generic_detach(dev); if (r) return r; if (fm801->radio != NULL) { r = device_delete_child(dev, fm801->radio); if (r) return r; fm801->radio = NULL; } r = pcm_unregister(dev); if (r) return r; bus_release_resource(dev, fm801->regtype, fm801->regid, fm801->reg); bus_teardown_intr(dev, fm801->irq, fm801->ih); bus_release_resource(dev, SYS_RES_IRQ, fm801->irqid, fm801->irq); bus_dma_tag_destroy(fm801->parent_dmat); free(fm801, M_DEVBUF); return 0; } static int fm801_pci_probe( device_t dev ) { int id; if ((id = pci_get_devid(dev)) == PCI_DEVICE_FORTEMEDIA1 ) { device_set_desc(dev, "Forte Media FM801 Audio Controller"); return BUS_PROBE_DEFAULT; } /* if ((id = pci_get_devid(dev)) == PCI_DEVICE_FORTEMEDIA2 ) { device_set_desc(dev, "Forte Media FM801 Joystick (Not Supported)"); return ENXIO; } */ return ENXIO; } static struct resource * fm801_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct fm801_info *fm801; fm801 = pcm_getdevinfo(bus); if (type == SYS_RES_IOPORT && *rid 
== PCIR_BAR(0)) return (fm801->reg); return (NULL); } static int fm801_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { return (0); } static device_method_t fm801_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fm801_pci_probe), DEVMETHOD(device_attach, fm801_pci_attach), DEVMETHOD(device_detach, fm801_pci_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_alloc_resource, fm801_alloc_resource), DEVMETHOD(bus_release_resource, fm801_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD_END }; static driver_t fm801_driver = { "pcm", fm801_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_fm801, pci, fm801_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_fm801, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_fm801, 1); Index: head/sys/dev/sound/pci/neomagic.c =================================================================== --- head/sys/dev/sound/pci/neomagic.c (revision 297861) +++ head/sys/dev/sound/pci/neomagic.c (revision 297862) @@ -1,820 +1,820 @@ /*- * Copyright (c) 1999 Cameron Grant * All rights reserved. * * Derived from the public domain Linux driver * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
 * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include SND_DECLARE_FILE("$FreeBSD$"); /* -------------------------------------------------------------------- */ #define NM_BUFFSIZE 16384 #define NM256AV_PCI_ID 0x800510c8 #define NM256ZX_PCI_ID 0x800610c8 struct sc_info; /* channel registers */ struct sc_chinfo { int active, spd, dir, fmt; u_int32_t blksize, wmark; struct snd_dbuf *buffer; struct pcm_channel *channel; struct sc_info *parent; }; /* device private data */ struct sc_info { device_t dev; u_int32_t type; struct resource *reg, *irq, *buf; int regid, irqid, bufid; void *ih; u_int32_t ac97_base, ac97_status, ac97_busy; u_int32_t buftop, pbuf, rbuf, cbuf, acbuf; u_int32_t playint, recint, misc1int, misc2int; u_int32_t irsz, badintr; struct sc_chinfo pch, rch; }; /* -------------------------------------------------------------------- */ /* * prototypes */ /* stuff */ static int nm_loadcoeff(struct sc_info *sc, int dir, int num); static int nm_setch(struct sc_chinfo *ch); static int nm_init(struct sc_info *); static void nm_intr(void *); /* talk to the card */ static u_int32_t nm_rd(struct sc_info *, int, int); static void nm_wr(struct sc_info *, int, 
u_int32_t, int); static u_int32_t nm_rdbuf(struct sc_info *, int, int); static void nm_wrbuf(struct sc_info *, int, u_int32_t, int); static u_int32_t badcards[] = { 0x0007103c, 0x008f1028, 0x00dd1014, 0x8005110a, }; #define NUM_BADCARDS (sizeof(badcards) / sizeof(u_int32_t)) /* The actual rates supported by the card. */ static int samplerates[9] = { 8000, 11025, 16000, 22050, 24000, 32000, 44100, 48000, 99999999 }; /* -------------------------------------------------------------------- */ static u_int32_t nm_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps nm_caps = {4000, 48000, nm_fmt, 0}; /* -------------------------------------------------------------------- */ /* Hardware */ static u_int32_t nm_rd(struct sc_info *sc, int regno, int size) { bus_space_tag_t st = rman_get_bustag(sc->reg); bus_space_handle_t sh = rman_get_bushandle(sc->reg); switch (size) { case 1: return bus_space_read_1(st, sh, regno); case 2: return bus_space_read_2(st, sh, regno); case 4: return bus_space_read_4(st, sh, regno); default: return 0xffffffff; } } static void nm_wr(struct sc_info *sc, int regno, u_int32_t data, int size) { bus_space_tag_t st = rman_get_bustag(sc->reg); bus_space_handle_t sh = rman_get_bushandle(sc->reg); switch (size) { case 1: bus_space_write_1(st, sh, regno, data); break; case 2: bus_space_write_2(st, sh, regno, data); break; case 4: bus_space_write_4(st, sh, regno, data); break; } } static u_int32_t nm_rdbuf(struct sc_info *sc, int regno, int size) { bus_space_tag_t st = rman_get_bustag(sc->buf); bus_space_handle_t sh = rman_get_bushandle(sc->buf); switch (size) { case 1: return bus_space_read_1(st, sh, regno); case 2: return bus_space_read_2(st, sh, regno); case 4: return bus_space_read_4(st, sh, regno); default: return 0xffffffff; } } static void nm_wrbuf(struct sc_info *sc, int regno, u_int32_t data, int size) { bus_space_tag_t st = 
rman_get_bustag(sc->buf); bus_space_handle_t sh = rman_get_bushandle(sc->buf); switch (size) { case 1: bus_space_write_1(st, sh, regno, data); break; case 2: bus_space_write_2(st, sh, regno, data); break; case 4: bus_space_write_4(st, sh, regno, data); break; } } /* -------------------------------------------------------------------- */ /* ac97 codec */ static int nm_waitcd(struct sc_info *sc) { int cnt = 10; int fail = 1; while (cnt-- > 0) { if (nm_rd(sc, sc->ac97_status, 2) & sc->ac97_busy) { DELAY(100); } else { fail = 0; break; } } return (fail); } static u_int32_t nm_initcd(kobj_t obj, void *devinfo) { struct sc_info *sc = (struct sc_info *)devinfo; nm_wr(sc, 0x6c0, 0x01, 1); #if 0 /* * The following code-line may cause a hang for some chipsets, see * PR 56617. * In case of a bugreport without this line have a look at the PR and * conditionize the code-line based upon the specific version of * the chip. */ nm_wr(sc, 0x6cc, 0x87, 1); #endif nm_wr(sc, 0x6cc, 0x80, 1); nm_wr(sc, 0x6cc, 0x00, 1); return 1; } static int nm_rdcd(kobj_t obj, void *devinfo, int regno) { struct sc_info *sc = (struct sc_info *)devinfo; u_int32_t x; if (!nm_waitcd(sc)) { x = nm_rd(sc, sc->ac97_base + regno, 2); DELAY(1000); return x; } else { device_printf(sc->dev, "ac97 codec not ready\n"); return -1; } } static int nm_wrcd(kobj_t obj, void *devinfo, int regno, u_int32_t data) { struct sc_info *sc = (struct sc_info *)devinfo; int cnt = 3; if (!nm_waitcd(sc)) { while (cnt-- > 0) { nm_wr(sc, sc->ac97_base + regno, data, 2); if (!nm_waitcd(sc)) { DELAY(1000); return 0; } } } device_printf(sc->dev, "ac97 codec not ready\n"); return -1; } static kobj_method_t nm_ac97_methods[] = { KOBJMETHOD(ac97_init, nm_initcd), KOBJMETHOD(ac97_read, nm_rdcd), KOBJMETHOD(ac97_write, nm_wrcd), KOBJMETHOD_END }; AC97_DECLARE(nm_ac97); /* -------------------------------------------------------------------- */ static void nm_ackint(struct sc_info *sc, u_int32_t num) { if (sc->type == NM256AV_PCI_ID) { 
nm_wr(sc, NM_INT_REG, num << 1, 2); } else if (sc->type == NM256ZX_PCI_ID) { nm_wr(sc, NM_INT_REG, num, 4); } } static int nm_loadcoeff(struct sc_info *sc, int dir, int num) { int ofs, sz, i; u_int32_t addr; addr = (dir == PCMDIR_PLAY)? 0x01c : 0x21c; if (dir == PCMDIR_REC) num += 8; sz = coefficientSizes[num]; ofs = 0; while (num-- > 0) ofs+= coefficientSizes[num]; for (i = 0; i < sz; i++) nm_wrbuf(sc, sc->cbuf + i, coefficients[ofs + i], 1); nm_wr(sc, addr, sc->cbuf, 4); if (dir == PCMDIR_PLAY) sz--; nm_wr(sc, addr + 4, sc->cbuf + sz, 4); return 0; } static int nm_setch(struct sc_chinfo *ch) { struct sc_info *sc = ch->parent; u_int32_t base; u_int8_t x; for (x = 0; x < 8; x++) if (ch->spd < (samplerates[x] + samplerates[x + 1]) / 2) break; if (x == 8) return 1; ch->spd = samplerates[x]; nm_loadcoeff(sc, ch->dir, x); x <<= 4; x &= NM_RATE_MASK; if (ch->fmt & AFMT_16BIT) x |= NM_RATE_BITS_16; if (AFMT_CHANNEL(ch->fmt) > 1) x |= NM_RATE_STEREO; base = (ch->dir == PCMDIR_PLAY)? NM_PLAYBACK_REG_OFFSET : NM_RECORD_REG_OFFSET; nm_wr(sc, base + NM_RATE_REG_OFFSET, x, 1); return 0; } /* channel interface */ static void * nmchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct sc_info *sc = devinfo; struct sc_chinfo *ch; u_int32_t chnbuf; chnbuf = (dir == PCMDIR_PLAY)? sc->pbuf : sc->rbuf; ch = (dir == PCMDIR_PLAY)? &sc->pch : &sc->rch; ch->active = 0; ch->blksize = 0; ch->wmark = 0; ch->buffer = b; sndbuf_setup(ch->buffer, (u_int8_t *)rman_get_virtual(sc->buf) + chnbuf, NM_BUFFSIZE); if (bootverbose) device_printf(sc->dev, "%s buf %p\n", (dir == PCMDIR_PLAY)? 
"play" : "rec", sndbuf_getbuf(ch->buffer)); ch->parent = sc; ch->channel = c; ch->dir = dir; return ch; } static int nmchan_free(kobj_t obj, void *data) { return 0; } static int nmchan_setformat(kobj_t obj, void *data, u_int32_t format) { struct sc_chinfo *ch = data; ch->fmt = format; return nm_setch(ch); } static u_int32_t nmchan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct sc_chinfo *ch = data; ch->spd = speed; return nm_setch(ch)? 0 : ch->spd; } static u_int32_t nmchan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct sc_chinfo *ch = data; ch->blksize = blocksize; return blocksize; } static int nmchan_trigger(kobj_t obj, void *data, int go) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; int ssz; if (!PCMTRIG_COMMON(go)) return 0; ssz = (ch->fmt & AFMT_16BIT)? 2 : 1; if (AFMT_CHANNEL(ch->fmt) > 1) ssz <<= 1; if (ch->dir == PCMDIR_PLAY) { if (go == PCMTRIG_START) { ch->active = 1; ch->wmark = ch->blksize; nm_wr(sc, NM_PBUFFER_START, sc->pbuf, 4); nm_wr(sc, NM_PBUFFER_END, sc->pbuf + NM_BUFFSIZE - ssz, 4); nm_wr(sc, NM_PBUFFER_CURRP, sc->pbuf, 4); nm_wr(sc, NM_PBUFFER_WMARK, sc->pbuf + ch->wmark, 4); nm_wr(sc, NM_PLAYBACK_ENABLE_REG, NM_PLAYBACK_FREERUN | NM_PLAYBACK_ENABLE_FLAG, 1); nm_wr(sc, NM_AUDIO_MUTE_REG, 0, 2); } else { ch->active = 0; nm_wr(sc, NM_PLAYBACK_ENABLE_REG, 0, 1); nm_wr(sc, NM_AUDIO_MUTE_REG, NM_AUDIO_MUTE_BOTH, 2); } } else { if (go == PCMTRIG_START) { ch->active = 1; ch->wmark = ch->blksize; nm_wr(sc, NM_RECORD_ENABLE_REG, NM_RECORD_FREERUN | NM_RECORD_ENABLE_FLAG, 1); nm_wr(sc, NM_RBUFFER_START, sc->rbuf, 4); nm_wr(sc, NM_RBUFFER_END, sc->rbuf + NM_BUFFSIZE, 4); nm_wr(sc, NM_RBUFFER_CURRP, sc->rbuf, 4); nm_wr(sc, NM_RBUFFER_WMARK, sc->rbuf + ch->wmark, 4); } else { ch->active = 0; nm_wr(sc, NM_RECORD_ENABLE_REG, 0, 1); } } return 0; } static u_int32_t nmchan_getptr(kobj_t obj, void *data) { struct sc_chinfo *ch = data; struct sc_info *sc = ch->parent; if (ch->dir == PCMDIR_PLAY) return nm_rd(sc, 
NM_PBUFFER_CURRP, 4) - sc->pbuf; else return nm_rd(sc, NM_RBUFFER_CURRP, 4) - sc->rbuf; } static struct pcmchan_caps * nmchan_getcaps(kobj_t obj, void *data) { return &nm_caps; } static kobj_method_t nmchan_methods[] = { KOBJMETHOD(channel_init, nmchan_init), KOBJMETHOD(channel_free, nmchan_free), KOBJMETHOD(channel_setformat, nmchan_setformat), KOBJMETHOD(channel_setspeed, nmchan_setspeed), KOBJMETHOD(channel_setblocksize, nmchan_setblocksize), KOBJMETHOD(channel_trigger, nmchan_trigger), KOBJMETHOD(channel_getptr, nmchan_getptr), KOBJMETHOD(channel_getcaps, nmchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(nmchan); /* The interrupt handler */ static void nm_intr(void *p) { struct sc_info *sc = (struct sc_info *)p; int status, x; status = nm_rd(sc, NM_INT_REG, sc->irsz); if (status == 0) return; if (status & sc->playint) { status &= ~sc->playint; sc->pch.wmark += sc->pch.blksize; sc->pch.wmark %= NM_BUFFSIZE; nm_wr(sc, NM_PBUFFER_WMARK, sc->pbuf + sc->pch.wmark, 4); nm_ackint(sc, sc->playint); chn_intr(sc->pch.channel); } if (status & sc->recint) { status &= ~sc->recint; sc->rch.wmark += sc->rch.blksize; sc->rch.wmark %= NM_BUFFSIZE; nm_wr(sc, NM_RBUFFER_WMARK, sc->rbuf + sc->rch.wmark, 4); nm_ackint(sc, sc->recint); chn_intr(sc->rch.channel); } if (status & sc->misc1int) { status &= ~sc->misc1int; nm_ackint(sc, sc->misc1int); x = nm_rd(sc, 0x400, 1); nm_wr(sc, 0x400, x | 2, 1); device_printf(sc->dev, "misc int 1\n"); } if (status & sc->misc2int) { status &= ~sc->misc2int; nm_ackint(sc, sc->misc2int); x = nm_rd(sc, 0x400, 1); nm_wr(sc, 0x400, x & ~2, 1); device_printf(sc->dev, "misc int 2\n"); } if (status) { nm_ackint(sc, status); device_printf(sc->dev, "unknown int\n"); } } /* -------------------------------------------------------------------- */ /* * Probe and attach the card */ static int nm_init(struct sc_info *sc) { u_int32_t ofs, i; if (sc->type == NM256AV_PCI_ID) { sc->ac97_base = NM_MIXER_OFFSET; sc->ac97_status = NM_MIXER_STATUS_OFFSET; sc->ac97_busy = 
NM_MIXER_READY_MASK; sc->buftop = 2560 * 1024; sc->irsz = 2; sc->playint = NM_PLAYBACK_INT; sc->recint = NM_RECORD_INT; sc->misc1int = NM_MISC_INT_1; sc->misc2int = NM_MISC_INT_2; } else if (sc->type == NM256ZX_PCI_ID) { sc->ac97_base = NM_MIXER_OFFSET; sc->ac97_status = NM2_MIXER_STATUS_OFFSET; sc->ac97_busy = NM2_MIXER_READY_MASK; sc->buftop = (nm_rd(sc, 0xa0b, 2)? 6144 : 4096) * 1024; sc->irsz = 4; sc->playint = NM2_PLAYBACK_INT; sc->recint = NM2_RECORD_INT; sc->misc1int = NM2_MISC_INT_1; sc->misc2int = NM2_MISC_INT_2; } else return -1; sc->badintr = 0; ofs = sc->buftop - 0x0400; sc->buftop -= 0x1400; if (bootverbose) device_printf(sc->dev, "buftop is 0x%08x\n", sc->buftop); if ((nm_rdbuf(sc, ofs, 4) & NM_SIG_MASK) == NM_SIGNATURE) { i = nm_rdbuf(sc, ofs + 4, 4); if (i != 0 && i != 0xffffffff) { if (bootverbose) device_printf(sc->dev, "buftop is changed to 0x%08x\n", i); sc->buftop = i; } } sc->cbuf = sc->buftop - NM_MAX_COEFFICIENT; sc->rbuf = sc->cbuf - NM_BUFFSIZE; sc->pbuf = sc->rbuf - NM_BUFFSIZE; sc->acbuf = sc->pbuf - (NM_TOTAL_COEFF_COUNT * 4); nm_wr(sc, 0, 0x11, 1); nm_wr(sc, NM_RECORD_ENABLE_REG, 0, 1); nm_wr(sc, 0x214, 0, 2); return 0; } static int nm_pci_probe(device_t dev) { struct sc_info *sc = NULL; char *s = NULL; u_int32_t subdev, i; subdev = (pci_get_subdevice(dev) << 16) | pci_get_subvendor(dev); switch (pci_get_devid(dev)) { case NM256AV_PCI_ID: i = 0; while ((i < NUM_BADCARDS) && (badcards[i] != subdev)) i++; /* Try to catch other non-ac97 cards */ if (i == NUM_BADCARDS) { if (!(sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "cannot allocate softc\n"); return ENXIO; } sc->regid = PCIR_BAR(1); sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->regid, RF_ACTIVE); if (!sc->reg) { device_printf(dev, "unable to map register space\n"); free(sc, M_DEVBUF); return ENXIO; } /* * My Panasonic CF-M2EV needs resetting device * before checking mixer is present or not. * t.ichinoseki@nifty.com. 
*/ nm_wr(sc, 0, 0x11, 1); /* reset device */ if ((nm_rd(sc, NM_MIXER_PRESENCE, 2) & NM_PRESENCE_MASK) != NM_PRESENCE_VALUE) { i = 0; /* non-ac97 card, but not listed */ DEB(device_printf(dev, "subdev = 0x%x - badcard?\n", subdev)); } bus_release_resource(dev, SYS_RES_MEMORY, sc->regid, sc->reg); free(sc, M_DEVBUF); } if (i == NUM_BADCARDS) s = "NeoMagic 256AV"; DEB(else) DEB(device_printf(dev, "this is a non-ac97 NM256AV, not attaching\n")); break; case NM256ZX_PCI_ID: s = "NeoMagic 256ZX"; break; } if (s) device_set_desc(dev, s); return s? 0 : ENXIO; } static int nm_pci_attach(device_t dev) { struct sc_info *sc; - struct ac97_info *codec = 0; + struct ac97_info *codec = NULL; char status[SND_STATUSLEN]; sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO); sc->dev = dev; sc->type = pci_get_devid(dev); pci_enable_busmaster(dev); sc->bufid = PCIR_BAR(0); sc->buf = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->bufid, RF_ACTIVE); sc->regid = PCIR_BAR(1); sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->regid, RF_ACTIVE); if (!sc->buf || !sc->reg) { device_printf(dev, "unable to map register space\n"); goto bad; } if (nm_init(sc) == -1) { device_printf(dev, "unable to initialize the card\n"); goto bad; } codec = AC97_CREATE(dev, sc, nm_ac97); if (codec == NULL) goto bad; if (mixer_init(dev, ac97_getmixerclass(), codec) == -1) goto bad; sc->irqid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid, RF_ACTIVE | RF_SHAREABLE); if (!sc->irq || snd_setup_intr(dev, sc->irq, 0, nm_intr, sc, &sc->ih)) { device_printf(dev, "unable to map interrupt\n"); goto bad; } snprintf(status, SND_STATUSLEN, "at memory 0x%jx, 0x%jx irq %jd %s", rman_get_start(sc->buf), rman_get_start(sc->reg), rman_get_start(sc->irq),PCM_KLDSTRING(snd_neomagic)); if (pcm_register(dev, sc, 1, 1)) goto bad; pcm_addchan(dev, PCMDIR_REC, &nmchan_class, sc); pcm_addchan(dev, PCMDIR_PLAY, &nmchan_class, sc); pcm_setstatus(dev, status); return 0; bad: if (codec) ac97_destroy(codec); if 
(sc->buf) bus_release_resource(dev, SYS_RES_MEMORY, sc->bufid, sc->buf); if (sc->reg) bus_release_resource(dev, SYS_RES_MEMORY, sc->regid, sc->reg); if (sc->ih) bus_teardown_intr(dev, sc->irq, sc->ih); if (sc->irq) bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); free(sc, M_DEVBUF); return ENXIO; } static int nm_pci_detach(device_t dev) { int r; struct sc_info *sc; r = pcm_unregister(dev); if (r) return r; sc = pcm_getdevinfo(dev); bus_release_resource(dev, SYS_RES_MEMORY, sc->bufid, sc->buf); bus_release_resource(dev, SYS_RES_MEMORY, sc->regid, sc->reg); bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq); free(sc, M_DEVBUF); return 0; } static int nm_pci_suspend(device_t dev) { struct sc_info *sc; sc = pcm_getdevinfo(dev); /* stop playing */ if (sc->pch.active) { nm_wr(sc, NM_PLAYBACK_ENABLE_REG, 0, 1); nm_wr(sc, NM_AUDIO_MUTE_REG, NM_AUDIO_MUTE_BOTH, 2); } /* stop recording */ if (sc->rch.active) { nm_wr(sc, NM_RECORD_ENABLE_REG, 0, 1); } return 0; } static int nm_pci_resume(device_t dev) { struct sc_info *sc; sc = pcm_getdevinfo(dev); /* * Reinit audio device. * Don't call nm_init(). It would change buftop if X ran or * is running. This makes playing and recording buffer address * shift but these buffers of channel layer are not changed. * As a result of this inconsistency, periodic noise will be * generated while playing. 
*/ nm_wr(sc, 0, 0x11, 1); nm_wr(sc, 0x214, 0, 2); /* Reinit mixer */ if (mixer_reinit(dev) == -1) { device_printf(dev, "unable to reinitialize the mixer\n"); return ENXIO; } /* restart playing */ if (sc->pch.active) { nm_wr(sc, NM_PLAYBACK_ENABLE_REG, NM_PLAYBACK_FREERUN | NM_PLAYBACK_ENABLE_FLAG, 1); nm_wr(sc, NM_AUDIO_MUTE_REG, 0, 2); } /* restart recording */ if (sc->rch.active) { nm_wr(sc, NM_RECORD_ENABLE_REG, NM_RECORD_FREERUN | NM_RECORD_ENABLE_FLAG, 1); } return 0; } static device_method_t nm_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nm_pci_probe), DEVMETHOD(device_attach, nm_pci_attach), DEVMETHOD(device_detach, nm_pci_detach), DEVMETHOD(device_suspend, nm_pci_suspend), DEVMETHOD(device_resume, nm_pci_resume), { 0, 0 } }; static driver_t nm_driver = { "pcm", nm_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_neomagic, pci, nm_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_neomagic, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_neomagic, 1); Index: head/sys/dev/sound/pci/solo.c =================================================================== --- head/sys/dev/sound/pci/solo.c (revision 297861) +++ head/sys/dev/sound/pci/solo.c (revision 297862) @@ -1,1108 +1,1108 @@ /*- * Copyright (c) 1999 Cameron Grant * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include "mixer_if.h" SND_DECLARE_FILE("$FreeBSD$"); #define SOLO_DEFAULT_BUFSZ 16384 #define ABS(x) (((x) < 0)? -(x) : (x)) /* if defined, playback always uses the 2nd channel and full duplex works */ #define ESS18XX_DUPLEX 1 /* more accurate clocks and split audio1/audio2 rates */ #define ESS18XX_NEWSPEED /* 1 = INTR_MPSAFE, 0 = GIANT */ #define ESS18XX_MPSAFE 1 static u_int32_t ess_playfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S8, 1, 0), SND_FORMAT(AFMT_S8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_U16_LE, 1, 0), SND_FORMAT(AFMT_U16_LE, 2, 0), 0 }; static struct pcmchan_caps ess_playcaps = {6000, 48000, ess_playfmt, 0}; /* * Recording output is byte-swapped */ static u_int32_t ess_recfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S8, 1, 0), SND_FORMAT(AFMT_S8, 2, 0), SND_FORMAT(AFMT_S16_BE, 1, 0), SND_FORMAT(AFMT_S16_BE, 2, 0), SND_FORMAT(AFMT_U16_BE, 1, 0), SND_FORMAT(AFMT_U16_BE, 2, 0), 0 }; static struct pcmchan_caps ess_reccaps = {6000, 48000, ess_recfmt, 0}; struct ess_info; struct 
ess_chinfo { struct ess_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; int dir, hwch, stopping; u_int32_t fmt, spd, blksz; }; struct ess_info { struct resource *io, *sb, *vc, *mpu, *gp; /* I/O address for the board */ struct resource *irq; void *ih; bus_dma_tag_t parent_dmat; int simplex_dir, type, dmasz[2]; unsigned int duplex:1, newspeed:1; unsigned int bufsz; struct ess_chinfo pch, rch; #if ESS18XX_MPSAFE == 1 struct mtx *lock; #endif }; #if ESS18XX_MPSAFE == 1 #define ess_lock(_ess) snd_mtxlock((_ess)->lock) #define ess_unlock(_ess) snd_mtxunlock((_ess)->lock) #define ess_lock_assert(_ess) snd_mtxassert((_ess)->lock) #else #define ess_lock(_ess) #define ess_unlock(_ess) #define ess_lock_assert(_ess) #endif static int ess_rd(struct ess_info *sc, int reg); static void ess_wr(struct ess_info *sc, int reg, u_int8_t val); static int ess_dspready(struct ess_info *sc); static int ess_cmd(struct ess_info *sc, u_char val); static int ess_cmd1(struct ess_info *sc, u_char cmd, int val); static int ess_get_byte(struct ess_info *sc); static void ess_setmixer(struct ess_info *sc, u_int port, u_int value); static int ess_getmixer(struct ess_info *sc, u_int port); static int ess_reset_dsp(struct ess_info *sc); static int ess_write(struct ess_info *sc, u_char reg, int val); static int ess_read(struct ess_info *sc, u_char reg); static void ess_intr(void *arg); static int ess_setupch(struct ess_info *sc, int ch, int dir, int spd, u_int32_t fmt, int len); static int ess_start(struct ess_chinfo *ch); static int ess_stop(struct ess_chinfo *ch); static int ess_dmasetup(struct ess_info *sc, int ch, u_int32_t base, u_int16_t cnt, int dir); static int ess_dmapos(struct ess_info *sc, int ch); static int ess_dmatrigger(struct ess_info *sc, int ch, int go); /* * Common code for the midi and pcm functions * * ess_cmd write a single byte to the CMD port. 
* ess_cmd1 write a CMD + 1 byte arg * ess_cmd2 write a CMD + 2 byte arg * ess_get_byte returns a single byte from the DSP data port * * ess_write is actually ess_cmd1 * ess_read access ext. regs via ess_cmd(0xc0, reg) followed by ess_get_byte */ static int port_rd(struct resource *port, int regno, int size) { bus_space_tag_t st = rman_get_bustag(port); bus_space_handle_t sh = rman_get_bushandle(port); switch (size) { case 1: return bus_space_read_1(st, sh, regno); case 2: return bus_space_read_2(st, sh, regno); case 4: return bus_space_read_4(st, sh, regno); default: return 0xffffffff; } } static void port_wr(struct resource *port, int regno, u_int32_t data, int size) { bus_space_tag_t st = rman_get_bustag(port); bus_space_handle_t sh = rman_get_bushandle(port); switch (size) { case 1: bus_space_write_1(st, sh, regno, data); break; case 2: bus_space_write_2(st, sh, regno, data); break; case 4: bus_space_write_4(st, sh, regno, data); break; } } static int ess_rd(struct ess_info *sc, int reg) { return port_rd(sc->sb, reg, 1); } static void ess_wr(struct ess_info *sc, int reg, u_int8_t val) { port_wr(sc->sb, reg, val, 1); } static int ess_dspready(struct ess_info *sc) { return ((ess_rd(sc, SBDSP_STATUS) & 0x80) == 0); } static int ess_dspwr(struct ess_info *sc, u_char val) { int i; for (i = 0; i < 1000; i++) { if (ess_dspready(sc)) { ess_wr(sc, SBDSP_CMD, val); return 1; } if (i > 10) DELAY((i > 100)? 
1000 : 10); } printf("ess_dspwr(0x%02x) timed out.\n", val); return 0; } static int ess_cmd(struct ess_info *sc, u_char val) { DEB(printf("ess_cmd: %x\n", val)); return ess_dspwr(sc, val); } static int ess_cmd1(struct ess_info *sc, u_char cmd, int val) { DEB(printf("ess_cmd1: %x, %x\n", cmd, val)); if (ess_dspwr(sc, cmd)) { return ess_dspwr(sc, val & 0xff); } else return 0; } static void ess_setmixer(struct ess_info *sc, u_int port, u_int value) { DEB(printf("ess_setmixer: reg=%x, val=%x\n", port, value);) ess_wr(sc, SB_MIX_ADDR, (u_char) (port & 0xff)); /* Select register */ DELAY(10); ess_wr(sc, SB_MIX_DATA, (u_char) (value & 0xff)); DELAY(10); } static int ess_getmixer(struct ess_info *sc, u_int port) { int val; ess_wr(sc, SB_MIX_ADDR, (u_char) (port & 0xff)); /* Select register */ DELAY(10); val = ess_rd(sc, SB_MIX_DATA); DELAY(10); return val; } static int ess_get_byte(struct ess_info *sc) { int i; for (i = 1000; i > 0; i--) { if (ess_rd(sc, 0xc) & 0x40) return ess_rd(sc, DSP_READ); else DELAY(20); } return -1; } static int ess_write(struct ess_info *sc, u_char reg, int val) { return ess_cmd1(sc, reg, val); } static int ess_read(struct ess_info *sc, u_char reg) { return (ess_cmd(sc, 0xc0) && ess_cmd(sc, reg))? ess_get_byte(sc) : -1; } static int ess_reset_dsp(struct ess_info *sc) { DEB(printf("ess_reset_dsp\n")); ess_wr(sc, SBDSP_RST, 3); DELAY(100); ess_wr(sc, SBDSP_RST, 0); if (ess_get_byte(sc) != 0xAA) { DEB(printf("ess_reset_dsp failed\n")); /* rman_get_start(d->io_base))); */ return ENXIO; /* Sorry */ } ess_cmd(sc, 0xc6); return 0; } static void ess_intr(void *arg) { struct ess_info *sc = (struct ess_info *)arg; int src, pirq = 0, rirq = 0; ess_lock(sc); src = 0; if (ess_getmixer(sc, 0x7a) & 0x80) src |= 2; if (ess_rd(sc, 0x0c) & 0x01) src |= 1; if (src == 0) { ess_unlock(sc); return; } if (sc->duplex) { pirq = (src & sc->pch.hwch)? 1 : 0; rirq = (src & sc->rch.hwch)? 
1 : 0; } else { if (sc->simplex_dir == PCMDIR_PLAY) pirq = 1; if (sc->simplex_dir == PCMDIR_REC) rirq = 1; if (!pirq && !rirq) printf("solo: IRQ neither playback nor rec!\n"); } DEB(printf("ess_intr: pirq:%d rirq:%d\n",pirq,rirq)); if (pirq) { if (sc->pch.stopping) { ess_dmatrigger(sc, sc->pch.hwch, 0); sc->pch.stopping = 0; if (sc->pch.hwch == 1) ess_write(sc, 0xb8, ess_read(sc, 0xb8) & ~0x01); else ess_setmixer(sc, 0x78, ess_getmixer(sc, 0x78) & ~0x03); } ess_unlock(sc); chn_intr(sc->pch.channel); ess_lock(sc); } if (rirq) { if (sc->rch.stopping) { ess_dmatrigger(sc, sc->rch.hwch, 0); sc->rch.stopping = 0; /* XXX: will this stop audio2? */ ess_write(sc, 0xb8, ess_read(sc, 0xb8) & ~0x01); } ess_unlock(sc); chn_intr(sc->rch.channel); ess_lock(sc); } if (src & 2) ess_setmixer(sc, 0x7a, ess_getmixer(sc, 0x7a) & ~0x80); if (src & 1) ess_rd(sc, DSP_DATA_AVAIL); ess_unlock(sc); } /* utility functions for ESS */ static u_int8_t ess_calcspeed8(int *spd) { int speed = *spd; u_int32_t t; if (speed > 22000) { t = (795500 + speed / 2) / speed; speed = (795500 + t / 2) / t; t = (256 - t) | 0x80; } else { t = (397700 + speed / 2) / speed; speed = (397700 + t / 2) / t; t = 128 - t; } *spd = speed; return t & 0x000000ff; } static u_int8_t ess_calcspeed9(int *spd) { int speed, s0, s1, use0; u_int8_t t0, t1; /* rate = source / (256 - divisor) */ /* divisor = 256 - (source / rate) */ speed = *spd; t0 = 128 - (793800 / speed); s0 = 793800 / (128 - t0); t1 = 128 - (768000 / speed); s1 = 768000 / (128 - t1); t1 |= 0x80; use0 = (ABS(speed - s0) < ABS(speed - s1))? 1 : 0; *spd = use0? s0 : s1; return use0? t0 : t1; } static u_int8_t ess_calcfilter(int spd) { int cutoff; /* cutoff = 7160000 / (256 - divisor) */ /* divisor = 256 - (7160000 / cutoff) */ cutoff = (spd * 9 * 82) / 20; return (256 - (7160000 / cutoff)); } static int ess_setupch(struct ess_info *sc, int ch, int dir, int spd, u_int32_t fmt, int len) { int play = (dir == PCMDIR_PLAY)? 1 : 0; int b16 = (fmt & AFMT_16BIT)? 
1 : 0; int stereo = (AFMT_CHANNEL(fmt) > 1)? 1 : 0; int unsign = (!(fmt & AFMT_SIGNED))? 1 : 0; u_int8_t spdval, fmtval; DEB(printf("ess_setupch\n")); spdval = (sc->newspeed)? ess_calcspeed9(&spd) : ess_calcspeed8(&spd); sc->simplex_dir = play ? PCMDIR_PLAY : PCMDIR_REC ; if (ch == 1) { KASSERT((dir == PCMDIR_PLAY) || (dir == PCMDIR_REC), ("ess_setupch: dir1 bad")); len = -len; /* transfer length low */ ess_write(sc, 0xa4, len & 0x00ff); /* transfer length high */ ess_write(sc, 0xa5, (len & 0xff00) >> 8); /* autoinit, dma dir */ ess_write(sc, 0xb8, 0x04 | (play? 0x00 : 0x0a)); /* mono/stereo */ ess_write(sc, 0xa8, (ess_read(sc, 0xa8) & ~0x03) | (stereo? 0x01 : 0x02)); /* demand mode, 4 bytes/xfer */ ess_write(sc, 0xb9, 0x02); /* sample rate */ ess_write(sc, 0xa1, spdval); /* filter cutoff */ ess_write(sc, 0xa2, ess_calcfilter(spd)); /* setup dac/adc */ /* if (play) ess_write(sc, 0xb6, unsign? 0x80 : 0x00); */ /* mono, b16: signed, load signal */ /* ess_write(sc, 0xb7, 0x51 | (unsign? 0x00 : 0x20)); */ /* setup fifo */ ess_write(sc, 0xb7, 0x91 | (unsign? 0x00 : 0x20) | (b16? 0x04 : 0x00) | (stereo? 
0x08 : 0x40)); /* irq control */ ess_write(sc, 0xb1, (ess_read(sc, 0xb1) & 0x0f) | 0x50); /* drq control */ ess_write(sc, 0xb2, (ess_read(sc, 0xb2) & 0x0f) | 0x50); } else if (ch == 2) { KASSERT(dir == PCMDIR_PLAY, ("ess_setupch: dir2 bad")); len >>= 1; len = -len; /* transfer length low */ ess_setmixer(sc, 0x74, len & 0x00ff); /* transfer length high */ ess_setmixer(sc, 0x76, (len & 0xff00) >> 8); /* autoinit, 4 bytes/req */ ess_setmixer(sc, 0x78, 0x10); fmtval = b16 | (stereo << 1) | ((!unsign) << 2); /* enable irq, set format */ ess_setmixer(sc, 0x7a, 0x40 | fmtval); if (sc->newspeed) { /* sample rate */ ess_setmixer(sc, 0x70, spdval); /* filter cutoff */ ess_setmixer(sc, 0x72, ess_calcfilter(spd)); } } return 0; } static int ess_start(struct ess_chinfo *ch) { struct ess_info *sc = ch->parent; DEB(printf("ess_start\n");); ess_setupch(sc, ch->hwch, ch->dir, ch->spd, ch->fmt, ch->blksz); ch->stopping = 0; if (ch->hwch == 1) { ess_write(sc, 0xb8, ess_read(sc, 0xb8) | 0x01); if (ch->dir == PCMDIR_PLAY) { #if 0 DELAY(100000); /* 100 ms */ #endif ess_cmd(sc, 0xd1); } } else ess_setmixer(sc, 0x78, ess_getmixer(sc, 0x78) | 0x03); return 0; } static int ess_stop(struct ess_chinfo *ch) { struct ess_info *sc = ch->parent; DEB(printf("ess_stop\n")); ch->stopping = 1; if (ch->hwch == 1) ess_write(sc, 0xb8, ess_read(sc, 0xb8) & ~0x04); else ess_setmixer(sc, 0x78, ess_getmixer(sc, 0x78) & ~0x10); DEB(printf("done with stop\n")); return 0; } /* -------------------------------------------------------------------- */ /* channel interface for ESS18xx */ static void * esschan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct ess_info *sc = devinfo; struct ess_chinfo *ch = (dir == PCMDIR_PLAY)? 
&sc->pch : &sc->rch;

	DEB(printf("esschan_init\n"));
	ch->parent = sc;
	ch->channel = c;
	ch->buffer = b;
	ch->dir = dir;
	if (sndbuf_alloc(ch->buffer, sc->parent_dmat, 0, sc->bufsz) != 0)
		return NULL;
	/* Audio 2 engine serves playback only when the duplex mode is on. */
	ch->hwch = 1;
	if ((dir == PCMDIR_PLAY) && (sc->duplex))
		ch->hwch = 2;
	return ch;
}

/* Record the requested PCM format; applied to hardware at trigger time. */
static int
esschan_setformat(kobj_t obj, void *data, u_int32_t format)
{
	struct ess_chinfo *ch = data;

	ch->fmt = format;
	return 0;
}

/*
 * Store the requested rate, let ess_calcspeed9()/ess_calcspeed8()
 * adjust it in place to what the clock divisors can achieve, and
 * return the adjusted rate.
 */
static u_int32_t
esschan_setspeed(kobj_t obj, void *data, u_int32_t speed)
{
	struct ess_chinfo *ch = data;
	struct ess_info *sc = ch->parent;

	ch->spd = speed;
	if (sc->newspeed)
		ess_calcspeed9(&ch->spd);
	else
		ess_calcspeed8(&ch->spd);
	return ch->spd;
}

/* Record the block size; used when the channel is (re)programmed. */
static u_int32_t
esschan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize)
{
	struct ess_chinfo *ch = data;

	ch->blksz = blocksize;
	return ch->blksz;
}

/*
 * Start or stop the channel: program the DMA engine and the chip on
 * PCMTRIG_START, quiesce both on STOP/ABORT.  Serialized by the softc
 * lock.
 */
static int
esschan_trigger(kobj_t obj, void *data, int go)
{
	struct ess_chinfo *ch = data;
	struct ess_info *sc = ch->parent;

	if (!PCMTRIG_COMMON(go))
		return 0;
	DEB(printf("esschan_trigger: %d\n",go));
	ess_lock(sc);
	switch (go) {
	case PCMTRIG_START:
		ess_dmasetup(sc, ch->hwch, sndbuf_getbufaddr(ch->buffer), sndbuf_getsize(ch->buffer), ch->dir);
		ess_dmatrigger(sc, ch->hwch, 1);
		ess_start(ch);
		break;
	case PCMTRIG_STOP:
	case PCMTRIG_ABORT:
	default:
		ess_stop(ch);
		break;
	}
	ess_unlock(sc);
	return 0;
}

/* Current DMA position within the buffer, in bytes (from ess_dmapos()). */
static u_int32_t
esschan_getptr(kobj_t obj, void *data)
{
	struct ess_chinfo *ch = data;
	struct ess_info *sc = ch->parent;
	u_int32_t ret;

	ess_lock(sc);
	ret = ess_dmapos(sc, ch->hwch);
	ess_unlock(sc);
	return ret;
}

/* Per-direction capability table. */
static struct pcmchan_caps *
esschan_getcaps(kobj_t obj, void *data)
{
	struct ess_chinfo *ch = data;

	return (ch->dir == PCMDIR_PLAY)?
&ess_playcaps : &ess_reccaps; } static kobj_method_t esschan_methods[] = { KOBJMETHOD(channel_init, esschan_init), KOBJMETHOD(channel_setformat, esschan_setformat), KOBJMETHOD(channel_setspeed, esschan_setspeed), KOBJMETHOD(channel_setblocksize, esschan_setblocksize), KOBJMETHOD(channel_trigger, esschan_trigger), KOBJMETHOD(channel_getptr, esschan_getptr), KOBJMETHOD(channel_getcaps, esschan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(esschan); /************************************************************/ static int essmix_init(struct snd_mixer *m) { struct ess_info *sc = mix_getdevinfo(m); mix_setrecdevs(m, SOUND_MASK_CD | SOUND_MASK_MIC | SOUND_MASK_LINE | SOUND_MASK_IMIX); mix_setdevs(m, SOUND_MASK_SYNTH | SOUND_MASK_PCM | SOUND_MASK_LINE | SOUND_MASK_MIC | SOUND_MASK_CD | SOUND_MASK_VOLUME | SOUND_MASK_LINE1); ess_setmixer(sc, 0, 0); /* reset */ return 0; } static int essmix_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct ess_info *sc = mix_getdevinfo(m); int preg = 0, rreg = 0, l, r; l = (left * 15) / 100; r = (right * 15) / 100; switch (dev) { case SOUND_MIXER_SYNTH: preg = 0x36; rreg = 0x6b; break; case SOUND_MIXER_PCM: preg = 0x14; rreg = 0x7c; break; case SOUND_MIXER_LINE: preg = 0x3e; rreg = 0x6e; break; case SOUND_MIXER_MIC: preg = 0x1a; rreg = 0x68; break; case SOUND_MIXER_LINE1: preg = 0x3a; rreg = 0x6c; break; case SOUND_MIXER_CD: preg = 0x38; rreg = 0x6a; break; case SOUND_MIXER_VOLUME: l = left? (left * 63) / 100 : 64; r = right? (right * 63) / 100 : 64; ess_setmixer(sc, 0x60, l); ess_setmixer(sc, 0x62, r); left = (l == 64)? 0 : (l * 100) / 63; right = (r == 64)? 
0 : (r * 100) / 63; return left | (right << 8); } if (preg) ess_setmixer(sc, preg, (l << 4) | r); if (rreg) ess_setmixer(sc, rreg, (l << 4) | r); left = (l * 100) / 15; right = (r * 100) / 15; return left | (right << 8); } static u_int32_t essmix_setrecsrc(struct snd_mixer *m, u_int32_t src) { struct ess_info *sc = mix_getdevinfo(m); u_char recdev; switch (src) { case SOUND_MASK_CD: recdev = 0x02; break; case SOUND_MASK_LINE: recdev = 0x06; break; case SOUND_MASK_IMIX: recdev = 0x05; break; case SOUND_MASK_MIC: default: recdev = 0x00; src = SOUND_MASK_MIC; break; } ess_setmixer(sc, 0x1c, recdev); return src; } static kobj_method_t solomixer_methods[] = { KOBJMETHOD(mixer_init, essmix_init), KOBJMETHOD(mixer_set, essmix_set), KOBJMETHOD(mixer_setrecsrc, essmix_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(solomixer); /************************************************************/ static int ess_dmasetup(struct ess_info *sc, int ch, u_int32_t base, u_int16_t cnt, int dir) { KASSERT(ch == 1 || ch == 2, ("bad ch")); sc->dmasz[ch - 1] = cnt; if (ch == 1) { port_wr(sc->vc, 0x8, 0xc4, 1); /* command */ port_wr(sc->vc, 0xd, 0xff, 1); /* reset */ port_wr(sc->vc, 0xf, 0x01, 1); /* mask */ port_wr(sc->vc, 0xb, dir == PCMDIR_PLAY? 0x58 : 0x54, 1); /* mode */ port_wr(sc->vc, 0x0, base, 4); port_wr(sc->vc, 0x4, cnt - 1, 2); } else if (ch == 2) { port_wr(sc->io, 0x6, 0x08, 1); /* autoinit */ port_wr(sc->io, 0x0, base, 4); port_wr(sc->io, 0x4, cnt, 2); } return 0; } static int ess_dmapos(struct ess_info *sc, int ch) { int p = 0, i = 0, j = 0; KASSERT(ch == 1 || ch == 2, ("bad ch")); if (ch == 1) { /* * During recording, this register is known to give back * garbage if it's not quiescent while being read. That's * why we spl, stop the DMA, and try over and over until * adjacent reads are "close", in the right order and not * bigger than is otherwise possible. 
*/ ess_dmatrigger(sc, ch, 0); DELAY(20); do { DELAY(10); if (j > 1) printf("DMA count reg bogus: %04x & %04x\n", i, p); i = port_rd(sc->vc, 0x4, 2) + 1; p = port_rd(sc->vc, 0x4, 2) + 1; } while ((p > sc->dmasz[ch - 1] || i < p || (p - i) > 0x8) && j++ < 1000); ess_dmatrigger(sc, ch, 1); } else if (ch == 2) p = port_rd(sc->io, 0x4, 2); return sc->dmasz[ch - 1] - p; } static int ess_dmatrigger(struct ess_info *sc, int ch, int go) { KASSERT(ch == 1 || ch == 2, ("bad ch")); if (ch == 1) port_wr(sc->vc, 0xf, go? 0x00 : 0x01, 1); /* mask */ else if (ch == 2) port_wr(sc->io, 0x6, 0x08 | (go? 0x02 : 0x00), 1); /* autoinit */ return 0; } static void ess_release_resources(struct ess_info *sc, device_t dev) { if (sc->irq) { if (sc->ih) bus_teardown_intr(dev, sc->irq, sc->ih); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq); - sc->irq = 0; + sc->irq = NULL; } if (sc->io) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), sc->io); - sc->io = 0; + sc->io = NULL; } if (sc->sb) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(1), sc->sb); - sc->sb = 0; + sc->sb = NULL; } if (sc->vc) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(2), sc->vc); - sc->vc = 0; + sc->vc = NULL; } if (sc->mpu) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(3), sc->mpu); - sc->mpu = 0; + sc->mpu = NULL; } if (sc->gp) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(4), sc->gp); - sc->gp = 0; + sc->gp = NULL; } if (sc->parent_dmat) { bus_dma_tag_destroy(sc->parent_dmat); sc->parent_dmat = 0; } #if ESS18XX_MPSAFE == 1 if (sc->lock) { snd_mtxfree(sc->lock); sc->lock = NULL; } #endif free(sc, M_DEVBUF); } static int ess_alloc_resources(struct ess_info *sc, device_t dev) { int rid; rid = PCIR_BAR(0); sc->io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); rid = PCIR_BAR(1); sc->sb = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); rid = PCIR_BAR(2); sc->vc = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); rid = PCIR_BAR(3); sc->mpu = 
bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); rid = PCIR_BAR(4); sc->gp = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | RF_SHAREABLE); #if ESS18XX_MPSAFE == 1 sc->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_solo softc"); return (sc->irq && sc->io && sc->sb && sc->vc && sc->mpu && sc->gp && sc->lock)? 0 : ENXIO; #else return (sc->irq && sc->io && sc->sb && sc->vc && sc->mpu && sc->gp)? 0 : ENXIO; #endif } static int ess_probe(device_t dev) { char *s = NULL; u_int32_t subdev; subdev = (pci_get_subdevice(dev) << 16) | pci_get_subvendor(dev); switch (pci_get_devid(dev)) { case 0x1969125d: if (subdev == 0x8888125d) s = "ESS Solo-1E"; else if (subdev == 0x1818125d) s = "ESS Solo-1"; else s = "ESS Solo-1 (unknown vendor)"; break; } if (s) device_set_desc(dev, s); return s ? BUS_PROBE_DEFAULT : ENXIO; } #define ESS_PCI_LEGACYCONTROL 0x40 #define ESS_PCI_CONFIG 0x50 #define ESS_PCI_DDMACONTROL 0x60 static int ess_suspend(device_t dev) { return 0; } static int ess_resume(device_t dev) { uint16_t ddma; struct ess_info *sc = pcm_getdevinfo(dev); ess_lock(sc); ddma = rman_get_start(sc->vc) | 1; pci_write_config(dev, ESS_PCI_LEGACYCONTROL, 0x805f, 2); pci_write_config(dev, ESS_PCI_DDMACONTROL, ddma, 2); pci_write_config(dev, ESS_PCI_CONFIG, 0, 2); if (ess_reset_dsp(sc)) { ess_unlock(sc); goto no; } ess_unlock(sc); if (mixer_reinit(dev)) goto no; ess_lock(sc); if (sc->newspeed) ess_setmixer(sc, 0x71, 0x2a); port_wr(sc->io, 0x7, 0xb0, 1); /* enable irqs */ ess_unlock(sc); return 0; no: return EIO; } static int ess_attach(device_t dev) { struct ess_info *sc; char status[SND_STATUSLEN]; u_int16_t ddma; sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO); pci_enable_busmaster(dev); if (ess_alloc_resources(sc, dev)) goto no; sc->bufsz = pcm_getbuffersize(dev, 4096, SOLO_DEFAULT_BUFSZ, 65536); ddma = rman_get_start(sc->vc) | 1; pci_write_config(dev, 
ESS_PCI_LEGACYCONTROL, 0x805f, 2); pci_write_config(dev, ESS_PCI_DDMACONTROL, ddma, 2); pci_write_config(dev, ESS_PCI_CONFIG, 0, 2); port_wr(sc->io, 0x7, 0xb0, 1); /* enable irqs */ #ifdef ESS18XX_DUPLEX sc->duplex = 1; #else sc->duplex = 0; #endif #ifdef ESS18XX_NEWSPEED sc->newspeed = 1; #else sc->newspeed = 0; #endif if (snd_setup_intr(dev, sc->irq, #if ESS18XX_MPSAFE == 1 INTR_MPSAFE #else 0 #endif , ess_intr, sc, &sc->ih)) { device_printf(dev, "unable to map interrupt\n"); goto no; } if (!sc->duplex) pcm_setflags(dev, pcm_getflags(dev) | SD_F_SIMPLEX); #if 0 if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/65536, /*boundary*/0, #endif if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_24BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, #if ESS18XX_MPSAFE == 1 /*lockfunc*/NULL, /*lockarg*/NULL, #else /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, #endif &sc->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto no; } if (ess_reset_dsp(sc)) goto no; if (sc->newspeed) ess_setmixer(sc, 0x71, 0x2a); if (mixer_init(dev, &solomixer_class, sc)) goto no; snprintf(status, SND_STATUSLEN, "at io 0x%jx,0x%jx,0x%jx irq %jd %s", rman_get_start(sc->io), rman_get_start(sc->sb), rman_get_start(sc->vc), rman_get_start(sc->irq),PCM_KLDSTRING(snd_solo)); if (pcm_register(dev, sc, 1, 1)) goto no; pcm_addchan(dev, PCMDIR_REC, &esschan_class, sc); pcm_addchan(dev, PCMDIR_PLAY, &esschan_class, sc); pcm_setstatus(dev, status); return 0; no: ess_release_resources(sc, dev); return ENXIO; } static int ess_detach(device_t dev) { int r; struct ess_info *sc; r = pcm_unregister(dev); if (r) return r; sc = pcm_getdevinfo(dev); ess_release_resources(sc, dev); return 0; } static device_method_t ess_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ess_probe), DEVMETHOD(device_attach, 
ess_attach), DEVMETHOD(device_detach, ess_detach), DEVMETHOD(device_resume, ess_resume), DEVMETHOD(device_suspend, ess_suspend), { 0, 0 } }; static driver_t ess_driver = { "pcm", ess_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_solo, pci, ess_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_solo, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_solo, 1); Index: head/sys/dev/sound/pci/t4dwave.c =================================================================== --- head/sys/dev/sound/pci/t4dwave.c (revision 297861) +++ head/sys/dev/sound/pci/t4dwave.c (revision 297862) @@ -1,1067 +1,1067 @@ /*- * Copyright (c) 1999 Cameron Grant * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHERIN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THEPOSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include SND_DECLARE_FILE("$FreeBSD$"); /* -------------------------------------------------------------------- */ #define TDX_PCI_ID 0x20001023 #define TNX_PCI_ID 0x20011023 #define ALI_PCI_ID 0x545110b9 #define SPA_PCI_ID 0x70181039 #define TR_DEFAULT_BUFSZ 0x1000 /* For ALi M5451 the DMA transfer size appears to be fixed to 64k. */ #define ALI_BUFSZ 0x10000 #define TR_BUFALGN 0x8 #define TR_TIMEOUT_CDC 0xffff #define TR_MAXHWCH 64 #define ALI_MAXHWCH 32 #define TR_MAXPLAYCH 4 #define ALI_MAXPLAYCH 1 /* * Though, it's not clearly documented in the 4DWAVE datasheet, the * DX and NX chips can't handle DMA addresses located above 1GB as the * LBA (loop begin address) register which holds the DMA base address * is 32-bit, but the two MSBs are used for other purposes. */ #define TR_MAXADDR ((1U << 30) - 1) #define ALI_MAXADDR ((1U << 31) - 1) struct tr_info; /* channel registers */ struct tr_chinfo { u_int32_t cso, alpha, fms, fmc, ec; u_int32_t lba; u_int32_t eso, delta; u_int32_t rvol, cvol; u_int32_t gvsel, pan, vol, ctrl; u_int32_t active:1, was_active:1; int index, bufhalf; struct snd_dbuf *buffer; struct pcm_channel *channel; struct tr_info *parent; }; struct tr_rchinfo { u_int32_t delta; u_int32_t active:1, was_active:1; struct snd_dbuf *buffer; struct pcm_channel *channel; struct tr_info *parent; }; /* device private data */ struct tr_info { u_int32_t type; u_int32_t rev; bus_space_tag_t st; bus_space_handle_t sh; bus_dma_tag_t parent_dmat; struct resource *reg, *irq; int regtype, regid, irqid; void *ih; struct mtx *lock; u_int32_t hwchns; u_int32_t playchns; unsigned int bufsz; struct tr_chinfo chinfo[TR_MAXPLAYCH]; struct tr_rchinfo recchinfo; }; /* -------------------------------------------------------------------- */ static u_int32_t tr_recfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S8, 1, 0), SND_FORMAT(AFMT_S8, 2, 0), 
SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_U16_LE, 1, 0), SND_FORMAT(AFMT_U16_LE, 2, 0), 0 }; static struct pcmchan_caps tr_reccaps = {4000, 48000, tr_recfmt, 0}; static u_int32_t tr_playfmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S8, 1, 0), SND_FORMAT(AFMT_S8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_U16_LE, 1, 0), SND_FORMAT(AFMT_U16_LE, 2, 0), 0 }; static struct pcmchan_caps tr_playcaps = {4000, 48000, tr_playfmt, 0}; /* -------------------------------------------------------------------- */ /* Hardware */ static u_int32_t tr_rd(struct tr_info *tr, int regno, int size) { switch(size) { case 1: return bus_space_read_1(tr->st, tr->sh, regno); case 2: return bus_space_read_2(tr->st, tr->sh, regno); case 4: return bus_space_read_4(tr->st, tr->sh, regno); default: return 0xffffffff; } } static void tr_wr(struct tr_info *tr, int regno, u_int32_t data, int size) { switch(size) { case 1: bus_space_write_1(tr->st, tr->sh, regno, data); break; case 2: bus_space_write_2(tr->st, tr->sh, regno, data); break; case 4: bus_space_write_4(tr->st, tr->sh, regno, data); break; } } /* -------------------------------------------------------------------- */ /* ac97 codec */ static int tr_rdcd(kobj_t obj, void *devinfo, int regno) { struct tr_info *tr = (struct tr_info *)devinfo; int i, j, treg, trw; switch (tr->type) { case SPA_PCI_ID: treg=SPA_REG_CODECRD; trw=SPA_CDC_RWSTAT; break; case ALI_PCI_ID: if (tr->rev > 0x01) treg=TDX_REG_CODECWR; else treg=TDX_REG_CODECRD; trw=TDX_CDC_RWSTAT; break; case TDX_PCI_ID: treg=TDX_REG_CODECRD; trw=TDX_CDC_RWSTAT; break; case TNX_PCI_ID: treg=(regno & 0x100)? TNX_REG_CODEC2RD : TNX_REG_CODEC1RD; trw=TNX_CDC_RWSTAT; break; default: printf("!!! 
tr_rdcd defaulted !!!\n"); return -1; } i = j = 0; regno &= 0x7f; snd_mtxlock(tr->lock); if (tr->type == ALI_PCI_ID) { u_int32_t chk1, chk2; j = trw; for (i = TR_TIMEOUT_CDC; (i > 0) && (j & trw); i--) j = tr_rd(tr, treg, 4); if (i > 0) { chk1 = tr_rd(tr, 0xc8, 4); chk2 = tr_rd(tr, 0xc8, 4); for (i = TR_TIMEOUT_CDC; (i > 0) && (chk1 == chk2); i--) chk2 = tr_rd(tr, 0xc8, 4); } } if (tr->type != ALI_PCI_ID || i > 0) { tr_wr(tr, treg, regno | trw, 4); j=trw; for (i=TR_TIMEOUT_CDC; (i > 0) && (j & trw); i--) j=tr_rd(tr, treg, 4); } snd_mtxunlock(tr->lock); if (i == 0) printf("codec timeout during read of register %x\n", regno); return (j >> TR_CDC_DATA) & 0xffff; } static int tr_wrcd(kobj_t obj, void *devinfo, int regno, u_int32_t data) { struct tr_info *tr = (struct tr_info *)devinfo; int i, j, treg, trw; switch (tr->type) { case SPA_PCI_ID: treg=SPA_REG_CODECWR; trw=SPA_CDC_RWSTAT; break; case ALI_PCI_ID: case TDX_PCI_ID: treg=TDX_REG_CODECWR; trw=TDX_CDC_RWSTAT; break; case TNX_PCI_ID: treg=TNX_REG_CODECWR; trw=TNX_CDC_RWSTAT | ((regno & 0x100)? TNX_CDC_SEC : 0); break; default: printf("!!! tr_wrcd defaulted !!!"); return -1; } i = 0; regno &= 0x7f; #if 0 printf("tr_wrcd: reg %x was %x", regno, tr_rdcd(devinfo, regno)); #endif j=trw; snd_mtxlock(tr->lock); if (tr->type == ALI_PCI_ID) { j = trw; for (i = TR_TIMEOUT_CDC; (i > 0) && (j & trw); i--) j = tr_rd(tr, treg, 4); if (i > 0) { u_int32_t chk1, chk2; chk1 = tr_rd(tr, 0xc8, 4); chk2 = tr_rd(tr, 0xc8, 4); for (i = TR_TIMEOUT_CDC; (i > 0) && (chk1 == chk2); i--) chk2 = tr_rd(tr, 0xc8, 4); } } if (tr->type != ALI_PCI_ID || i > 0) { for (i=TR_TIMEOUT_CDC; (i>0) && (j & trw); i--) j=tr_rd(tr, treg, 4); if (tr->type == ALI_PCI_ID && tr->rev > 0x01) trw |= 0x0100; tr_wr(tr, treg, (data << TR_CDC_DATA) | regno | trw, 4); } #if 0 printf(" - wrote %x, now %x\n", data, tr_rdcd(devinfo, regno)); #endif snd_mtxunlock(tr->lock); if (i==0) printf("codec timeout writing %x, data %x\n", regno, data); return (i > 0)? 
0 : -1;
}

static kobj_method_t tr_ac97_methods[] = {
	KOBJMETHOD(ac97_read, tr_rdcd),
	KOBJMETHOD(ac97_write, tr_wrcd),
	KOBJMETHOD_END
};
AC97_DECLARE(tr_ac97);

/* -------------------------------------------------------------------- */
/* playback channel interrupts */

#if 0
/* Unused: test whether ch's address interrupt is pending in its bank. */
static u_int32_t
tr_testint(struct tr_chinfo *ch)
{
	struct tr_info *tr = ch->parent;
	int bank, chan;

	bank = (ch->index & 0x20) ? 1 : 0;
	chan = ch->index & 0x1f;
	return tr_rd(tr, bank? TR_REG_ADDRINTB : TR_REG_ADDRINTA, 4) & (1 << chan);
}
#endif

/*
 * Acknowledge ch's pending address interrupt.
 * Channels 0-31 live in bank A registers, 32-63 in bank B.
 * NOTE(review): ADDRINT appears to be write-1-to-clear -- confirm
 * against the 4DWAVE datasheet.
 */
static void
tr_clrint(struct tr_chinfo *ch)
{
	struct tr_info *tr = ch->parent;
	int bank, chan;

	bank = (ch->index & 0x20) ? 1 : 0;
	chan = ch->index & 0x1f;
	tr_wr(tr, bank? TR_REG_ADDRINTB : TR_REG_ADDRINTA, 1 << chan, 4);
}

/*
 * Enable or disable ch's address interrupt with a read-modify-write of
 * the per-bank enable register, clearing any stale pending interrupt
 * first.  Serialized by the softc lock.
 */
static void
tr_enaint(struct tr_chinfo *ch, int enable)
{
	struct tr_info *tr = ch->parent;
	u_int32_t i, reg;
	int bank, chan;

	snd_mtxlock(tr->lock);
	bank = (ch->index & 0x20) ? 1 : 0;
	chan = ch->index & 0x1f;
	reg = bank? TR_REG_INTENB : TR_REG_INTENA;
	i = tr_rd(tr, reg, 4);
	i &= ~(1 << chan);
	i |= (enable? 1 : 0) << chan;
	tr_clrint(ch);
	tr_wr(tr, reg, i, 4);
	snd_mtxunlock(tr->lock);
}

/* playback channels */

/*
 * Select ch as the current channel in the channel index register so
 * that subsequent TR_REG_CHNBASE accesses address its register set.
 */
static void
tr_selch(struct tr_chinfo *ch)
{
	struct tr_info *tr = ch->parent;
	int i;

	i = tr_rd(tr, TR_REG_CIR, 4);
	i &= ~TR_CIR_MASK;
	i |= ch->index & 0x3f;
	tr_wr(tr, TR_REG_CIR, i, 4);
}

/* Start ch by setting its bit in the per-bank start register. */
static void
tr_startch(struct tr_chinfo *ch)
{
	struct tr_info *tr = ch->parent;
	int bank, chan;

	bank = (ch->index & 0x20) ? 1 : 0;
	chan = ch->index & 0x1f;
	tr_wr(tr, bank? TR_REG_STARTB : TR_REG_STARTA, 1 << chan, 4);
}

/* Stop ch by setting its bit in the per-bank stop register. */
static void
tr_stopch(struct tr_chinfo *ch)
{
	struct tr_info *tr = ch->parent;
	int bank, chan;

	bank = (ch->index & 0x20) ? 1 : 0;
	chan = ch->index & 0x1f;
	tr_wr(tr, bank?
TR_REG_STOPB : TR_REG_STOPA, 1 << chan, 4); } static void tr_wrch(struct tr_chinfo *ch) { struct tr_info *tr = ch->parent; u_int32_t cr[TR_CHN_REGS], i; ch->gvsel &= 0x00000001; ch->fmc &= 0x00000003; ch->fms &= 0x0000000f; ch->ctrl &= 0x0000000f; ch->pan &= 0x0000007f; ch->rvol &= 0x0000007f; ch->cvol &= 0x0000007f; ch->vol &= 0x000000ff; ch->ec &= 0x00000fff; ch->alpha &= 0x00000fff; ch->delta &= 0x0000ffff; if (tr->type == ALI_PCI_ID) ch->lba &= ALI_MAXADDR; else ch->lba &= TR_MAXADDR; cr[1]=ch->lba; cr[3]=(ch->fmc<<14) | (ch->rvol<<7) | (ch->cvol); cr[4]=(ch->gvsel<<31) | (ch->pan<<24) | (ch->vol<<16) | (ch->ctrl<<12) | (ch->ec); switch (tr->type) { case SPA_PCI_ID: case ALI_PCI_ID: case TDX_PCI_ID: ch->cso &= 0x0000ffff; ch->eso &= 0x0000ffff; cr[0]=(ch->cso<<16) | (ch->alpha<<4) | (ch->fms); cr[2]=(ch->eso<<16) | (ch->delta); break; case TNX_PCI_ID: ch->cso &= 0x00ffffff; ch->eso &= 0x00ffffff; cr[0]=((ch->delta & 0xff)<<24) | (ch->cso); cr[2]=((ch->delta>>8)<<24) | (ch->eso); cr[3]|=(ch->alpha<<20) | (ch->fms<<16) | (ch->fmc<<14); break; } snd_mtxlock(tr->lock); tr_selch(ch); for (i=0; ilock); } static void tr_rdch(struct tr_chinfo *ch) { struct tr_info *tr = ch->parent; u_int32_t cr[5], i; snd_mtxlock(tr->lock); tr_selch(ch); for (i=0; i<5; i++) cr[i]=tr_rd(tr, TR_REG_CHNBASE+(i<<2), 4); snd_mtxunlock(tr->lock); if (tr->type == ALI_PCI_ID) ch->lba=(cr[1] & ALI_MAXADDR); else ch->lba=(cr[1] & TR_MAXADDR); ch->fmc= (cr[3] & 0x0000c000) >> 14; ch->rvol= (cr[3] & 0x00003f80) >> 7; ch->cvol= (cr[3] & 0x0000007f); ch->gvsel= (cr[4] & 0x80000000) >> 31; ch->pan= (cr[4] & 0x7f000000) >> 24; ch->vol= (cr[4] & 0x00ff0000) >> 16; ch->ctrl= (cr[4] & 0x0000f000) >> 12; ch->ec= (cr[4] & 0x00000fff); switch(tr->type) { case SPA_PCI_ID: case ALI_PCI_ID: case TDX_PCI_ID: ch->cso= (cr[0] & 0xffff0000) >> 16; ch->alpha= (cr[0] & 0x0000fff0) >> 4; ch->fms= (cr[0] & 0x0000000f); ch->eso= (cr[2] & 0xffff0000) >> 16; ch->delta= (cr[2] & 0x0000ffff); break; case TNX_PCI_ID: 
ch->cso= (cr[0] & 0x00ffffff); ch->eso= (cr[2] & 0x00ffffff); ch->delta= ((cr[2] & 0xff000000) >> 16) | ((cr[0] & 0xff000000) >> 24); ch->alpha= (cr[3] & 0xfff00000) >> 20; ch->fms= (cr[3] & 0x000f0000) >> 16; break; } } static u_int32_t tr_fmttobits(u_int32_t fmt) { u_int32_t bits; bits = 0; bits |= (fmt & AFMT_SIGNED)? 0x2 : 0; bits |= (AFMT_CHANNEL(fmt) > 1)? 0x4 : 0; bits |= (fmt & AFMT_16BIT)? 0x8 : 0; return bits; } /* -------------------------------------------------------------------- */ /* channel interface */ static void * trpchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct tr_info *tr = devinfo; struct tr_chinfo *ch; KASSERT(dir == PCMDIR_PLAY, ("trpchan_init: bad direction")); ch = &tr->chinfo[tr->playchns]; ch->index = tr->playchns++; ch->buffer = b; ch->parent = tr; ch->channel = c; if (sndbuf_alloc(ch->buffer, tr->parent_dmat, 0, tr->bufsz) != 0) return NULL; return ch; } static int trpchan_setformat(kobj_t obj, void *data, u_int32_t format) { struct tr_chinfo *ch = data; ch->ctrl = tr_fmttobits(format) | 0x01; return 0; } static u_int32_t trpchan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct tr_chinfo *ch = data; ch->delta = (speed << 12) / 48000; return (ch->delta * 48000) >> 12; } static u_int32_t trpchan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct tr_chinfo *ch = data; sndbuf_resize(ch->buffer, 2, blocksize); return blocksize; } static int trpchan_trigger(kobj_t obj, void *data, int go) { struct tr_chinfo *ch = data; if (!PCMTRIG_COMMON(go)) return 0; if (go == PCMTRIG_START) { ch->fmc = 3; ch->fms = 0; ch->ec = 0; ch->alpha = 0; ch->lba = sndbuf_getbufaddr(ch->buffer); ch->cso = 0; ch->eso = (sndbuf_getsize(ch->buffer) / sndbuf_getalign(ch->buffer)) - 1; ch->rvol = ch->cvol = 0x7f; ch->gvsel = 0; ch->pan = 0; ch->vol = 0; ch->bufhalf = 0; tr_wrch(ch); tr_enaint(ch, 1); tr_startch(ch); ch->active = 1; } else { tr_stopch(ch); ch->active = 0; } return 0; } static 
u_int32_t trpchan_getptr(kobj_t obj, void *data) { struct tr_chinfo *ch = data; tr_rdch(ch); return ch->cso * sndbuf_getalign(ch->buffer); } static struct pcmchan_caps * trpchan_getcaps(kobj_t obj, void *data) { return &tr_playcaps; } static kobj_method_t trpchan_methods[] = { KOBJMETHOD(channel_init, trpchan_init), KOBJMETHOD(channel_setformat, trpchan_setformat), KOBJMETHOD(channel_setspeed, trpchan_setspeed), KOBJMETHOD(channel_setblocksize, trpchan_setblocksize), KOBJMETHOD(channel_trigger, trpchan_trigger), KOBJMETHOD(channel_getptr, trpchan_getptr), KOBJMETHOD(channel_getcaps, trpchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(trpchan); /* -------------------------------------------------------------------- */ /* rec channel interface */ static void * trrchan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct tr_info *tr = devinfo; struct tr_rchinfo *ch; KASSERT(dir == PCMDIR_REC, ("trrchan_init: bad direction")); ch = &tr->recchinfo; ch->buffer = b; ch->parent = tr; ch->channel = c; if (sndbuf_alloc(ch->buffer, tr->parent_dmat, 0, tr->bufsz) != 0) return NULL; return ch; } static int trrchan_setformat(kobj_t obj, void *data, u_int32_t format) { struct tr_rchinfo *ch = data; struct tr_info *tr = ch->parent; u_int32_t i, bits; bits = tr_fmttobits(format); /* set # of samples between interrupts */ i = (sndbuf_runsz(ch->buffer) >> ((bits & 0x08)? 
1 : 0)) - 1; tr_wr(tr, TR_REG_SBBL, i | (i << 16), 4); /* set sample format */ i = 0x18 | (bits << 4); tr_wr(tr, TR_REG_SBCTRL, i, 1); return 0; } static u_int32_t trrchan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct tr_rchinfo *ch = data; struct tr_info *tr = ch->parent; /* setup speed */ ch->delta = (48000 << 12) / speed; tr_wr(tr, TR_REG_SBDELTA, ch->delta, 2); /* return closest possible speed */ return (48000 << 12) / ch->delta; } static u_int32_t trrchan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct tr_rchinfo *ch = data; sndbuf_resize(ch->buffer, 2, blocksize); return blocksize; } static int trrchan_trigger(kobj_t obj, void *data, int go) { struct tr_rchinfo *ch = data; struct tr_info *tr = ch->parent; u_int32_t i; if (!PCMTRIG_COMMON(go)) return 0; if (go == PCMTRIG_START) { /* set up dma mode regs */ tr_wr(tr, TR_REG_DMAR15, 0, 1); i = tr_rd(tr, TR_REG_DMAR11, 1) & 0x03; tr_wr(tr, TR_REG_DMAR11, i | 0x54, 1); /* set up base address */ tr_wr(tr, TR_REG_DMAR0, sndbuf_getbufaddr(ch->buffer), 4); /* set up buffer size */ i = tr_rd(tr, TR_REG_DMAR4, 4) & ~0x00ffffff; tr_wr(tr, TR_REG_DMAR4, i | (sndbuf_runsz(ch->buffer) - 1), 4); /* start */ tr_wr(tr, TR_REG_SBCTRL, tr_rd(tr, TR_REG_SBCTRL, 1) | 1, 1); ch->active = 1; } else { tr_wr(tr, TR_REG_SBCTRL, tr_rd(tr, TR_REG_SBCTRL, 1) & ~7, 1); ch->active = 0; } /* return 0 if ok */ return 0; } static u_int32_t trrchan_getptr(kobj_t obj, void *data) { struct tr_rchinfo *ch = data; struct tr_info *tr = ch->parent; /* return current byte offset of channel */ return tr_rd(tr, TR_REG_DMAR0, 4) - sndbuf_getbufaddr(ch->buffer); } static struct pcmchan_caps * trrchan_getcaps(kobj_t obj, void *data) { return &tr_reccaps; } static kobj_method_t trrchan_methods[] = { KOBJMETHOD(channel_init, trrchan_init), KOBJMETHOD(channel_setformat, trrchan_setformat), KOBJMETHOD(channel_setspeed, trrchan_setspeed), KOBJMETHOD(channel_setblocksize, trrchan_setblocksize), KOBJMETHOD(channel_trigger, 
trrchan_trigger), KOBJMETHOD(channel_getptr, trrchan_getptr), KOBJMETHOD(channel_getcaps, trrchan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(trrchan); /* -------------------------------------------------------------------- */ /* The interrupt handler */ static void tr_intr(void *p) { struct tr_info *tr = (struct tr_info *)p; struct tr_chinfo *ch; u_int32_t active, mask, bufhalf, chnum, intsrc; int tmp; intsrc = tr_rd(tr, TR_REG_MISCINT, 4); if (intsrc & TR_INT_ADDR) { chnum = 0; while (chnum < tr->hwchns) { mask = 0x00000001; active = tr_rd(tr, (chnum < 32)? TR_REG_ADDRINTA : TR_REG_ADDRINTB, 4); bufhalf = tr_rd(tr, (chnum < 32)? TR_REG_CSPF_A : TR_REG_CSPF_B, 4); if (active) { do { if (active & mask) { tmp = (bufhalf & mask)? 1 : 0; if (chnum < tr->playchns) { ch = &tr->chinfo[chnum]; /* printf("%d @ %d, ", chnum, trpchan_getptr(NULL, ch)); */ if (ch->bufhalf != tmp) { chn_intr(ch->channel); ch->bufhalf = tmp; } } } chnum++; mask <<= 1; } while (chnum & 31); } else chnum += 32; tr_wr(tr, (chnum <= 32)? 
TR_REG_ADDRINTA : TR_REG_ADDRINTB, active, 4); } } if (intsrc & TR_INT_SB) { chn_intr(tr->recchinfo.channel); tr_rd(tr, TR_REG_SBR9, 1); tr_rd(tr, TR_REG_SBR10, 1); } } /* -------------------------------------------------------------------- */ /* * Probe and attach the card */ static int tr_init(struct tr_info *tr) { switch (tr->type) { case SPA_PCI_ID: tr_wr(tr, SPA_REG_GPIO, 0, 4); tr_wr(tr, SPA_REG_CODECST, SPA_RST_OFF, 4); break; case TDX_PCI_ID: tr_wr(tr, TDX_REG_CODECST, TDX_CDC_ON, 4); break; case TNX_PCI_ID: tr_wr(tr, TNX_REG_CODECST, TNX_CDC_ON, 4); break; } tr_wr(tr, TR_REG_CIR, TR_CIR_MIDENA | TR_CIR_ADDRENA, 4); return 0; } static int tr_pci_probe(device_t dev) { switch (pci_get_devid(dev)) { case SPA_PCI_ID: device_set_desc(dev, "SiS 7018"); return BUS_PROBE_DEFAULT; case ALI_PCI_ID: device_set_desc(dev, "Acer Labs M5451"); return BUS_PROBE_DEFAULT; case TDX_PCI_ID: device_set_desc(dev, "Trident 4DWave DX"); return BUS_PROBE_DEFAULT; case TNX_PCI_ID: device_set_desc(dev, "Trident 4DWave NX"); return BUS_PROBE_DEFAULT; } return ENXIO; } static int tr_pci_attach(device_t dev) { struct tr_info *tr; - struct ac97_info *codec = 0; + struct ac97_info *codec = NULL; bus_addr_t lowaddr; int i, dacn; char status[SND_STATUSLEN]; #ifdef __sparc64__ device_t *children; int nchildren; u_int32_t data; #endif tr = malloc(sizeof(*tr), M_DEVBUF, M_WAITOK | M_ZERO); tr->type = pci_get_devid(dev); tr->rev = pci_get_revid(dev); tr->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_t4dwave softc"); if (resource_int_value(device_get_name(dev), device_get_unit(dev), "dac", &i) == 0) { if (i < 1) dacn = 1; else if (i > TR_MAXPLAYCH) dacn = TR_MAXPLAYCH; else dacn = i; } else { switch (tr->type) { case ALI_PCI_ID: dacn = ALI_MAXPLAYCH; break; default: dacn = TR_MAXPLAYCH; break; } } pci_enable_busmaster(dev); tr->regid = PCIR_BAR(0); tr->regtype = SYS_RES_IOPORT; tr->reg = bus_alloc_resource_any(dev, tr->regtype, &tr->regid, RF_ACTIVE); if (tr->reg) { tr->st = 
rman_get_bustag(tr->reg); tr->sh = rman_get_bushandle(tr->reg); } else { device_printf(dev, "unable to map register space\n"); goto bad; } if (tr_init(tr) == -1) { device_printf(dev, "unable to initialize the card\n"); goto bad; } tr->playchns = 0; codec = AC97_CREATE(dev, tr, tr_ac97); if (codec == NULL) goto bad; if (mixer_init(dev, ac97_getmixerclass(), codec) == -1) goto bad; tr->irqid = 0; tr->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &tr->irqid, RF_ACTIVE | RF_SHAREABLE); if (!tr->irq || snd_setup_intr(dev, tr->irq, 0, tr_intr, tr, &tr->ih)) { device_printf(dev, "unable to map interrupt\n"); goto bad; } if (tr->type == ALI_PCI_ID) { /* * The M5451 generates 31 bit of DMA and in order to do * 32-bit DMA, the 31st bit can be set via its accompanying * ISA bridge. Note that we can't predict whether bus_dma(9) * will actually supply us with a 32-bit buffer and even when * using a low address of BUS_SPACE_MAXADDR_32BIT for both * we might end up with the play buffer being in the 32-bit * range while the record buffer isn't or vice versa. So we * limit enabling the 31st bit to sparc64, where the IOMMU * guarantees that we're using a 32-bit address (and in turn * requires it). 
*/ lowaddr = ALI_MAXADDR; #ifdef __sparc64__ if (device_get_children(device_get_parent(dev), &children, &nchildren) == 0) { for (i = 0; i < nchildren; i++) { if (pci_get_devid(children[i]) == 0x153310b9) { lowaddr = BUS_SPACE_MAXADDR_32BIT; data = pci_read_config(children[i], 0x7e, 1); if (bootverbose) device_printf(dev, "M1533 0x7e: 0x%x -> ", data); data |= 0x1; if (bootverbose) printf("0x%x\n", data); pci_write_config(children[i], 0x7e, data, 1); break; } } } free(children, M_TEMP); #endif tr->hwchns = ALI_MAXHWCH; tr->bufsz = ALI_BUFSZ; } else { lowaddr = TR_MAXADDR; tr->hwchns = TR_MAXHWCH; tr->bufsz = pcm_getbuffersize(dev, 4096, TR_DEFAULT_BUFSZ, 65536); } if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/TR_BUFALGN, /*boundary*/0, /*lowaddr*/lowaddr, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/tr->bufsz, /*nsegments*/1, /*maxsegz*/tr->bufsz, /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant, &tr->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto bad; } snprintf(status, 64, "at io 0x%jx irq %jd %s", rman_get_start(tr->reg), rman_get_start(tr->irq),PCM_KLDSTRING(snd_t4dwave)); if (pcm_register(dev, tr, dacn, 1)) goto bad; pcm_addchan(dev, PCMDIR_REC, &trrchan_class, tr); for (i = 0; i < dacn; i++) pcm_addchan(dev, PCMDIR_PLAY, &trpchan_class, tr); pcm_setstatus(dev, status); return 0; bad: if (codec) ac97_destroy(codec); if (tr->reg) bus_release_resource(dev, tr->regtype, tr->regid, tr->reg); if (tr->ih) bus_teardown_intr(dev, tr->irq, tr->ih); if (tr->irq) bus_release_resource(dev, SYS_RES_IRQ, tr->irqid, tr->irq); if (tr->parent_dmat) bus_dma_tag_destroy(tr->parent_dmat); if (tr->lock) snd_mtxfree(tr->lock); free(tr, M_DEVBUF); return ENXIO; } static int tr_pci_detach(device_t dev) { int r; struct tr_info *tr; r = pcm_unregister(dev); if (r) return r; tr = pcm_getdevinfo(dev); bus_release_resource(dev, tr->regtype, tr->regid, tr->reg); bus_teardown_intr(dev, tr->irq, 
tr->ih); bus_release_resource(dev, SYS_RES_IRQ, tr->irqid, tr->irq); bus_dma_tag_destroy(tr->parent_dmat); snd_mtxfree(tr->lock); free(tr, M_DEVBUF); return 0; } static int tr_pci_suspend(device_t dev) { int i; struct tr_info *tr; tr = pcm_getdevinfo(dev); for (i = 0; i < tr->playchns; i++) { tr->chinfo[i].was_active = tr->chinfo[i].active; if (tr->chinfo[i].active) { trpchan_trigger(NULL, &tr->chinfo[i], PCMTRIG_STOP); } } tr->recchinfo.was_active = tr->recchinfo.active; if (tr->recchinfo.active) { trrchan_trigger(NULL, &tr->recchinfo, PCMTRIG_STOP); } return 0; } static int tr_pci_resume(device_t dev) { int i; struct tr_info *tr; tr = pcm_getdevinfo(dev); if (tr_init(tr) == -1) { device_printf(dev, "unable to initialize the card\n"); return ENXIO; } if (mixer_reinit(dev) == -1) { device_printf(dev, "unable to initialize the mixer\n"); return ENXIO; } for (i = 0; i < tr->playchns; i++) { if (tr->chinfo[i].was_active) { trpchan_trigger(NULL, &tr->chinfo[i], PCMTRIG_START); } } if (tr->recchinfo.was_active) { trrchan_trigger(NULL, &tr->recchinfo, PCMTRIG_START); } return 0; } static device_method_t tr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, tr_pci_probe), DEVMETHOD(device_attach, tr_pci_attach), DEVMETHOD(device_detach, tr_pci_detach), DEVMETHOD(device_suspend, tr_pci_suspend), DEVMETHOD(device_resume, tr_pci_resume), { 0, 0 } }; static driver_t tr_driver = { "pcm", tr_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_t4dwave, pci, tr_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_t4dwave, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_t4dwave, 1); Index: head/sys/dev/sound/pci/via8233.c =================================================================== --- head/sys/dev/sound/pci/via8233.c (revision 297861) +++ head/sys/dev/sound/pci/via8233.c (revision 297862) @@ -1,1445 +1,1445 @@ /*- * Copyright (c) 2002 Orion Hodson * Portions of this code derived from via82c686.c: * Copyright (c) 2000 David Jones * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Credits due to: * * Grzybowski Rafal, Russell Davies, Mark Handley, Daniel O'Connor for * comments, machine time, testing patches, and patience. VIA for * providing specs. ALSA for helpful comments and some register poke * ordering. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include SND_DECLARE_FILE("$FreeBSD$"); #define VIA8233_PCI_ID 0x30591106 #define VIA8233_REV_ID_8233PRE 0x10 #define VIA8233_REV_ID_8233C 0x20 #define VIA8233_REV_ID_8233 0x30 #define VIA8233_REV_ID_8233A 0x40 #define VIA8233_REV_ID_8235 0x50 #define VIA8233_REV_ID_8237 0x60 #define VIA8233_REV_ID_8251 0x70 #define SEGS_PER_CHAN 2 /* Segments per channel */ #define NDXSCHANS 4 /* No of DXS channels */ #define NMSGDCHANS 1 /* No of multichannel SGD */ #define NWRCHANS 1 /* No of write channels */ #define NCHANS (NWRCHANS + NDXSCHANS + NMSGDCHANS) #define NSEGS NCHANS * SEGS_PER_CHAN /* Segments in SGD table */ #define VIA_SEGS_MIN 2 #define VIA_SEGS_MAX 64 #define VIA_SEGS_DEFAULT 2 #define VIA_BLK_MIN 32 #define VIA_BLK_ALIGN (~(VIA_BLK_MIN - 1)) #define VIA_DEFAULT_BUFSZ 0x1000 /* we rely on this struct being packed to 64 bits */ struct via_dma_op { volatile uint32_t ptr; volatile uint32_t flags; #define VIA_DMAOP_EOL 0x80000000 #define VIA_DMAOP_FLAG 0x40000000 #define VIA_DMAOP_STOP 0x20000000 #define VIA_DMAOP_COUNT(x) ((x)&0x00FFFFFF) }; struct via_info; struct via_chinfo { struct via_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; struct via_dma_op *sgd_table; bus_addr_t sgd_addr; int dir, rbase, active; unsigned int blksz, blkcnt; unsigned int ptr, prevptr; }; struct via_info { device_t dev; bus_space_tag_t st; bus_space_handle_t sh; bus_dma_tag_t parent_dmat; bus_dma_tag_t sgd_dmat; bus_dmamap_t sgd_dmamap; bus_addr_t sgd_addr; struct resource *reg, *irq; int regid, irqid; void *ih; struct ac97_info *codec; unsigned int bufsz, blkcnt; int dxs_src, dma_eol_wake; struct via_chinfo pch[NDXSCHANS + NMSGDCHANS]; struct via_chinfo rch[NWRCHANS]; struct via_dma_op *sgd_table; uint16_t codec_caps; uint16_t n_dxs_registered; int play_num, rec_num; struct mtx *lock; struct callout poll_timer; int poll_ticks, polling; }; static uint32_t 
via_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps via_vracaps = { 4000, 48000, via_fmt, 0 }; static struct pcmchan_caps via_caps = { 48000, 48000, via_fmt, 0 }; static __inline int via_chan_active(struct via_info *via) { int i, ret = 0; if (via == NULL) return (0); for (i = 0; i < NDXSCHANS + NMSGDCHANS; i++) ret += via->pch[i].active; for (i = 0; i < NWRCHANS; i++) ret += via->rch[i].active; return (ret); } static int sysctl_via8233_spdif_enable(SYSCTL_HANDLER_ARGS) { struct via_info *via; device_t dev; uint32_t r; int err, new_en; dev = oidp->oid_arg1; via = pcm_getdevinfo(dev); snd_mtxlock(via->lock); r = pci_read_config(dev, VIA_PCI_SPDIF, 1); snd_mtxunlock(via->lock); new_en = (r & VIA_SPDIF_EN) ? 1 : 0; err = sysctl_handle_int(oidp, &new_en, 0, req); if (err || req->newptr == NULL) return (err); if (new_en < 0 || new_en > 1) return (EINVAL); if (new_en) r |= VIA_SPDIF_EN; else r &= ~VIA_SPDIF_EN; snd_mtxlock(via->lock); pci_write_config(dev, VIA_PCI_SPDIF, r, 1); snd_mtxunlock(via->lock); return (0); } static int sysctl_via8233_dxs_src(SYSCTL_HANDLER_ARGS) { struct via_info *via; device_t dev; int err, val; dev = oidp->oid_arg1; via = pcm_getdevinfo(dev); snd_mtxlock(via->lock); val = via->dxs_src; snd_mtxunlock(via->lock); err = sysctl_handle_int(oidp, &val, 0, req); if (err || req->newptr == NULL) return (err); if (val < 0 || val > 1) return (EINVAL); snd_mtxlock(via->lock); via->dxs_src = val; snd_mtxunlock(via->lock); return (0); } static int sysctl_via_polling(SYSCTL_HANDLER_ARGS) { struct via_info *via; device_t dev; int err, val; dev = oidp->oid_arg1; via = pcm_getdevinfo(dev); if (via == NULL) return (EINVAL); snd_mtxlock(via->lock); val = via->polling; snd_mtxunlock(via->lock); err = sysctl_handle_int(oidp, &val, 0, req); if (err || req->newptr == NULL) return (err); if (val < 0 || val > 1) return (EINVAL); snd_mtxlock(via->lock); if (val 
!= via->polling) { if (via_chan_active(via) != 0) err = EBUSY; else if (val == 0) via->polling = 0; else via->polling = 1; } snd_mtxunlock(via->lock); return (err); } static void via_init_sysctls(device_t dev) { /* XXX: an user should be able to set this with a control tool, if not done before 7.0-RELEASE, this needs to be converted to a device specific sysctl "dev.pcm.X.yyy" via device_get_sysctl_*() as discussed on multimedia@ in msg-id <861wujij2q.fsf@xps.des.no> */ SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "spdif_enabled", CTLTYPE_INT | CTLFLAG_RW, dev, sizeof(dev), sysctl_via8233_spdif_enable, "I", "Enable S/PDIF output on primary playback channel"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "dxs_src", CTLTYPE_INT | CTLFLAG_RW, dev, sizeof(dev), sysctl_via8233_dxs_src, "I", "Enable VIA DXS Sample Rate Converter"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "polling", CTLTYPE_INT | CTLFLAG_RW, dev, sizeof(dev), sysctl_via_polling, "I", "Enable polling mode"); } static __inline uint32_t via_rd(struct via_info *via, int regno, int size) { switch (size) { case 1: return (bus_space_read_1(via->st, via->sh, regno)); case 2: return (bus_space_read_2(via->st, via->sh, regno)); case 4: return (bus_space_read_4(via->st, via->sh, regno)); default: return (0xFFFFFFFF); } } static __inline void via_wr(struct via_info *via, int regno, uint32_t data, int size) { switch (size) { case 1: bus_space_write_1(via->st, via->sh, regno, data); break; case 2: bus_space_write_2(via->st, via->sh, regno, data); break; case 4: bus_space_write_4(via->st, via->sh, regno, data); break; } } /* -------------------------------------------------------------------- */ /* Codec interface */ static int via_waitready_codec(struct via_info *via) { int i; /* poll until codec not busy */ for (i = 0; i < 1000; i++) { if ((via_rd(via, 
VIA_AC97_CONTROL, 4) & VIA_AC97_BUSY) == 0) return (0); DELAY(1); } device_printf(via->dev, "%s: codec busy\n", __func__); return (1); } static int via_waitvalid_codec(struct via_info *via) { int i; /* poll until codec valid */ for (i = 0; i < 1000; i++) { if (via_rd(via, VIA_AC97_CONTROL, 4) & VIA_AC97_CODEC00_VALID) return (0); DELAY(1); } device_printf(via->dev, "%s: codec invalid\n", __func__); return (1); } static int via_write_codec(kobj_t obj, void *addr, int reg, uint32_t val) { struct via_info *via = addr; if (via_waitready_codec(via)) return (-1); via_wr(via, VIA_AC97_CONTROL, VIA_AC97_CODEC00_VALID | VIA_AC97_INDEX(reg) | VIA_AC97_DATA(val), 4); return (0); } static int via_read_codec(kobj_t obj, void *addr, int reg) { struct via_info *via = addr; if (via_waitready_codec(via)) return (-1); via_wr(via, VIA_AC97_CONTROL, VIA_AC97_CODEC00_VALID | VIA_AC97_READ | VIA_AC97_INDEX(reg), 4); if (via_waitready_codec(via)) return (-1); if (via_waitvalid_codec(via)) return (-1); return (via_rd(via, VIA_AC97_CONTROL, 2)); } static kobj_method_t via_ac97_methods[] = { KOBJMETHOD(ac97_read, via_read_codec), KOBJMETHOD(ac97_write, via_write_codec), KOBJMETHOD_END }; AC97_DECLARE(via_ac97); /* -------------------------------------------------------------------- */ static int via_buildsgdt(struct via_chinfo *ch) { uint32_t phys_addr, flag; int i; phys_addr = sndbuf_getbufaddr(ch->buffer); for (i = 0; i < ch->blkcnt; i++) { flag = (i == ch->blkcnt - 1) ? 
VIA_DMAOP_EOL : VIA_DMAOP_FLAG; ch->sgd_table[i].ptr = phys_addr + (i * ch->blksz); ch->sgd_table[i].flags = flag | ch->blksz; } return (0); } /* -------------------------------------------------------------------- */ /* Format setting functions */ static int via8233wr_setformat(kobj_t obj, void *data, uint32_t format) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; uint32_t f = WR_FORMAT_STOP_INDEX; if (AFMT_CHANNEL(format) > 1) f |= WR_FORMAT_STEREO; if (format & AFMT_S16_LE) f |= WR_FORMAT_16BIT; snd_mtxlock(via->lock); via_wr(via, VIA_WR0_FORMAT, f, 4); snd_mtxunlock(via->lock); return (0); } static int via8233dxs_setformat(kobj_t obj, void *data, uint32_t format) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; uint32_t r, v; r = ch->rbase + VIA8233_RP_DXS_RATEFMT; snd_mtxlock(via->lock); v = via_rd(via, r, 4); v &= ~(VIA8233_DXS_RATEFMT_STEREO | VIA8233_DXS_RATEFMT_16BIT); if (AFMT_CHANNEL(format) > 1) v |= VIA8233_DXS_RATEFMT_STEREO; if (format & AFMT_16BIT) v |= VIA8233_DXS_RATEFMT_16BIT; via_wr(via, r, v, 4); snd_mtxunlock(via->lock); return (0); } static int via8233msgd_setformat(kobj_t obj, void *data, uint32_t format) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; uint32_t s = 0xff000000; uint8_t v = (format & AFMT_S16_LE) ? 
MC_SGD_16BIT : MC_SGD_8BIT; if (AFMT_CHANNEL(format) > 1) { v |= MC_SGD_CHANNELS(2); s |= SLOT3(1) | SLOT4(2); } else { v |= MC_SGD_CHANNELS(1); s |= SLOT3(1) | SLOT4(1); } snd_mtxlock(via->lock); via_wr(via, VIA_MC_SLOT_SELECT, s, 4); via_wr(via, VIA_MC_SGD_FORMAT, v, 1); snd_mtxunlock(via->lock); return (0); } /* -------------------------------------------------------------------- */ /* Speed setting functions */ static uint32_t via8233wr_setspeed(kobj_t obj, void *data, uint32_t speed) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; if (via->codec_caps & AC97_EXTCAP_VRA) return (ac97_setrate(via->codec, AC97_REGEXT_LADCRATE, speed)); return (48000); } static uint32_t via8233dxs_setspeed(kobj_t obj, void *data, uint32_t speed) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; uint32_t r, v; r = ch->rbase + VIA8233_RP_DXS_RATEFMT; snd_mtxlock(via->lock); v = via_rd(via, r, 4) & ~VIA8233_DXS_RATEFMT_48K; /* Careful to avoid overflow (divide by 48 per vt8233c docs) */ v |= VIA8233_DXS_RATEFMT_48K * (speed / 48) / (48000 / 48); via_wr(via, r, v, 4); snd_mtxunlock(via->lock); return (speed); } static uint32_t via8233msgd_setspeed(kobj_t obj, void *data, uint32_t speed) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; if (via->codec_caps & AC97_EXTCAP_VRA) return (ac97_setrate(via->codec, AC97_REGEXT_FDACRATE, speed)); return (48000); } /* -------------------------------------------------------------------- */ /* Format probing functions */ static struct pcmchan_caps * via8233wr_getcaps(kobj_t obj, void *data) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; /* Controlled by ac97 registers */ if (via->codec_caps & AC97_EXTCAP_VRA) return (&via_vracaps); return (&via_caps); } static struct pcmchan_caps * via8233dxs_getcaps(kobj_t obj, void *data) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; /* * Controlled by onboard registers * * Apparently, few boards can do DXS sample rate * 
conversion. */ if (via->dxs_src) return (&via_vracaps); return (&via_caps); } static struct pcmchan_caps * via8233msgd_getcaps(kobj_t obj, void *data) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; /* Controlled by ac97 registers */ if (via->codec_caps & AC97_EXTCAP_VRA) return (&via_vracaps); return (&via_caps); } /* -------------------------------------------------------------------- */ /* Common functions */ static int via8233chan_setfragments(kobj_t obj, void *data, uint32_t blksz, uint32_t blkcnt) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; blksz &= VIA_BLK_ALIGN; if (blksz > (sndbuf_getmaxsize(ch->buffer) / VIA_SEGS_MIN)) blksz = sndbuf_getmaxsize(ch->buffer) / VIA_SEGS_MIN; if (blksz < VIA_BLK_MIN) blksz = VIA_BLK_MIN; if (blkcnt > VIA_SEGS_MAX) blkcnt = VIA_SEGS_MAX; if (blkcnt < VIA_SEGS_MIN) blkcnt = VIA_SEGS_MIN; while ((blksz * blkcnt) > sndbuf_getmaxsize(ch->buffer)) { if ((blkcnt >> 1) >= VIA_SEGS_MIN) blkcnt >>= 1; else if ((blksz >> 1) >= VIA_BLK_MIN) blksz >>= 1; else break; } if ((sndbuf_getblksz(ch->buffer) != blksz || sndbuf_getblkcnt(ch->buffer) != blkcnt) && sndbuf_resize(ch->buffer, blkcnt, blksz) != 0) device_printf(via->dev, "%s: failed blksz=%u blkcnt=%u\n", __func__, blksz, blkcnt); ch->blksz = sndbuf_getblksz(ch->buffer); ch->blkcnt = sndbuf_getblkcnt(ch->buffer); return (0); } static uint32_t via8233chan_setblocksize(kobj_t obj, void *data, uint32_t blksz) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; via8233chan_setfragments(obj, data, blksz, via->blkcnt); return (ch->blksz); } static uint32_t via8233chan_getptr(kobj_t obj, void *data) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; uint32_t v, index, count, ptr; snd_mtxlock(via->lock); if (via->polling != 0) { ptr = ch->ptr; snd_mtxunlock(via->lock); } else { v = via_rd(via, ch->rbase + VIA_RP_CURRENT_COUNT, 4); snd_mtxunlock(via->lock); index = v >> 24; /* Last completed buffer */ count = v & 
0x00ffffff; /* Bytes remaining */ ptr = (index + 1) * ch->blksz - count; ptr %= ch->blkcnt * ch->blksz; /* Wrap to available space */ } return (ptr); } static void via8233chan_reset(struct via_info *via, struct via_chinfo *ch) { via_wr(via, ch->rbase + VIA_RP_CONTROL, SGD_CONTROL_STOP, 1); via_wr(via, ch->rbase + VIA_RP_CONTROL, 0x00, 1); via_wr(via, ch->rbase + VIA_RP_STATUS, SGD_STATUS_EOL | SGD_STATUS_FLAG, 1); } /* -------------------------------------------------------------------- */ /* Channel initialization functions */ static void via8233chan_sgdinit(struct via_info *via, struct via_chinfo *ch, int chnum) { ch->sgd_table = &via->sgd_table[chnum * VIA_SEGS_MAX]; ch->sgd_addr = via->sgd_addr + chnum * VIA_SEGS_MAX * sizeof(struct via_dma_op); } static void* via8233wr_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct via_info *via = devinfo; struct via_chinfo *ch; int num; snd_mtxlock(via->lock); num = via->rec_num++; ch = &via->rch[num]; ch->parent = via; ch->channel = c; ch->buffer = b; ch->dir = dir; ch->blkcnt = via->blkcnt; ch->rbase = VIA_WR_BASE(num); via_wr(via, ch->rbase + VIA_WR_RP_SGD_FORMAT, WR_FIFO_ENABLE, 1); snd_mtxunlock(via->lock); if (sndbuf_alloc(ch->buffer, via->parent_dmat, 0, via->bufsz) != 0) return (NULL); snd_mtxlock(via->lock); via8233chan_sgdinit(via, ch, num); via8233chan_reset(via, ch); snd_mtxunlock(via->lock); return (ch); } static void* via8233dxs_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct via_info *via = devinfo; struct via_chinfo *ch; int num; snd_mtxlock(via->lock); num = via->play_num++; ch = &via->pch[num]; ch->parent = via; ch->channel = c; ch->buffer = b; ch->dir = dir; ch->blkcnt = via->blkcnt; /* * All cards apparently support DXS3, but not other DXS * channels. We therefore want to align first DXS channel to * DXS3. 
*/ ch->rbase = VIA_DXS_BASE(NDXSCHANS - 1 - via->n_dxs_registered); via->n_dxs_registered++; snd_mtxunlock(via->lock); if (sndbuf_alloc(ch->buffer, via->parent_dmat, 0, via->bufsz) != 0) return (NULL); snd_mtxlock(via->lock); via8233chan_sgdinit(via, ch, NWRCHANS + num); via8233chan_reset(via, ch); snd_mtxunlock(via->lock); return (ch); } static void* via8233msgd_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct via_info *via = devinfo; struct via_chinfo *ch; int num; snd_mtxlock(via->lock); num = via->play_num++; ch = &via->pch[num]; ch->parent = via; ch->channel = c; ch->buffer = b; ch->dir = dir; ch->rbase = VIA_MC_SGD_STATUS; ch->blkcnt = via->blkcnt; snd_mtxunlock(via->lock); if (sndbuf_alloc(ch->buffer, via->parent_dmat, 0, via->bufsz) != 0) return (NULL); snd_mtxlock(via->lock); via8233chan_sgdinit(via, ch, NWRCHANS + num); via8233chan_reset(via, ch); snd_mtxunlock(via->lock); return (ch); } static void via8233chan_mute(struct via_info *via, struct via_chinfo *ch, int muted) { if (BASE_IS_VIA_DXS_REG(ch->rbase)) { int r; muted = (muted) ? 
VIA8233_DXS_MUTE : 0; via_wr(via, ch->rbase + VIA8233_RP_DXS_LVOL, muted, 1); via_wr(via, ch->rbase + VIA8233_RP_DXS_RVOL, muted, 1); r = via_rd(via, ch->rbase + VIA8233_RP_DXS_LVOL, 1) & VIA8233_DXS_MUTE; if (r != muted) device_printf(via->dev, "%s: failed to set dxs volume " "(dxs base 0x%02x).\n", __func__, ch->rbase); } } static __inline int via_poll_channel(struct via_chinfo *ch) { struct via_info *via; uint32_t sz, delta; uint32_t v, index, count; int ptr; if (ch == NULL || ch->channel == NULL || ch->active == 0) return (0); via = ch->parent; sz = ch->blksz * ch->blkcnt; v = via_rd(via, ch->rbase + VIA_RP_CURRENT_COUNT, 4); index = v >> 24; count = v & 0x00ffffff; ptr = ((index + 1) * ch->blksz) - count; ptr %= sz; ptr &= ~(ch->blksz - 1); ch->ptr = ptr; delta = (sz + ptr - ch->prevptr) % sz; if (delta < ch->blksz) return (0); ch->prevptr = ptr; return (1); } static void via_poll_callback(void *arg) { struct via_info *via = arg; uint32_t ptrigger = 0, rtrigger = 0; int i; if (via == NULL) return; snd_mtxlock(via->lock); if (via->polling == 0 || via_chan_active(via) == 0) { snd_mtxunlock(via->lock); return; } for (i = 0; i < NDXSCHANS + NMSGDCHANS; i++) ptrigger |= (via_poll_channel(&via->pch[i]) != 0) ? (1 << i) : 0; for (i = 0; i < NWRCHANS; i++) rtrigger |= (via_poll_channel(&via->rch[i]) != 0) ? 
(1 << i) : 0; /* XXX */ callout_reset(&via->poll_timer, 1/*via->poll_ticks*/, via_poll_callback, via); snd_mtxunlock(via->lock); for (i = 0; i < NDXSCHANS + NMSGDCHANS; i++) { if (ptrigger & (1 << i)) chn_intr(via->pch[i].channel); } for (i = 0; i < NWRCHANS; i++) { if (rtrigger & (1 << i)) chn_intr(via->rch[i].channel); } } static int via_poll_ticks(struct via_info *via) { struct via_chinfo *ch; int i; int ret = hz; int pollticks; for (i = 0; i < NDXSCHANS + NMSGDCHANS; i++) { ch = &via->pch[i]; if (ch->channel == NULL || ch->active == 0) continue; pollticks = ((uint64_t)hz * ch->blksz) / ((uint64_t)sndbuf_getalign(ch->buffer) * sndbuf_getspd(ch->buffer)); pollticks >>= 2; if (pollticks > hz) pollticks = hz; if (pollticks < 1) pollticks = 1; if (pollticks < ret) ret = pollticks; } for (i = 0; i < NWRCHANS; i++) { ch = &via->rch[i]; if (ch->channel == NULL || ch->active == 0) continue; pollticks = ((uint64_t)hz * ch->blksz) / ((uint64_t)sndbuf_getalign(ch->buffer) * sndbuf_getspd(ch->buffer)); pollticks >>= 2; if (pollticks > hz) pollticks = hz; if (pollticks < 1) pollticks = 1; if (pollticks < ret) ret = pollticks; } return (ret); } static int via8233chan_trigger(kobj_t obj, void* data, int go) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; int pollticks; if (!PCMTRIG_COMMON(go)) return (0); snd_mtxlock(via->lock); switch(go) { case PCMTRIG_START: via_buildsgdt(ch); via8233chan_mute(via, ch, 0); via_wr(via, ch->rbase + VIA_RP_TABLE_PTR, ch->sgd_addr, 4); if (via->polling != 0) { ch->ptr = 0; ch->prevptr = 0; pollticks = ((uint64_t)hz * ch->blksz) / ((uint64_t)sndbuf_getalign(ch->buffer) * sndbuf_getspd(ch->buffer)); pollticks >>= 2; if (pollticks > hz) pollticks = hz; if (pollticks < 1) pollticks = 1; if (via_chan_active(via) == 0 || pollticks < via->poll_ticks) { if (bootverbose) { if (via_chan_active(via) == 0) printf("%s: pollticks=%d\n", __func__, pollticks); else printf("%s: " "pollticks %d -> %d\n", __func__, via->poll_ticks, pollticks); 
} via->poll_ticks = pollticks; callout_reset(&via->poll_timer, 1, via_poll_callback, via); } } via_wr(via, ch->rbase + VIA_RP_CONTROL, SGD_CONTROL_START | SGD_CONTROL_AUTOSTART | ((via->polling == 0) ? (SGD_CONTROL_I_EOL | SGD_CONTROL_I_FLAG) : 0), 1); ch->active = 1; break; case PCMTRIG_STOP: case PCMTRIG_ABORT: via_wr(via, ch->rbase + VIA_RP_CONTROL, SGD_CONTROL_STOP, 1); via8233chan_mute(via, ch, 1); via8233chan_reset(via, ch); ch->active = 0; if (via->polling != 0) { if (via_chan_active(via) == 0) { callout_stop(&via->poll_timer); via->poll_ticks = 1; } else { pollticks = via_poll_ticks(via); if (pollticks > via->poll_ticks) { if (bootverbose) printf("%s: pollticks " "%d -> %d\n", __func__, via->poll_ticks, pollticks); via->poll_ticks = pollticks; callout_reset(&via->poll_timer, 1, via_poll_callback, via); } } } break; default: break; } snd_mtxunlock(via->lock); return (0); } static kobj_method_t via8233wr_methods[] = { KOBJMETHOD(channel_init, via8233wr_init), KOBJMETHOD(channel_setformat, via8233wr_setformat), KOBJMETHOD(channel_setspeed, via8233wr_setspeed), KOBJMETHOD(channel_getcaps, via8233wr_getcaps), KOBJMETHOD(channel_setblocksize, via8233chan_setblocksize), KOBJMETHOD(channel_setfragments, via8233chan_setfragments), KOBJMETHOD(channel_trigger, via8233chan_trigger), KOBJMETHOD(channel_getptr, via8233chan_getptr), KOBJMETHOD_END }; CHANNEL_DECLARE(via8233wr); static kobj_method_t via8233dxs_methods[] = { KOBJMETHOD(channel_init, via8233dxs_init), KOBJMETHOD(channel_setformat, via8233dxs_setformat), KOBJMETHOD(channel_setspeed, via8233dxs_setspeed), KOBJMETHOD(channel_getcaps, via8233dxs_getcaps), KOBJMETHOD(channel_setblocksize, via8233chan_setblocksize), KOBJMETHOD(channel_setfragments, via8233chan_setfragments), KOBJMETHOD(channel_trigger, via8233chan_trigger), KOBJMETHOD(channel_getptr, via8233chan_getptr), KOBJMETHOD_END }; CHANNEL_DECLARE(via8233dxs); static kobj_method_t via8233msgd_methods[] = { KOBJMETHOD(channel_init, via8233msgd_init), 
KOBJMETHOD(channel_setformat, via8233msgd_setformat), KOBJMETHOD(channel_setspeed, via8233msgd_setspeed), KOBJMETHOD(channel_getcaps, via8233msgd_getcaps), KOBJMETHOD(channel_setblocksize, via8233chan_setblocksize), KOBJMETHOD(channel_setfragments, via8233chan_setfragments), KOBJMETHOD(channel_trigger, via8233chan_trigger), KOBJMETHOD(channel_getptr, via8233chan_getptr), KOBJMETHOD_END }; CHANNEL_DECLARE(via8233msgd); /* -------------------------------------------------------------------- */ static void via_intr(void *p) { struct via_info *via = p; uint32_t ptrigger = 0, rtrigger = 0; int i, reg, stat; snd_mtxlock(via->lock); if (via->polling != 0) { snd_mtxunlock(via->lock); return; } /* Poll playback channels */ for (i = 0; i < NDXSCHANS + NMSGDCHANS; i++) { if (via->pch[i].channel == NULL || via->pch[i].active == 0) continue; reg = via->pch[i].rbase + VIA_RP_STATUS; stat = via_rd(via, reg, 1); if (stat & SGD_STATUS_INTR) { if (via->dma_eol_wake && ((stat & SGD_STATUS_EOL) || !(stat & SGD_STATUS_ACTIVE))) via_wr(via, via->pch[i].rbase + VIA_RP_CONTROL, SGD_CONTROL_START | SGD_CONTROL_AUTOSTART | SGD_CONTROL_I_EOL | SGD_CONTROL_I_FLAG, 1); via_wr(via, reg, stat, 1); ptrigger |= 1 << i; } } /* Poll record channels */ for (i = 0; i < NWRCHANS; i++) { if (via->rch[i].channel == NULL || via->rch[i].active == 0) continue; reg = via->rch[i].rbase + VIA_RP_STATUS; stat = via_rd(via, reg, 1); if (stat & SGD_STATUS_INTR) { if (via->dma_eol_wake && ((stat & SGD_STATUS_EOL) || !(stat & SGD_STATUS_ACTIVE))) via_wr(via, via->rch[i].rbase + VIA_RP_CONTROL, SGD_CONTROL_START | SGD_CONTROL_AUTOSTART | SGD_CONTROL_I_EOL | SGD_CONTROL_I_FLAG, 1); via_wr(via, reg, stat, 1); rtrigger |= 1 << i; } } snd_mtxunlock(via->lock); for (i = 0; i < NDXSCHANS + NMSGDCHANS; i++) { if (ptrigger & (1 << i)) chn_intr(via->pch[i].channel); } for (i = 0; i < NWRCHANS; i++) { if (rtrigger & (1 << i)) chn_intr(via->rch[i].channel); } } /* * Probe and attach the card */ static int via_probe(device_t 
dev) { switch(pci_get_devid(dev)) { case VIA8233_PCI_ID: switch(pci_get_revid(dev)) { case VIA8233_REV_ID_8233PRE: device_set_desc(dev, "VIA VT8233 (pre)"); return (BUS_PROBE_DEFAULT); case VIA8233_REV_ID_8233C: device_set_desc(dev, "VIA VT8233C"); return (BUS_PROBE_DEFAULT); case VIA8233_REV_ID_8233: device_set_desc(dev, "VIA VT8233"); return (BUS_PROBE_DEFAULT); case VIA8233_REV_ID_8233A: device_set_desc(dev, "VIA VT8233A"); return (BUS_PROBE_DEFAULT); case VIA8233_REV_ID_8235: device_set_desc(dev, "VIA VT8235"); return (BUS_PROBE_DEFAULT); case VIA8233_REV_ID_8237: device_set_desc(dev, "VIA VT8237"); return (BUS_PROBE_DEFAULT); case VIA8233_REV_ID_8251: device_set_desc(dev, "VIA VT8251"); return (BUS_PROBE_DEFAULT); default: device_set_desc(dev, "VIA VT8233X"); /* Unknown */ return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static void dma_cb(void *p, bus_dma_segment_t *bds, int a, int b) { struct via_info *via = (struct via_info *)p; via->sgd_addr = bds->ds_addr; } static int via_chip_init(device_t dev) { uint32_t data, cnt; /* Wake up and reset AC97 if necessary */ data = pci_read_config(dev, VIA_PCI_ACLINK_STAT, 1); if ((data & VIA_PCI_ACLINK_C00_READY) == 0) { /* Cold reset per ac97r2.3 spec (page 95) */ /* Assert low */ pci_write_config(dev, VIA_PCI_ACLINK_CTRL, VIA_PCI_ACLINK_EN, 1); /* Wait T_rst_low */ DELAY(100); /* Assert high */ pci_write_config(dev, VIA_PCI_ACLINK_CTRL, VIA_PCI_ACLINK_EN | VIA_PCI_ACLINK_NRST, 1); /* Wait T_rst2clk */ DELAY(5); /* Assert low */ pci_write_config(dev, VIA_PCI_ACLINK_CTRL, VIA_PCI_ACLINK_EN, 1); } else { /* Warm reset */ /* Force no sync */ pci_write_config(dev, VIA_PCI_ACLINK_CTRL, VIA_PCI_ACLINK_EN, 1); DELAY(100); /* Sync */ pci_write_config(dev, VIA_PCI_ACLINK_CTRL, VIA_PCI_ACLINK_EN | VIA_PCI_ACLINK_SYNC, 1); /* Wait T_sync_high */ DELAY(5); /* Force no sync */ pci_write_config(dev, VIA_PCI_ACLINK_CTRL, VIA_PCI_ACLINK_EN, 1); /* Wait T_sync2clk */ DELAY(5); } /* Power everything up */ pci_write_config(dev, 
VIA_PCI_ACLINK_CTRL, VIA_PCI_ACLINK_DESIRED, 1); /* Wait for codec to become ready (largest reported delay 310ms) */ for (cnt = 0; cnt < 2000; cnt++) { data = pci_read_config(dev, VIA_PCI_ACLINK_STAT, 1); if (data & VIA_PCI_ACLINK_C00_READY) return (0); DELAY(5000); } device_printf(dev, "primary codec not ready (cnt = 0x%02x)\n", cnt); return (ENXIO); } static int via_attach(device_t dev) { - struct via_info *via = 0; + struct via_info *via = NULL; char status[SND_STATUSLEN]; int i, via_dxs_disabled, via_dxs_src, via_dxs_chnum, via_sgd_chnum; int nsegs; uint32_t revid; via = malloc(sizeof *via, M_DEVBUF, M_WAITOK | M_ZERO); via->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_via8233 softc"); via->dev = dev; callout_init(&via->poll_timer, 1); via->poll_ticks = 1; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "polling", &i) == 0 && i != 0) via->polling = 1; else via->polling = 0; pci_set_powerstate(dev, PCI_POWERSTATE_D0); pci_enable_busmaster(dev); via->regid = PCIR_BAR(0); via->reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &via->regid, RF_ACTIVE); if (!via->reg) { device_printf(dev, "cannot allocate bus resource."); goto bad; } via->st = rman_get_bustag(via->reg); via->sh = rman_get_bushandle(via->reg); via->irqid = 0; via->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &via->irqid, RF_ACTIVE | RF_SHAREABLE); if (!via->irq || snd_setup_intr(dev, via->irq, INTR_MPSAFE, via_intr, via, &via->ih)) { device_printf(dev, "unable to map interrupt\n"); goto bad; } via->bufsz = pcm_getbuffersize(dev, 4096, VIA_DEFAULT_BUFSZ, 65536); if (resource_int_value(device_get_name(dev), device_get_unit(dev), "blocksize", &i) == 0 && i > 0) { i &= VIA_BLK_ALIGN; if (i < VIA_BLK_MIN) i = VIA_BLK_MIN; via->blkcnt = via->bufsz / i; i = 0; while (via->blkcnt >> i) i++; via->blkcnt = 1 << (i - 1); if (via->blkcnt < VIA_SEGS_MIN) via->blkcnt = VIA_SEGS_MIN; else if (via->blkcnt > VIA_SEGS_MAX) via->blkcnt = VIA_SEGS_MAX; } else via->blkcnt = VIA_SEGS_DEFAULT; 
revid = pci_get_revid(dev); /* * VIA8251 lost its interrupt after DMA EOL, and need * a gentle spank on its face within interrupt handler. */ if (revid == VIA8233_REV_ID_8251) via->dma_eol_wake = 1; else via->dma_eol_wake = 0; /* * Decide whether DXS had to be disabled or not */ if (revid == VIA8233_REV_ID_8233A) { /* * DXS channel is disabled. Reports from multiple users * that it plays at half-speed. Do not see this behaviour * on available 8233C or when emulating 8233A register set * on 8233C (either with or without ac97 VRA). */ via_dxs_disabled = 1; } else if (resource_int_value(device_get_name(dev), device_get_unit(dev), "via_dxs_disabled", &via_dxs_disabled) == 0) via_dxs_disabled = (via_dxs_disabled > 0) ? 1 : 0; else via_dxs_disabled = 0; if (via_dxs_disabled) { via_dxs_chnum = 0; via_sgd_chnum = 1; } else { if (resource_int_value(device_get_name(dev), device_get_unit(dev), "via_dxs_channels", &via_dxs_chnum) != 0) via_dxs_chnum = NDXSCHANS; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "via_sgd_channels", &via_sgd_chnum) != 0) via_sgd_chnum = NMSGDCHANS; } if (via_dxs_chnum > NDXSCHANS) via_dxs_chnum = NDXSCHANS; else if (via_dxs_chnum < 0) via_dxs_chnum = 0; if (via_sgd_chnum > NMSGDCHANS) via_sgd_chnum = NMSGDCHANS; else if (via_sgd_chnum < 0) via_sgd_chnum = 0; if (via_dxs_chnum + via_sgd_chnum < 1) { /* Minimalist ? */ via_dxs_chnum = 1; via_sgd_chnum = 0; } if (via_dxs_chnum > 0 && resource_int_value(device_get_name(dev), device_get_unit(dev), "via_dxs_src", &via_dxs_src) == 0) via->dxs_src = (via_dxs_src > 0) ? 
1 : 0; else via->dxs_src = 0; nsegs = (via_dxs_chnum + via_sgd_chnum + NWRCHANS) * VIA_SEGS_MAX; /* DMA tag for buffers */ if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/via->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/NULL, /*lockarg*/NULL, &via->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto bad; } /* * DMA tag for SGD table. The 686 uses scatter/gather DMA and * requires a list in memory of work to do. We need only 16 bytes * for this list, and it is wasteful to allocate 16K. */ if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/nsegs * sizeof(struct via_dma_op), /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/NULL, /*lockarg*/NULL, &via->sgd_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto bad; } if (bus_dmamem_alloc(via->sgd_dmat, (void **)&via->sgd_table, BUS_DMA_NOWAIT, &via->sgd_dmamap) == -1) goto bad; if (bus_dmamap_load(via->sgd_dmat, via->sgd_dmamap, via->sgd_table, nsegs * sizeof(struct via_dma_op), dma_cb, via, 0)) goto bad; if (via_chip_init(dev)) goto bad; via->codec = AC97_CREATE(dev, via, via_ac97); if (!via->codec) goto bad; mixer_init(dev, ac97_getmixerclass(), via->codec); via->codec_caps = ac97_getextcaps(via->codec); /* Try to set VRA without generating an error, VRM not reqrd yet */ if (via->codec_caps & (AC97_EXTCAP_VRA | AC97_EXTCAP_VRM | AC97_EXTCAP_DRA)) { uint16_t ext = ac97_getextmode(via->codec); ext |= (via->codec_caps & (AC97_EXTCAP_VRA | AC97_EXTCAP_VRM)); ext &= ~AC97_EXTCAP_DRA; ac97_setextmode(via->codec, ext); } snprintf(status, SND_STATUSLEN, "at io 0x%jx irq %jd %s", rman_get_start(via->reg), rman_get_start(via->irq), PCM_KLDSTRING(snd_via8233)); /* 
Register */ if (pcm_register(dev, via, via_dxs_chnum + via_sgd_chnum, NWRCHANS)) goto bad; for (i = 0; i < via_dxs_chnum; i++) pcm_addchan(dev, PCMDIR_PLAY, &via8233dxs_class, via); for (i = 0; i < via_sgd_chnum; i++) pcm_addchan(dev, PCMDIR_PLAY, &via8233msgd_class, via); for (i = 0; i < NWRCHANS; i++) pcm_addchan(dev, PCMDIR_REC, &via8233wr_class, via); if (via_dxs_chnum > 0) via_init_sysctls(dev); device_printf(dev, "\n", (via_dxs_chnum > 0) ? "En" : "Dis", (via->dxs_src) ? "(SRC)" : "", via_dxs_chnum, via_sgd_chnum, NWRCHANS); pcm_setstatus(dev, status); return (0); bad: if (via->codec) ac97_destroy(via->codec); if (via->reg) bus_release_resource(dev, SYS_RES_IOPORT, via->regid, via->reg); if (via->ih) bus_teardown_intr(dev, via->irq, via->ih); if (via->irq) bus_release_resource(dev, SYS_RES_IRQ, via->irqid, via->irq); if (via->parent_dmat) bus_dma_tag_destroy(via->parent_dmat); if (via->sgd_addr) bus_dmamap_unload(via->sgd_dmat, via->sgd_dmamap); if (via->sgd_table) bus_dmamem_free(via->sgd_dmat, via->sgd_table, via->sgd_dmamap); if (via->sgd_dmat) bus_dma_tag_destroy(via->sgd_dmat); if (via->lock) snd_mtxfree(via->lock); if (via) free(via, M_DEVBUF); return (ENXIO); } static int via_detach(device_t dev) { int r; struct via_info *via; r = pcm_unregister(dev); if (r) return (r); via = pcm_getdevinfo(dev); if (via != NULL && (via->play_num != 0 || via->rec_num != 0)) { snd_mtxlock(via->lock); via->polling = 0; callout_stop(&via->poll_timer); snd_mtxunlock(via->lock); callout_drain(&via->poll_timer); } bus_release_resource(dev, SYS_RES_IOPORT, via->regid, via->reg); bus_teardown_intr(dev, via->irq, via->ih); bus_release_resource(dev, SYS_RES_IRQ, via->irqid, via->irq); bus_dma_tag_destroy(via->parent_dmat); bus_dmamap_unload(via->sgd_dmat, via->sgd_dmamap); bus_dmamem_free(via->sgd_dmat, via->sgd_table, via->sgd_dmamap); bus_dma_tag_destroy(via->sgd_dmat); snd_mtxfree(via->lock); free(via, M_DEVBUF); return (0); } static device_method_t via_methods[] = { 
DEVMETHOD(device_probe, via_probe), DEVMETHOD(device_attach, via_attach), DEVMETHOD(device_detach, via_detach), { 0, 0} }; static driver_t via_driver = { "pcm", via_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_via8233, pci, via_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_via8233, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_via8233, 1); Index: head/sys/dev/sound/pci/via82c686.c =================================================================== --- head/sys/dev/sound/pci/via82c686.c (revision 297861) +++ head/sys/dev/sound/pci/via82c686.c (revision 297862) @@ -1,656 +1,656 @@ /*- * Copyright (c) 2000 David Jones * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include SND_DECLARE_FILE("$FreeBSD$"); #define VIA_PCI_ID 0x30581106 #define NSEGS 4 /* Number of segments in SGD table */ #define SEGS_PER_CHAN (NSEGS/2) #define TIMEOUT 50 #define VIA_DEFAULT_BUFSZ 0x1000 #undef DEB #define DEB(x) /* we rely on this struct being packed to 64 bits */ struct via_dma_op { u_int32_t ptr; u_int32_t flags; #define VIA_DMAOP_EOL 0x80000000 #define VIA_DMAOP_FLAG 0x40000000 #define VIA_DMAOP_STOP 0x20000000 #define VIA_DMAOP_COUNT(x) ((x)&0x00FFFFFF) }; struct via_info; struct via_chinfo { struct via_info *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; struct via_dma_op *sgd_table; bus_addr_t sgd_addr; int dir, blksz; int base, count, mode, ctrl; }; struct via_info { bus_space_tag_t st; bus_space_handle_t sh; bus_dma_tag_t parent_dmat; bus_dma_tag_t sgd_dmat; bus_dmamap_t sgd_dmamap; bus_addr_t sgd_addr; struct resource *reg, *irq; int regid, irqid; void *ih; struct ac97_info *codec; unsigned int bufsz; struct via_chinfo pch, rch; struct via_dma_op *sgd_table; u_int16_t codec_caps; struct mtx *lock; }; static u_int32_t via_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps via_vracaps = {4000, 48000, via_fmt, 0}; static struct pcmchan_caps via_caps = {48000, 48000, via_fmt, 0}; static __inline u_int32_t via_rd(struct via_info *via, int regno, int size) { switch (size) { case 1: return bus_space_read_1(via->st, via->sh, regno); case 2: return bus_space_read_2(via->st, via->sh, regno); case 4: return bus_space_read_4(via->st, via->sh, regno); default: return 0xFFFFFFFF; } } static __inline void via_wr(struct via_info *via, int regno, u_int32_t data, int size) { switch (size) { case 1: bus_space_write_1(via->st, via->sh, regno, data); break; case 2: bus_space_write_2(via->st, via->sh, regno, data); break; case 
4: bus_space_write_4(via->st, via->sh, regno, data); break; } } /* -------------------------------------------------------------------- */ /* Codec interface */ static int via_waitready_codec(struct via_info *via) { int i; /* poll until codec not busy */ for (i = 0; (i < TIMEOUT) && (via_rd(via, VIA_CODEC_CTL, 4) & VIA_CODEC_BUSY); i++) DELAY(1); if (i >= TIMEOUT) { printf("via: codec busy\n"); return 1; } return 0; } static int via_waitvalid_codec(struct via_info *via) { int i; /* poll until codec valid */ for (i = 0; (i < TIMEOUT) && !(via_rd(via, VIA_CODEC_CTL, 4) & VIA_CODEC_PRIVALID); i++) DELAY(1); if (i >= TIMEOUT) { printf("via: codec invalid\n"); return 1; } return 0; } static int via_write_codec(kobj_t obj, void *addr, int reg, u_int32_t val) { struct via_info *via = addr; if (via_waitready_codec(via)) return -1; via_wr(via, VIA_CODEC_CTL, VIA_CODEC_PRIVALID | VIA_CODEC_INDEX(reg) | val, 4); return 0; } static int via_read_codec(kobj_t obj, void *addr, int reg) { struct via_info *via = addr; if (via_waitready_codec(via)) return -1; via_wr(via, VIA_CODEC_CTL, VIA_CODEC_PRIVALID | VIA_CODEC_READ | VIA_CODEC_INDEX(reg),4); if (via_waitready_codec(via)) return -1; if (via_waitvalid_codec(via)) return -1; return via_rd(via, VIA_CODEC_CTL, 2); } static kobj_method_t via_ac97_methods[] = { KOBJMETHOD(ac97_read, via_read_codec), KOBJMETHOD(ac97_write, via_write_codec), KOBJMETHOD_END }; AC97_DECLARE(via_ac97); /* -------------------------------------------------------------------- */ static int via_buildsgdt(struct via_chinfo *ch) { u_int32_t phys_addr, flag; int i, segs, seg_size; /* * Build the scatter/gather DMA (SGD) table. * There are four slots in the table: two for play, two for record. * This creates two half-buffers, one of which is playing; the other * is feeding. */ seg_size = ch->blksz; segs = sndbuf_getsize(ch->buffer) / seg_size; phys_addr = sndbuf_getbufaddr(ch->buffer); for (i = 0; i < segs; i++) { flag = (i == segs - 1)? 
VIA_DMAOP_EOL : VIA_DMAOP_FLAG; ch->sgd_table[i].ptr = phys_addr + (i * seg_size); ch->sgd_table[i].flags = flag | seg_size; } return 0; } /* channel interface */ static void * viachan_init(kobj_t obj, void *devinfo, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct via_info *via = devinfo; struct via_chinfo *ch; snd_mtxlock(via->lock); if (dir == PCMDIR_PLAY) { ch = &via->pch; ch->base = VIA_PLAY_DMAOPS_BASE; ch->count = VIA_PLAY_DMAOPS_COUNT; ch->ctrl = VIA_PLAY_CONTROL; ch->mode = VIA_PLAY_MODE; ch->sgd_addr = via->sgd_addr; ch->sgd_table = &via->sgd_table[0]; } else { ch = &via->rch; ch->base = VIA_RECORD_DMAOPS_BASE; ch->count = VIA_RECORD_DMAOPS_COUNT; ch->ctrl = VIA_RECORD_CONTROL; ch->mode = VIA_RECORD_MODE; ch->sgd_addr = via->sgd_addr + sizeof(struct via_dma_op) * SEGS_PER_CHAN; ch->sgd_table = &via->sgd_table[SEGS_PER_CHAN]; } ch->parent = via; ch->channel = c; ch->buffer = b; ch->dir = dir; snd_mtxunlock(via->lock); if (sndbuf_alloc(ch->buffer, via->parent_dmat, 0, via->bufsz) != 0) return NULL; return ch; } static int viachan_setformat(kobj_t obj, void *data, u_int32_t format) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; int mode, mode_set; mode_set = 0; if (AFMT_CHANNEL(format) > 1) mode_set |= VIA_RPMODE_STEREO; if (format & AFMT_S16_LE) mode_set |= VIA_RPMODE_16BIT; DEB(printf("set format: dir = %d, format=%x\n", ch->dir, format)); snd_mtxlock(via->lock); mode = via_rd(via, ch->mode, 1); mode &= ~(VIA_RPMODE_16BIT | VIA_RPMODE_STEREO); mode |= mode_set; via_wr(via, ch->mode, mode, 1); snd_mtxunlock(via->lock); return 0; } static u_int32_t viachan_setspeed(kobj_t obj, void *data, u_int32_t speed) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; int reg; /* * Basic AC'97 defines a 48 kHz sample rate only. For other rates, * upsampling is required. * * The VT82C686A does not perform upsampling, and neither do we. * If the codec supports variable-rate audio (i.e. 
does the upsampling * itself), then negotiate the rate with the codec. Otherwise, * return 48 kHz cuz that's all you got. */ if (via->codec_caps & AC97_EXTCAP_VRA) { reg = (ch->dir == PCMDIR_PLAY)? AC97_REGEXT_FDACRATE : AC97_REGEXT_LADCRATE; return ac97_setrate(via->codec, reg, speed); } else return 48000; } static u_int32_t viachan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct via_chinfo *ch = data; ch->blksz = blocksize; sndbuf_resize(ch->buffer, SEGS_PER_CHAN, ch->blksz); return ch->blksz; } static int viachan_trigger(kobj_t obj, void *data, int go) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; struct via_dma_op *ado; bus_addr_t sgd_addr = ch->sgd_addr; if (!PCMTRIG_COMMON(go)) return 0; ado = ch->sgd_table; DEB(printf("ado located at va=%p pa=%x\n", ado, sgd_addr)); snd_mtxlock(via->lock); if (go == PCMTRIG_START) { via_buildsgdt(ch); via_wr(via, ch->base, sgd_addr, 4); via_wr(via, ch->ctrl, VIA_RPCTRL_START, 1); } else via_wr(via, ch->ctrl, VIA_RPCTRL_TERMINATE, 1); snd_mtxunlock(via->lock); DEB(printf("viachan_trigger: go=%d\n", go)); return 0; } static u_int32_t viachan_getptr(kobj_t obj, void *data) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; struct via_dma_op *ado; bus_addr_t sgd_addr = ch->sgd_addr; u_int32_t ptr, base, base1, len, seg; ado = ch->sgd_table; snd_mtxlock(via->lock); base1 = via_rd(via, ch->base, 4); len = via_rd(via, ch->count, 4); base = via_rd(via, ch->base, 4); if (base != base1) /* Avoid race hazard */ len = via_rd(via, ch->count, 4); snd_mtxunlock(via->lock); DEB(printf("viachan_getptr: len / base = %x / %x\n", len, base)); /* Base points to SGD segment to do, one past current */ /* Determine how many segments have been done */ seg = (base - sgd_addr) / sizeof(struct via_dma_op); if (seg == 0) seg = SEGS_PER_CHAN; /* Now work out offset: seg less count */ ptr = (seg * sndbuf_getsize(ch->buffer) / SEGS_PER_CHAN) - len; if (ch->dir == PCMDIR_REC) { /* DMA appears to 
operate on memory 'lines' of 32 bytes */ /* so don't return any part line - it isn't in RAM yet */ ptr = ptr & ~0x1f; } DEB(printf("return ptr=%u\n", ptr)); return ptr; } static struct pcmchan_caps * viachan_getcaps(kobj_t obj, void *data) { struct via_chinfo *ch = data; struct via_info *via = ch->parent; return (via->codec_caps & AC97_EXTCAP_VRA)? &via_vracaps : &via_caps; } static kobj_method_t viachan_methods[] = { KOBJMETHOD(channel_init, viachan_init), KOBJMETHOD(channel_setformat, viachan_setformat), KOBJMETHOD(channel_setspeed, viachan_setspeed), KOBJMETHOD(channel_setblocksize, viachan_setblocksize), KOBJMETHOD(channel_trigger, viachan_trigger), KOBJMETHOD(channel_getptr, viachan_getptr), KOBJMETHOD(channel_getcaps, viachan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(viachan); /* -------------------------------------------------------------------- */ static void via_intr(void *p) { struct via_info *via = p; /* DEB(printf("viachan_intr\n")); */ /* Read channel */ snd_mtxlock(via->lock); if (via_rd(via, VIA_PLAY_STAT, 1) & VIA_RPSTAT_INTR) { via_wr(via, VIA_PLAY_STAT, VIA_RPSTAT_INTR, 1); snd_mtxunlock(via->lock); chn_intr(via->pch.channel); snd_mtxlock(via->lock); } /* Write channel */ if (via_rd(via, VIA_RECORD_STAT, 1) & VIA_RPSTAT_INTR) { via_wr(via, VIA_RECORD_STAT, VIA_RPSTAT_INTR, 1); snd_mtxunlock(via->lock); chn_intr(via->rch.channel); return; } snd_mtxunlock(via->lock); } /* * Probe and attach the card */ static int via_probe(device_t dev) { if (pci_get_devid(dev) == VIA_PCI_ID) { device_set_desc(dev, "VIA VT82C686A"); return BUS_PROBE_DEFAULT; } return ENXIO; } static void dma_cb(void *p, bus_dma_segment_t *bds, int a, int b) { struct via_info *via = (struct via_info *)p; via->sgd_addr = bds->ds_addr; } static int via_attach(device_t dev) { - struct via_info *via = 0; + struct via_info *via = NULL; char status[SND_STATUSLEN]; u_int32_t data, cnt; via = malloc(sizeof(*via), M_DEVBUF, M_WAITOK | M_ZERO); via->lock = 
snd_mtxcreate(device_get_nameunit(dev), "snd_via82c686 softc"); pci_enable_busmaster(dev); /* Wake up and reset AC97 if necessary */ data = pci_read_config(dev, VIA_AC97STATUS, 1); if ((data & VIA_AC97STATUS_RDY) == 0) { /* Cold reset per ac97r2.3 spec (page 95) */ pci_write_config(dev, VIA_ACLINKCTRL, VIA_ACLINK_EN, 1); /* Assert low */ DELAY(100); /* Wait T_rst_low */ pci_write_config(dev, VIA_ACLINKCTRL, VIA_ACLINK_EN | VIA_ACLINK_NRST, 1); /* Assert high */ DELAY(5); /* Wait T_rst2clk */ pci_write_config(dev, VIA_ACLINKCTRL, VIA_ACLINK_EN, 1); /* Assert low */ } else { /* Warm reset */ pci_write_config(dev, VIA_ACLINKCTRL, VIA_ACLINK_EN, 1); /* Force no sync */ DELAY(100); pci_write_config(dev, VIA_ACLINKCTRL, VIA_ACLINK_EN | VIA_ACLINK_SYNC, 1); /* Sync */ DELAY(5); /* Wait T_sync_high */ pci_write_config(dev, VIA_ACLINKCTRL, VIA_ACLINK_EN, 1); /* Force no sync */ DELAY(5); /* Wait T_sync2clk */ } /* Power everything up */ pci_write_config(dev, VIA_ACLINKCTRL, VIA_ACLINK_DESIRED, 1); /* Wait for codec to become ready (largest reported delay here 310ms) */ for (cnt = 0; cnt < 2000; cnt++) { data = pci_read_config(dev, VIA_AC97STATUS, 1); if (data & VIA_AC97STATUS_RDY) break; DELAY(5000); } via->regid = PCIR_BAR(0); via->reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &via->regid, RF_ACTIVE); if (!via->reg) { device_printf(dev, "cannot allocate bus resource."); goto bad; } via->st = rman_get_bustag(via->reg); via->sh = rman_get_bushandle(via->reg); via->bufsz = pcm_getbuffersize(dev, 4096, VIA_DEFAULT_BUFSZ, 65536); via->irqid = 0; via->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &via->irqid, RF_ACTIVE | RF_SHAREABLE); if (!via->irq || snd_setup_intr(dev, via->irq, INTR_MPSAFE, via_intr, via, &via->ih)) { device_printf(dev, "unable to map interrupt\n"); goto bad; } via_wr(via, VIA_PLAY_MODE, VIA_RPMODE_AUTOSTART | VIA_RPMODE_INTR_FLAG | VIA_RPMODE_INTR_EOL, 1); via_wr(via, VIA_RECORD_MODE, VIA_RPMODE_AUTOSTART | VIA_RPMODE_INTR_FLAG | VIA_RPMODE_INTR_EOL, 
1); via->codec = AC97_CREATE(dev, via, via_ac97); if (!via->codec) goto bad; if (mixer_init(dev, ac97_getmixerclass(), via->codec)) goto bad; via->codec_caps = ac97_getextcaps(via->codec); ac97_setextmode(via->codec, via->codec_caps & (AC97_EXTCAP_VRA | AC97_EXTCAP_VRM)); /* DMA tag for buffers */ if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/via->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/NULL, /*lockarg*/NULL, &via->parent_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto bad; } /* * DMA tag for SGD table. The 686 uses scatter/gather DMA and * requires a list in memory of work to do. We need only 16 bytes * for this list, and it is wasteful to allocate 16K. */ if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2, /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/NSEGS * sizeof(struct via_dma_op), /*nsegments*/1, /*maxsegz*/0x3ffff, /*flags*/0, /*lockfunc*/NULL, /*lockarg*/NULL, &via->sgd_dmat) != 0) { device_printf(dev, "unable to create dma tag\n"); goto bad; } if (bus_dmamem_alloc(via->sgd_dmat, (void **)&via->sgd_table, BUS_DMA_NOWAIT, &via->sgd_dmamap) != 0) goto bad; if (bus_dmamap_load(via->sgd_dmat, via->sgd_dmamap, via->sgd_table, NSEGS * sizeof(struct via_dma_op), dma_cb, via, 0) != 0) goto bad; snprintf(status, SND_STATUSLEN, "at io 0x%jx irq %jd %s", rman_get_start(via->reg), rman_get_start(via->irq), PCM_KLDSTRING(snd_via82c686)); /* Register */ if (pcm_register(dev, via, 1, 1)) goto bad; pcm_addchan(dev, PCMDIR_PLAY, &viachan_class, via); pcm_addchan(dev, PCMDIR_REC, &viachan_class, via); pcm_setstatus(dev, status); return 0; bad: if (via->codec) ac97_destroy(via->codec); if (via->reg) bus_release_resource(dev, SYS_RES_IOPORT, via->regid, via->reg); if (via->ih) 
bus_teardown_intr(dev, via->irq, via->ih); if (via->irq) bus_release_resource(dev, SYS_RES_IRQ, via->irqid, via->irq); if (via->parent_dmat) bus_dma_tag_destroy(via->parent_dmat); if (via->sgd_addr) bus_dmamap_unload(via->sgd_dmat, via->sgd_dmamap); if (via->sgd_table) bus_dmamem_free(via->sgd_dmat, via->sgd_table, via->sgd_dmamap); if (via->sgd_dmat) bus_dma_tag_destroy(via->sgd_dmat); if (via->lock) snd_mtxfree(via->lock); if (via) free(via, M_DEVBUF); return ENXIO; } static int via_detach(device_t dev) { int r; - struct via_info *via = 0; + struct via_info *via = NULL; r = pcm_unregister(dev); if (r) return r; via = pcm_getdevinfo(dev); bus_release_resource(dev, SYS_RES_IOPORT, via->regid, via->reg); bus_teardown_intr(dev, via->irq, via->ih); bus_release_resource(dev, SYS_RES_IRQ, via->irqid, via->irq); bus_dma_tag_destroy(via->parent_dmat); bus_dmamap_unload(via->sgd_dmat, via->sgd_dmamap); bus_dmamem_free(via->sgd_dmat, via->sgd_table, via->sgd_dmamap); bus_dma_tag_destroy(via->sgd_dmat); snd_mtxfree(via->lock); free(via, M_DEVBUF); return 0; } static device_method_t via_methods[] = { DEVMETHOD(device_probe, via_probe), DEVMETHOD(device_attach, via_attach), DEVMETHOD(device_detach, via_detach), { 0, 0} }; static driver_t via_driver = { "pcm", via_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_via82c686, pci, via_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_via82c686, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_via82c686, 1); Index: head/sys/dev/vxge/vxgehal/vxgehal-fifo.c =================================================================== --- head/sys/dev/vxge/vxgehal/vxgehal-fifo.c (revision 297861) +++ head/sys/dev/vxge/vxgehal/vxgehal-fifo.c (revision 297862) @@ -1,1896 +1,1896 @@ /*- * Copyright(c) 2002-2011 Exar Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification are permitted provided the following conditions are met: * * 1. 
Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Exar Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #include /* * __hal_fifo_mempool_item_alloc - Allocate List blocks for TxD list callback * @mempoolh: Handle to memory pool * @memblock: Address of this memory block * @memblock_index: Index of this memory block * @dma_object: dma object for this block * @item: Pointer to this item * @index: Index of this item in memory block * @is_last: If this is last item in the block * @userdata: Specific data of user * * This function is callback passed to __hal_mempool_create to create memory * pool for TxD list */ static vxge_hal_status_e __hal_fifo_mempool_item_alloc( vxge_hal_mempool_h mempoolh, void *memblock, u32 memblock_index, vxge_hal_mempool_dma_t *dma_object, void *item, u32 item_index, u32 is_last, void *userdata) { u32 i; void *block_priv; u32 memblock_item_idx; __hal_fifo_t *fifo = (__hal_fifo_t *) userdata; vxge_assert(fifo != NULL); vxge_assert(item); #if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK) { __hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh; vxge_hal_trace_log_pool("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_pool( "mempoolh = 0x"VXGE_OS_STXFMT", " "memblock = 0x"VXGE_OS_STXFMT", memblock_index = %d, " "dma_object = 0x"VXGE_OS_STXFMT", \ item = 0x"VXGE_OS_STXFMT", " "item_index = %d, is_last = %d, userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock, memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index, is_last, (ptr_t) userdata); } #endif block_priv = __hal_mempool_item_priv((vxge_hal_mempool_t *) mempoolh, memblock_index, item, &memblock_item_idx); vxge_assert(block_priv != NULL); for (i = 0; i < fifo->txdl_per_memblock; i++) { __hal_fifo_txdl_priv_t *txdl_priv; vxge_hal_fifo_txd_t *txdp; int dtr_index = item_index * fifo->txdl_per_memblock + i; txdp = (vxge_hal_fifo_txd_t *) ((void *) ((char *) item + i * fifo->txdl_size)); txdp->host_control = dtr_index; fifo->channel.dtr_arr[dtr_index].dtr = txdp; fifo->channel.dtr_arr[dtr_index].uld_priv = (void *) ((char *) block_priv + 
fifo->txdl_priv_size * i); fifo->channel.dtr_arr[dtr_index].hal_priv = (void *) (((char *) fifo->channel.dtr_arr[dtr_index].uld_priv) + fifo->per_txdl_space); txdl_priv = (__hal_fifo_txdl_priv_t *) fifo->channel.dtr_arr[dtr_index].hal_priv; vxge_assert(txdl_priv); /* pre-format HAL's TxDL's private */ /* LINTED */ txdl_priv->dma_offset = (char *) txdp - (char *) memblock; txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset; txdl_priv->dma_handle = dma_object->handle; txdl_priv->memblock = memblock; txdl_priv->first_txdp = (vxge_hal_fifo_txd_t *) txdp; txdl_priv->next_txdl_priv = NULL; txdl_priv->dang_txdl = NULL; txdl_priv->dang_frags = 0; txdl_priv->alloc_frags = 0; #if defined(VXGE_DEBUG_ASSERT) txdl_priv->dma_object = dma_object; #endif #if defined(VXGE_HAL_ALIGN_XMIT) txdl_priv->align_vaddr = NULL; txdl_priv->align_dma_addr = (dma_addr_t) 0; #ifndef VXGE_HAL_ALIGN_XMIT_ALLOC_RT /* CONSTCOND */ if (TRUE) { vxge_hal_status_e status; if (fifo->config->alignment_size) { status = __hal_fifo_txdl_align_alloc_map(fifo, txdp); if (status != VXGE_HAL_OK) { #if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK) __hal_device_t *hldev; hldev = (__hal_device_t *) fifo->channel.devh; vxge_hal_err_log_pool( "align buffer[%d] %d bytes, \ status %d", (item_index * fifo->txdl_per_memblock + i), fifo->align_size, status); vxge_hal_trace_log_pool( "<== %s:%s:%d Result: 0", __FILE__, __func__, __LINE__); #endif return (status); } } } #endif #endif if (fifo->txdl_init) { fifo->txdl_init(fifo->channel.vph, (vxge_hal_txdl_h) txdp, VXGE_HAL_FIFO_ULD_PRIV(fifo, txdp), VXGE_HAL_FIFO_TXDL_INDEX(txdp), fifo->channel.userdata, VXGE_HAL_OPEN_NORMAL); } } #if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK) { __hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh; vxge_hal_trace_log_pool("<== %s:%s:%d Result: 0", __FILE__, __func__, __LINE__); } #endif return (VXGE_HAL_OK); } /* * __hal_fifo_mempool_item_free - Free List blocks for TxD list callback * @mempoolh: Handle to 
memory pool * @memblock: Address of this memory block * @memblock_index: Index of this memory block * @dma_object: dma object for this block * @item: Pointer to this item * @index: Index of this item in memory block * @is_last: If this is last item in the block * @userdata: Specific data of user * * This function is callback passed to __hal_mempool_free to destroy memory * pool for TxD list */ static vxge_hal_status_e __hal_fifo_mempool_item_free( vxge_hal_mempool_h mempoolh, void *memblock, u32 memblock_index, vxge_hal_mempool_dma_t *dma_object, void *item, u32 item_index, u32 is_last, void *userdata) { vxge_assert(item); #if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK) { __hal_fifo_t *fifo = (__hal_fifo_t *) userdata; vxge_assert(fifo != NULL); __hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh; vxge_hal_trace_log_pool("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_pool("mempoolh = 0x"VXGE_OS_STXFMT", " "memblock = 0x"VXGE_OS_STXFMT", memblock_index = %d, " "dma_object = 0x"VXGE_OS_STXFMT", \ item = 0x"VXGE_OS_STXFMT", " "item_index = %d, is_last = %d, userdata = 0x"VXGE_OS_STXFMT, (ptr_t) mempoolh, (ptr_t) memblock, memblock_index, (ptr_t) dma_object, (ptr_t) item, item_index, is_last, (ptr_t) userdata); } #endif #if defined(VXGE_HAL_ALIGN_XMIT) { __hal_fifo_t *fifo = (__hal_fifo_t *) userdata; vxge_assert(fifo != NULL); if (fifo->config->alignment_size) { int i; vxge_hal_fifo_txd_t *txdp; for (i = 0; i < fifo->txdl_per_memblock; i++) { txdp = (void *) ((char *) item + i * fifo->txdl_size); __hal_fifo_txdl_align_free_unmap(fifo, txdp); } } } #endif #if (VXGE_COMPONENT_HAL_POOL & VXGE_DEBUG_MODULE_MASK) { __hal_fifo_t *fifo = (__hal_fifo_t *) userdata; vxge_assert(fifo != NULL); __hal_device_t *hldev = (__hal_device_t *) fifo->channel.devh; vxge_hal_trace_log_pool("<== %s:%s:%d Result: 0", __FILE__, __func__, __LINE__); } #endif return (VXGE_HAL_OK); } /* * __hal_fifo_create - Create a FIFO * @vpath_handle: Handle returned by 
virtual path open
 * @attr: FIFO configuration parameters structure
 *
 * This function creates FIFO and initializes it.
 *
 */
vxge_hal_status_e
__hal_fifo_create(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_fifo_attr_t *attr)
{
	vxge_hal_status_e status;
	__hal_fifo_t *fifo;
	vxge_hal_fifo_config_t *config;
	u32 txdl_size, memblock_size, txdl_per_memblock;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;

	vxge_assert((vpath_handle != NULL) && (attr != NULL));

	hldev = (__hal_device_t *) vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", attr = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) attr);

	/* Defensive re-check of the asserted preconditions for non-debug
	 * builds where vxge_assert() compiles away. */
	if ((vpath_handle == NULL) || (attr == NULL)) {
		vxge_hal_err_log_fifo("null pointer passed == > %s : %d",
		    __func__, __LINE__);
		vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_HANDLE);
		return (VXGE_HAL_ERR_INVALID_HANDLE);
	}

	config =
	    &vp->vpath->hldev->header.config.vp_config[vp->vpath->vp_id].fifo;

	txdl_size = config->max_frags * sizeof(vxge_hal_fifo_txd_t);

	/* A memory block holds at least one TxDL; use one host page when a
	 * TxDL fits in it, otherwise size the block to a single TxDL. */
	if (txdl_size <= VXGE_OS_HOST_PAGE_SIZE)
		memblock_size = VXGE_OS_HOST_PAGE_SIZE;
	else
		memblock_size = txdl_size;

	txdl_per_memblock = memblock_size / txdl_size;

	/* Round the configured fifo length up to a whole number of
	 * memory blocks. */
	config->fifo_length = ((config->fifo_length + txdl_per_memblock - 1) /
	    txdl_per_memblock) * txdl_per_memblock;

	fifo = (__hal_fifo_t *) vxge_hal_channel_allocate(
	    (vxge_hal_device_h) vp->vpath->hldev,
	    vpath_handle,
	    VXGE_HAL_CHANNEL_TYPE_FIFO,
	    config->fifo_length,
	    attr->per_txdl_space,
	    attr->userdata);

	if (fifo == NULL) {
		vxge_hal_err_log_fifo("Memory allocation failed == > %s : %d",
		    __func__, __LINE__);
		vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
		return (VXGE_HAL_ERR_OUT_OF_MEMORY);
	}

	vp->vpath->fifoh = fifo;
	fifo->stats = &vp->vpath->sw_stats->fifo_stats;
	fifo->config = config;
	fifo->memblock_size = memblock_size;

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock_init(&fifo->channel.post_lock,
	    vp->vpath->hldev->header.pdev);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_init_irq(&fifo->channel.post_lock,
	    vp->vpath->hldev->header.irqh);
#endif

	fifo->align_size =
	    fifo->config->alignment_size * fifo->config->max_aligned_frags;

	/* apply "interrupts per txdl" attribute */
	fifo->interrupt_type = VXGE_HAL_FIFO_TXD_INT_TYPE_UTILZ;
	if (fifo->config->intr) {
		fifo->interrupt_type = VXGE_HAL_FIFO_TXD_INT_TYPE_PER_LIST;
	}

	fifo->no_snoop_bits = config->no_snoop_bits;

	/*
	 * FIFO memory management strategy:
	 *
	 * TxDL split into three independent parts:
	 * - set of TxD's
	 * - TxD HAL private part
	 * - upper layer private part
	 *
	 * Adaptative memory allocation used. i.e. Memory allocated on
	 * demand with the size which will fit into one memory block.
	 * One memory block may contain more than one TxDL. In simple case
	 * memory block size can be equal to CPU page size. On more
	 * sophisticated OS's memory block can be contigious across
	 * several pages.
	 *
	 * During "reserve" operations more memory can be allocated on demand
	 * for example due to FIFO full condition.
	 *
	 * Pool of memory memblocks never shrinks except __hal_fifo_close
	 * routine which will essentially stop channel and free the resources.
	 */

	/* TxDL common private size == TxDL private + ULD private */
	fifo->txdl_priv_size =
	    sizeof(__hal_fifo_txdl_priv_t) + attr->per_txdl_space;
	/* Round the per-TxDL private area up to a cacheline multiple. */
	fifo->txdl_priv_size =
	    ((fifo->txdl_priv_size + __vxge_os_cacheline_size - 1) /
	    __vxge_os_cacheline_size) * __vxge_os_cacheline_size;

	fifo->per_txdl_space = attr->per_txdl_space;

	/* recompute txdl size to be cacheline aligned */
	fifo->txdl_size = txdl_size;
	fifo->txdl_per_memblock = txdl_per_memblock;

	/*
	 * since txdl_init() callback will be called from item_alloc(),
	 * the same way channels userdata might be used prior to
	 * channel_initialize()
	 */
	fifo->txdl_init = attr->txdl_init;
	fifo->txdl_term = attr->txdl_term;
	fifo->callback = attr->callback;

	/* txdl_per_memblock == 0 means a TxDL did not even fit into one
	 * memory block; the configuration is unusable. */
	if (fifo->txdl_per_memblock == 0) {
		__hal_fifo_delete(vpath_handle);
		vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_INVALID_BLOCK_SIZE);
		return (VXGE_HAL_ERR_INVALID_BLOCK_SIZE);
	}

	/* calculate actual TxDL block private size */
	fifo->txdlblock_priv_size =
	    fifo->txdl_priv_size * fifo->txdl_per_memblock;

	fifo->mempool =
	    vxge_hal_mempool_create((vxge_hal_device_h) vp->vpath->hldev,
	    fifo->memblock_size,
	    fifo->memblock_size,
	    fifo->txdlblock_priv_size,
	    fifo->config->fifo_length / fifo->txdl_per_memblock,
	    fifo->config->fifo_length / fifo->txdl_per_memblock,
	    __hal_fifo_mempool_item_alloc,
	    __hal_fifo_mempool_item_free,
	    fifo);

	if (fifo->mempool == NULL) {
		__hal_fifo_delete(vpath_handle);
		vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
		return (VXGE_HAL_ERR_OUT_OF_MEMORY);
	}

	status = vxge_hal_channel_initialize(&fifo->channel);
	if (status != VXGE_HAL_OK) {
		__hal_fifo_delete(vpath_handle);
		vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
	return (VXGE_HAL_OK);
}

/*
 * __hal_fifo_abort - Returns the TxD
 * @fifoh: Fifo to be reset
 * @reopen: See
vxge_hal_reopen_e {}.
 *
 * This function terminates the TxDs of fifo
 */
void
__hal_fifo_abort(
    vxge_hal_fifo_h fifoh,
    vxge_hal_reopen_e reopen)
{
	u32 i = 0;
	__hal_fifo_t *fifo = (__hal_fifo_t *) fifoh;
	__hal_device_t *hldev;
	vxge_hal_txdl_h txdlh;

	vxge_assert(fifoh != NULL);

	hldev = (__hal_device_t *) fifo->channel.devh;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("fifo = 0x"VXGE_OS_STXFMT", reopen = %d",
	    (ptr_t) fifoh, reopen);

	/* First let the ULD terminate every TxDL that was reserved but
	 * never posted to the device (state FREED). */
	if (fifo->txdl_term) {
		__hal_channel_for_each_dtr(&fifo->channel, txdlh, i) {
			if (!__hal_channel_is_posted_dtr(&fifo->channel,
			    i)) {
				fifo->txdl_term(fifo->channel.vph, txdlh,
				    VXGE_HAL_FIFO_ULD_PRIV(fifo, txdlh),
				    VXGE_HAL_TXDL_STATE_FREED,
				    fifo->channel.userdata,
				    reopen);
			}
		}
	}

	/* Then drain all posted TxDLs: complete, terminate (state POSTED)
	 * and return each one to the free list. */
	for (;;) {
		__hal_channel_dtr_try_complete(&fifo->channel, &txdlh);

		if (txdlh == NULL)
			break;

		__hal_channel_dtr_complete(&fifo->channel);

		if (fifo->txdl_term) {
			fifo->txdl_term(fifo->channel.vph, txdlh,
			    VXGE_HAL_FIFO_ULD_PRIV(fifo, txdlh),
			    VXGE_HAL_TXDL_STATE_POSTED,
			    fifo->channel.userdata,
			    reopen);
		}

		__hal_channel_dtr_free(&fifo->channel,
		    VXGE_HAL_FIFO_TXDL_INDEX(txdlh));
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * __hal_fifo_reset - Resets the fifo
 * @fifoh: Fifo to be reset
 *
 * This function resets the fifo during vpath reset operation
 */
vxge_hal_status_e
__hal_fifo_reset(
    vxge_hal_fifo_h fifoh)
{
	vxge_hal_status_e status;
	__hal_device_t *hldev;
	__hal_fifo_t *fifo = (__hal_fifo_t *) fifoh;

	vxge_assert(fifoh != NULL);

	hldev = (__hal_device_t *) fifo->channel.devh;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("fifo = 0x"VXGE_OS_STXFMT, (ptr_t) fifoh);

	/* Abort (drain/terminate) all outstanding TxDLs before resetting
	 * the underlying channel state. */
	__hal_fifo_abort(fifoh, VXGE_HAL_RESET_ONLY);

	status = __hal_channel_reset(&fifo->channel);

	if (status != VXGE_HAL_OK) {
		vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
	return (VXGE_HAL_OK);
}

/*
 * vxge_hal_fifo_doorbell_reset - Resets the doorbell fifo
 * @vapth_handle: Vpath Handle
 *
 * This function resets the doorbell fifo during if fifo error occurs
 */
vxge_hal_status_e
vxge_hal_fifo_doorbell_reset(
    vxge_hal_vpath_h vpath_handle)
{
	u32 i;
	vxge_hal_txdl_h txdlh;
	__hal_fifo_t *fifo;
	__hal_virtualpath_t *vpath;
	__hal_fifo_txdl_priv_t *txdl_priv;
	__hal_device_t *hldev;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	vxge_hal_status_e status = VXGE_HAL_OK;

	vxge_assert(vpath_handle != NULL);

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vpath = ((__hal_vpath_handle_t *) fifo->channel.vph)->vpath;

	status = __hal_non_offload_db_reset(fifo->channel.vph);

	if (status != VXGE_HAL_OK) {
		vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
		    __FILE__, __func__, __LINE__);
		return (status);
	}

	/* Re-post the doorbell for every TxDL that is still posted.  Bit 0
	 * of the list pointer carries the "no bandwidth limit" flag. */
	__hal_channel_for_each_posted_dtr(&fifo->channel, txdlh, i) {

		txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);

		__hal_non_offload_db_post(fifo->channel.vph,
		    ((VXGE_HAL_FIFO_TXD_NO_BW_LIMIT_GET(
		    ((vxge_hal_fifo_txd_t *) txdlh)->control_1)) ?
		    (((u64) txdl_priv->dma_addr) | 0x1) :
		    (u64) txdl_priv->dma_addr),
		    txdl_priv->frags - 1,
		    vpath->vp_config->fifo.no_snoop_bits);
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
	return (status);
}

/*
 * __hal_fifo_delete - Removes the FIFO
 * @vpath_handle: Virtual path handle to which this queue belongs
 *
 * This function freeup the memory pool and removes the FIFO
 */
void
__hal_fifo_delete(
    vxge_hal_vpath_h vpath_handle)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;

	vxge_assert(vpath_handle != NULL);

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	/* Drain all descriptors before destroying the backing mempool. */
	if (fifo->mempool) {
		__hal_fifo_abort(vp->vpath->fifoh, VXGE_HAL_OPEN_NORMAL);
		vxge_hal_mempool_destroy(fifo->mempool);
	}

	vxge_hal_channel_terminate(&fifo->channel);

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock_destroy(&fifo->channel.post_lock,
	    vp->vpath->hldev->header.pdev);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_destroy_irq(&fifo->channel.post_lock,
	    vp->vpath->hldev->header.pdev);
#endif

	vxge_hal_channel_free(&fifo->channel);

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

#if defined(VXGE_HAL_ALIGN_XMIT)
/*
 * __hal_fifo_txdl_align_free_unmap - Unmap the alignement buffers
 * @fifo: Fifo
 * @txdp: txdl
 *
 * This function unmaps dma memory for the alignment buffers
 */
void
__hal_fifo_txdl_align_free_unmap(
    __hal_fifo_t *fifo,
    vxge_hal_fifo_txd_t *txdp)
{
	__hal_device_t *hldev;
	__hal_fifo_txdl_priv_t *txdl_priv;

	vxge_assert((fifo != NULL) && (txdp != NULL));

	hldev = (__hal_device_t *) fifo->channel.devh;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo(
	    "fifo = 0x"VXGE_OS_STXFMT", txdp = 0x"VXGE_OS_STXFMT,
	    (ptr_t) fifo, (ptr_t) txdp);

	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);

	/* Only release if an alignment buffer was actually allocated. */
	if (txdl_priv->align_vaddr != NULL) {
		__hal_blockpool_free(fifo->channel.devh,
		    txdl_priv->align_vaddr,
		    fifo->align_size,
		    &txdl_priv->align_dma_addr,
		    &txdl_priv->align_dma_handle,
		    &txdl_priv->align_dma_acch);

		txdl_priv->align_vaddr = NULL;
		txdl_priv->align_dma_addr = 0;
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * __hal_fifo_txdl_align_alloc_map - Maps the alignement buffers
 * @fifo: Fifo
 * @txdp: txdl
 *
 * This function maps dma memory for the alignment buffers
 */
vxge_hal_status_e
__hal_fifo_txdl_align_alloc_map(
    __hal_fifo_t *fifo,
    vxge_hal_fifo_txd_t *txdp)
{
	__hal_device_t *hldev;
	__hal_fifo_txdl_priv_t *txdl_priv;

	vxge_assert((fifo != NULL) && (txdp != NULL));

	hldev = (__hal_device_t *) fifo->channel.devh;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo(
	    "fifo = 0x"VXGE_OS_STXFMT", txdp = 0x"VXGE_OS_STXFMT,
	    (ptr_t) fifo, (ptr_t) txdp);

	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);

	/* allocate alignment DMA-buffer */
	txdl_priv->align_vaddr =
	    (u8 *) __hal_blockpool_malloc(fifo->channel.devh,
	    fifo->align_size,
	    &txdl_priv->align_dma_addr,
	    &txdl_priv->align_dma_handle,
	    &txdl_priv->align_dma_acch);

	if (txdl_priv->align_vaddr == NULL) {
		vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY);
		return (VXGE_HAL_ERR_OUT_OF_MEMORY);
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
	return (VXGE_HAL_OK);
}
#endif

/*
 * vxge_hal_fifo_free_txdl_count_get - returns the number of txdls
 * available in the fifo
 * @vpath_handle: Virtual path handle.
 */
u32
vxge_hal_fifo_free_txdl_count_get(vxge_hal_vpath_h vpath_handle)
{
	return __hal_channel_free_dtr_count(&((__hal_fifo_t *)
	    ((__hal_vpath_handle_t *) vpath_handle)->vpath->fifoh)->channel);
}

/*
 * vxge_hal_fifo_txdl_private_get - Retrieve per-descriptor private data.
* @vpath_handle: Virtual path handle. * @txdlh: Descriptor handle. * * Retrieve per-descriptor private data. * Note that ULD requests per-descriptor space via * vxge_hal_fifo_attr_t passed to * vxge_hal_vpath_open(). * * Returns: private ULD data associated with the descriptor. */ void * vxge_hal_fifo_txdl_private_get( vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh) { return (VXGE_HAL_FIFO_ULD_PRIV(((__hal_fifo_t *) ((__hal_vpath_handle_t *) vpath_handle)->vpath->fifoh), txdlh)); } /* * vxge_hal_fifo_txdl_reserve - Reserve fifo descriptor. * @vapth_handle: virtual path handle. * @txdlh: Reserved descriptor. On success HAL fills this "out" parameter * with a valid handle. * @txdl_priv: Buffer to return the pointer to per txdl space * * Reserve a single TxDL (that is, fifo descriptor) * for the subsequent filling-in by upper layerdriver (ULD)) * and posting on the corresponding channel (@channelh) * via vxge_hal_fifo_txdl_post(). * * Note: it is the responsibility of ULD to reserve multiple descriptors * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor * carries up to configured number (fifo.max_frags) of contiguous buffers. 
 *
 * Returns: VXGE_HAL_OK - success;
 * VXGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
 *
 */
vxge_hal_status_e
vxge_hal_fifo_txdl_reserve(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h *txdlh,
    void **txdl_priv)
{
	u32 i;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;
	__hal_fifo_t *fifo;
	vxge_hal_status_e status;

#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;
#endif

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo(
	    "vpath_handle = 0x"VXGE_OS_STXFMT", txdlh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) txdlh);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	/* The free-descriptor list is shared with the post path; take the
	 * channel post lock around the reserve. */
#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
#endif

	status = __hal_channel_dtr_reserve(&fifo->channel, txdlh);

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_unlock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
#endif

	if (status == VXGE_HAL_OK) {
		vxge_hal_fifo_txd_t *txdp = (vxge_hal_fifo_txd_t *)*txdlh;
		__hal_fifo_txdl_priv_t *priv;

		priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);

		/* reset the TxDL's private */
		priv->align_dma_offset = 0;
		priv->align_vaddr_start = priv->align_vaddr;
		priv->align_used_frags = 0;
		priv->frags = 0;
		priv->alloc_frags = fifo->config->max_frags;
		priv->dang_txdl = NULL;
		priv->dang_frags = 0;
		priv->next_txdl_priv = NULL;
		priv->bytes_sent = 0;

		*txdl_priv = VXGE_HAL_FIFO_ULD_PRIV(fifo, txdp);

		/* Zero out control words of every TxD in the list so stale
		 * flags from a previous use cannot leak through. */
		for (i = 0; i < fifo->config->max_frags; i++) {
			txdp = ((vxge_hal_fifo_txd_t *)*txdlh) + i;
			txdp->control_0 = txdp->control_1 = 0;
		}

#if defined(VXGE_OS_MEMORY_CHECK)
		priv->allocated = 1;
#endif
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
	return (status);
}

/*
 * vxge_hal_fifo_txdl_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @vpath_handle: virtual path handle.
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 * (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hal_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hal_fifo_txdl_mss_set() and vxge_hal_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the X3100 specification.
 *
 */
void
vxge_hal_fifo_txdl_buffer_set(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h txdlh,
    u32 frag_idx,
    dma_addr_t dma_pointer,
    unsigned long size)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	__hal_fifo_txdl_priv_t *txdl_priv;
	vxge_hal_fifo_txd_t *txdp;

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL) &&
	    (dma_pointer != 0) && (size != 0));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT", frag_idx = %d, "
	    "dma_pointer = 0x"VXGE_OS_LLXFMT", size = %lu",
	    (ptr_t) vpath_handle, (ptr_t) txdlh, frag_idx,
	    (u64) dma_pointer, size);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);

	/* Next unfilled TxD within the list. */
	txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;

	/*
	 * Note:
	 * it is the responsibility of upper layers and not HAL
	 * detect it and skip zero-size fragment
	 */
	vxge_assert(size > 0);
	vxge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64) dma_pointer;
	txdp->control_0 |= VXGE_HAL_FIFO_TXD_BUFFER_SIZE(size);
	txdl_priv->bytes_sent += size;
	fifo->stats->total_buffers++;
	txdl_priv->frags++;

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_fifo_txdl_buffer_set_aligned - Align transmit buffer and fill
 * in fifo descriptor.
 * @vpath_handle: Virtual path handle.
 * @txdlh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 * (of buffers).
 * @vaddr: Virtual address of the data buffer.
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 * @misaligned_size: Size (in bytes) of the misaligned portion of the
 * data buffer. Calculated by the caller, based on the platform/OS/other
 * specific criteria, which is outside of HAL's domain. See notes below.
 *
 * This API is part of the transmit descriptor preparation for posting
 * (via vxge_hal_fifo_txdl_post()). The related "preparation" APIs include
 * vxge_hal_fifo_txdl_mss_set() and vxge_hal_fifo_txdl_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the X3100 specification.
 * On the PCI-X based systems aligning transmit data typically provides better
 * transmit performance. The typical alignment granularity: L2 cacheline size.
 * However, HAL does not make assumptions in terms of the alignment granularity;
 * this is specified via additional @misaligned_size parameter described above.
 * Prior to calling vxge_hal_fifo_txdl_buffer_set_aligned(),
 * ULD is supposed to check alignment of a given fragment/buffer. For this HAL
 * provides a separate vxge_hal_check_alignment() API sufficient to cover
 * most (but not all) possible alignment criteria.
 * If the buffer appears to be aligned, the ULD calls
 * vxge_hal_fifo_txdl_buffer_set().
 * Otherwise, ULD calls vxge_hal_fifo_txdl_buffer_set_aligned().
 *
 * Note; This API is a "superset" of vxge_hal_fifo_txdl_buffer_set(). In
 * addition to filling in the specified descriptor it aligns transmit data on
 * the specified boundary.
* Note: Decision on whether to align or not to align a given contiguous * transmit buffer is outside of HAL's domain. To this end ULD can use any * programmable criteria, which can help to 1) boost transmit performance, * and/or 2) provide a workaround for PCI bridge bugs, if any. * */ vxge_hal_status_e vxge_hal_fifo_txdl_buffer_set_aligned( vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh, u32 frag_idx, void *vaddr, dma_addr_t dma_pointer, u32 size, u32 misaligned_size) { __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle; __hal_fifo_t *fifo; __hal_device_t *hldev; __hal_fifo_txdl_priv_t *txdl_priv; vxge_hal_fifo_txd_t *txdp; int remaining_size; ptrdiff_t prev_boff; vxge_assert((vpath_handle != NULL) && (txdlh != NULL) && - (vaddr != 0) && (dma_pointer != 0) && + (vaddr != NULL) && (dma_pointer != 0) && (size != 0) && (misaligned_size != 0)); hldev = vp->vpath->hldev; vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_fifo( "vpath_handle = 0x"VXGE_OS_STXFMT", txdlh = 0x"VXGE_OS_STXFMT", " "frag_idx = %d, vaddr = 0x"VXGE_OS_STXFMT", " "dma_pointer = 0x"VXGE_OS_LLXFMT", size = %d, " "misaligned_size = %d", (ptr_t) vpath_handle, (ptr_t) txdlh, frag_idx, (ptr_t) vaddr, (u64) dma_pointer, size, misaligned_size); fifo = (__hal_fifo_t *) vp->vpath->fifoh; vxge_assert(fifo != NULL); txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh); txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags; /* * On some systems buffer size could be zero. * It is the responsibility of ULD and *not HAL* to * detect it and skip it. 
*/ vxge_assert(size > 0); vxge_assert(frag_idx < txdl_priv->alloc_frags); vxge_assert(misaligned_size != 0 && misaligned_size <= fifo->config->alignment_size); remaining_size = size - misaligned_size; vxge_assert(remaining_size >= 0); vxge_os_memcpy((char *) txdl_priv->align_vaddr_start, vaddr, misaligned_size); if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) { return (VXGE_HAL_ERR_OUT_ALIGNED_FRAGS); } /* setup new buffer */ /* LINTED */ prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr; txdp->buffer_pointer = (u64) txdl_priv->align_dma_addr + prev_boff; txdp->control_0 |= VXGE_HAL_FIFO_TXD_BUFFER_SIZE(misaligned_size); txdl_priv->bytes_sent += misaligned_size; fifo->stats->total_buffers++; txdl_priv->frags++; txdl_priv->align_used_frags++; txdl_priv->align_vaddr_start += fifo->config->alignment_size; txdl_priv->align_dma_offset = 0; #if defined(VXGE_OS_DMA_REQUIRES_SYNC) /* sync new buffer */ vxge_os_dma_sync(fifo->channel.pdev, txdl_priv->align_dma_handle, txdp->buffer_pointer, 0, misaligned_size, VXGE_OS_DMA_DIR_TODEVICE); #endif if (remaining_size) { vxge_assert(frag_idx < txdl_priv->alloc_frags); txdp++; txdp->buffer_pointer = (u64) dma_pointer + misaligned_size; txdp->control_0 |= VXGE_HAL_FIFO_TXD_BUFFER_SIZE(remaining_size); txdl_priv->bytes_sent += remaining_size; fifo->stats->total_buffers++; txdl_priv->frags++; } vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0", __FILE__, __func__, __LINE__); return (VXGE_HAL_OK); } /* * vxge_hal_fifo_txdl_buffer_append - Append the contents of virtually * contiguous data buffer to a single physically contiguous buffer. * @vpath_handle: Virtual path handle. * @txdlh: Descriptor handle. * @vaddr: Virtual address of the data buffer. * @size: Size of the data buffer (in bytes). * * This API is part of the transmit descriptor preparation for posting * (via vxge_hal_fifo_txdl_post()). 
* The main difference of this API wrt to the APIs * vxge_hal_fifo_txdl_buffer_set_aligned() is that this API appends the * contents of virtually contiguous data buffers received from * upper layer into a single physically contiguous data buffer and the * device will do a DMA from this buffer. * * See Also: vxge_hal_fifo_txdl_buffer_finalize(), * vxge_hal_fifo_txdl_buffer_set(), * vxge_hal_fifo_txdl_buffer_set_aligned(). */ vxge_hal_status_e vxge_hal_fifo_txdl_buffer_append( vxge_hal_vpath_h vpath_handle, vxge_hal_txdl_h txdlh, void *vaddr, u32 size) { __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle; __hal_fifo_t *fifo; __hal_device_t *hldev; __hal_fifo_txdl_priv_t *txdl_priv; ptrdiff_t used; - vxge_assert((vpath_handle != NULL) && (txdlh != NULL) && (vaddr != 0) && - (size == 0)); + vxge_assert((vpath_handle != NULL) && (txdlh != NULL) && + (vaddr != NULL) && (size == 0)); hldev = vp->vpath->hldev; vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", " "txdlh = 0x"VXGE_OS_STXFMT", vaddr = 0x"VXGE_OS_STXFMT", " "size = %d", (ptr_t) vpath_handle, (ptr_t) txdlh, (ptr_t) vaddr, size); fifo = (__hal_fifo_t *) vp->vpath->fifoh; vxge_assert(fifo != NULL); txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh); /* LINTED */ used = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr; used += txdl_priv->align_dma_offset; if (used + (unsigned int)size > (unsigned int)fifo->align_size) return (VXGE_HAL_ERR_OUT_ALIGNED_FRAGS); vxge_os_memcpy((char *) txdl_priv->align_vaddr_start + txdl_priv->align_dma_offset, vaddr, size); fifo->stats->copied_frags++; txdl_priv->align_dma_offset += size; vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0", __FILE__, __func__, __LINE__); return (VXGE_HAL_OK); } /* * vxge_hal_fifo_txdl_buffer_finalize - Prepares a descriptor that contains the * single physically contiguous buffer. * * @vpath_handle: Virtual path handle. * @txdlh: Descriptor handle. 
 * @frag_idx: Index of the data buffer in the Txdl list.
 *
 * This API in conjuction with vxge_hal_fifo_txdl_buffer_append() prepares
 * a descriptor that consists of a single physically contiguous buffer
 * which inturn contains the contents of one or more virtually contiguous
 * buffers received from the upper layer.
 *
 * See Also: vxge_hal_fifo_txdl_buffer_append().
 */
void
vxge_hal_fifo_txdl_buffer_finalize(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h txdlh,
    u32 frag_idx)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	__hal_fifo_txdl_priv_t *txdl_priv;
	vxge_hal_fifo_txd_t *txdp;
	ptrdiff_t prev_boff;

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL) &&
	    (frag_idx != 0));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT", frag_idx = %d",
	    (ptr_t) vpath_handle, (ptr_t) txdlh, frag_idx);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);

	/* Next unfilled TxD receives the staged (appended) data. */
	txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;

	/* LINTED */
	prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
	txdp->buffer_pointer = (u64) txdl_priv->align_dma_addr + prev_boff;
	txdp->control_0 |=
	    VXGE_HAL_FIFO_TXD_BUFFER_SIZE(txdl_priv->align_dma_offset);
	txdl_priv->bytes_sent += (unsigned int)txdl_priv->align_dma_offset;
	fifo->stats->total_buffers++;
	fifo->stats->copied_buffers++;
	txdl_priv->frags++;
	txdl_priv->align_used_frags++;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC)
	/* sync pre-mapped buffer */
	vxge_os_dma_sync(fifo->channel.pdev,
	    txdl_priv->align_dma_handle,
	    txdp->buffer_pointer,
	    0,
	    txdl_priv->align_dma_offset,
	    VXGE_OS_DMA_DIR_TODEVICE);
#endif

	/* increment vaddr_start for the next buffer_append() iteration */
	txdl_priv->align_vaddr_start += txdl_priv->align_dma_offset;
	txdl_priv->align_dma_offset = 0;

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_fifo_txdl_new_frame_set - Start the new packet by setting TXDL flags
 * @vpath_handle: virtual path handle.
 * @txdlh: Descriptor handle.
 * @tagged: Is the frame tagged
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via vxge_hal_fifo_txdl_post()). This api is used to mark the end of previous
 * frame and start of a new frame.
 *
 */
void
vxge_hal_fifo_txdl_new_frame_set(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h txdlh,
    u32 tagged)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	__hal_fifo_txdl_priv_t *txdl_priv;
	vxge_hal_fifo_txd_t *txdp;

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT", tagged = %d",
	    (ptr_t) vpath_handle, (ptr_t) txdlh, tagged);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);

	/* Mark the next TxD as the FIRST of a new frame. */
	txdp = (vxge_hal_fifo_txd_t *) txdlh + txdl_priv->frags;

	txdp->control_0 |=
	    VXGE_HAL_FIFO_TXD_HOST_STEER(vp->vpath->vp_config->wire_port);
	txdp->control_0 |= VXGE_HAL_FIFO_TXD_GATHER_CODE(
	    VXGE_HAL_FIFO_TXD_GATHER_CODE_FIRST);
	txdp->control_1 |= fifo->interrupt_type;
	txdp->control_1 |= VXGE_HAL_FIFO_TXD_INT_NUMBER(
	    vp->vpath->tx_intr_num);
	if (tagged)
		txdp->control_1 |= VXGE_HAL_FIFO_TXD_NO_BW_LIMIT;

	/* Close the previous frame: its last TxD gets gather code LAST. */
	if (txdl_priv->frags) {
		txdp = (vxge_hal_fifo_txd_t *) txdlh + (txdl_priv->frags - 1);
		txdp->control_0 |= VXGE_HAL_FIFO_TXD_GATHER_CODE(
		    VXGE_HAL_FIFO_TXD_GATHER_CODE_LAST);
	}

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_fifo_txdl_post - Post descriptor on the fifo channel.
 * @vpath_handle: Virtual path handle.
 * @txdlh: Descriptor obtained via vxge_hal_fifo_txdl_reserve()
 * @tagged: Is the frame tagged
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting the descriptor should be filled in accordance with
 * Host/X3100 interface specification for a given service (LL, etc.).
 *
 */
void
vxge_hal_fifo_txdl_post(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h txdlh,
    u32 tagged)
{
	u64 list_ptr;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	__hal_fifo_txdl_priv_t *txdl_priv;
	vxge_hal_fifo_txd_t *txdp_last;
	vxge_hal_fifo_txd_t *txdp_first;

#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;
#endif

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT", tagged = %d",
	    (ptr_t) vpath_handle, (ptr_t) txdlh, tagged);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);

	/* Finalize first/last TxD control words before handing the list
	 * to the device. */
	txdp_first = (vxge_hal_fifo_txd_t *) txdlh;
	txdp_first->control_0 |=
	    VXGE_HAL_FIFO_TXD_HOST_STEER(vp->vpath->vp_config->wire_port);
	txdp_first->control_0 |=
	    VXGE_HAL_FIFO_TXD_GATHER_CODE(VXGE_HAL_FIFO_TXD_GATHER_CODE_FIRST);
	txdp_first->control_1 |=
	    VXGE_HAL_FIFO_TXD_INT_NUMBER(vp->vpath->tx_intr_num);
	txdp_first->control_1 |= fifo->interrupt_type;
	list_ptr = (u64) txdl_priv->dma_addr;
	if (tagged) {
		txdp_first->control_1 |= VXGE_HAL_FIFO_TXD_NO_BW_LIMIT;
		/* Bit 0 of the doorbell list pointer flags "no BW limit". */
		list_ptr |= 0x1;
	}

	txdp_last =
	    (vxge_hal_fifo_txd_t *) txdlh + (txdl_priv->frags - 1);
	txdp_last->control_0 |=
	    VXGE_HAL_FIFO_TXD_GATHER_CODE(VXGE_HAL_FIFO_TXD_GATHER_CODE_LAST);

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
#endif

	/* Transfer ownership of the TxDL to the adapter. */
	txdp_first->control_0 |= VXGE_HAL_FIFO_TXD_LIST_OWN_ADAPTER;

#if defined(VXGE_DEBUG_ASSERT)
	/* make sure device overwrites the t_code value on completion */
	txdp_first->control_0 |=
	    VXGE_HAL_FIFO_TXD_T_CODE(VXGE_HAL_FIFO_TXD_T_CODE_UNUSED);
#endif

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_TXDL_STREAMING)
	/* sync the TxDL to device */
	vxge_os_dma_sync(fifo->channel.pdev,
	    txdl_priv->dma_handle,
	    txdl_priv->dma_addr,
	    txdl_priv->dma_offset,
	    txdl_priv->frags << 5,	/* sizeof(vxge_hal_fifo_txd_t) */
	    VXGE_OS_DMA_DIR_TODEVICE);
#endif
	/*
	 * we want touch dtr_arr in order with ownership bit set to HW
	 */
	__hal_channel_dtr_post(&fifo->channel, VXGE_HAL_FIFO_TXDL_INDEX(txdlh));

	__hal_non_offload_db_post(vpath_handle,
	    list_ptr,
	    txdl_priv->frags - 1,
	    vp->vpath->vp_config->fifo.no_snoop_bits);

#if defined(VXGE_HAL_FIFO_DUMP_TXD)
	vxge_hal_info_log_fifo(
	    ""VXGE_OS_LLXFMT":"VXGE_OS_LLXFMT":"VXGE_OS_LLXFMT":"
	    VXGE_OS_LLXFMT" dma "VXGE_OS_LLXFMT,
	    txdp_first->control_0, txdp_first->control_1,
	    txdp_first->buffer_pointer,
	    VXGE_HAL_FIFO_TXDL_INDEX(txdp_first),
	    txdl_priv->dma_addr);
#endif

	fifo->stats->total_posts++;
	fifo->stats->common_stats.usage_cnt++;
	if (fifo->stats->common_stats.usage_max <
	    fifo->stats->common_stats.usage_cnt)
		fifo->stats->common_stats.usage_max =
		    fifo->stats->common_stats.usage_cnt;

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_unlock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
#endif

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_fifo_is_next_txdl_completed - Checks if the next txdl is completed
 * @vpath_handle: Virtual path handle.
*/ vxge_hal_status_e vxge_hal_fifo_is_next_txdl_completed(vxge_hal_vpath_h vpath_handle) { __hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle; __hal_fifo_t *fifo; __hal_device_t *hldev; vxge_hal_fifo_txd_t *txdp; vxge_hal_txdl_h txdlh; vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS; #if defined(VXGE_HAL_TX_MULTI_POST_IRQ) unsigned long flags = 0; #endif vxge_assert(vpath_handle != NULL); hldev = vp->vpath->hldev; vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT, (ptr_t) vpath_handle); fifo = (__hal_fifo_t *) vp->vpath->fifoh; vxge_assert(fifo != NULL); #if defined(VXGE_HAL_TX_MULTI_POST) vxge_os_spin_lock(&fifo->channel.post_lock); #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ) vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags); #endif __hal_channel_dtr_try_complete(&fifo->channel, &txdlh); txdp = (vxge_hal_fifo_txd_t *) txdlh; if ((txdp != NULL) && (!(txdp->control_0 & VXGE_HAL_FIFO_TXD_LIST_OWN_ADAPTER))) { status = VXGE_HAL_OK; } #if defined(VXGE_HAL_TX_MULTI_POST) vxge_os_spin_unlock(&fifo->channel.post_lock); #elif defined(VXGE_HAL_TX_MULTI_POST_IRQ) vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags); #endif vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, status); /* no more completions */ return (status); } /* * vxge_hal_fifo_txdl_next_completed - Retrieve next completed descriptor. * @vpath_handle: Virtual path handle. * @txdlh: Descriptor handle. Returned by HAL. * @txdl_priv: Buffer to return the pointer to per txdl space * @t_code: Transfer code, as per X3100 User Guide, * Transmit Descriptor Format. * Returned by HAL. * * Retrieve the _next_ completed descriptor. * HAL uses channel callback (*vxge_hal_channel_callback_f) to notifiy * upper-layer driver (ULD) of new completed descriptors. 
 * After that
 * the ULD can use vxge_hal_fifo_txdl_next_completed to retrieve the rest
 * completions (the very first completion is passed by HAL via
 * vxge_hal_channel_callback_f).
 *
 * Implementation-wise, the upper-layer driver is free to call
 * vxge_hal_fifo_txdl_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HAL)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case X3100 completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to X3100 User Guide.
 *
 * Returns: VXGE_HAL_OK - success.
 * VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 */
vxge_hal_status_e
vxge_hal_fifo_txdl_next_completed(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h * txdlh,
    void **txdl_priv,
    vxge_hal_fifo_tcode_e * t_code)
{
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	vxge_hal_fifo_txd_t *txdp;

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_TXDL_STREAMING)
	__hal_fifo_txdl_priv_t *priv;
#endif
#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;
#endif

	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	vxge_hal_status_e status = VXGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL) &&
	    (t_code != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT", t_code = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) txdlh, (ptr_t) t_code);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	/* Default to "no completion"; overwritten only on success. */
	*txdlh = 0;

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
#endif

	__hal_channel_dtr_try_complete(&fifo->channel, txdlh);

	txdp = (vxge_hal_fifo_txd_t *) * txdlh;
	if (txdp != NULL) {

#if defined(VXGE_OS_DMA_REQUIRES_SYNC) && defined(VXGE_HAL_DMA_TXDL_STREAMING)
		priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);

		/*
		 * sync TxDL to read the ownership
		 *
		 * Note: 16bytes means Control_1 & Control_2
		 */
		vxge_os_dma_sync(fifo->channel.pdev,
		    priv->dma_handle,
		    priv->dma_addr,
		    priv->dma_offset,
		    16,
		    VXGE_OS_DMA_DIR_FROMDEVICE);
#endif

		/* check whether host owns it */
		if (!(txdp->control_0 & VXGE_HAL_FIFO_TXD_LIST_OWN_ADAPTER)) {

			__hal_channel_dtr_complete(&fifo->channel);

			*txdl_priv = VXGE_HAL_FIFO_ULD_PRIV(fifo, txdp);

			*t_code = (vxge_hal_fifo_tcode_e)
			    VXGE_HAL_FIFO_TXD_T_CODE_GET(txdp->control_0);

			if (fifo->stats->common_stats.usage_cnt > 0)
				fifo->stats->common_stats.usage_cnt--;

			status = VXGE_HAL_OK;
		}
	}

	/* no more completions */

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_unlock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
#endif

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
	    __FILE__, __func__, __LINE__, status);

	return (status);
}

/*
 * vxge_hal_fifo_handle_tcode - Handle transfer code.
 * @vpath_handle: Virtual Path handle.
 * @txdlh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the X3100 user guide)
 * "transfer codes".
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor.
 *
 * Returns: one of the vxge_hal_status_e {} enumerated types.
 * VXGE_HAL_OK - for success.
 * VXGE_HAL_ERR_CRITICAL - when encounters critical error.
 */
vxge_hal_status_e
vxge_hal_fifo_handle_tcode(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h txdlh,
    vxge_hal_fifo_tcode_e t_code)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_device_t *hldev;

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT", t_code = 0x%d",
	    (ptr_t) vpath_handle, (ptr_t) txdlh, t_code);

	/* Only the low 3 bits of the transfer code are defined. */
	switch ((t_code & 0x7)) {
	case 0:
		/* 000: Transfer operation completed successfully. */
		break;
	case 1:
		/*
		 * 001: a PCI read transaction (either TxD or frame data)
		 * returned with corrupt data.
		 */
		break;
	case 2:
		/* 010: a PCI read transaction was returned with no data. */
		break;
	case 3:
		/*
		 * 011: The host attempted to send either a frame or LSO
		 * MSS that was too long (>9800B).
		 */
		break;
	case 4:
		/*
		 * 100: Error detected during TCP/UDP Large Send
		 * Offload operation, due to improper header template,
		 * unsupported protocol, etc.
		 */
		break;
	default:
		vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_TCODE);
		return (VXGE_HAL_ERR_INVALID_TCODE);
	}

	/* Bump the per-tcode error counter (also counts tcode 0). */
	vp->vpath->sw_stats->fifo_stats.txd_t_code_err_cnt[t_code]++;

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: %d",
	    __FILE__, __func__, __LINE__, VXGE_HAL_OK);

	return (VXGE_HAL_OK);
}

/*
 * __hal_fifo_txdl_free_many - Free the fragments
 * @fifo: FIFO
 * @txdp: Pointer to a TxD
 * @list_size: List size
 * @frags: Number of fragments
 *
 * This routine frees the fragments in a txdl
 */
void
__hal_fifo_txdl_free_many(
    __hal_fifo_t *fifo,
    vxge_hal_fifo_txd_t * txdp,
    u32 list_size,
    u32 frags)
{
	__hal_fifo_txdl_priv_t *current_txdl_priv;
	__hal_fifo_txdl_priv_t *next_txdl_priv;
	u32 invalid_frags = frags % list_size;
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) fifo->channel.vph;
	__hal_device_t *hldev;

	vxge_assert((fifo != NULL) && (txdp != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo(
	    "fifo = 0x"VXGE_OS_STXFMT", txdp = 0x"VXGE_OS_STXFMT", "
	    "list_size = %d, frags = %d", (ptr_t) fifo, (ptr_t) txdp,
	    list_size, frags);

	/* frags must be a whole multiple of the per-txdl list size. */
	if (invalid_frags) {
		vxge_hal_trace_log_fifo(
		    "freeing corrupt txdlh 0x"VXGE_OS_STXFMT", "
		    "fragments %d list size %d",
		    (ptr_t) txdp, frags, list_size);
		vxge_assert(invalid_frags == 0);
	}

	/* Walk the linked chain of txdl lists, freeing one per iteration. */
	while (txdp) {
		vxge_hal_trace_log_fifo("freeing linked txdlh 0x"VXGE_OS_STXFMT
		    ", " "fragments %d list size %d",
		    (ptr_t) txdp, frags, list_size);

		current_txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdp);

#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_OS_MEMORY_CHECK)
		current_txdl_priv->allocated = 0;
#endif

		__hal_channel_dtr_free(&fifo->channel,
		    VXGE_HAL_FIFO_TXDL_INDEX(txdp));

		next_txdl_priv = current_txdl_priv->next_txdl_priv;

		vxge_assert(frags);
		frags -= list_size;

		if (next_txdl_priv) {
			/* Unlink before moving to the next list. */
			current_txdl_priv->next_txdl_priv = NULL;
			txdp = next_txdl_priv->first_txdp;
		} else {
			vxge_hal_trace_log_fifo(
			    "freed linked txdlh fragments %d list size %d",
			    frags, list_size);
			break;
		}
	}

	/* All fragments must have been consumed by the walk above. */
	vxge_assert(frags == 0);

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_fifo_txdl_free - Free descriptor.
 * @vpath_handle: Virtual path handle.
 * @txdlh: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * vxge_hal_fifo_txdl_reserve. The "free-ing" completes the descriptor's
 * lifecycle.
 *
 * After free-ing (see vxge_hal_fifo_txdl_free()) the descriptor again can
 * be:
 *
 * - reserved (vxge_hal_fifo_txdl_reserve);
 *
 * - posted (vxge_hal_fifo_txdl_post);
 *
 * - completed (vxge_hal_fifo_txdl_next_completed);
 *
 * - and recycled again (vxge_hal_fifo_txdl_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 */
void
vxge_hal_fifo_txdl_free(
    vxge_hal_vpath_h vpath_handle,
    vxge_hal_txdl_h txdlh)
{
	__hal_vpath_handle_t *vp = (__hal_vpath_handle_t *) vpath_handle;
	__hal_fifo_t *fifo;
	__hal_device_t *hldev;
	__hal_fifo_txdl_priv_t *txdl_priv;
	u32 max_frags;

#if defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	u32 flags = 0;
#endif

	vxge_assert((vpath_handle != NULL) && (txdlh != NULL));

	hldev = vp->vpath->hldev;

	vxge_hal_trace_log_fifo("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_fifo("vpath_handle = 0x"VXGE_OS_STXFMT", "
	    "txdlh = 0x"VXGE_OS_STXFMT,
	    (ptr_t) vpath_handle, (ptr_t) txdlh);

	fifo = (__hal_fifo_t *) vp->vpath->fifoh;

	vxge_assert(fifo != NULL);

	txdl_priv = VXGE_HAL_FIFO_HAL_PRIV(fifo, txdlh);

	max_frags = fifo->config->max_frags;

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_lock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_lock_irq(&fifo->channel.post_lock, flags);
#endif

	/*
	 * A multi-list txdl (alloc_frags > max_frags) must be freed list
	 * by list; a single-list txdl needs only one dtr free.
	 */
	if (txdl_priv->alloc_frags > max_frags) {
		vxge_hal_fifo_txd_t *dang_txdp = (vxge_hal_fifo_txd_t *)
		txdl_priv->dang_txdl;
		u32 dang_frags = txdl_priv->dang_frags;
		u32 alloc_frags = txdl_priv->alloc_frags;

		txdl_priv->dang_txdl = NULL;
		txdl_priv->dang_frags = 0;
		txdl_priv->alloc_frags = 0;

		/* txdlh must have a linked list of txdlh */
		vxge_assert(txdl_priv->next_txdl_priv);

		/* free any dangling txdlh first */
		if (dang_txdp) {
			vxge_hal_info_log_fifo(
			    "freeing dangled txdlh 0x"VXGE_OS_STXFMT" for %d "
			    "fragments", (ptr_t) dang_txdp, dang_frags);
			__hal_fifo_txdl_free_many(fifo,
			    dang_txdp, max_frags, dang_frags);
		}

		/* now free the reserved txdlh list */
		vxge_hal_info_log_fifo(
		    "freeing txdlh 0x"VXGE_OS_STXFMT" list of %d fragments",
		    (ptr_t) txdlh, alloc_frags);
		__hal_fifo_txdl_free_many(fifo,
		    (vxge_hal_fifo_txd_t *) txdlh, max_frags, alloc_frags);
	} else {
		__hal_channel_dtr_free(&fifo->channel,
		    VXGE_HAL_FIFO_TXDL_INDEX(txdlh));
	}

	fifo->channel.poll_bytes += txdl_priv->bytes_sent;

#if defined(VXGE_DEBUG_ASSERT) && defined(VXGE_OS_MEMORY_CHECK)
	txdl_priv->allocated = 0;
#endif

#if defined(VXGE_HAL_TX_MULTI_POST)
	vxge_os_spin_unlock(&fifo->channel.post_lock);
#elif defined(VXGE_HAL_TX_MULTI_POST_IRQ)
	vxge_os_spin_unlock_irq(&fifo->channel.post_lock, flags);
#endif

	vxge_hal_trace_log_fifo("<== %s:%s:%d Result: 0",
	    __FILE__, __func__, __LINE__);
}
Index: head/sys/dev/vxge/vxgehal/vxgehal-mrpcim.c
===================================================================
--- head/sys/dev/vxge/vxgehal/vxgehal-mrpcim.c	(revision 297861)
+++ head/sys/dev/vxge/vxgehal/vxgehal-mrpcim.c	(revision 297862)
@@ -1,6638 +1,6638 @@
/*-
 * Copyright(c) 2002-2011 Exar Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification are permitted provided the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * 3.
Neither the name of the Exar Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include /* * vxge_hal_mrpcim_serial_number_get - Returns the serial number * @devh: HAL device handle. * * Return the serial number */ const u8 * vxge_hal_mrpcim_serial_number_get(vxge_hal_device_h devh) { __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert(devh); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim("devh = 0x"VXGE_OS_STXFMT, (ptr_t) devh); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (NULL); } vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = 0", __FILE__, __func__, __LINE__); return (hldev->mrpcim->vpd_data.serial_num); } /* * vxge_hal_mrpcim_vpath_map_get - Returns the assigned vpaths map * @pdev: PCI device object. * @regh0: BAR0 mapped memory handle (Solaris), or simply PCI device @pdev * (Linux and the rest.) 
 * @bar0: Address of BAR0 in PCI config
 * @func: Function Number
 *
 * Returns the assigned vpaths map
 */
u64
vxge_hal_mrpcim_vpath_map_get(
    pci_dev_h pdev,
    pci_reg_h regh0,
    u8 *bar0,
    u32 func)
{
	u64 val64;
	vxge_hal_legacy_reg_t *legacy_reg;
	vxge_hal_toc_reg_t *toc_reg;
	vxge_hal_vpath_reg_t *vpath_reg;

	vxge_assert(bar0 != NULL);

	vxge_hal_trace_log_driver("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_driver(
	    "pdev = 0x"VXGE_OS_STXFMT", regh0 = 0x"VXGE_OS_STXFMT", "
	    "bar0 = 0x"VXGE_OS_STXFMT", func = %d",
	    (ptr_t) pdev, (ptr_t) regh0, (ptr_t) bar0, func);

	/* Locate the table of contents through the legacy register block. */
	legacy_reg = (vxge_hal_legacy_reg_t *)
	    vxge_hal_device_get_legacy_reg(pdev, regh0, bar0);

	val64 = vxge_os_pio_mem_read64(pdev, regh0,
	    &legacy_reg->toc_first_pointer);

	toc_reg = (vxge_hal_toc_reg_t *) ((void *)(bar0 + val64));

	/* Vpath 0's register block is used to query the map for @func. */
	val64 = vxge_os_pio_mem_read64(pdev, regh0,
	    &toc_reg->toc_vpath_pointer[0]);

	vpath_reg = (vxge_hal_vpath_reg_t *) ((void *)(bar0 + val64));

	val64 = __hal_vpath_vpath_map_get(pdev, regh0, 0, 0, func, vpath_reg);

	vxge_hal_trace_log_driver("<== %s:%s:%d Result = 0",
	    __FILE__, __func__, __LINE__);

	return (val64);
}

/*
 * vxge_hal_mrpcim_pcie_func_mode_set - Set PCI-E function mode
 * @devh: Device Handle.
 * @func_mode: PCI-E func mode. Please see vxge_hal_pcie_function_mode_e{}
 *
 * Set PCI-E function mode.
 *
 */
vxge_hal_status_e
vxge_hal_mrpcim_pcie_func_mode_set(
    vxge_hal_device_h devh,
    vxge_hal_pcie_function_mode_e func_mode)
{
	__hal_device_t *hldev = (__hal_device_t *) devh;
	u32 fmode;
	vxge_hal_status_e status;

	vxge_assert(hldev != NULL);

	vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	/*
	 * NOTE(review): entry/argument traces use different channels
	 * (mrpcim vs. driver) in this function — confirm intended.
	 */
	vxge_hal_trace_log_driver("devh = 0x"VXGE_OS_STXFMT
	    ",func_mode = %d", (ptr_t) devh, func_mode);

	if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
		return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
	}

	/* Translate the public enum into the RTS steering data0 encoding. */
	switch (func_mode) {
	case VXGE_HAL_PCIE_FUNC_MODE_SF1_VP17:
		fmode = VXGE_HAL_RTS_ACCESS_STEER_DATA0_FUNC_MODE_SF1_VP17;
		break;
	case VXGE_HAL_PCIE_FUNC_MODE_MF8_VP2:
		fmode = VXGE_HAL_RTS_ACCESS_STEER_DATA0_FUNC_MODE_MF8_VP2;
		break;
	case VXGE_HAL_PCIE_FUNC_MODE_SR17_VP1:
		fmode = VXGE_HAL_RTS_ACCESS_STEER_DATA0_FUNC_MODE_SR17_VP1;
		break;
	case VXGE_HAL_PCIE_FUNC_MODE_MR17_VP1:
		fmode = VXGE_HAL_RTS_ACCESS_STEER_DATA0_FUNC_MODE_MR17_VP1;
		break;
	case VXGE_HAL_PCIE_FUNC_MODE_MR8_VP2:
		fmode = VXGE_HAL_RTS_ACCESS_STEER_DATA0_FUNC_MODE_MR8_VP2;
		break;
	case VXGE_HAL_PCIE_FUNC_MODE_MF17_VP1:
		fmode = VXGE_HAL_RTS_ACCESS_STEER_DATA0_FUNC_MODE_MF17_VP1;
		break;
	case VXGE_HAL_PCIE_FUNC_MODE_SR8_VP2:
		fmode = VXGE_HAL_RTS_ACCESS_STEER_DATA0_FUNC_MODE_SR8_VP2;
		break;
	case VXGE_HAL_PCIE_FUNC_MODE_SR4_VP4:
		fmode = VXGE_HAL_RTS_ACCESS_STEER_DATA0_FUNC_MODE_SR4_VP4;
		break;
	case VXGE_HAL_PCIE_FUNC_MODE_MF2_VP8:
		fmode = VXGE_HAL_RTS_ACCESS_STEER_DATA0_FUNC_MODE_MF2_VP8;
		break;
	case VXGE_HAL_PCIE_FUNC_MODE_MF4_VP4:
		fmode = VXGE_HAL_RTS_ACCESS_STEER_DATA0_FUNC_MODE_MF4_VP4;
		break;
	case VXGE_HAL_PCIE_FUNC_MODE_MR4_VP4:
		fmode = VXGE_HAL_RTS_ACCESS_STEER_DATA0_FUNC_MODE_MR4_VP4;
		break;
	case VXGE_HAL_PCIE_FUNC_MODE_MF8P_VP2:
		fmode = VXGE_HAL_RTS_ACCESS_STEER_DATA0_FUNC_MODE_MF8P_VP2;
		break;
	default:
		vxge_hal_trace_log_driver("<== %s:%s:%d Result = %d", __FILE__,
		    __func__, __LINE__, VXGE_HAL_ERR_INVALID_TYPE);
		return (VXGE_HAL_ERR_INVALID_TYPE);
	}

	status = __hal_vpath_pcie_func_mode_set(hldev,
	    hldev->first_vp_id, fmode);

	vxge_hal_trace_log_driver("<== %s:%s:%d Result = 0",
	    __FILE__, __func__, __LINE__);

	return (status);
}

/*
 * vxge_hal_mrpcim_fw_upgrade - Upgrade firmware
 * @pdev: PCI device object.
 * @regh0: BAR0 mapped memory handle (Solaris), or simply PCI device @pdev
 * (Linux and the rest.)
 * @bar0: Address of BAR0 in PCI config
 * @buffer: Buffer containing F/W image
 * @length: F/W image length
 *
 * Upgrade firmware
 */
vxge_hal_status_e
vxge_hal_mrpcim_fw_upgrade(
    pci_dev_h pdev,
    pci_reg_h regh0,
    u8 *bar0,
    u8 *buffer,
    u32 length)
{
	u64 val64, vpath_mask;
	u32 host_type, func_id, i;
	vxge_hal_legacy_reg_t *legacy_reg;
	vxge_hal_toc_reg_t *toc_reg;
	vxge_hal_mrpcim_reg_t *mrpcim_reg;
	vxge_hal_common_reg_t *common_reg;
	vxge_hal_vpmgmt_reg_t *vpmgmt_reg;
	vxge_hal_vpath_reg_t *vpath_reg;
	vxge_hal_status_e status = VXGE_HAL_OK;

	vxge_assert((bar0 != NULL) && (buffer != NULL));

	vxge_hal_trace_log_driver("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_driver(
	    "pdev = 0x"VXGE_OS_STXFMT", regh0 = 0x"VXGE_OS_STXFMT", "
	    "bar0 = 0x"VXGE_OS_STXFMT", buffer = 0x"VXGE_OS_STXFMT", "
	    "length = %d", (ptr_t) pdev, (ptr_t) regh0, (ptr_t) bar0,
	    (ptr_t) buffer, length);

	/* Locate the table of contents through the legacy register block. */
	legacy_reg = (vxge_hal_legacy_reg_t *)
	    vxge_hal_device_get_legacy_reg(pdev, regh0, bar0);

	val64 = vxge_os_pio_mem_read64(pdev, regh0,
	    &legacy_reg->toc_first_pointer);

	toc_reg = (vxge_hal_toc_reg_t *) ((void *)(bar0 + val64));

	val64 = vxge_os_pio_mem_read64(pdev, regh0,
	    &toc_reg->toc_common_pointer);

	common_reg = (vxge_hal_common_reg_t *) ((void *)(bar0 + val64));

	vpath_mask = vxge_os_pio_mem_read64(pdev, regh0,
	    &common_reg->vpath_assignments);

	val64 = vxge_os_pio_mem_read64(pdev, regh0,
	    &common_reg->host_type_assignments);

	host_type = (u32)
	    VXGE_HAL_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);

	/*
	 * Upgrade through the first assigned vpath; its function must
	 * have mrpcim (privileged) access rights.
	 */
	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {

		if (!((vpath_mask) & mBIT(i)))
			continue;

		val64 = vxge_os_pio_mem_read64(pdev, regh0,
		    &toc_reg->toc_vpmgmt_pointer[i]);

		vpmgmt_reg = (vxge_hal_vpmgmt_reg_t *) ((void *)(bar0 + val64));

		val64 = vxge_os_pio_mem_read64(pdev, regh0,
		    &vpmgmt_reg->vpath_to_func_map_cfg1);

		func_id = (u32) VXGE_HAL_VPATH_TO_FUNC_MAP_CFG1_GET_CFG1(val64);

		if (!(__hal_device_access_rights_get(host_type, func_id) &
		    VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) {
			vxge_hal_trace_log_driver("<== %s:%s:%d Result = %d",
			    __FILE__, __func__, __LINE__,
			    VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
			return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
		}

		val64 = vxge_os_pio_mem_read64(pdev, regh0,
		    &toc_reg->toc_vpath_pointer[i]);

		vpath_reg = (vxge_hal_vpath_reg_t *) ((void *)(bar0 + val64));

		status = __hal_vpath_fw_upgrade(pdev, regh0, i, vpath_reg,
		    buffer, length);

		break;
	}

	if (status == VXGE_HAL_OK) {
		/* Issue a PF soft reset so the new firmware takes effect. */
		val64 = vxge_os_pio_mem_read64(pdev, regh0,
		    &toc_reg->toc_mrpcim_pointer);

		mrpcim_reg = (vxge_hal_mrpcim_reg_t *) ((void *)(bar0 + val64));

		val64 = vxge_os_pio_mem_read64(pdev, regh0,
		    &mrpcim_reg->sw_reset_cfg1);
		val64 |= VXGE_HAL_SW_RESET_CFG1_TYPE;

		vxge_os_pio_mem_write64(pdev, regh0, val64,
		    &mrpcim_reg->sw_reset_cfg1);

		vxge_os_pio_mem_write64(pdev, regh0,
		    VXGE_HAL_PF_SW_RESET_PF_SW_RESET(
		    VXGE_HAL_PF_SW_RESET_COMMAND),
		    &mrpcim_reg->bf_sw_reset);

		/* Give the adapter time to come out of reset. */
		vxge_os_mdelay(100);
	}

	vxge_hal_trace_log_driver("<== %s:%s:%d Result = %d",
	    __FILE__, __func__, __LINE__, status);

	return (status);
}

/*
 * vxge_hal_mrpcim_vpath_qos_set - Set the priority, Guaranteed and maximum
 * bandwidth for a vpath.
 * @devh: HAL device handle.
 * @vp_id: Vpath Id.
 * @priority: Priority
 * @min_bandwidth: Minimum Bandwidth
 * @max_bandwidth: Maximum Bandwidth
 *
 * Set the Guaranteed and maximum bandwidth for a given vpath
 *
 */
vxge_hal_status_e
vxge_hal_mrpcim_vpath_qos_set(
    vxge_hal_device_h devh,
    u32 vp_id,
    u32 priority,
    u32 min_bandwidth,
    u32 max_bandwidth)
{
	vxge_hal_status_e status = VXGE_HAL_OK;
	vxge_hal_vpath_qos_config_t config;
	__hal_device_t *hldev = (__hal_device_t *) devh;

	vxge_assert(devh != NULL);

	vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_mrpcim("devh = 0x"VXGE_OS_STXFMT", vp_id = %d, "
	    "priority = %d, min_bandwidth = %d, max_bandwidth = %d",
	    (ptr_t) devh, vp_id, priority, min_bandwidth, max_bandwidth);

	if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
		return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
	}

	if (vp_id >= VXGE_HAL_MAX_VIRTUAL_PATHS) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_VPATH_NOT_AVAILABLE);
		return (VXGE_HAL_ERR_VPATH_NOT_AVAILABLE);
	}

	/* Validate the requested settings before recording them. */
	config.priority = priority;
	config.min_bandwidth = min_bandwidth;
	config.max_bandwidth = max_bandwidth;

	if ((status = __hal_vpath_qos_config_check(&config)) != VXGE_HAL_OK) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);
	}

	/*
	 * NOTE(review): status is always VXGE_HAL_OK at this point (the
	 * failure case returned above), so this guard is redundant.
	 */
	if (status == VXGE_HAL_OK) {
		hldev->header.config.mrpcim_config.vp_qos[vp_id].priority =
		    priority;
		hldev->header.config.mrpcim_config.vp_qos[vp_id].min_bandwidth =
		    min_bandwidth;
		hldev->header.config.mrpcim_config.vp_qos[vp_id].max_bandwidth =
		    max_bandwidth;
	}

	vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
	    __FILE__, __func__, __LINE__, status);

	return (status);
}

/*
 * vxge_hal_mrpcim_vpath_qos_get - Get the priority, Guaranteed and maximum
 * bandwidth for a vpath.
 * @devh: HAL device handle.
 * @vp_id: Vpath Id.
* @priority: Buffer to return Priority * @min_bandwidth: Buffer to return Minimum Bandwidth * @max_bandwidth: Buffer to return Maximum Bandwidth * * Get the Guaranteed and maximum bandwidth for a given vpath * */ vxge_hal_status_e vxge_hal_mrpcim_vpath_qos_get( vxge_hal_device_h devh, u32 vp_id, u32 *priority, u32 *min_bandwidth, u32 *max_bandwidth) { vxge_hal_status_e status = VXGE_HAL_OK; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert(devh != NULL); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim( "devh = 0x"VXGE_OS_STXFMT", vp_id = %d, " "priority = 0x"VXGE_OS_STXFMT", " "min_bandwidth = 0x"VXGE_OS_STXFMT", " "max_bandwidth = 0x"VXGE_OS_STXFMT, (ptr_t) devh, vp_id, (ptr_t) priority, (ptr_t) min_bandwidth, (ptr_t) max_bandwidth); if (vp_id >= VXGE_HAL_MAX_VIRTUAL_PATHS) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_VPATH_NOT_AVAILABLE); return (VXGE_HAL_ERR_VPATH_NOT_AVAILABLE); } *priority = hldev->header.config.mrpcim_config.vp_qos[vp_id].min_bandwidth; *min_bandwidth = hldev->header.config.mrpcim_config.vp_qos[vp_id].min_bandwidth; *max_bandwidth = hldev->header.config.mrpcim_config.vp_qos[vp_id].max_bandwidth; vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } /* * __hal_mrpcim_mdio_access - Access the MDIO device * @devh: HAL Device handle. * @port: Port id * @operation: Type of operation * @device: MMD device address * @addr: MMD address * @data: MMD data * * Access the data from a MDIO Device. 
* */ vxge_hal_status_e __hal_mrpcim_mdio_access( vxge_hal_device_h devh, u32 port, u32 operation, u32 device, u16 addr, u16 *data) { u64 val64; u32 prtad; vxge_hal_status_e status = VXGE_HAL_OK; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert((devh != NULL) && (data != NULL)); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim( "devh = 0x"VXGE_OS_STXFMT", operation = %d, " "device = %d, addr = %d, data = 0x"VXGE_OS_STXFMT, (ptr_t) devh, operation, device, addr, (ptr_t) data); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } if (device == VXGE_HAL_MDIO_MGR_ACCESS_PORT_DEVAD_DTE_XS) { if (port == 0) prtad = hldev->mrpcim->mdio_dte_prtad0; else prtad = hldev->mrpcim->mdio_dte_prtad1; } else { if (port == 0) prtad = hldev->mrpcim->mdio_phy_prtad0; else prtad = hldev->mrpcim->mdio_phy_prtad1; } val64 = VXGE_HAL_MDIO_MGR_ACCESS_PORT_STROBE_ONE | VXGE_HAL_MDIO_MGR_ACCESS_PORT_OP_TYPE(operation) | VXGE_HAL_MDIO_MGR_ACCESS_PORT_DEVAD(device) | VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR(addr) | VXGE_HAL_MDIO_MGR_ACCESS_PORT_DATA(*data) | VXGE_HAL_MDIO_MGR_ACCESS_PORT_ST_PATTERN(0) | VXGE_HAL_MDIO_MGR_ACCESS_PORT_PREAMBLE | VXGE_HAL_MDIO_MGR_ACCESS_PORT_PRTAD(prtad) | VXGE_HAL_MDIO_MGR_ACCESS_PORT_STROBE_TWO; vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->mdio_mgr_access_port[port]); vxge_os_wmb(); status = vxge_hal_device_register_poll(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->mdio_mgr_access_port[port], 0, VXGE_HAL_MDIO_MGR_ACCESS_PORT_STROBE_ONE | VXGE_HAL_MDIO_MGR_ACCESS_PORT_STROBE_TWO, hldev->header.config.device_poll_millis); if ((status == VXGE_HAL_OK) && ((operation == VXGE_HAL_MDIO_MGR_ACCESS_PORT_OP_TYPE_READ_INCR) || (operation == 
VXGE_HAL_MDIO_MGR_ACCESS_PORT_OP_TYPE_READ) || (operation == VXGE_HAL_MDIO_MGR_ACCESS_PORT_OP_TYPE_ADDR_READ_INCR) || (operation == VXGE_HAL_MDIO_MGR_ACCESS_PORT_OP_TYPE_ADDR_READ))) { val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->mdio_mgr_access_port[port]); *data = (u16) VXGE_HAL_MDIO_MGR_ACCESS_GET_PORT_DATA(val64); } else { *data = 0; } vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (VXGE_HAL_OK); } /* * vxge_hal_mrpcim_intr_enable - Enable the interrupts on mrpcim. * @devh: HAL device handle. * * Enable mrpcim interrupts * * See also: vxge_hal_mrpcim_intr_disable(). */ vxge_hal_status_e vxge_hal_mrpcim_intr_enable(vxge_hal_device_h devh) { u32 i; u64 val64; vxge_hal_status_e status = VXGE_HAL_OK; vxge_hal_mrpcim_reg_t *mrpcim_reg; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert(devh); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim("devh = 0x"VXGE_OS_STXFMT, (ptr_t) devh); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } mrpcim_reg = hldev->mrpcim_reg; VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->ini_errors_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->dma_errors_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->tgt_errors_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->config_errors_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->crdt_errors_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->mrpcim_general_errors_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->pll_errors_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->mrpcim_ppif_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->dbecc_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->general_err_reg); 
VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->pcipif_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->pda_alarm_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->pcc_error_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->lso_error_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->sm_error_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->rtdma_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->rc_alarm_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->rxdrm_sm_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->rxdcm_sm_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->rxdwm_sm_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->rda_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->rda_ecc_db_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->rqa_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->frf_alarm_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->rocrc_alarm_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->wde0_alarm_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->wde1_alarm_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->wde2_alarm_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->wde3_alarm_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->wrdma_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->g3cmct_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->g3cmct_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->gsscc_err_reg); for (i = 0; i < 3; i++) { VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->gssc_err0_reg[i]); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->gssc_err1_reg[i]); } VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->gcmg1_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->gxtmc_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->gcp_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->cmc_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->gcmg2_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->g3ifcmd_cml_err_reg); 
VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->g3ifcmd_cml_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->g3ifcmd_cmu_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->g3ifcmd_cmu_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->psscc_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->pcmg1_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->pxtmc_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->cp_exc_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->cp_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->pcmg2_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->dam_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->pcmg3_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->xmac_gen_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->xgxs_gen_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->asic_ntwk_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->xgmac_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->rxmac_ecc_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->rxmac_various_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->rxmac_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->txmac_gen_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->txmac_ecc_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->tmac_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->g3ifcmd_fb_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->g3ifcmd_fb_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->mc_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->grocrc_alarm_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->fau_ecc_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->mc_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->g3fbct_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->g3fbct_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->orp_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->ptm_alarm_reg); 
VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->tpa_error_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->tpa_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->kdfc_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->doorbell_int_status); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->tim_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->msg_exc_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->msg_err_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->msg_err2_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->msg_err3_reg); VXGE_HAL_MRPCIM_ERROR_REG_CLEAR(&mrpcim_reg->msg_int_status); vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &mrpcim_reg->mrpcim_general_int_status); /* unmask interrupts */ val64 = VXGE_HAL_INI_ERRORS_REG_DCPL_FSM_ERR | VXGE_HAL_INI_ERRORS_REG_INI_BUF_DB_ERR | VXGE_HAL_INI_ERRORS_REG_INI_DATA_OVERFLOW | VXGE_HAL_INI_ERRORS_REG_INI_HDR_OVERFLOW; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->ini_errors_mask); val64 = VXGE_HAL_DMA_ERRORS_REG_RDARB_FSM_ERR | VXGE_HAL_DMA_ERRORS_REG_WRARB_FSM_ERR | VXGE_HAL_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_OVERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_WRDMA_WR_HDR_UNDERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_OVERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_WRDMA_WR_DATA_UNDERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_MSG_WR_HDR_OVERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_MSG_WR_HDR_UNDERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_MSG_WR_DATA_OVERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_MSG_WR_DATA_UNDERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_STATS_WR_HDR_OVERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_STATS_WR_HDR_UNDERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_STATS_WR_DATA_OVERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_STATS_WR_DATA_UNDERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_OVERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_RTDMA_WR_HDR_UNDERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_OVERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_RTDMA_WR_DATA_UNDERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_OVERFLOW | 
VXGE_HAL_DMA_ERRORS_REG_DMA_WRDMA_RD_HDR_UNDERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_OVERFLOW | VXGE_HAL_DMA_ERRORS_REG_DMA_RTDMA_RD_HDR_UNDERFLOW | VXGE_HAL_DMA_ERRORS_REG_DBLGEN_FSM_ERR | VXGE_HAL_DMA_ERRORS_REG_DBLGEN_CREDIT_FSM_ERR | VXGE_HAL_DMA_ERRORS_REG_DBLGEN_DMA_WRR_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->dma_errors_mask); val64 = VXGE_HAL_TGT_ERRORS_REG_TGT_REQ_FSM_ERR | VXGE_HAL_TGT_ERRORS_REG_TGT_CPL_FSM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->tgt_errors_mask); val64 = VXGE_HAL_CONFIG_ERRORS_REG_I2C_MAIN_FSM_ERR | VXGE_HAL_CONFIG_ERRORS_REG_I2C_REG_FSM_ERR | VXGE_HAL_CONFIG_ERRORS_REG_CFGM_I2C_TIMEOUT | VXGE_HAL_CONFIG_ERRORS_REG_RIC_I2C_TIMEOUT | VXGE_HAL_CONFIG_ERRORS_REG_CFGM_FSM_ERR | VXGE_HAL_CONFIG_ERRORS_REG_RIC_FSM_ERR | VXGE_HAL_CONFIG_ERRORS_REG_PIFM_TIMEOUT | VXGE_HAL_CONFIG_ERRORS_REG_PIFM_FSM_ERR | VXGE_HAL_CONFIG_ERRORS_REG_PIFM_TO_FSM_ERR | VXGE_HAL_CONFIG_ERRORS_REG_RIC_RIC_RD_TIMEOUT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->config_errors_mask); val64 = VXGE_HAL_CRDT_ERRORS_REG_WRCRDTARB_FSM_ERR | VXGE_HAL_CRDT_ERRORS_REG_WRCRDTARB_INTCTL_ILLEGAL_CRD_DEAL | VXGE_HAL_CRDT_ERRORS_REG_WRCRDTARB_PDA_ILLEGAL_CRD_DEAL | VXGE_HAL_CRDT_ERRORS_REG_WRCRDTARB_PCI_MSG_ILLEGAL_CRD_DEAL | VXGE_HAL_CRDT_ERRORS_REG_RDCRDTARB_FSM_ERR | VXGE_HAL_CRDT_ERRORS_REG_RDCRDTARB_RDA_ILLEGAL_CRD_DEAL | VXGE_HAL_CRDT_ERRORS_REG_RDCRDTARB_PDA_ILLEGAL_CRD_DEAL | VXGE_HAL_CRDT_ERRORS_REG_RDCRDTARB_DBLGEN_ILLEGAL_CRD_DEAL; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->crdt_errors_mask); val64 = VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_STATSB_FSM_ERR | VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_XGEN_FSM_ERR | VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_XMEM_FSM_ERR | VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_KDFCCTL_FSM_ERR | VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_MRIOVCTL_FSM_ERR | VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_SPI_FLSH_ERR | VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_ACK_ERR | 
VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_SPI_IIC_CHKSUM_ERR | VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_INI_SERR_DET | VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSIX_FSM_ERR | VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_INTCTL_MSI_OVERFLOW | VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_PPIF_PCI_NOT_FLUSH_SW_RESET | VXGE_HAL_MRPCIM_GENERAL_ERRORS_REG_PPIF_SW_RESET_FSM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->mrpcim_general_errors_mask); val64 = VXGE_HAL_PLL_ERRORS_REG_CORE_CMG_PLL_OOL | VXGE_HAL_PLL_ERRORS_REG_CORE_FB_PLL_OOL | VXGE_HAL_PLL_ERRORS_REG_CORE_X_PLL_OOL; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->pll_errors_mask); val64 = VXGE_HAL_MRPCIM_PPIF_INT_STATUS_INI_ERRORS_INI_INT | VXGE_HAL_MRPCIM_PPIF_INT_STATUS_DMA_ERRORS_DMA_INT | VXGE_HAL_MRPCIM_PPIF_INT_STATUS_TGT_ERRORS_TGT_INT | VXGE_HAL_MRPCIM_PPIF_INT_STATUS_CONFIG_ERRORS_CONFIG_INT | VXGE_HAL_MRPCIM_PPIF_INT_STATUS_CRDT_ERRORS_CRDT_INT | VXGE_HAL_MRPCIM_PPIF_INT_STATUS_MRPCIM_GENERAL_ERRORS_GENERAL_INT | VXGE_HAL_MRPCIM_PPIF_INT_STATUS_PLL_ERRORS_PLL_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->mrpcim_ppif_int_mask); val64 = VXGE_HAL_DBECC_ERR_REG_PCI_RETRY_BUF_DB_ERR | VXGE_HAL_DBECC_ERR_REG_PCI_RETRY_SOT_DB_ERR | VXGE_HAL_DBECC_ERR_REG_PCI_P_HDR_DB_ERR | VXGE_HAL_DBECC_ERR_REG_PCI_P_DATA_DB_ERR | VXGE_HAL_DBECC_ERR_REG_PCI_NP_HDR_DB_ERR | VXGE_HAL_DBECC_ERR_REG_PCI_NP_DATA_DB_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->dbecc_err_mask); val64 = VXGE_HAL_GENERAL_ERR_REG_PCI_LINK_RST_FSM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->general_err_mask); val64 = VXGE_HAL_PCIPIF_INT_STATUS_DBECC_ERR_DBECC_ERR_INT | VXGE_HAL_PCIPIF_INT_STATUS_GENERAL_ERR_GENERAL_ERR_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->pcipif_int_mask); val64 = VXGE_HAL_PDA_ALARM_REG_PDA_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->pda_alarm_mask); val64 = 0; for (i = 0; i < 8; i++) { val64 |= VXGE_HAL_PCC_ERROR_REG_PCC_PCC_FRM_BUF_DBE(i) | 
VXGE_HAL_PCC_ERROR_REG_PCC_PCC_TXDO_DBE(i) | VXGE_HAL_PCC_ERROR_REG_PCC_PCC_FSM_ERR_ALARM(i) | VXGE_HAL_PCC_ERROR_REG_PCC_PCC_SERR(i); } VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->pcc_error_mask); val64 = 0; for (i = 0; i < 8; i++) { val64 |= VXGE_HAL_LSO_ERROR_REG_PCC_LSO_FSM_ERR_ALARM(i); } VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->lso_error_mask); val64 = VXGE_HAL_SM_ERROR_REG_SM_FSM_ERR_ALARM; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->sm_error_mask); val64 = VXGE_HAL_RTDMA_INT_STATUS_PDA_ALARM_PDA_INT | VXGE_HAL_RTDMA_INT_STATUS_PCC_ERROR_PCC_INT | VXGE_HAL_RTDMA_INT_STATUS_LSO_ERROR_LSO_INT | VXGE_HAL_RTDMA_INT_STATUS_SM_ERROR_SM_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->rtdma_int_mask); val64 = VXGE_HAL_RC_ALARM_REG_FTC_SM_ERR | VXGE_HAL_RC_ALARM_REG_FTC_SM_PHASE_ERR | VXGE_HAL_RC_ALARM_REG_BTDWM_SM_ERR | VXGE_HAL_RC_ALARM_REG_BTC_SM_ERR | VXGE_HAL_RC_ALARM_REG_BTDCM_SM_ERR | VXGE_HAL_RC_ALARM_REG_BTDRM_SM_ERR | VXGE_HAL_RC_ALARM_REG_RMM_RXD_RC_ECC_DB_ERR | VXGE_HAL_RC_ALARM_REG_RHS_RXD_RHS_ECC_DB_ERR | VXGE_HAL_RC_ALARM_REG_RMM_SM_ERR | VXGE_HAL_RC_ALARM_REG_BTC_VPATH_MISMATCH_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->rc_alarm_mask); val64 = 0; for (i = 0; i < 17; i++) { val64 |= VXGE_HAL_RXDRM_SM_ERR_REG_PRC_VP(i); } VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->rxdrm_sm_err_mask); val64 = 0; for (i = 0; i < 17; i++) { val64 |= VXGE_HAL_RXDCM_SM_ERR_REG_PRC_VP(i); } VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->rxdcm_sm_err_mask); val64 = 0; for (i = 0; i < 17; i++) { val64 |= VXGE_HAL_RXDWM_SM_ERR_REG_PRC_VP(i); } VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->rxdwm_sm_err_mask); val64 = VXGE_HAL_RDA_ERR_REG_RDA_SM0_ERR_ALARM | VXGE_HAL_RDA_ERR_REG_RDA_RXD_ECC_DB_ERR | VXGE_HAL_RDA_ERR_REG_RDA_FRM_ECC_DB_ERR | VXGE_HAL_RDA_ERR_REG_RDA_UQM_ECC_DB_ERR | VXGE_HAL_RDA_ERR_REG_RDA_IMM_ECC_DB_ERR | VXGE_HAL_RDA_ERR_REG_RDA_TIM_ECC_DB_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, 
&mrpcim_reg->rda_err_mask); val64 = 0; for (i = 0; i < 17; i++) { val64 |= VXGE_HAL_RDA_ECC_DB_REG_RDA_RXD_ERR(i); } VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->rda_ecc_db_mask); val64 = VXGE_HAL_RQA_ERR_REG_RQA_SM_ERR_ALARM; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->rqa_err_mask); val64 = 0; for (i = 0; i < 17; i++) { val64 |= VXGE_HAL_FRF_ALARM_REG_PRC_VP_FRF_SM_ERR(i); } VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->frf_alarm_mask); val64 = VXGE_HAL_ROCRC_ALARM_REG_QCQ_QCC_BYP_ECC_DB | VXGE_HAL_ROCRC_ALARM_REG_NOA_NMA_SM_ERR | VXGE_HAL_ROCRC_ALARM_REG_NOA_IMMM_ECC_DB | VXGE_HAL_ROCRC_ALARM_REG_UDQ_UMQM_ECC_DB | VXGE_HAL_ROCRC_ALARM_REG_NOA_RCBM_ECC_DB | VXGE_HAL_ROCRC_ALARM_REG_NOA_WCT_CMD_FIFO_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->rocrc_alarm_mask); val64 = VXGE_HAL_WDE0_ALARM_REG_WDE0_DCC_SM_ERR | VXGE_HAL_WDE0_ALARM_REG_WDE0_PRM_SM_ERR | VXGE_HAL_WDE0_ALARM_REG_WDE0_CP_SM_ERR | VXGE_HAL_WDE0_ALARM_REG_WDE0_CP_CMD_ERR | VXGE_HAL_WDE0_ALARM_REG_WDE0_PCR_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->wde0_alarm_mask); val64 = VXGE_HAL_WDE1_ALARM_REG_WDE1_DCC_SM_ERR | VXGE_HAL_WDE1_ALARM_REG_WDE1_PRM_SM_ERR | VXGE_HAL_WDE1_ALARM_REG_WDE1_CP_SM_ERR | VXGE_HAL_WDE1_ALARM_REG_WDE1_CP_CMD_ERR | VXGE_HAL_WDE1_ALARM_REG_WDE1_PCR_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->wde1_alarm_mask); val64 = VXGE_HAL_WDE2_ALARM_REG_WDE2_DCC_SM_ERR | VXGE_HAL_WDE2_ALARM_REG_WDE2_PRM_SM_ERR | VXGE_HAL_WDE2_ALARM_REG_WDE2_CP_SM_ERR | VXGE_HAL_WDE2_ALARM_REG_WDE2_CP_CMD_ERR | VXGE_HAL_WDE2_ALARM_REG_WDE2_PCR_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->wde2_alarm_mask); val64 = VXGE_HAL_WDE3_ALARM_REG_WDE3_DCC_SM_ERR | VXGE_HAL_WDE3_ALARM_REG_WDE3_PRM_SM_ERR | VXGE_HAL_WDE3_ALARM_REG_WDE3_CP_SM_ERR | VXGE_HAL_WDE3_ALARM_REG_WDE3_CP_CMD_ERR | VXGE_HAL_WDE3_ALARM_REG_WDE3_PCR_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->wde3_alarm_mask); val64 = 
VXGE_HAL_WRDMA_INT_STATUS_RC_ALARM_RC_INT | VXGE_HAL_WRDMA_INT_STATUS_RXDRM_SM_ERR_RXDRM_INT | VXGE_HAL_WRDMA_INT_STATUS_RXDCM_SM_ERR_RXDCM_SM_INT | VXGE_HAL_WRDMA_INT_STATUS_RXDWM_SM_ERR_RXDWM_INT | VXGE_HAL_WRDMA_INT_STATUS_RDA_ERR_RDA_INT | VXGE_HAL_WRDMA_INT_STATUS_RDA_ECC_DB_RDA_ECC_DB_INT | VXGE_HAL_WRDMA_INT_STATUS_FRF_ALARM_FRF_INT | VXGE_HAL_WRDMA_INT_STATUS_ROCRC_ALARM_ROCRC_INT | VXGE_HAL_WRDMA_INT_STATUS_WDE0_ALARM_WDE0_INT | VXGE_HAL_WRDMA_INT_STATUS_WDE1_ALARM_WDE1_INT | VXGE_HAL_WRDMA_INT_STATUS_WDE2_ALARM_WDE2_INT | VXGE_HAL_WRDMA_INT_STATUS_WDE3_ALARM_WDE3_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->wrdma_int_mask); val64 = VXGE_HAL_G3CMCT_ERR_REG_G3IF_SM_ERR | VXGE_HAL_G3CMCT_ERR_REG_G3IF_GDDR3_DECC | VXGE_HAL_G3CMCT_ERR_REG_G3IF_GDDR3_U_DECC | VXGE_HAL_G3CMCT_ERR_REG_G3IF_CTRL_FIFO_DECC; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->g3cmct_err_mask); val64 = VXGE_HAL_G3CMCT_INT_STATUS_ERR_G3IF_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->g3cmct_int_mask); val64 = VXGE_HAL_GSSCC_ERR_REG_SSCC_SSR_DB_ERR(0x3) | VXGE_HAL_GSSCC_ERR_REG_SSCC_TSR_DB_ERR(0x3f) | VXGE_HAL_GSSCC_ERR_REG_SSCC_CP2STE_UFLOW_ERR | VXGE_HAL_GSSCC_ERR_REG_SSCC_CP2TTE_UFLOW_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->gsscc_err_mask); for (i = 0; i < 3; i++) { val64 = VXGE_HAL_GSSC_ERR0_REG_SSCC_STATE_DB_ERR(0xff) | VXGE_HAL_GSSC_ERR0_REG_SSCC_CM_RESP_DB_ERR(0xf) | VXGE_HAL_GSSC_ERR0_REG_SSCC_SSR_RESP_DB_ERR(0x3) | VXGE_HAL_GSSC_ERR0_REG_SSCC_TSR_RESP_DB_ERR(0x3f); VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->gssc_err0_mask[i]); val64 = VXGE_HAL_GSSC_ERR1_REG_SSCC_CM_RESP_DB_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_SCREQ_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_CM_RESP_OFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_CM_RESP_R_WN_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_CM_RESP_UFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_CM_REQ_OFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_CM_REQ_UFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_FSM_OFLOW_ERR | 
VXGE_HAL_GSSC_ERR1_REG_SSCC_FSM_UFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_SSR_REQ_OFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_SSR_REQ_UFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_SSR_RESP_OFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_SSR_RESP_R_WN_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_SSR_RESP_UFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_TSR_REQ_OFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_TSR_REQ_UFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_TSR_RESP_OFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_TSR_RESP_R_WN_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_TSR_RESP_UFLOW_ERR | VXGE_HAL_GSSC_ERR1_REG_SSCC_SCRESP_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->gssc_err1_mask[i]); } val64 = VXGE_HAL_GCMG1_INT_STATUS_GSSCC_ERR_GSSCC_INT | VXGE_HAL_GCMG1_INT_STATUS_GSSC0_ERR0_GSSC0_0_INT | VXGE_HAL_GCMG1_INT_STATUS_GSSC0_ERR1_GSSC0_1_INT | VXGE_HAL_GCMG1_INT_STATUS_GSSC1_ERR0_GSSC1_0_INT | VXGE_HAL_GCMG1_INT_STATUS_GSSC1_ERR1_GSSC1_1_INT | VXGE_HAL_GCMG1_INT_STATUS_GSSC2_ERR0_GSSC2_0_INT | VXGE_HAL_GCMG1_INT_STATUS_GSSC2_ERR1_GSSC2_1_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->gcmg1_int_mask); val64 = VXGE_HAL_GXTMC_ERR_REG_XTMC_BDT_MEM_DB_ERR(0xf) | VXGE_HAL_GXTMC_ERR_REG_XTMC_CMC_RD_DATA_DB_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_REQ_FIFO_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_CMI_WRP_FIFO_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_CMI_WRP_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_CMI_RRP_FIFO_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_CMI_RRP_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_CMI_DATA_SM_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_CMI_CMC0_IF_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_BDT_CMI_CFC_SM_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_OVERFLOW | VXGE_HAL_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_CREDIT_UNDERFLOW | VXGE_HAL_GXTMC_ERR_REG_XTMC_BDT_CMI_DFETCH_SM_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_OVERFLOW | VXGE_HAL_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_CREDIT_UNDERFLOW | 
VXGE_HAL_GXTMC_ERR_REG_XTMC_BDT_CMI_RCTRL_SM_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_SM_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_BDT_CMI_WCOMPL_TAG_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_SM_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_BDT_CMI_WREQ_FIFO_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_POP_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_XTMC_BDT_CMI_OP_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFETCH_OP_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_XTMC_BDT_DFIFO_ERR | VXGE_HAL_GXTMC_ERR_REG_XTMC_CMI_ARB_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->gxtmc_err_mask); val64 = VXGE_HAL_GCP_ERR_REG_CP_H2L2CP_FIFO_ERR | VXGE_HAL_GCP_ERR_REG_CP_STC2CP_FIFO_ERR | VXGE_HAL_GCP_ERR_REG_CP_STE2CP_FIFO_ERR | VXGE_HAL_GCP_ERR_REG_CP_TTE2CP_FIFO_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->gcp_err_mask); val64 = VXGE_HAL_CMC_ERR_REG_CMC_CMC_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->cmc_err_mask); val64 = VXGE_HAL_GCMG2_INT_STATUS_GXTMC_ERR_GXTMC_INT | VXGE_HAL_GCMG2_INT_STATUS_GCP_ERR_GCP_INT | VXGE_HAL_GCMG2_INT_STATUS_CMC_ERR_CMC_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->gcmg2_int_mask); val64 = VXGE_HAL_G3IFCMD_CML_ERR_REG_G3IF_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->g3ifcmd_cml_err_mask); val64 = VXGE_HAL_G3IFCMD_CML_INT_STATUS_ERR_G3IF_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->g3ifcmd_cml_int_mask); val64 = VXGE_HAL_G3IFCMD_CMU_ERR_REG_G3IF_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->g3ifcmd_cmu_err_mask); val64 = VXGE_HAL_G3IFCMD_CMU_INT_STATUS_ERR_G3IF_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->g3ifcmd_cmu_int_mask); val64 = VXGE_HAL_PSSCC_ERR_REG_SSCC_CP2STE_OFLOW_ERR | VXGE_HAL_PSSCC_ERR_REG_SSCC_CP2TTE_OFLOW_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->psscc_err_mask); val64 = VXGE_HAL_PCMG1_INT_STATUS_PSSCC_ERR_PSSCC_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->pcmg1_int_mask); val64 = 
VXGE_HAL_PXTMC_ERR_REG_XTMC_XT_PIF_SRAM_DB_ERR(0x3) | VXGE_HAL_PXTMC_ERR_REG_XTMC_MPT_REQ_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_MPT_PRSP_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_MPT_WRSP_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_UPT_REQ_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_UPT_PRSP_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_UPT_WRSP_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CPT_REQ_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CPT_PRSP_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CPT_WRSP_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_REQ_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_REQ_DATA_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_WR_RSP_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_RD_RSP_FIFO_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_MPT_REQ_SHADOW_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_MPT_RSP_SHADOW_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_UPT_REQ_SHADOW_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_UPT_RSP_SHADOW_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CPT_REQ_SHADOW_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CPT_RSP_SHADOW_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_XIL_SHADOW_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_ARB_SHADOW_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_RAM_SHADOW_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CMW_SHADOW_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CMR_SHADOW_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_MPT_REQ_FSM_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_MPT_RSP_FSM_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_UPT_REQ_FSM_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_UPT_RSP_FSM_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CPT_REQ_FSM_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CPT_RSP_FSM_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_XIL_FSM_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_ARB_FSM_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CMW_FSM_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CMR_FSM_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_MXP_RD_PROT_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_UXP_RD_PROT_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CXP_RD_PROT_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_MXP_WR_PROT_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_UXP_WR_PROT_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CXP_WR_PROT_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_MXP_INV_ADDR_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_UXP_INV_ADDR_ERR | 
VXGE_HAL_PXTMC_ERR_REG_XTMC_CXP_INV_ADDR_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CP2BDT_DFIFO_PUSH_ERR | VXGE_HAL_PXTMC_ERR_REG_XTMC_CP2BDT_RFIFO_PUSH_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->pxtmc_err_mask); val64 = VXGE_HAL_CP_EXC_REG_CP_CP_CAUSE_CRIT_INT | VXGE_HAL_CP_EXC_REG_CP_CP_SERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->cp_exc_mask); val64 = VXGE_HAL_CP_ERR_REG_CP_CP_DCACHE_DB_ERR(0xff) | VXGE_HAL_CP_ERR_REG_CP_CP_ICACHE_DB_ERR(0x3) | VXGE_HAL_CP_ERR_REG_CP_CP_DTAG_DB_ERR | VXGE_HAL_CP_ERR_REG_CP_CP_ITAG_DB_ERR | VXGE_HAL_CP_ERR_REG_CP_CP_TRACE_DB_ERR | VXGE_HAL_CP_ERR_REG_CP_DMA2CP_DB_ERR | VXGE_HAL_CP_ERR_REG_CP_MP2CP_DB_ERR | VXGE_HAL_CP_ERR_REG_CP_QCC2CP_DB_ERR | VXGE_HAL_CP_ERR_REG_CP_STC2CP_DB_ERR(0x3) | VXGE_HAL_CP_ERR_REG_CP_H2L2CP_FIFO_ERR | VXGE_HAL_CP_ERR_REG_CP_STC2CP_FIFO_ERR | VXGE_HAL_CP_ERR_REG_CP_STE2CP_FIFO_ERR | VXGE_HAL_CP_ERR_REG_CP_TTE2CP_FIFO_ERR | VXGE_HAL_CP_ERR_REG_CP_SWIF2CP_FIFO_ERR | VXGE_HAL_CP_ERR_REG_CP_CP2DMA_FIFO_ERR | VXGE_HAL_CP_ERR_REG_CP_DAM2CP_FIFO_ERR | VXGE_HAL_CP_ERR_REG_CP_MP2CP_FIFO_ERR | VXGE_HAL_CP_ERR_REG_CP_QCC2CP_FIFO_ERR | VXGE_HAL_CP_ERR_REG_CP_DMA2CP_FIFO_ERR | VXGE_HAL_CP_ERR_REG_CP_CP_WAKE_FSM_INTEGRITY_ERR | VXGE_HAL_CP_ERR_REG_CP_CP_PMON_FSM_INTEGRITY_ERR | VXGE_HAL_CP_ERR_REG_CP_DMA_RD_SHADOW_ERR | VXGE_HAL_CP_ERR_REG_CP_PIFT_CREDIT_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->cp_err_mask); val64 = VXGE_HAL_PCMG2_INT_STATUS_PXTMC_ERR_PXTMC_INT | VXGE_HAL_PCMG2_INT_STATUS_CP_EXC_CP_XT_EXC_INT | VXGE_HAL_PCMG2_INT_STATUS_CP_ERR_CP_ERR_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->pcmg2_int_mask); val64 = VXGE_HAL_DAM_ERR_REG_DAM_RDSB_ECC_DB_ERR | VXGE_HAL_DAM_ERR_REG_DAM_WRSB_ECC_DB_ERR | VXGE_HAL_DAM_ERR_REG_DAM_HPPEDAT_ECC_DB_ERR | VXGE_HAL_DAM_ERR_REG_DAM_LPPEDAT_ECC_DB_ERR | VXGE_HAL_DAM_ERR_REG_DAM_WRRESP_ECC_DB_ERR | VXGE_HAL_DAM_ERR_REG_DAM_HPRD_ERR | VXGE_HAL_DAM_ERR_REG_DAM_LPRD_0_ERR | VXGE_HAL_DAM_ERR_REG_DAM_LPRD_1_ERR | 
VXGE_HAL_DAM_ERR_REG_DAM_HPPEDAT_OVERFLOW_ERR | VXGE_HAL_DAM_ERR_REG_DAM_LPPEDAT_OVERFLOW_ERR | VXGE_HAL_DAM_ERR_REG_DAM_WRRESP_OVERFLOW_ERR | VXGE_HAL_DAM_ERR_REG_DAM_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->dam_err_mask); val64 = VXGE_HAL_PCMG3_INT_STATUS_DAM_ERR_DAM_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->pcmg3_int_mask); val64 = VXGE_HAL_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE0_DB_ERR(0x3) | VXGE_HAL_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE1_DB_ERR(0x3) | VXGE_HAL_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE2_DB_ERR(0x3) | VXGE_HAL_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE3_DB_ERR(0x3) | VXGE_HAL_XMAC_GEN_ERR_REG_XSTATS_RMAC_STATS_TILE4_DB_ERR(0x3) | VXGE_HAL_XMAC_GEN_ERR_REG_XMACJ_XMAC_FSM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->xmac_gen_err_mask); val64 = VXGE_HAL_XGXS_GEN_ERR_REG_XGXS_XGXS_FSM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->xgxs_gen_err_mask); val64 = VXGE_HAL_ASIC_NTWK_ERR_REG_XMACJ_NTWK_DOWN | VXGE_HAL_ASIC_NTWK_ERR_REG_XMACJ_NTWK_UP | VXGE_HAL_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_DOWN | VXGE_HAL_ASIC_NTWK_ERR_REG_XMACJ_NTWK_WENT_UP | VXGE_HAL_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT | VXGE_HAL_ASIC_NTWK_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->asic_ntwk_err_mask); val64 = VXGE_HAL_XGMAC_INT_STATUS_XMAC_GEN_ERR_XMAC_GEN_INT | VXGE_HAL_XGMAC_INT_STATUS_XGXS_GEN_ERR_XGXS_GEN_INT | VXGE_HAL_XGMAC_INT_STATUS_ASIC_NTWK_ERR_ASIC_NTWK_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->xgmac_int_mask); val64 = VXGE_HAL_RXMAC_ECC_ERR_REG_RMAC_PORT0_RMAC_RTS_PART_DB_ERR(0xf) | VXGE_HAL_RXMAC_ECC_ERR_REG_RMAC_PORT1_RMAC_RTS_PART_DB_ERR(0xf) | VXGE_HAL_RXMAC_ECC_ERR_REG_RMAC_PORT2_RMAC_RTS_PART_DB_ERR(0xf) | VXGE_HAL_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT0_DB_ERR(0x3) | VXGE_HAL_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DA_LKP_PRT1_DB_ERR(0x3) | VXGE_HAL_RXMAC_ECC_ERR_REG_RTSJ_RMAC_VID_LKP_DB_ERR | VXGE_HAL_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT0_DB_ERR 
| VXGE_HAL_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT1_DB_ERR | VXGE_HAL_RXMAC_ECC_ERR_REG_RTSJ_RMAC_PN_LKP_PRT2_DB_ERR | VXGE_HAL_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_MASK_DB_ERR(0x3f) | VXGE_HAL_RXMAC_ECC_ERR_REG_RTSJ_RMAC_RTH_LKP_DB_ERR(0x7) | VXGE_HAL_RXMAC_ECC_ERR_REG_RTSJ_RMAC_DS_LKP_DB_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->rxmac_ecc_err_mask); val64 = VXGE_HAL_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT0_FSM_ERR | VXGE_HAL_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT1_FSM_ERR | VXGE_HAL_RXMAC_VARIOUS_ERR_REG_RMAC_RMAC_PORT2_FSM_ERR | VXGE_HAL_RXMAC_VARIOUS_ERR_REG_RMACJ_RMACJ_FSM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->rxmac_various_err_mask); val64 = VXGE_HAL_RXMAC_INT_STATUS_RXMAC_ECC_ERR_RXMAC_ECC_INT | VXGE_HAL_RXMAC_INT_STATUS_RXMAC_VARIOUS_ERR_RXMAC_VARIOUS_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->rxmac_int_mask); val64 = VXGE_HAL_TXMAC_GEN_ERR_REG_TMACJ_PERMANENT_STOP; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->txmac_gen_err_mask); val64 = VXGE_HAL_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2MAC_DB_ERR | VXGE_HAL_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_SB_DB_ERR | VXGE_HAL_TXMAC_ECC_ERR_REG_TMACJ_TMAC_TPA2M_DA_DB_ERR | VXGE_HAL_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT0_FSM_ERR | VXGE_HAL_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT1_FSM_ERR | VXGE_HAL_TXMAC_ECC_ERR_REG_TMAC_TMAC_PORT2_FSM_ERR | VXGE_HAL_TXMAC_ECC_ERR_REG_TMACJ_TMACJ_FSM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->txmac_ecc_err_mask); val64 = VXGE_HAL_TMAC_INT_STATUS_TXMAC_GEN_ERR_TXMAC_GEN_INT | VXGE_HAL_TMAC_INT_STATUS_TXMAC_ECC_ERR_TXMAC_ECC_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->tmac_int_mask); val64 = VXGE_HAL_G3IFCMD_FB_ERR_REG_G3IF_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->g3ifcmd_fb_err_mask); val64 = VXGE_HAL_G3IFCMD_FB_INT_STATUS_ERR_G3IF_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->g3ifcmd_fb_int_mask); val64 = VXGE_HAL_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_A | VXGE_HAL_MC_ERR_REG_MC_XFMD_MEM_ECC_DB_ERR_B | 
VXGE_HAL_MC_ERR_REG_MC_G3IF_RD_FIFO_ECC_DB_ERR | VXGE_HAL_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_0 | VXGE_HAL_MC_ERR_REG_MC_MIRI_ECC_DB_ERR_1 | VXGE_HAL_MC_ERR_REG_MC_SM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->mc_err_mask); val64 = VXGE_HAL_GROCRC_ALARM_REG_XFMD_WR_FIFO_ERR | VXGE_HAL_GROCRC_ALARM_REG_WDE2MSR_RD_FIFO_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->grocrc_alarm_mask); val64 = VXGE_HAL_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_N_DB_ERR | VXGE_HAL_FAU_ECC_ERR_REG_FAU_PORT0_FAU_MAC2F_W_DB_ERR(0x3) | VXGE_HAL_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_N_DB_ERR | VXGE_HAL_FAU_ECC_ERR_REG_FAU_PORT1_FAU_MAC2F_W_DB_ERR(0x3) | VXGE_HAL_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_N_DB_ERR | VXGE_HAL_FAU_ECC_ERR_REG_FAU_PORT2_FAU_MAC2F_W_DB_ERR(0x3) | VXGE_HAL_FAU_ECC_ERR_REG_FAU_FAU_XFMD_INS_DB_ERR(0x3) | VXGE_HAL_FAU_ECC_ERR_REG_FAUJ_FAU_FSM_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->fau_ecc_err_mask); val64 = VXGE_HAL_MC_INT_STATUS_MC_ERR_MC_INT | VXGE_HAL_MC_INT_STATUS_GROCRC_ALARM_ROCRC_INT | VXGE_HAL_MC_INT_STATUS_FAU_ECC_ERR_FAU_ECC_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->mc_int_mask); val64 = VXGE_HAL_G3FBCT_ERR_REG_G3IF_SM_ERR | VXGE_HAL_G3FBCT_ERR_REG_G3IF_GDDR3_DECC | VXGE_HAL_G3FBCT_ERR_REG_G3IF_GDDR3_U_DECC | VXGE_HAL_G3FBCT_ERR_REG_G3IF_CTRL_FIFO_DECC; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->g3fbct_err_mask); val64 = VXGE_HAL_G3FBCT_INT_STATUS_ERR_G3IF_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->g3fbct_int_mask); val64 = VXGE_HAL_ORP_ERR_REG_ORP_FIFO_DB_ERR | VXGE_HAL_ORP_ERR_REG_ORP_XFMD_FIFO_UFLOW_ERR | VXGE_HAL_ORP_ERR_REG_ORP_FRM_FIFO_UFLOW_ERR | VXGE_HAL_ORP_ERR_REG_ORP_XFMD_RCV_FSM_ERR | VXGE_HAL_ORP_ERR_REG_ORP_OUTREAD_FSM_ERR | VXGE_HAL_ORP_ERR_REG_ORP_OUTQEM_FSM_ERR | VXGE_HAL_ORP_ERR_REG_ORP_XFMD_RCV_SHADOW_ERR | VXGE_HAL_ORP_ERR_REG_ORP_OUTREAD_SHADOW_ERR | VXGE_HAL_ORP_ERR_REG_ORP_OUTQEM_SHADOW_ERR | VXGE_HAL_ORP_ERR_REG_ORP_OUTFRM_SHADOW_ERR | 
VXGE_HAL_ORP_ERR_REG_ORP_OPTPRS_SHADOW_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->orp_err_mask); val64 = VXGE_HAL_PTM_ALARM_REG_PTM_RDCTRL_SYNC_ERR | VXGE_HAL_PTM_ALARM_REG_PTM_RDCTRL_FIFO_ERR | VXGE_HAL_PTM_ALARM_REG_XFMD_RD_FIFO_ERR | VXGE_HAL_PTM_ALARM_REG_WDE2MSR_WR_FIFO_ERR | VXGE_HAL_PTM_ALARM_REG_PTM_FRMM_ECC_DB_ERR(0x3); VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->ptm_alarm_mask); val64 = VXGE_HAL_TPA_ERROR_REG_TPA_FSM_ERR_ALARM | VXGE_HAL_TPA_ERROR_REG_TPA_TPA_DA_LKUP_PRT0_DB_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->tpa_error_mask); val64 = VXGE_HAL_TPA_INT_STATUS_ORP_ERR_ORP_INT | VXGE_HAL_TPA_INT_STATUS_PTM_ALARM_PTM_INT | VXGE_HAL_TPA_INT_STATUS_TPA_ERROR_TPA_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->tpa_int_mask); val64 = VXGE_HAL_KDFC_ERR_REG_KDFC_KDFC_ECC_DB_ERR | VXGE_HAL_KDFC_ERR_REG_KDFC_KDFC_SM_ERR_ALARM; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->kdfc_err_mask); val64 = VXGE_HAL_DOORBELL_INT_STATUS_KDFC_ERR_REG_TXDMA_KDFC_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->doorbell_int_mask); val64 = VXGE_HAL_TIM_ERR_REG_TIM_VBLS_DB_ERR | VXGE_HAL_TIM_ERR_REG_TIM_BMAP_PA_DB_ERR | VXGE_HAL_TIM_ERR_REG_TIM_BMAP_PB_DB_ERR | VXGE_HAL_TIM_ERR_REG_TIM_BMAP_MSG_DB_ERR | VXGE_HAL_TIM_ERR_REG_TIM_BMAP_MEM_CNTRL_SM_ERR | VXGE_HAL_TIM_ERR_REG_TIM_BMAP_MSG_MEM_CNTRL_SM_ERR | VXGE_HAL_TIM_ERR_REG_TIM_MPIF_PCIWR_ERR | VXGE_HAL_TIM_ERR_REG_TIM_ROCRC_BMAP_UPDT_FIFO_ERR | VXGE_HAL_TIM_ERR_REG_TIM_CREATE_BMAPMSG_FIFO_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->tim_err_mask); val64 = VXGE_HAL_MSG_EXC_REG_MP_MXP_CAUSE_CRIT_INT | VXGE_HAL_MSG_EXC_REG_UP_UXP_CAUSE_CRIT_INT | VXGE_HAL_MSG_EXC_REG_MP_MXP_SERR | VXGE_HAL_MSG_EXC_REG_UP_UXP_SERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->msg_exc_mask); val64 = VXGE_HAL_MSG_ERR_REG_UP_UXP_WAKE_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR_REG_MP_MXP_WAKE_FSM_INTEGRITY_ERR | 
VXGE_HAL_MSG_ERR_REG_MSG_QUE_DMQ_DMA_READ_CMD_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_DMQ_DMA_RESP_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_DMQ_OWN_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_PDA_ACC_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR_REG_MP_MXP_PMON_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR_REG_UP_UXP_PMON_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR_REG_MSG_XFMDQRY_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR_REG_MSG_FRMQRY_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_UMQ_WRITE_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_UMQ_BWR_PF_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_REG_RESP_FIFO_ERR | VXGE_HAL_MSG_ERR_REG_UP_UXP_DTAG_DB_ERR | VXGE_HAL_MSG_ERR_REG_UP_UXP_ITAG_DB_ERR | VXGE_HAL_MSG_ERR_REG_MP_MXP_DTAG_DB_ERR | VXGE_HAL_MSG_ERR_REG_MP_MXP_ITAG_DB_ERR | VXGE_HAL_MSG_ERR_REG_UP_UXP_TRACE_DB_ERR | VXGE_HAL_MSG_ERR_REG_MP_MXP_TRACE_DB_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_CMG2MSG_DB_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_TXPE2MSG_DB_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_RXPE2MSG_DB_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_RPE2MSG_DB_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_REG_READ_FIFO_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_MXP2UXP_FIFO_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_KDFC_SIF_FIFO_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_CXP2SWIF_FIFO_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_UMQ_DB_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_BWR_PF_DB_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_BWR_SIF_FIFO_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_DMQ_ECC_DB_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_DMA_READ_FIFO_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_DMA_RESP_ECC_DB_ERR | VXGE_HAL_MSG_ERR_REG_MSG_QUE_UXP2MXP_FIFO_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->msg_err_mask); val64 = VXGE_HAL_MSG_ERR2_REG_MSG_QUE_CMG2MSG_DISPATCH_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_DMQ_DISPATCH_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_SWIF_DISPATCH_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_PIC_WRITE_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_SWIFREG_FSM_INTEGRITY_ERR | 
VXGE_HAL_MSG_ERR2_REG_MSG_QUE_TIM_WRITE_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_UMQ_TA_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_TXPE_TA_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_RXPE_TA_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_SWIF_TA_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_DMA_TA_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_CP_TA_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA16_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA15_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA14_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA13_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA12_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA11_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA10_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA9_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA8_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA7_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA6_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA5_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA4_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA3_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA2_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA1_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_LONGTERMUMQ_TA0_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_FBMC_OWN_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_TXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_RXPE2MSG_DISPATCH_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_RPE2MSG_DISPATCH_FSM_INTEGRITY_ERR | VXGE_HAL_MSG_ERR2_REG_MP_MP_PIFT_IF_CREDIT_CNT_ERR | VXGE_HAL_MSG_ERR2_REG_UP_UP_PIFT_IF_CREDIT_CNT_ERR | VXGE_HAL_MSG_ERR2_REG_MSG_QUE_UMQ2PIC_CMD_FIFO_ERR | 
VXGE_HAL_MSG_ERR2_REG_TIM_TIM2MSG_CMD_FIFO_ERR; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->msg_err2_mask); val64 = VXGE_HAL_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR0 | VXGE_HAL_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR1 | VXGE_HAL_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR2 | VXGE_HAL_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR3 | VXGE_HAL_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR4 | VXGE_HAL_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR5 | VXGE_HAL_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR6 | VXGE_HAL_MSG_ERR3_REG_UP_UXP_DCACHE_DB_ERR7 | VXGE_HAL_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR0 | VXGE_HAL_MSG_ERR3_REG_UP_UXP_ICACHE_DB_ERR1 | VXGE_HAL_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR0 | VXGE_HAL_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR1 | VXGE_HAL_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR2 | VXGE_HAL_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR3 | VXGE_HAL_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR4 | VXGE_HAL_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR5 | VXGE_HAL_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR6 | VXGE_HAL_MSG_ERR3_REG_MP_MXP_DCACHE_DB_ERR7 | VXGE_HAL_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR0 | VXGE_HAL_MSG_ERR3_REG_MP_MXP_ICACHE_DB_ERR1; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->msg_err3_mask); val64 = VXGE_HAL_MSG_INT_STATUS_TIM_ERR_TIM_INT | VXGE_HAL_MSG_INT_STATUS_MSG_EXC_MSG_XT_EXC_INT | VXGE_HAL_MSG_INT_STATUS_MSG_ERR3_MSG_ERR3_INT | VXGE_HAL_MSG_INT_STATUS_MSG_ERR2_MSG_ERR2_INT | VXGE_HAL_MSG_INT_STATUS_MSG_ERR_MSG_ERR_INT; VXGE_HAL_MRPCIM_ERROR_REG_UNMASK(val64, &mrpcim_reg->msg_int_mask); val64 = VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_PIC_INT | VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_PCI_INT | VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_RTDMA_INT | VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_WRDMA_INT | VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_G3CMCT_INT | VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_GCMG1_INT | VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_GCMG2_INT | VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_G3CMIFL_INT | VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_G3CMIFU_INT | VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_PCMG1_INT | VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_PCMG2_INT | VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_PCMG3_INT | 
VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_XMAC_INT |
	    VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_RXMAC_INT |
	    VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_TMAC_INT |
	    VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_G3FBIF_INT |
	    VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_FBMC_INT |
	    VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_G3FBCT_INT |
	    VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_TPA_INT |
	    VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_DRBELL_INT |
	    VXGE_HAL_MRPCIM_GENERAL_INT_STATUS_MSG_INT;

	/*
	 * The general mask register is written with the complement of the
	 * summary bits collected above, i.e. those bits are unmasked.
	 */
	vxge_hal_pio_mem_write32_upper(
	    hldev->header.pdev,
	    hldev->header.regh0,
	    (u32) bVAL32(~val64, 0),
	    &mrpcim_reg->mrpcim_general_int_mask);

	vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
	    __FILE__, __func__, __LINE__, status);

	return (status);
}

/*
 * vxge_hal_mrpcim_intr_disable - Disable the interrupts on mrpcim.
 * @devh: HAL device handle.
 *
 * Disable all mrpcim interrupts by masking every mrpcim error/alarm mask
 * register and finally the top-level mrpcim general interrupt mask.
 * Requires mrpcim access rights.
 *
 * See also: vxge_hal_mrpcim_intr_enable().
 */
vxge_hal_status_e
vxge_hal_mrpcim_intr_disable(vxge_hal_device_h devh)
{
	u32 i;
	vxge_hal_status_e status = VXGE_HAL_OK;
	vxge_hal_mrpcim_reg_t *mrpcim_reg;
	__hal_device_t *hldev = (__hal_device_t *) devh;

	vxge_assert(devh);

	vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__,
	    __LINE__);

	vxge_hal_trace_log_mrpcim("devh = 0x"VXGE_OS_STXFMT, (ptr_t) devh);

	/* Masking mrpcim registers is a privileged (mrpcim) operation. */
	if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
		return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
	}

	mrpcim_reg = hldev->mrpcim_reg;

	/* PCI/PIC error and alarm masks. */
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->ini_errors_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->dma_errors_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->tgt_errors_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->config_errors_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->crdt_errors_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->mrpcim_general_errors_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->pll_errors_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->mrpcim_ppif_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->dbecc_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->general_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->pcipif_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->pda_alarm_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->pcc_error_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->lso_error_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->sm_error_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->rtdma_int_mask);

	/* Receive-side DMA (WRDMA) masks. */
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->rc_alarm_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->rxdrm_sm_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->rxdcm_sm_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->rxdwm_sm_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->rda_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->rda_ecc_db_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->rqa_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->frf_alarm_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->rocrc_alarm_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->wde0_alarm_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->wde1_alarm_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->wde2_alarm_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->wde3_alarm_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->wrdma_int_mask);

	/* Memory-controller and CM interface masks. */
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->g3cmct_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->g3cmct_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->gsscc_err_mask);

	for (i = 0; i < 3; i++) {
		VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->gssc_err0_mask[i]);
		VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->gssc_err1_mask[i]);
	}

	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->gcmg1_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->gxtmc_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->gcp_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->cmc_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->gcmg2_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->g3ifcmd_cml_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->g3ifcmd_cml_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->g3ifcmd_cmu_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->g3ifcmd_cmu_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->psscc_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->pcmg1_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->pxtmc_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->cp_exc_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->cp_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->pcmg2_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->dam_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->pcmg3_int_mask);

	/* XMAC / XGMAC masks. */
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->xmac_gen_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->xgxs_gen_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->asic_ntwk_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->xgmac_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->rxmac_ecc_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->rxmac_various_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->rxmac_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->txmac_gen_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->txmac_ecc_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->tmac_int_mask);

	/* Frame-buffer interface / memory-controller masks. */
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->g3ifcmd_fb_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->g3ifcmd_fb_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->mc_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->grocrc_alarm_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->fau_ecc_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->mc_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->g3fbct_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->g3fbct_int_mask);

	/* Transmit path, doorbell, TIM and messaging-unit masks. */
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->orp_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->ptm_alarm_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->tpa_error_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->tpa_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->kdfc_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->doorbell_int_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->tim_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->msg_exc_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->msg_err_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->msg_err2_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->msg_err3_mask);
	VXGE_HAL_MRPCIM_ERROR_REG_MASK(&mrpcim_reg->msg_int_mask);

	/* Finally mask the top-level mrpcim summary interrupt. */
	vxge_hal_pio_mem_write32_upper(
	    hldev->header.pdev,
	    hldev->header.regh0,
	    (u32) VXGE_HAL_INTR_MASK_ALL,
	    &mrpcim_reg->mrpcim_general_int_mask);

	vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
	    __FILE__, __func__, __LINE__, status);

	return (status);
}

/*
 * vxge_hal_mrpcim_reset - Reset the entire device.
 * @devh: HAL device handle.
 *
 * Soft-reset the device, reset the device stats except reset_cnt.
 *
 *
 * Returns:  VXGE_HAL_OK - success.
 * VXGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized.
 * VXGE_HAL_ERR_RESET_FAILED - Reset failed.
 *
 * See also: vxge_hal_status_e {}.
*/ vxge_hal_status_e vxge_hal_mrpcim_reset(vxge_hal_device_h devh) { u64 val64; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert(devh); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim("devh = 0x"VXGE_OS_STXFMT, (ptr_t) devh); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } if (!hldev->header.is_initialized) return (VXGE_HAL_ERR_DEVICE_NOT_INITIALIZED); if (hldev->device_resetting == 1) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_RESET_IN_PROGRESS); return (VXGE_HAL_ERR_RESET_IN_PROGRESS); } (void) __hal_ifmsg_wmsg_post(hldev, hldev->first_vp_id, VXGE_HAL_RTS_ACCESS_STEER_MSG_DEST_BROADCAST, VXGE_HAL_RTS_ACCESS_STEER_DATA0_MSG_TYPE_DEVICE_RESET_BEGIN, 0); vxge_os_mdelay(100); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->sw_reset_cfg1); val64 |= VXGE_HAL_SW_RESET_CFG1_TYPE; vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->sw_reset_cfg1); vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, VXGE_HAL_PF_SW_RESET_PF_SW_RESET( VXGE_HAL_PF_SW_RESET_COMMAND), &hldev->mrpcim_reg->bf_sw_reset); hldev->stats.sw_dev_info_stats.soft_reset_cnt++; hldev->device_resetting = 1; vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_PENDING); return (VXGE_HAL_PENDING); } /* * vxge_hal_mrpcim_reset_poll - Poll the device for reset complete. * @devh: HAL device handle. * * Soft-reset the device, reset the device stats except reset_cnt. * * After reset is done, will try to re-initialize HW. * * Returns: VXGE_HAL_OK - success. * VXGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized. * VXGE_HAL_ERR_RESET_FAILED - Reset failed. 
 *
 * See also: vxge_hal_status_e {}.
 */
vxge_hal_status_e
vxge_hal_mrpcim_reset_poll(vxge_hal_device_h devh)
{
	u64 val64;
	vxge_hal_status_e status = VXGE_HAL_OK;
	__hal_device_t *hldev = (__hal_device_t *) devh;

	vxge_assert(devh);

	vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__,
	    __LINE__);

	vxge_hal_trace_log_mrpcim("devh = 0x"VXGE_OS_STXFMT, (ptr_t) devh);

	if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
		return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
	}

	if (!hldev->header.is_initialized) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_DEVICE_NOT_INITIALIZED);
		return (VXGE_HAL_ERR_DEVICE_NOT_INITIALIZED);
	}

	/* BAR mapping must be re-acquired after the reset. */
	if ((status = __hal_device_reg_addr_get(hldev)) != VXGE_HAL_OK) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__, status);
		hldev->device_resetting = 0;
		return (status);
	}

	__hal_device_id_get(hldev);

	__hal_device_host_info_get(hldev);

	hldev->hw_is_initialized = 0;

	hldev->device_resetting = 0;

	/* Per the contract above: clear all stats (reset_cnt lives in
	 * sw_dev_info_stats, not in these blocks). */
	vxge_os_memzero(hldev->mrpcim->mrpcim_stats,
	    sizeof(vxge_hal_mrpcim_stats_hw_info_t));

	vxge_os_memzero(&hldev->mrpcim->mrpcim_stats_sav,
	    sizeof(vxge_hal_mrpcim_stats_hw_info_t));

	/* Re-apply MAC and link-aggregation configuration. */
	status = __hal_mrpcim_mac_configure(hldev);
	if (status != VXGE_HAL_OK) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);
	}

	status = __hal_mrpcim_lag_configure(hldev);
	if (status != VXGE_HAL_OK) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);
	}

	/* Re-read the MDIO PHY/DTE port addresses for both wire ports. */
	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0,
	    &hldev->mrpcim_reg->mdio_gen_cfg_port[0]);

	hldev->mrpcim->mdio_phy_prtad0 =
	    (u32) VXGE_HAL_MDIO_GEN_CFG_PORT_GET_MDIO_PHY_PRTAD(val64);

	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0,
	    &hldev->mrpcim_reg->mdio_gen_cfg_port[1]);

	hldev->mrpcim->mdio_phy_prtad1 =
	    (u32) VXGE_HAL_MDIO_GEN_CFG_PORT_GET_MDIO_PHY_PRTAD(val64);

	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0,
	    &hldev->mrpcim_reg->xgxs_static_cfg_port[0]);

	hldev->mrpcim->mdio_dte_prtad0 =
	    (u32) VXGE_HAL_XGXS_STATIC_CFG_PORT_GET_MDIO_DTE_PRTAD(val64);

	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0,
	    &hldev->mrpcim_reg->xgxs_static_cfg_port[1]);

	hldev->mrpcim->mdio_dte_prtad1 =
	    (u32) VXGE_HAL_XGXS_STATIC_CFG_PORT_GET_MDIO_DTE_PRTAD(val64);

	/* Restore the DMA address the adapter writes mrpcim stats to. */
	vxge_os_pio_mem_write64(hldev->header.pdev,
	    hldev->header.regh0,
	    hldev->mrpcim->mrpcim_stats_block->dma_addr,
	    &hldev->mrpcim_reg->mrpcim_stats_start_host_addr);

	/* Re-authorize the assigned vpaths for MAC address and VLAN use. */
	vxge_os_pio_mem_write64(hldev->header.pdev,
	    hldev->header.regh0,
	    hldev->vpath_assignments,
	    &hldev->mrpcim_reg->rxmac_authorize_all_addr);

	vxge_os_pio_mem_write64(hldev->header.pdev,
	    hldev->header.regh0,
	    hldev->vpath_assignments,
	    &hldev->mrpcim_reg->rxmac_authorize_all_vid);

	/* Tell the other functions the reset is over. */
	(void) __hal_ifmsg_wmsg_post(hldev,
	    hldev->first_vp_id,
	    VXGE_HAL_RTS_ACCESS_STEER_MSG_DEST_BROADCAST,
	    VXGE_HAL_RTS_ACCESS_STEER_DATA0_MSG_TYPE_DEVICE_RESET_END,
	    0);

	(void) vxge_hal_device_reset_poll(devh);

	vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
	    __FILE__, __func__, __LINE__, status);

	return (status);
}

/*
 * __hal_mrpcim_xpak_counter_check - check the Xpak error count and log the msg
 * @hldev: pointer to __hal_device_t structure
 * @port: Port number
 * @type: xpak stats error type
 * @value: xpak stats value
 *
 * It is used to log the error message based on the xpak stats value
 * Return value:
 * None
 */
void
__hal_mrpcim_xpak_counter_check(__hal_device_t *hldev,
    u32 port, u32 type, u32 value)
{
	vxge_assert(hldev != NULL);

	vxge_hal_trace_log_stats("==> %s:%s:%d", __FILE__, __func__,
	    __LINE__);

	vxge_hal_trace_log_stats(
	    "hldev = 0x"VXGE_OS_STXFMT", port = %d, type = %d, value = %d",
	    (ptr_t) hldev, port, type, value);

	if (!(hldev->access_rights &
	    VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		vxge_hal_trace_log_stats("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
		return;
	}

	/*
	 * If the value is high for three consecutive cylce,
	 * log a error message
	 */
	if (value == 3) {
		switch (type) {
		case VXGE_HAL_XPAK_ALARM_EXCESS_TEMP:

			/* Restart the 3-poll window for this alarm. */
			hldev->mrpcim->xpak_stats[port].excess_temp = 0;

			/*
			 * Notify the ULD on Excess Xpak temperature alarm msg
			 */
			if (g_vxge_hal_driver->uld_callbacks.xpak_alarm_log) {
				g_vxge_hal_driver->uld_callbacks.xpak_alarm_log(
				    hldev->header.upper_layer_data,
				    port,
				    VXGE_HAL_XPAK_ALARM_EXCESS_TEMP);
			}
			break;
		case VXGE_HAL_XPAK_ALARM_EXCESS_BIAS_CURRENT:
			hldev->mrpcim->xpak_stats[port].excess_bias_current = 0;

			/*
			 * Notify the ULD on Excess xpak bias current alarm msg
			 */
			if (g_vxge_hal_driver->uld_callbacks.xpak_alarm_log) {
				g_vxge_hal_driver->uld_callbacks.xpak_alarm_log(
				    hldev->header.upper_layer_data,
				    port,
				    VXGE_HAL_XPAK_ALARM_EXCESS_BIAS_CURRENT);
			}
			break;
		case VXGE_HAL_XPAK_ALARM_EXCESS_LASER_OUTPUT:
			hldev->mrpcim->xpak_stats[port].excess_laser_output = 0;

			/*
			 * Notify the ULD on Excess Xpak Laser o/p power
			 * alarm msg
			 */
			if (g_vxge_hal_driver->uld_callbacks.xpak_alarm_log) {
				g_vxge_hal_driver->uld_callbacks.xpak_alarm_log(
				    hldev->header.upper_layer_data,
				    port,
				    VXGE_HAL_XPAK_ALARM_EXCESS_LASER_OUTPUT);
			}
			break;
		default:
			vxge_hal_info_log_stats("%s",
			    "Incorrect XPAK Alarm type");
		}
	}

	vxge_hal_trace_log_stats("<== %s:%s:%d Result = 0",
	    __FILE__, __func__, __LINE__);
}

/*
 * vxge_hal_mrpcim_xpak_stats_poll - Poll and update the Xpak error count.
 * @devh: HAL device handle
 * @port: Port number
 *
 * It is used to update the xpak stats value
 */
vxge_hal_status_e
vxge_hal_mrpcim_xpak_stats_poll(
    vxge_hal_device_h devh, u32 port)
{
	u16 val;
	vxge_hal_status_e status = VXGE_HAL_OK;
	__hal_device_t *hldev = (__hal_device_t *) devh;

	vxge_assert(hldev != NULL);

	vxge_hal_trace_log_stats("==> %s:%s:%d", __FILE__, __func__,
	    __LINE__);

	vxge_hal_trace_log_stats("hldev = 0x"VXGE_OS_STXFMT", port = %d",
	    (ptr_t) hldev, port);

	if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		vxge_hal_trace_log_stats("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
		return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
	}

	/* Loading the DOM register to MDIO register */
	/* NOTE(review): an address-write of 0 followed by a read of the DOM
	 * command/status register; presumably this latches the transceiver
	 * DOM page before the flag reads below — confirm against the XPAK
	 * MDIO spec. */
	val = 0;
	status = __hal_mrpcim_mdio_access(devh, port,
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_OP_TYPE_ADDR_WRITE,
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_DEVAD_PMA_PMD,
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_CMD_STAT,
	    &val);

	if (status != VXGE_HAL_OK) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);
	}

	status = __hal_mrpcim_mdio_access(devh, port,
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_OP_TYPE_ADDR_READ,
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_DEVAD_PMA_PMD,
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_CMD_STAT,
	    &val);

	if (status != VXGE_HAL_OK) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);
	}

	/*
	 * Reading the Alarm flags
	 */
	status = __hal_mrpcim_mdio_access(devh, port,
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_OP_TYPE_ADDR_READ,
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_DEVAD_PMA_PMD,
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_ALARM_FLAG,
	    &val);

	if (status != VXGE_HAL_OK) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);
	}

	/*
	 * For each "excess" alarm: count it, and after three consecutive
	 * polls with the flag set, __hal_mrpcim_xpak_counter_check() logs
	 * and notifies the ULD. A clear flag resets the streak.
	 */
	if (val &
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_ALARM_FLAG_TEMP_HIGH) {
		hldev->mrpcim->xpak_stats[port].alarm_transceiver_temp_high++;
		hldev->mrpcim->xpak_stats[port].excess_temp++;
		__hal_mrpcim_xpak_counter_check(hldev, port,
		    VXGE_HAL_XPAK_ALARM_EXCESS_TEMP,
		    hldev->mrpcim->xpak_stats[port].excess_temp);
	} else {
		hldev->mrpcim->xpak_stats[port].excess_temp = 0;
	}

	if (val &
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_ALARM_FLAG_TEMP_LOW) {
		hldev->mrpcim->xpak_stats[port].alarm_transceiver_temp_low++;
	}

	if (val &
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_ALARM_FLAG_CUR_HIGH) {
		hldev->mrpcim->xpak_stats[port].alarm_laser_bias_current_high++;
		hldev->mrpcim->xpak_stats[port].excess_bias_current++;
		__hal_mrpcim_xpak_counter_check(hldev, port,
		    VXGE_HAL_XPAK_ALARM_EXCESS_BIAS_CURRENT,
		    hldev->mrpcim->xpak_stats[port].excess_bias_current);
	} else {
		hldev->mrpcim->xpak_stats[port].excess_bias_current = 0;
	}

	if (val &
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_ALARM_FLAG_CUR_LOW) {
		hldev->mrpcim->xpak_stats[port].alarm_laser_bias_current_low++;
	}

	if (val &
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_ALARM_FLAG_PWR_HIGH) {
		hldev->mrpcim->xpak_stats[port].alarm_laser_output_power_high++;
		hldev->mrpcim->xpak_stats[port].excess_laser_output++;
		__hal_mrpcim_xpak_counter_check(hldev, port,
		    VXGE_HAL_XPAK_ALARM_EXCESS_LASER_OUTPUT,
		    hldev->mrpcim->xpak_stats[port].excess_laser_output);
	} else {
		hldev->mrpcim->xpak_stats[port].excess_laser_output = 0;
	}

	if (val &
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_ALARM_FLAG_PWR_LOW) {
		hldev->mrpcim->xpak_stats[port].alarm_laser_output_power_low++;
	}

	/*
	 * Reading the warning flags
	 */
	status = __hal_mrpcim_mdio_access(devh, port,
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_OP_TYPE_ADDR_READ,
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_DEVAD_PMA_PMD,
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_WARN_FLAG,
	    &val);

	if (status != VXGE_HAL_OK) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);
	}

	/* Warnings are only counted; no ULD notification. */
	if (val &
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_WARN_FLAG_TEMP_HIGH)
		hldev->mrpcim->xpak_stats[port].warn_transceiver_temp_high++;

	if (val &
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_WARN_FLAG_TEMP_LOW)
		hldev->mrpcim->xpak_stats[port].warn_transceiver_temp_low++;

	if (val &
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_WARN_FLAG_CUR_HIGH)
		hldev->mrpcim->xpak_stats[port].warn_laser_bias_current_high++;

	if (val &
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_WARN_FLAG_CUR_LOW)
		hldev->mrpcim->xpak_stats[port].warn_laser_bias_current_low++;

	if (val &
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_WARN_FLAG_PWR_HIGH)
		hldev->mrpcim->xpak_stats[port].warn_laser_output_power_high++;

	if (val &
	    VXGE_HAL_MDIO_MGR_ACCESS_PORT_ADDR_DOM_TX_WARN_FLAG_PWR_LOW)
		hldev->mrpcim->xpak_stats[port].warn_laser_output_power_low++;

	vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d",
	    __FILE__, __func__, __LINE__, status);

	return (status);
}

/*
 * vxge_hal_mrpcim_stats_enable - Enable mrpcim statistics.
 * @devh: HAL Device.
 *
 * Enable the DMA mrpcim statistics for the device. The function is to be called
 * to re-enable the adapter to update stats into the host memory
 *
 * See also: vxge_hal_mrpcim_stats_disable()
 */
vxge_hal_status_e
vxge_hal_mrpcim_stats_enable(vxge_hal_device_h devh)
{
	u64 val64;
	vxge_hal_status_e status = VXGE_HAL_OK;
	__hal_device_t *hldev = (__hal_device_t *) devh;

	vxge_assert(devh != NULL);

	vxge_hal_trace_log_stats("==> %s:%s:%d", __FILE__, __func__,
	    __LINE__);

	vxge_hal_trace_log_stats("devh = 0x"VXGE_OS_STXFMT, (ptr_t) devh);

	if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		vxge_hal_trace_log_stats("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
		return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
	}

	/* Snapshot the current stats so deltas can be computed later. */
	vxge_os_memcpy(&hldev->mrpcim->mrpcim_stats_sav,
	    hldev->mrpcim->mrpcim_stats,
	    sizeof(vxge_hal_mrpcim_stats_hw_info_t));

	if (hldev->header.config.stats_read_method ==
	    VXGE_HAL_STATS_READ_METHOD_DMA) {

		/* Enable adapter-driven DMA of mrpcim stats to host. */
		val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
		    hldev->header.regh0,
		    &hldev->mrpcim_reg->mrpcim_general_cfg2);

		val64 |= VXGE_HAL_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE;

		vxge_os_pio_mem_write64(hldev->header.pdev,
hldev->header.regh0, val64, &hldev->mrpcim_reg->mrpcim_general_cfg2); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->common_reg->stats_cfg0); val64 |= VXGE_HAL_STATS_CFG0_STATS_ENABLE( (1 << (16 - hldev->first_vp_id))); vxge_hal_pio_mem_write32_upper(hldev->header.pdev, hldev->header.regh0, (u32) bVAL32(val64, 0), &hldev->common_reg->stats_cfg0); } else { status = __hal_mrpcim_stats_get( hldev, hldev->mrpcim->mrpcim_stats); } vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } /* * vxge_hal_mrpcim_stats_disable - Disable mrpcim statistics. * @devh: HAL Device. * * Enable the DMA mrpcim statistics for the device. The function is to be called * to disable the adapter to update stats into the host memory. This function * is not needed to be called, normally. * * See also: vxge_hal_mrpcim_stats_enable() */ vxge_hal_status_e vxge_hal_mrpcim_stats_disable(vxge_hal_device_h devh) { u64 val64; vxge_hal_status_e status = VXGE_HAL_OK; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert(devh != NULL); vxge_hal_trace_log_stats("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_stats("devh = 0x"VXGE_OS_STXFMT, (ptr_t) devh); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_stats("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->mrpcim_general_cfg2); val64 &= ~VXGE_HAL_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE; vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->mrpcim_general_cfg2); vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } /* * vxge_hal_mrpcim_stats_get - Get the device mrpcim statistics. * @devh: HAL Device. 
* @stats: mrpcim stats * * Returns the device mrpcim stats for the device. * * See also: vxge_hal_device_stats_get() */ vxge_hal_status_e vxge_hal_mrpcim_stats_get( vxge_hal_device_h devh, vxge_hal_mrpcim_stats_hw_info_t *stats) { u64 val64; vxge_hal_status_e status = VXGE_HAL_OK; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert((hldev != NULL) && (stats != NULL)); vxge_hal_trace_log_stats("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_stats( "devh = 0x"VXGE_OS_STXFMT", stats = 0x"VXGE_OS_STXFMT, (ptr_t) devh, (ptr_t) stats); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_stats("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } if (hldev->header.config.stats_read_method == VXGE_HAL_STATS_READ_METHOD_DMA) { status = vxge_hal_device_register_poll(hldev->header.pdev, hldev->header.regh0, &hldev->common_reg->stats_cfg0, 0, VXGE_HAL_STATS_CFG0_STATS_ENABLE( (1 << (16 - hldev->first_vp_id))), hldev->header.config.device_poll_millis); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->mrpcim_general_cfg2); val64 &= ~VXGE_HAL_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_ENABLE; vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->mrpcim_general_cfg2); } if (status == VXGE_HAL_OK) { vxge_os_memcpy(stats, hldev->mrpcim->mrpcim_stats, sizeof(vxge_hal_mrpcim_stats_hw_info_t)); } vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } /* * vxge_hal_mrpcim_stats_access - Access the statistics from the given location * and offset and perform an operation * @devh: HAL Device handle. 
* @operation: Operation to be performed * @location: Location (one of vpath id, aggregate or port) * @offset: Offset with in the location * @stat: Pointer to a buffer to return the value * * Get the statistics from the given location and offset. * */ vxge_hal_status_e vxge_hal_mrpcim_stats_access( vxge_hal_device_h devh, u32 operation, u32 location, u32 offset, u64 *stat) { u64 val64; vxge_hal_status_e status = VXGE_HAL_OK; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert((devh != NULL) && (stat != NULL)); vxge_hal_trace_log_stats("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_stats("devh = 0x"VXGE_OS_STXFMT", operation = %d, " "location = %d, offset = %d, stat = 0x"VXGE_OS_STXFMT, (ptr_t) devh, operation, location, offset, (ptr_t) stat); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } val64 = VXGE_HAL_XMAC_STATS_SYS_CMD_OP(operation) | VXGE_HAL_XMAC_STATS_SYS_CMD_STROBE | VXGE_HAL_XMAC_STATS_SYS_CMD_LOC_SEL(location) | VXGE_HAL_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset); vxge_hal_pio_mem_write32_lower(hldev->header.pdev, hldev->header.regh0, (u32) bVAL32(val64, 32), &hldev->mrpcim_reg->xmac_stats_sys_cmd); vxge_os_wmb(); vxge_hal_pio_mem_write32_upper(hldev->header.pdev, hldev->header.regh0, (u32) bVAL32(val64, 0), &hldev->mrpcim_reg->xmac_stats_sys_cmd); vxge_os_wmb(); status = vxge_hal_device_register_poll(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->xmac_stats_sys_cmd, 0, VXGE_HAL_XMAC_STATS_SYS_CMD_STROBE, hldev->header.config.device_poll_millis); if ((status == VXGE_HAL_OK) && (operation == VXGE_HAL_STATS_OP_READ)) { *stat = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->xmac_stats_sys_data); } else { *stat = 0; } vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, 
status); return (VXGE_HAL_OK); } /* * vxge_hal_mrpcim_xmac_aggr_stats_get - Get the Statistics on aggregate port * @devh: HAL device handle. * @port: Number of the port (0 or 1) * @aggr_stats: Buffer to return Statistics on aggregate port. * * Get the Statistics on aggregate port * */ vxge_hal_status_e vxge_hal_mrpcim_xmac_aggr_stats_get(vxge_hal_device_h devh, u32 port, vxge_hal_xmac_aggr_stats_t *aggr_stats) { u64 val64; vxge_hal_status_e status = VXGE_HAL_OK; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert((devh != NULL) && (aggr_stats != NULL)); vxge_hal_trace_log_stats("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_stats("devh = 0x"VXGE_OS_STXFMT", port = %d, " "aggr_stats = 0x"VXGE_OS_STXFMT, (ptr_t) devh, port, (ptr_t) aggr_stats); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, VXGE_HAL_STATS_AGGRn_TX_FRMS_OFFSET(port)); aggr_stats->tx_frms = VXGE_HAL_STATS_GET_AGGRn_TX_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, VXGE_HAL_STATS_AGGRn_TX_DATA_OCTETS_OFFSET(port)); aggr_stats->tx_data_octets = VXGE_HAL_STATS_GET_AGGRn_TX_DATA_OCTETS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, VXGE_HAL_STATS_AGGRn_TX_MCAST_FRMS_OFFSET(port)); aggr_stats->tx_mcast_frms = VXGE_HAL_STATS_GET_AGGRn_TX_MCAST_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, VXGE_HAL_STATS_AGGRn_TX_BCAST_FRMS_OFFSET(port)); aggr_stats->tx_bcast_frms = VXGE_HAL_STATS_GET_AGGRn_TX_BCAST_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, VXGE_HAL_STATS_AGGRn_TX_DISCARDED_FRMS_OFFSET(port)); aggr_stats->tx_discarded_frms = VXGE_HAL_STATS_GET_AGGRn_TX_DISCARDED_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, 
VXGE_HAL_STATS_AGGRn_TX_ERRORED_FRMS_OFFSET(port)); aggr_stats->tx_errored_frms = VXGE_HAL_STATS_GET_AGGRn_TX_ERRORED_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, VXGE_HAL_STATS_AGGRn_RX_FRMS_OFFSET(port)); aggr_stats->rx_frms = VXGE_HAL_STATS_GET_AGGRn_RX_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, VXGE_HAL_STATS_AGGRn_RX_DATA_OCTETS_OFFSET(port)); aggr_stats->rx_data_octets = VXGE_HAL_STATS_GET_AGGRn_RX_DATA_OCTETS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, VXGE_HAL_STATS_AGGRn_RX_MCAST_FRMS_OFFSET(port)); aggr_stats->rx_mcast_frms = VXGE_HAL_STATS_GET_AGGRn_RX_MCAST_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, VXGE_HAL_STATS_AGGRn_RX_BCAST_FRMS_OFFSET(port)); aggr_stats->rx_bcast_frms = VXGE_HAL_STATS_GET_AGGRn_RX_BCAST_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, VXGE_HAL_STATS_AGGRn_RX_DISCARDED_FRMS_OFFSET(port)); aggr_stats->rx_discarded_frms = VXGE_HAL_STATS_GET_AGGRn_RX_DISCARDED_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, VXGE_HAL_STATS_AGGRn_RX_ERRORED_FRMS_OFFSET(port)); aggr_stats->rx_errored_frms = VXGE_HAL_STATS_GET_AGGRn_RX_ERRORED_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR, VXGE_HAL_STATS_AGGRn_RX_U_SLOW_PROTO_FRMS_OFFSET(port)); aggr_stats->rx_unknown_slow_proto_frms = VXGE_HAL_STATS_GET_AGGRn_RX_U_SLOW_PROTO_FRMS(val64); vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (VXGE_HAL_OK); } /* * vxge_hal_mrpcim_xmac_port_stats_get - Get the Statistics on a port * @devh: HAL device handle. * @port: Number of the port (wire 0, wire 1 or LAG) * @port_stats: Buffer to return Statistics on a port. 
* * Get the Statistics on port * */ vxge_hal_status_e vxge_hal_mrpcim_xmac_port_stats_get(vxge_hal_device_h devh, u32 port, vxge_hal_xmac_port_stats_t *port_stats) { u64 val64; vxge_hal_status_e status = VXGE_HAL_OK; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert((devh != NULL) && (port_stats != NULL)); vxge_hal_trace_log_stats("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_stats("devh = 0x"VXGE_OS_STXFMT", port = %d, " "port_stats = 0x"VXGE_OS_STXFMT, (ptr_t) devh, port, (ptr_t) port_stats); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_TTL_FRMS_OFFSET(port)); port_stats->tx_ttl_frms = VXGE_HAL_STATS_GET_PORTn_TX_TTL_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_TTL_FRMS_OFFSET(port)); port_stats->tx_ttl_octets = VXGE_HAL_STATS_GET_PORTn_TX_TTL_OCTETS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_DATA_OCTETS_OFFSET(port)); port_stats->tx_data_octets = VXGE_HAL_STATS_GET_PORTn_TX_DATA_OCTETS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_MCAST_FRMS_OFFSET(port)); port_stats->tx_mcast_frms = VXGE_HAL_STATS_GET_PORTn_TX_MCAST_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_BCAST_FRMS_OFFSET(port)); port_stats->tx_bcast_frms = VXGE_HAL_STATS_GET_PORTn_TX_BCAST_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_UCAST_FRMS_OFFSET(port)); port_stats->tx_ucast_frms = VXGE_HAL_STATS_GET_PORTn_TX_UCAST_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_TAGGED_FRMS_OFFSET(port)); port_stats->tx_tagged_frms = 
VXGE_HAL_STATS_GET_PORTn_TX_TAGGED_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_VLD_IP_OFFSET(port)); port_stats->tx_vld_ip = VXGE_HAL_STATS_GET_PORTn_TX_VLD_IP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_VLD_IP_OCTETS_OFFSET(port)); port_stats->tx_vld_ip_octets = VXGE_HAL_STATS_GET_PORTn_TX_VLD_IP_OCTETS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_ICMP_OFFSET(port)); port_stats->tx_icmp = VXGE_HAL_STATS_GET_PORTn_TX_ICMP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_TCP_OFFSET(port)); port_stats->tx_tcp = VXGE_HAL_STATS_GET_PORTn_TX_TCP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_RST_TCP_OFFSET(port)); port_stats->tx_rst_tcp = VXGE_HAL_STATS_GET_PORTn_TX_RST_TCP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_UDP_OFFSET(port)); port_stats->tx_udp = VXGE_HAL_STATS_GET_PORTn_TX_UDP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_UNKNOWN_PROTOCOL_OFFSET(port)); port_stats->tx_unknown_protocol = (u32) VXGE_HAL_STATS_GET_PORTn_TX_UNKNOWN_PROTOCOL(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_PARSE_ERROR_OFFSET(port)); port_stats->tx_parse_error = (u32) VXGE_HAL_STATS_GET_PORTn_TX_PARSE_ERROR(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_PAUSE_CTRL_FRMS_OFFSET(port)); port_stats->tx_pause_ctrl_frms = VXGE_HAL_STATS_GET_PORTn_TX_PAUSE_CTRL_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_LACPDU_FRMS_OFFSET(port)); port_stats->tx_lacpdu_frms = (u32) VXGE_HAL_STATS_GET_PORTn_TX_LACPDU_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_MRKR_PDU_FRMS_OFFSET(port)); port_stats->tx_marker_pdu_frms = (u32) 
VXGE_HAL_STATS_GET_PORTn_TX_MRKR_PDU_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_MRKR_RESP_PDU_FRMS_OFFSET(port)); port_stats->tx_marker_resp_pdu_frms = (u32) VXGE_HAL_STATS_GET_PORTn_TX_MRKR_RESP_PDU_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_DROP_IP_OFFSET(port)); port_stats->tx_drop_ip = (u32) VXGE_HAL_STATS_GET_PORTn_TX_DROP_IP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_XGMII_CHAR1_MATCH_OFFSET(port)); port_stats->tx_xgmii_char1_match = (u32) VXGE_HAL_STATS_GET_PORTn_TX_XGMII_CHAR1_MATCH(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_XGMII_CHAR2_MATCH_OFFSET(port)); port_stats->tx_xgmii_char2_match = (u32) VXGE_HAL_STATS_GET_PORTn_TX_XGMII_CHAR2_MATCH(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_XGMII_COL1_MATCH_OFFSET(port)); port_stats->tx_xgmii_column1_match = (u32) VXGE_HAL_STATS_GET_PORTn_TX_XGMII_COL1_MATCH(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_XGMII_COL2_MATCH_OFFSET(port)); port_stats->tx_xgmii_column2_match = (u32) VXGE_HAL_STATS_GET_PORTn_TX_XGMII_COL2_MATCH(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_DROP_FRMS_OFFSET(port)); port_stats->tx_drop_frms = (u16) VXGE_HAL_STATS_GET_PORTn_TX_DROP_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_TX_ANY_ERR_FRMS_OFFSET(port)); port_stats->tx_any_err_frms = (u16) VXGE_HAL_STATS_GET_PORTn_TX_ANY_ERR_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TTL_FRMS_OFFSET(port)); port_stats->rx_ttl_frms = VXGE_HAL_STATS_GET_PORTn_RX_TTL_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_VLD_FRMS_OFFSET(port)); port_stats->rx_vld_frms = VXGE_HAL_STATS_GET_PORTn_RX_VLD_FRMS(val64); 
VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_OFFLOAD_FRMS_OFFSET(port)); port_stats->rx_offload_frms = VXGE_HAL_STATS_GET_PORTn_RX_OFFLOAD_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TTL_OCTETS_OFFSET(port)); port_stats->rx_ttl_octets = VXGE_HAL_STATS_GET_PORTn_RX_TTL_OCTETS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_DATA_OCTETS_OFFSET(port)); port_stats->rx_data_octets = VXGE_HAL_STATS_GET_PORTn_RX_DATA_OCTETS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_OFFLOAD_OCTETS_OFFSET(port)); port_stats->rx_offload_octets = VXGE_HAL_STATS_GET_PORTn_RX_OFFLOAD_OCTETS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_VLD_MCAST_FRMS_OFFSET(port)); port_stats->rx_vld_mcast_frms = VXGE_HAL_STATS_GET_PORTn_RX_VLD_MCAST_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_VLD_BCAST_FRMS_OFFSET(port)); port_stats->rx_vld_bcast_frms = VXGE_HAL_STATS_GET_PORTn_RX_VLD_BCAST_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_ACC_UCAST_FRMS_OFFSET(port)); port_stats->rx_accepted_ucast_frms = VXGE_HAL_STATS_GET_PORTn_RX_ACC_UCAST_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_ACC_NUCAST_FRMS_OFFSET(port)); port_stats->rx_accepted_nucast_frms = VXGE_HAL_STATS_GET_PORTn_RX_ACC_NUCAST_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TAGGED_FRMS_OFFSET(port)); port_stats->rx_tagged_frms = VXGE_HAL_STATS_GET_PORTn_RX_TAGGED_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_LONG_FRMS_OFFSET(port)); port_stats->rx_long_frms = VXGE_HAL_STATS_GET_PORTn_RX_LONG_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_USIZED_FRMS_OFFSET(port)); 
port_stats->rx_usized_frms = VXGE_HAL_STATS_GET_PORTn_RX_USIZED_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_OSIZED_FRMS_OFFSET(port)); port_stats->rx_osized_frms = VXGE_HAL_STATS_GET_PORTn_RX_OSIZED_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_FRAG_FRMS_OFFSET(port)); port_stats->rx_frag_frms = VXGE_HAL_STATS_GET_PORTn_RX_FRAG_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_JABBER_FRMS_OFFSET(port)); port_stats->rx_jabber_frms = VXGE_HAL_STATS_GET_PORTn_RX_JABBER_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TTL_64_FRMS_OFFSET(port)); port_stats->rx_ttl_64_frms = VXGE_HAL_STATS_GET_PORTn_RX_TTL_64_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TTL_65_127_FRMS_OFFSET(port)); port_stats->rx_ttl_65_127_frms = VXGE_HAL_STATS_GET_PORTn_RX_TTL_65_127_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TTL_128_255_FRMS_OFFSET(port)); port_stats->rx_ttl_128_255_frms = VXGE_HAL_STATS_GET_PORTn_RX_TTL_128_255_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TTL_256_511_FRMS_OFFSET(port)); port_stats->rx_ttl_256_511_frms = VXGE_HAL_STATS_GET_PORTn_RX_TTL_256_511_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TTL_512_1023_FRMS_OFFSET(port)); port_stats->rx_ttl_512_1023_frms = VXGE_HAL_STATS_GET_PORTn_RX_TTL_512_1023_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TTL_1024_1518_FRMS_OFFSET(port)); port_stats->rx_ttl_1024_1518_frms = VXGE_HAL_STATS_GET_PORTn_RX_TTL_1024_1518_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TTL_1519_4095_FRMS_OFFSET(port)); port_stats->rx_ttl_1519_4095_frms = 
VXGE_HAL_STATS_GET_PORTn_RX_TTL_1519_4095_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TTL_4096_81915_FRMS_OFFSET(port)); port_stats->rx_ttl_4096_8191_frms = VXGE_HAL_STATS_GET_PORTn_RX_TTL_4096_8191_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TTL_8192_MAX_FRMS_OFFSET(port)); port_stats->rx_ttl_8192_max_frms = VXGE_HAL_STATS_GET_PORTn_RX_TTL_8192_MAX_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TTL_GT_MAX_FRMS_OFFSET(port)); port_stats->rx_ttl_gt_max_frms = VXGE_HAL_STATS_GET_PORTn_RX_TTL_GT_MAX_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_IP_OFFSET(port)); port_stats->rx_ip = VXGE_HAL_STATS_GET_PORTn_RX_IP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_ACC_IP_OFFSET(port)); port_stats->rx_accepted_ip = VXGE_HAL_STATS_GET_PORTn_RX_ACC_IP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_IP_OCTETS_OFFSET(port)); port_stats->rx_ip_octets = VXGE_HAL_STATS_GET_PORTn_RX_IP_OCTETS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_ERR_IP_OFFSET(port)); port_stats->rx_err_ip = VXGE_HAL_STATS_GET_PORTn_RX_ERR_IP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_ICMP_OFFSET(port)); port_stats->rx_icmp = VXGE_HAL_STATS_GET_PORTn_RX_ICMP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_TCP_OFFSET(port)); port_stats->rx_tcp = VXGE_HAL_STATS_GET_PORTn_RX_TCP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_UDP_OFFSET(port)); port_stats->rx_udp = VXGE_HAL_STATS_GET_PORTn_RX_UDP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_ERR_TCP_OFFSET(port)); port_stats->rx_err_tcp = VXGE_HAL_STATS_GET_PORTn_RX_ERR_TCP(val64); 
VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_PAUSE_CNT_OFFSET(port)); port_stats->rx_pause_count = VXGE_HAL_STATS_GET_PORTn_RX_PAUSE_CNT(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_PAUSE_CTRL_FRMS_OFFSET(port)); port_stats->rx_pause_ctrl_frms = VXGE_HAL_STATS_GET_PORTn_RX_PAUSE_CTRL_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_UNSUP_CTRL_FRMS_OFFSET(port)); port_stats->rx_unsup_ctrl_frms = VXGE_HAL_STATS_GET_PORTn_RX_UNSUP_CTRL_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_FCS_ERR_FRMS_OFFSET(port)); port_stats->rx_fcs_err_frms = VXGE_HAL_STATS_GET_PORTn_RX_FCS_ERR_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_IN_RNG_LEN_ERR_FRMS_OFFSET(port)); port_stats->rx_in_rng_len_err_frms = VXGE_HAL_STATS_GET_PORTn_RX_IN_RNG_LEN_ERR_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_OUT_RNG_LEN_ERR_FRMS_OFFSET(port)); port_stats->rx_out_rng_len_err_frms = VXGE_HAL_STATS_GET_PORTn_RX_OUT_RNG_LEN_ERR_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_DROP_FRMS_OFFSET(port)); port_stats->rx_drop_frms = VXGE_HAL_STATS_GET_PORTn_RX_DROP_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_DISCARDED_FRMS_OFFSET(port)); port_stats->rx_discarded_frms = VXGE_HAL_STATS_GET_PORTn_RX_DISCARDED_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_DROP_IP_OFFSET(port)); port_stats->rx_drop_ip = VXGE_HAL_STATS_GET_PORTn_RX_DROP_IP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_DRP_UDP_OFFSET(port)); port_stats->rx_drop_udp = VXGE_HAL_STATS_GET_PORTn_RX_DRP_UDP(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, 
VXGE_HAL_STATS_PORTn_RX_LACPDU_FRMS_OFFSET(port)); port_stats->rx_lacpdu_frms = (u32) VXGE_HAL_STATS_GET_PORTn_RX_LACPDU_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_MRKR_PDU_FRMS_OFFSET(port)); port_stats->rx_marker_pdu_frms = (u32) VXGE_HAL_STATS_GET_PORTn_RX_MRKR_PDU_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_MRKR_RESP_PDU_FRMS_OFFSET(port)); port_stats->rx_marker_resp_pdu_frms = (u32) VXGE_HAL_STATS_GET_PORTn_RX_MRKR_RESP_PDU_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_UNKNOWN_PDU_FRMS_OFFSET(port)); port_stats->rx_unknown_pdu_frms = (u32) VXGE_HAL_STATS_GET_PORTn_RX_UNKNOWN_PDU_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_ILLEGAL_PDU_FRMS_OFFSET(port)); port_stats->rx_illegal_pdu_frms = (u32) VXGE_HAL_STATS_GET_PORTn_RX_ILLEGAL_PDU_FRMS(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_FCS_DISCARD_OFFSET(port)); port_stats->rx_fcs_discard = (u32) VXGE_HAL_STATS_GET_PORTn_RX_FCS_DISCARD(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_LEN_DISCARD_OFFSET(port)); port_stats->rx_len_discard = (u32) VXGE_HAL_STATS_GET_PORTn_RX_LEN_DISCARD(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_SWITCH_DISCARD_OFFSET(port)); port_stats->rx_switch_discard = (u32) VXGE_HAL_STATS_GET_PORTn_RX_SWITCH_DISCARD(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_L2_MGMT_DISCARD_OFFSET(port)); port_stats->rx_l2_mgmt_discard = (u32) VXGE_HAL_STATS_GET_PORTn_RX_L2_MGMT_DISCARD(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_RPA_DISCARD_OFFSET(port)); port_stats->rx_rpa_discard = (u32) VXGE_HAL_STATS_GET_PORTn_RX_RPA_DISCARD(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, 
VXGE_HAL_STATS_PORTn_RX_TRASH_DISCARD_OFFSET(port)); port_stats->rx_trash_discard = (u32) VXGE_HAL_STATS_GET_PORTn_RX_TRASH_DISCARD(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_RTS_DISCARD_OFFSET(port)); port_stats->rx_rts_discard = (u32) VXGE_HAL_STATS_GET_PORTn_RX_RTS_DISCARD(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_RED_DISCARD_OFFSET(port)); port_stats->rx_red_discard = (u32) VXGE_HAL_STATS_GET_PORTn_RX_RED_DISCARD(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_BUFF_FULL_DISCARD_OFFSET(port)); port_stats->rx_buff_full_discard = (u32) VXGE_HAL_STATS_GET_PORTn_RX_BUFF_FULL_DISCARD(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_XGMII_DATA_ERR_CNT_OFFSET(port)); port_stats->rx_xgmii_data_err_cnt = (u32) VXGE_HAL_STATS_GET_PORTn_RX_XGMII_DATA_ERR_CNT(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_XGMII_CTRL_ERR_CNT_OFFSET(port)); port_stats->rx_xgmii_ctrl_err_cnt = (u32) VXGE_HAL_STATS_GET_PORTn_RX_XGMII_CTRL_ERR_CNT(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_XGMII_ERR_SYM_OFFSET(port)); port_stats->rx_xgmii_err_sym = (u32) VXGE_HAL_STATS_GET_PORTn_RX_XGMII_ERR_SYM(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_XGMII_CHAR1_MATCH_OFFSET(port)); port_stats->rx_xgmii_char1_match = (u32) VXGE_HAL_STATS_GET_PORTn_RX_XGMII_CHAR1_MATCH(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_XGMII_CHAR2_MATCH_OFFSET(port)); port_stats->rx_xgmii_char2_match = (u32) VXGE_HAL_STATS_GET_PORTn_RX_XGMII_CHAR2_MATCH(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_XGMII_COL1_MATCH_OFFSET(port)); port_stats->rx_xgmii_column1_match = (u32) VXGE_HAL_STATS_GET_PORTn_RX_XGMII_COL1_MATCH(val64); 
VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_XGMII_COL2_MATCH_OFFSET(port)); port_stats->rx_xgmii_column2_match = (u32) VXGE_HAL_STATS_GET_PORTn_RX_XGMII_COL2_MATCH(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_LOCAL_FAULT_OFFSET(port)); port_stats->rx_local_fault = (u32) VXGE_HAL_STATS_GET_PORTn_RX_LOCAL_FAULT(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_REMOTE_FAULT_OFFSET(port)); port_stats->rx_remote_fault = (u32) VXGE_HAL_STATS_GET_PORTn_RX_REMOTE_FAULT(val64); VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_PORT, VXGE_HAL_STATS_PORTn_RX_JETTISON_OFFSET(port)); port_stats->rx_jettison = (u32) VXGE_HAL_STATS_GET_PORTn_RX_JETTISON(val64); vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (VXGE_HAL_OK); } /* * vxge_hal_mrpcim_xmac_stats_get - Get the XMAC Statistics * @devh: HAL device handle. * @xmac_stats: Buffer to return XMAC Statistics. 
* * Get the XMAC Statistics * */ vxge_hal_status_e vxge_hal_mrpcim_xmac_stats_get(vxge_hal_device_h devh, vxge_hal_mrpcim_xmac_stats_t *xmac_stats) { u32 i; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_hal_status_e status = VXGE_HAL_OK; vxge_assert((devh != NULL) && (xmac_stats != NULL)); vxge_hal_trace_log_stats("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_stats( "hldev = 0x"VXGE_OS_STXFMT", mrpcim_stats = 0x"VXGE_OS_STXFMT, (ptr_t) devh, (ptr_t) xmac_stats); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } status = vxge_hal_mrpcim_xmac_aggr_stats_get(devh, 0, &xmac_stats->aggr_stats[0]); if (status != VXGE_HAL_OK) { vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } status = vxge_hal_mrpcim_xmac_aggr_stats_get(devh, 1, &xmac_stats->aggr_stats[1]); if (status != VXGE_HAL_OK) { vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } for (i = 0; i < VXGE_HAL_MAC_MAX_PORTS; i++) { status = vxge_hal_mrpcim_xmac_port_stats_get(devh, i, &xmac_stats->port_stats[i]); if (status != VXGE_HAL_OK) { vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } } vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } /* * _hal_mrpcim_stats_get - Get the mrpcim statistics using PIO * @hldev: hal device. * @mrpcim_stats: MRPCIM stats * * Returns the mrpcim stats. 
 *
 * See also: vxge_hal_mrpcim_stats_enable(), vxge_hal_mrpcim_stats_disable()
 */
vxge_hal_status_e
__hal_mrpcim_stats_get(
    __hal_device_t *hldev,
    vxge_hal_mrpcim_stats_hw_info_t *mrpcim_stats)
{
	u32 i;
	u64 val64;
	/* devh aliases hldev for the VXGE_HAL_MRPCIM_STATS_PIO_READ macro */
	vxge_hal_device_h devh = (vxge_hal_device_h) hldev;
	vxge_hal_status_e status = VXGE_HAL_OK;

	vxge_assert((hldev != NULL) && (mrpcim_stats != NULL));

	vxge_hal_trace_log_stats("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_stats(
	    "hldev = 0x"VXGE_OS_STXFMT", mrpcim_stats = 0x"VXGE_OS_STXFMT,
	    (ptr_t) hldev, (ptr_t) mrpcim_stats);

	/* PIC initiator read/write drop counters */
	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->mrpcim_debug_stats0);
	mrpcim_stats->pic_ini_rd_drop = (u32)
	    VXGE_HAL_MRPCIM_DEBUG_STATS0_GET_INI_RD_DROP(val64);
	mrpcim_stats->pic_ini_wr_drop = (u32)
	    VXGE_HAL_MRPCIM_DEBUG_STATS0_GET_INI_WR_DROP(val64);

	/* Per-vplane write/read credit-arbiter depletion counters */
	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
		val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
		    hldev->header.regh0,
		    &hldev->mrpcim_reg->mrpcim_debug_stats1_vplane[i]);
		mrpcim_stats->pic_wrcrdtarb_ph_crdt_depleted_vplane[i].
		    pic_wrcrdtarb_ph_crdt_depleted = (u32)
	VXGE_HAL_MRPCIM_DEBUG_STATS1_GET_VPLANE_WRCRDTARB_PH_CRDT_DEPLETED(
		    val64);

		val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
		    hldev->header.regh0,
		    &hldev->mrpcim_reg->mrpcim_debug_stats2_vplane[i]);
		mrpcim_stats->pic_wrcrdtarb_pd_crdt_depleted_vplane[i].
		    pic_wrcrdtarb_pd_crdt_depleted = (u32)
	VXGE_HAL_MRPCIM_DEBUG_STATS2_GET_VPLANE_WRCRDTARB_PD_CRDT_DEPLETED(
		    val64);

		val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
		    hldev->header.regh0,
		    &hldev->mrpcim_reg->mrpcim_debug_stats3_vplane[i]);
		mrpcim_stats->pic_rdcrdtarb_nph_crdt_depleted_vplane[i].
		    pic_rdcrdtarb_nph_crdt_depleted = (u32)
	VXGE_HAL_MRPCIM_DEBUG_STATS3_GET_VPLANE_RDCRDTARB_NPH_CRDT_DEPLETED(
		    val64);
	}

	/* Initiator vplane-number drop counters */
	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->mrpcim_debug_stats4);
	mrpcim_stats->pic_ini_rd_vpin_drop = (u32)
	    VXGE_HAL_MRPCIM_DEBUG_STATS4_GET_INI_RD_VPIN_DROP(val64);
	mrpcim_stats->pic_ini_wr_vpin_drop = (u32)
	    VXGE_HAL_MRPCIM_DEBUG_STATS4_GET_INI_WR_VPIN_DROP(val64);

	/* General-purpose event counters 0..5 */
	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->genstats_count01);
	mrpcim_stats->pic_genstats_count0 = (u32)
	    VXGE_HAL_GENSTATS_COUNT01_GET_GENSTATS_COUNT0(val64);
	mrpcim_stats->pic_genstats_count1 = (u32)
	    VXGE_HAL_GENSTATS_COUNT01_GET_GENSTATS_COUNT1(val64);

	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->genstats_count23);
	mrpcim_stats->pic_genstats_count2 = (u32)
	    VXGE_HAL_GENSTATS_COUNT23_GET_GENSTATS_COUNT2(val64);
	mrpcim_stats->pic_genstats_count3 = (u32)
	    VXGE_HAL_GENSTATS_COUNT23_GET_GENSTATS_COUNT3(val64);

	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->genstats_count4);
	mrpcim_stats->pic_genstats_count4 = (u32)
	    VXGE_HAL_GENSTATS_COUNT4_GET_GENSTATS_COUNT4(val64);

	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->genstats_count5);
	mrpcim_stats->pic_genstats_count5 = (u32)
	    VXGE_HAL_GENSTATS_COUNT5_GET_GENSTATS_COUNT5(val64);

	/* PCI reset-drop counters */
	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->debug_stats0);
	mrpcim_stats->pci_rstdrop_cpl = (u32)
	    VXGE_HAL_DEBUG_STATS0_GET_RSTDROP_CPL(val64);
	mrpcim_stats->pci_rstdrop_msg = (u32)
	    VXGE_HAL_DEBUG_STATS0_GET_RSTDROP_MSG(val64);

	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->debug_stats1);
	mrpcim_stats->pci_rstdrop_client0 = (u32)
	    VXGE_HAL_DEBUG_STATS1_GET_RSTDROP_CLIENT0(val64);
	mrpcim_stats->pci_rstdrop_client1 = (u32)
	    VXGE_HAL_DEBUG_STATS1_GET_RSTDROP_CLIENT1(val64);

	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->debug_stats2);
	mrpcim_stats->pci_rstdrop_client2 = (u32)
	    VXGE_HAL_DEBUG_STATS2_GET_RSTDROP_CLIENT2(val64);

	/* Per-vplane PCI credit-depletion counters */
	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {
		/*
		 * NOTE(review): unlike the mrpcim_debug_stats*_vplane[i]
		 * reads above, debug_stats3_vplane/debug_stats4_vplane are
		 * NOT indexed by i here, so every vplane slot receives the
		 * same (first) register value. Looks like a defect, but the
		 * register layout is not visible here — confirm against the
		 * mrpcim register structure before changing.
		 */
		val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
		    hldev->header.regh0,
		    &hldev->mrpcim_reg->debug_stats3_vplane);
		mrpcim_stats->pci_depl_h_vplane[i].pci_depl_cplh = (u16)
		    VXGE_HAL_DEBUG_STATS3_GET_VPLANE_DEPL_CPLH(val64);
		mrpcim_stats->pci_depl_h_vplane[i].pci_depl_nph = (u16)
		    VXGE_HAL_DEBUG_STATS3_GET_VPLANE_DEPL_NPH(val64);
		mrpcim_stats->pci_depl_h_vplane[i].pci_depl_ph = (u16)
		    VXGE_HAL_DEBUG_STATS3_GET_VPLANE_DEPL_PH(val64);

		val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
		    hldev->header.regh0,
		    &hldev->mrpcim_reg->debug_stats4_vplane);
		mrpcim_stats->pci_depl_d_vplane[i].pci_depl_cpld = (u16)
		    VXGE_HAL_DEBUG_STATS4_GET_VPLANE_DEPL_CPLD(val64);
		mrpcim_stats->pci_depl_d_vplane[i].pci_depl_npd = (u16)
		    VXGE_HAL_DEBUG_STATS4_GET_VPLANE_DEPL_NPD(val64);
		mrpcim_stats->pci_depl_d_vplane[i].pci_depl_pd = (u16)
		    VXGE_HAL_DEBUG_STATS4_GET_VPLANE_DEPL_PD(val64);
	}

	/* Aggregator and per-port XMAC statistics */
	status = vxge_hal_mrpcim_xmac_aggr_stats_get(hldev, 0,
	    &mrpcim_stats->xgmac_aggr[0]);
	if (status != VXGE_HAL_OK) {
		vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);
	}

	status = vxge_hal_mrpcim_xmac_aggr_stats_get(hldev, 1,
	    &mrpcim_stats->xgmac_aggr[1]);
	if (status != VXGE_HAL_OK) {
		vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__, status);
		return (status);
	}

	for (i = 0; i < VXGE_HAL_MAC_MAX_PORTS; i++) {
		status = vxge_hal_mrpcim_xmac_port_stats_get(hldev, i,
		    &mrpcim_stats->xgmac_port[i]);
		if (status != VXGE_HAL_OK) {
			vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d",
			    __FILE__, __func__, __LINE__, status);
			return (status);
		}
	}

	/* Global programmable-event counters */
	VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR,
	    VXGE_HAL_STATS_GLOBAL_PROG_EVENT_GNUM0_OFFSET);
	mrpcim_stats->xgmac_global_prog_event_gnum0 =
	    VXGE_HAL_STATS_GET_GLOBAL_PROG_EVENT_GNUM0(val64);

	VXGE_HAL_MRPCIM_STATS_PIO_READ(VXGE_HAL_STATS_LOC_AGGR,
	    VXGE_HAL_STATS_GLOBAL_PROG_EVENT_GNUM1_OFFSET);
	mrpcim_stats->xgmac_global_prog_event_gnum1 =
	    VXGE_HAL_STATS_GET_GLOBAL_PROG_EVENT_GNUM1(val64);

	/* ORP (offload receive path) event counters */
	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->orp_lro_events);
	mrpcim_stats->xgmac_orp_lro_events =
	    VXGE_HAL_ORP_LRO_EVENTS_GET_ORP_LRO_EVENTS(val64);

	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->orp_bs_events);
	mrpcim_stats->xgmac_orp_bs_events =
	    VXGE_HAL_ORP_BS_EVENTS_GET_ORP_BS_EVENTS(val64);

	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->orp_iwarp_events);
	mrpcim_stats->xgmac_orp_iwarp_events =
	    VXGE_HAL_ORP_IWARP_EVENTS_GET_ORP_IWARP_EVENTS(val64);

	/* TPA / per-port "any frames" debug counters */
	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->dbg_stats_tpa_tx_path);
	mrpcim_stats->xgmac_tx_permitted_frms = (u32)
	    VXGE_HAL_DBG_STATS_TPA_TX_PATH_GET_TX_PERMITTED_FRMS(val64);

	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->dbg_stat_tx_any_frms);
	mrpcim_stats->xgmac_port0_tx_any_frms = (u8)
	    VXGE_HAL_DBG_STAT_TX_ANY_FRMS_GET_PORT0_TX_ANY_FRMS(val64);
	mrpcim_stats->xgmac_port1_tx_any_frms = (u8)
	    VXGE_HAL_DBG_STAT_TX_ANY_FRMS_GET_PORT1_TX_ANY_FRMS(val64);
	mrpcim_stats->xgmac_port2_tx_any_frms = (u8)
	    VXGE_HAL_DBG_STAT_TX_ANY_FRMS_GET_PORT2_TX_ANY_FRMS(val64);

	val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
	    hldev->header.regh0, &hldev->mrpcim_reg->dbg_stat_rx_any_frms);
	mrpcim_stats->xgmac_port0_rx_any_frms = (u8)
	    VXGE_HAL_DBG_STAT_RX_ANY_FRMS_GET_PORT0_RX_ANY_FRMS(val64);
	mrpcim_stats->xgmac_port1_rx_any_frms = (u8)
	    VXGE_HAL_DBG_STAT_RX_ANY_FRMS_GET_PORT1_RX_ANY_FRMS(val64);
	mrpcim_stats->xgmac_port2_rx_any_frms = (u8)
	    VXGE_HAL_DBG_STAT_RX_ANY_FRMS_GET_PORT2_RX_ANY_FRMS(val64);

	vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d",
	    __FILE__, __func__, __LINE__, status);

	return (status);
}

/*
 * vxge_hal_mrpcim_stats_clear - Clear the statistics of the device
 * @devh: HAL Device handle.
 *
 * Clear the statistics of the given Device.
 *
 */
vxge_hal_status_e
vxge_hal_mrpcim_stats_clear(vxge_hal_device_h devh)
{
	u32 i;
	u64 stat;
	vxge_hal_status_e status;
	__hal_device_t *hldev = (__hal_device_t *) devh;

	vxge_assert(hldev != NULL);

	vxge_hal_trace_log_stats("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_stats("devh = 0x"VXGE_OS_STXFMT, (ptr_t) devh);

	/* Clearing hardware statistics is a privileged operation. */
	if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
		return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
	}

	/* Save the current snapshot before zeroing the live counters. */
	vxge_os_memcpy(&hldev->mrpcim->mrpcim_stats_sav,
	    hldev->mrpcim->mrpcim_stats,
	    sizeof(vxge_hal_mrpcim_stats_hw_info_t));

	vxge_os_memzero(hldev->mrpcim->mrpcim_stats,
	    sizeof(vxge_hal_mrpcim_stats_hw_info_t));

	/* Reset the software-maintained error statistics as well. */
	vxge_os_memzero(&hldev->stats.sw_dev_err_stats,
	    sizeof(vxge_hal_device_stats_sw_err_t));

	hldev->stats.sw_dev_info_stats.soft_reset_cnt = 0;

	/* Clear per-vpath statistics for every deployed virtual path. */
	for (i = 0; i < VXGE_HAL_MAX_VIRTUAL_PATHS; i++) {

		if (!(hldev->vpaths_deployed & mBIT(i)))
			continue;

		(void) vxge_hal_vpath_stats_clear(
		    VXGE_HAL_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
	}

	/* Finally tell the hardware to clear all of its stat blocks. */
	status = vxge_hal_mrpcim_stats_access(
	    devh,
	    VXGE_HAL_STATS_OP_CLEAR_ALL_STATS,
	    0,
	    0,
	    &stat);

	vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d",
	    __FILE__, __func__, __LINE__, status);

	return (status);
}

/*
 * vxge_hal_mrpcim_udp_rth_enable - Enable UDP/RTH.
 * @devh: HAL device handle.
 *
 * enable udp rth
 *
 */
vxge_hal_status_e
vxge_hal_mrpcim_udp_rth_enable(
    vxge_hal_device_h devh)
{
	vxge_hal_status_e status = VXGE_HAL_OK;
	__hal_device_t *hldev = (__hal_device_t *) devh;

	vxge_assert(devh != NULL);

	vxge_hal_trace_log_stats("==> %s:%s:%d", __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_stats("devh = 0x"VXGE_OS_STXFMT, (ptr_t) devh);

	/* UDP/RTH configuration is a privileged (mrpcim) operation. */
	if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) {
		vxge_hal_trace_log_stats("<== %s:%s:%d Result: %d",
		    __FILE__, __func__, __LINE__,
		    VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
		return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION);
	}

	/* Delegate to the vpath helper on the first owned vpath. */
	status = __hal_vpath_udp_rth_set(hldev, hldev->first_vp_id, TRUE);

	vxge_hal_trace_log_stats("<== %s:%s:%d Result = %d",
	    __FILE__, __func__, __LINE__, status);

	return (status);
}

/*
 * __hal_mrpcim_mac_configure - Initialize mac
 * @hldev: hal device.
 *
 * Initializes mac
 *
 */
vxge_hal_status_e
__hal_mrpcim_mac_configure(__hal_device_t *hldev)
{
	u64 val64;
	u32 i, port_id;
	vxge_hal_status_e status = VXGE_HAL_OK;
	vxge_hal_mac_config_t *mac_config =
	    &hldev->header.config.mrpcim_config.mac_config;

	vxge_assert(hldev != NULL);

	vxge_hal_trace_log_mrpcim("==> %s:%s:%d",
	    __FILE__, __func__, __LINE__);

	vxge_hal_trace_log_mrpcim("hldev = 0x"VXGE_OS_STXFMT, (ptr_t) hldev);

	/* Configure each wire port from its saved/default configuration. */
	for (i = 0; i < VXGE_HAL_MAC_MAX_WIRE_PORTS; i++) {

		port_id = mac_config->wire_port_config[i].port_id;

		/* Resolve "default" tmac enable from current hardware state */
		if (mac_config->wire_port_config[i].tmac_en ==
		    VXGE_HAL_WIRE_PORT_TMAC_DEFAULT) {
			val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
			    hldev->header.regh0,
			    &hldev->mrpcim_reg->txmac_cfg0_port[port_id]);
			if (val64 & VXGE_HAL_TXMAC_CFG0_PORT_TMAC_EN) {
				mac_config->wire_port_config[i].tmac_en =
				    VXGE_HAL_WIRE_PORT_TMAC_ENABLE;
			} else {
				mac_config->wire_port_config[i].tmac_en =
				    VXGE_HAL_WIRE_PORT_TMAC_DISABLE;
			}
		}

		/* Resolve "default" rmac enable from current hardware state */
		if (mac_config->wire_port_config[i].rmac_en ==
		    VXGE_HAL_WIRE_PORT_RMAC_DEFAULT) {
			val64 = vxge_os_pio_mem_read64(hldev->header.pdev,
			    hldev->header.regh0,
			    &hldev->mrpcim_reg->rxmac_cfg0_port[port_id]);
			if (val64 &
VXGE_HAL_RXMAC_CFG0_PORT_RMAC_EN) { mac_config->wire_port_config[i].rmac_en = VXGE_HAL_WIRE_PORT_RMAC_ENABLE; } else { mac_config->wire_port_config[i].rmac_en = VXGE_HAL_WIRE_PORT_RMAC_DISABLE; } } if ((!(mac_config->wire_port_config[i].rmac_en)) && (!(mac_config->wire_port_config[i].tmac_en))) val64 = 0; else val64 = VXGE_HAL_XGMAC_MAIN_CFG_PORT_PORT_EN; vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->xgmac_main_cfg_port[port_id]); if (!val64) continue; val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_cfg0_port[port_id]); if (mac_config->wire_port_config[i].rmac_en) val64 |= VXGE_HAL_RXMAC_CFG0_PORT_RMAC_EN; else val64 &= ~VXGE_HAL_RXMAC_CFG0_PORT_RMAC_EN; if (mac_config->wire_port_config[i].rmac_strip_fcs != VXGE_HAL_WIRE_PORT_RMAC_STRIP_FCS_DEFAULT) { if (mac_config->wire_port_config[i].rmac_strip_fcs) val64 |= VXGE_HAL_RXMAC_CFG0_PORT_STRIP_FCS; else val64 &= ~VXGE_HAL_RXMAC_CFG0_PORT_STRIP_FCS; } if (mac_config->wire_port_config[i].rmac_discard_pfrm != VXGE_HAL_WIRE_PORT_RMAC_DISCARD_PFRM_DEFAULT) { if (mac_config->wire_port_config[i].rmac_discard_pfrm) val64 |= VXGE_HAL_RXMAC_CFG0_PORT_DISCARD_PFRM; else val64 &= ~VXGE_HAL_RXMAC_CFG0_PORT_DISCARD_PFRM; } if (mac_config->wire_port_config[i].mtu != VXGE_HAL_WIRE_PORT_DEF_INITIAL_MTU) { val64 &= ~VXGE_HAL_RXMAC_CFG0_PORT_MAX_PYLD_LEN(0x3fff); val64 |= VXGE_HAL_RXMAC_CFG0_PORT_MAX_PYLD_LEN( mac_config->wire_port_config[i].mtu); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_cfg0_port[port_id]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_cfg2_port[port_id]); if (mac_config->wire_port_config[i].rmac_prom_en != VXGE_HAL_WIRE_PORT_RMAC_PROM_EN_DEFAULT) { if (mac_config->wire_port_config[i].rmac_prom_en) val64 |= VXGE_HAL_RXMAC_CFG2_PORT_PROM_EN; else val64 &= ~VXGE_HAL_RXMAC_CFG2_PORT_PROM_EN; } 
vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_cfg2_port[port_id]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port_id]); if (mac_config->wire_port_config[i].rmac_pause_gen_en != VXGE_HAL_WIRE_PORT_RMAC_PAUSE_GEN_EN_DEFAULT) { if (mac_config->wire_port_config[i].rmac_pause_gen_en) val64 |= VXGE_HAL_RXMAC_PAUSE_CFG_PORT_GEN_EN; else val64 &= ~VXGE_HAL_RXMAC_PAUSE_CFG_PORT_GEN_EN; } if (mac_config->wire_port_config[i].rmac_pause_rcv_en != VXGE_HAL_WIRE_PORT_RMAC_PAUSE_RCV_EN_DEFAULT) { if (mac_config->wire_port_config[i].rmac_pause_rcv_en) val64 |= VXGE_HAL_RXMAC_PAUSE_CFG_PORT_RCV_EN; else val64 &= ~VXGE_HAL_RXMAC_PAUSE_CFG_PORT_RCV_EN; } if (mac_config->wire_port_config[i].rmac_pause_time != VXGE_HAL_WIRE_PORT_DEF_RMAC_HIGH_PTIME) { val64 &= ~VXGE_HAL_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(0xffff); val64 |= VXGE_HAL_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME( mac_config->wire_port_config[i].rmac_pause_time); } if (mac_config->wire_port_config[i].rmac_pause_time != VXGE_HAL_WIRE_PORT_RMAC_PAUSE_LIMITER_DEFAULT) { if (mac_config->wire_port_config[i].limiter_en) val64 |= VXGE_HAL_RXMAC_PAUSE_CFG_PORT_LIMITER_EN; else val64 &= ~VXGE_HAL_RXMAC_PAUSE_CFG_PORT_LIMITER_EN; } if (mac_config->wire_port_config[i].max_limit != VXGE_HAL_WIRE_PORT_DEF_RMAC_MAX_LIMIT) { val64 &= ~VXGE_HAL_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT(0xff); val64 |= VXGE_HAL_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT( mac_config->wire_port_config[i].max_limit); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port_id]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_link_util_port[port_id]); if (mac_config->wire_port_config[i].rmac_util_period != VXGE_HAL_WIRE_PORT_DEF_TMAC_UTIL_PERIOD) { val64 &= ~VXGE_HAL_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG(0xf); val64 |= VXGE_HAL_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG( 
mac_config->wire_port_config[i].rmac_util_period); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_link_util_port[port_id]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->xgmac_debounce_port[port_id]); if (mac_config->wire_port_config[i].link_stability_period != VXGE_HAL_WIRE_PORT_DEF_LINK_STABILITY_PERIOD) { val64 &= ~(VXGE_HAL_XGMAC_DEBOUNCE_PORT_PERIOD_LINK_UP(0xf) | VXGE_HAL_XGMAC_DEBOUNCE_PORT_PERIOD_LINK_DOWN(0xf)); val64 |= VXGE_HAL_XGMAC_DEBOUNCE_PORT_PERIOD_LINK_UP( mac_config->wire_port_config[i].link_stability_period) | VXGE_HAL_XGMAC_DEBOUNCE_PORT_PERIOD_LINK_DOWN( mac_config->wire_port_config[i].link_stability_period); } if (mac_config->wire_port_config[i].port_stability_period != VXGE_HAL_WIRE_PORT_DEF_PORT_STABILITY_PERIOD) { val64 &= ~(VXGE_HAL_XGMAC_DEBOUNCE_PORT_PERIOD_PORT_UP(0xf) | VXGE_HAL_XGMAC_DEBOUNCE_PORT_PERIOD_PORT_DOWN(0xf)); val64 |= VXGE_HAL_XGMAC_DEBOUNCE_PORT_PERIOD_PORT_UP( mac_config->wire_port_config[i].port_stability_period) | VXGE_HAL_XGMAC_DEBOUNCE_PORT_PERIOD_PORT_DOWN( mac_config->wire_port_config[i].port_stability_period); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->xgmac_debounce_port[port_id]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->txmac_cfg0_port[port_id]); if (mac_config->wire_port_config[i].tmac_en) val64 |= VXGE_HAL_TXMAC_CFG0_PORT_TMAC_EN; else val64 &= ~VXGE_HAL_TXMAC_CFG0_PORT_TMAC_EN; if (mac_config->wire_port_config[i].tmac_pad != VXGE_HAL_WIRE_PORT_TMAC_PAD_DEFAULT) { if (mac_config->wire_port_config[i].tmac_pad) val64 |= VXGE_HAL_TXMAC_CFG0_PORT_APPEND_PAD; else val64 &= ~VXGE_HAL_TXMAC_CFG0_PORT_APPEND_PAD; } if (mac_config->wire_port_config[i].tmac_pad_byte != VXGE_HAL_WIRE_PORT_TMAC_PAD_DEFAULT) { val64 &= ~VXGE_HAL_TXMAC_CFG0_PORT_PAD_BYTE(0xff); val64 |= VXGE_HAL_TXMAC_CFG0_PORT_PAD_BYTE( 
mac_config->wire_port_config[i].tmac_pad_byte); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->txmac_cfg0_port[port_id]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->txmac_link_util_port); if (mac_config->wire_port_config[i].tmac_util_period != VXGE_HAL_WIRE_PORT_DEF_TMAC_UTIL_PERIOD) { val64 &= ~VXGE_HAL_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG(0xf); val64 |= VXGE_HAL_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG( mac_config->wire_port_config[i].tmac_util_period); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->txmac_link_util_port[port_id]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->ratemgmt_cfg_port); if (mac_config->wire_port_config[i].autoneg_mode != VXGE_HAL_WIRE_PORT_AUTONEG_MODE_DEFAULT) { val64 &= ~VXGE_HAL_RATEMGMT_CFG_PORT_MODE(0x3); val64 |= VXGE_HAL_RATEMGMT_CFG_PORT_MODE( mac_config->wire_port_config[i].autoneg_mode); } if (mac_config->wire_port_config[i].autoneg_rate != VXGE_HAL_WIRE_PORT_AUTONEG_RATE_DEFAULT) { if (mac_config->wire_port_config[i].autoneg_rate) val64 |= VXGE_HAL_RATEMGMT_CFG_PORT_RATE; else val64 &= ~VXGE_HAL_RATEMGMT_CFG_PORT_RATE; } if (mac_config->wire_port_config[i].fixed_use_fsm != VXGE_HAL_WIRE_PORT_FIXED_USE_FSM_DEFAULT) { if (mac_config->wire_port_config[i].fixed_use_fsm) val64 |= VXGE_HAL_RATEMGMT_CFG_PORT_FIXED_USE_FSM; else val64 &= ~VXGE_HAL_RATEMGMT_CFG_PORT_FIXED_USE_FSM; } if (mac_config->wire_port_config[i].antp_use_fsm != VXGE_HAL_WIRE_PORT_ANTP_USE_FSM_DEFAULT) { if (mac_config->wire_port_config[i].antp_use_fsm) val64 |= VXGE_HAL_RATEMGMT_CFG_PORT_ANTP_USE_FSM; else val64 &= ~VXGE_HAL_RATEMGMT_CFG_PORT_ANTP_USE_FSM; } if (mac_config->wire_port_config[i].anbe_use_fsm != VXGE_HAL_WIRE_PORT_ANBE_USE_FSM_DEFAULT) { if (mac_config->wire_port_config[i].anbe_use_fsm) val64 |= VXGE_HAL_RATEMGMT_CFG_PORT_ANBE_USE_FSM; else val64 &= 
~VXGE_HAL_RATEMGMT_CFG_PORT_ANBE_USE_FSM; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->ratemgmt_cfg_port[port_id]); } if (mac_config->switch_port_config.tmac_en == VXGE_HAL_SWITCH_PORT_TMAC_DEFAULT) { val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->txmac_cfg0_port[ VXGE_HAL_MAC_SWITCH_PORT]); if (val64 & VXGE_HAL_TXMAC_CFG0_PORT_TMAC_EN) { mac_config->switch_port_config.tmac_en = VXGE_HAL_SWITCH_PORT_TMAC_ENABLE; } else { mac_config->switch_port_config.tmac_en = VXGE_HAL_SWITCH_PORT_TMAC_DISABLE; } } if (mac_config->switch_port_config.rmac_en == VXGE_HAL_SWITCH_PORT_RMAC_DEFAULT) { val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_cfg0_port[ VXGE_HAL_MAC_SWITCH_PORT]); if (val64 & VXGE_HAL_RXMAC_CFG0_PORT_RMAC_EN) { mac_config->switch_port_config.rmac_en = VXGE_HAL_SWITCH_PORT_RMAC_ENABLE; } else { mac_config->switch_port_config.rmac_en = VXGE_HAL_SWITCH_PORT_RMAC_DISABLE; } } if (mac_config->switch_port_config.rmac_en || mac_config->switch_port_config.tmac_en) { val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_cfg0_port[ VXGE_HAL_MAC_SWITCH_PORT]); if (mac_config->switch_port_config.rmac_en) val64 |= VXGE_HAL_RXMAC_CFG0_PORT_RMAC_EN; else val64 &= ~VXGE_HAL_RXMAC_CFG0_PORT_RMAC_EN; if (mac_config->switch_port_config.rmac_strip_fcs != VXGE_HAL_SWITCH_PORT_RMAC_STRIP_FCS_DEFAULT) { if (mac_config->switch_port_config.rmac_strip_fcs) val64 |= VXGE_HAL_RXMAC_CFG0_PORT_STRIP_FCS; else val64 &= ~VXGE_HAL_RXMAC_CFG0_PORT_STRIP_FCS; } if (mac_config->switch_port_config.rmac_discard_pfrm != VXGE_HAL_SWITCH_PORT_RMAC_DISCARD_PFRM_DEFAULT) { if (mac_config->switch_port_config.rmac_discard_pfrm) val64 |= VXGE_HAL_RXMAC_CFG0_PORT_DISCARD_PFRM; else val64 &= ~VXGE_HAL_RXMAC_CFG0_PORT_DISCARD_PFRM; } if (mac_config->switch_port_config.mtu != VXGE_HAL_SWITCH_PORT_DEF_INITIAL_MTU) { val64 &= 
~VXGE_HAL_RXMAC_CFG0_PORT_MAX_PYLD_LEN(0x3fff); val64 |= VXGE_HAL_RXMAC_CFG0_PORT_MAX_PYLD_LEN( mac_config->switch_port_config.mtu); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_cfg0_port[ VXGE_HAL_MAC_SWITCH_PORT]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_cfg2_port[ VXGE_HAL_MAC_SWITCH_PORT]); if (mac_config->switch_port_config.rmac_prom_en != VXGE_HAL_SWITCH_PORT_RMAC_PROM_EN_DEFAULT) { if (mac_config->switch_port_config.rmac_prom_en) val64 |= VXGE_HAL_RXMAC_CFG2_PORT_PROM_EN; else val64 &= ~VXGE_HAL_RXMAC_CFG2_PORT_PROM_EN; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_cfg2_port[ VXGE_HAL_MAC_SWITCH_PORT]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_pause_cfg_port[ VXGE_HAL_MAC_SWITCH_PORT]); if (mac_config->switch_port_config.rmac_pause_gen_en != VXGE_HAL_SWITCH_PORT_RMAC_PAUSE_GEN_EN_DEFAULT) { if (mac_config->switch_port_config.rmac_pause_gen_en) val64 |= VXGE_HAL_RXMAC_PAUSE_CFG_PORT_GEN_EN; else val64 &= ~VXGE_HAL_RXMAC_PAUSE_CFG_PORT_GEN_EN; } if (mac_config->switch_port_config.rmac_pause_rcv_en != VXGE_HAL_SWITCH_PORT_RMAC_PAUSE_RCV_EN_DEFAULT) { if (mac_config->switch_port_config.rmac_pause_rcv_en) val64 |= VXGE_HAL_RXMAC_PAUSE_CFG_PORT_RCV_EN; else val64 &= ~VXGE_HAL_RXMAC_PAUSE_CFG_PORT_RCV_EN; } if (mac_config->switch_port_config.rmac_pause_time != VXGE_HAL_SWITCH_PORT_DEF_RMAC_HIGH_PTIME) { val64 &= ~VXGE_HAL_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME(0xffff); val64 |= VXGE_HAL_RXMAC_PAUSE_CFG_PORT_HIGH_PTIME( mac_config->switch_port_config.rmac_pause_time); } if (mac_config->switch_port_config.rmac_pause_time != VXGE_HAL_SWITCH_PORT_RMAC_PAUSE_LIMITER_DEFAULT) { if (mac_config->switch_port_config.limiter_en) val64 |= VXGE_HAL_RXMAC_PAUSE_CFG_PORT_LIMITER_EN; else val64 &= ~VXGE_HAL_RXMAC_PAUSE_CFG_PORT_LIMITER_EN; } if 
(mac_config->switch_port_config.max_limit != VXGE_HAL_SWITCH_PORT_DEF_RMAC_MAX_LIMIT) { val64 &= ~VXGE_HAL_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT(0xff); val64 |= VXGE_HAL_RXMAC_PAUSE_CFG_PORT_MAX_LIMIT( mac_config->switch_port_config.max_limit); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[ VXGE_HAL_MAC_SWITCH_PORT]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_link_util_port[ VXGE_HAL_MAC_SWITCH_PORT]); if (mac_config->switch_port_config.rmac_util_period != VXGE_HAL_SWITCH_PORT_DEF_TMAC_UTIL_PERIOD) { val64 &= ~VXGE_HAL_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG(0xf); val64 |= VXGE_HAL_RXMAC_LINK_UTIL_PORT_RMAC_UTIL_CFG( mac_config->switch_port_config.rmac_util_period); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_link_util_port[ VXGE_HAL_MAC_SWITCH_PORT]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->txmac_cfg0_port[ VXGE_HAL_MAC_SWITCH_PORT]); if (mac_config->switch_port_config.tmac_en) val64 |= VXGE_HAL_TXMAC_CFG0_PORT_TMAC_EN; else val64 &= ~VXGE_HAL_TXMAC_CFG0_PORT_TMAC_EN; if (mac_config->switch_port_config.tmac_pad != VXGE_HAL_SWITCH_PORT_TMAC_PAD_DEFAULT) { if (mac_config->switch_port_config.tmac_pad) val64 |= VXGE_HAL_TXMAC_CFG0_PORT_APPEND_PAD; else val64 &= ~VXGE_HAL_TXMAC_CFG0_PORT_APPEND_PAD; } if (mac_config->switch_port_config.tmac_pad_byte != VXGE_HAL_SWITCH_PORT_TMAC_PAD_DEFAULT) { val64 &= ~VXGE_HAL_TXMAC_CFG0_PORT_PAD_BYTE(0xff); val64 |= VXGE_HAL_TXMAC_CFG0_PORT_PAD_BYTE( mac_config->switch_port_config.tmac_pad_byte); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->txmac_cfg0_port[ VXGE_HAL_MAC_SWITCH_PORT]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->txmac_link_util_port); if (mac_config->switch_port_config.tmac_util_period != 
VXGE_HAL_SWITCH_PORT_DEF_TMAC_UTIL_PERIOD) { val64 &= ~VXGE_HAL_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG(0xf); val64 |= VXGE_HAL_TXMAC_LINK_UTIL_PORT_TMAC_UTIL_CFG( mac_config->switch_port_config.tmac_util_period); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->txmac_link_util_port[ VXGE_HAL_MAC_SWITCH_PORT]); } val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->txmac_gen_cfg1); if (mac_config->tmac_perma_stop_en != VXGE_HAL_MAC_TMAC_PERMA_STOP_DEFAULT) { if (mac_config->tmac_perma_stop_en) val64 |= VXGE_HAL_TXMAC_GEN_CFG1_TMAC_PERMA_STOP_EN; else val64 &= ~VXGE_HAL_TXMAC_GEN_CFG1_TMAC_PERMA_STOP_EN; } if (mac_config->tmac_tx_switch_dis != VXGE_HAL_MAC_TMAC_TX_SWITCH_DEFAULT) { if (mac_config->tmac_tx_switch_dis) val64 |= VXGE_HAL_TXMAC_GEN_CFG1_TX_SWITCH_DISABLE; else val64 &= ~VXGE_HAL_TXMAC_GEN_CFG1_TX_SWITCH_DISABLE; } if (mac_config->tmac_lossy_switch_en != VXGE_HAL_MAC_TMAC_LOSSY_SWITCH_DEFAULT) { if (mac_config->tmac_lossy_switch_en) val64 |= VXGE_HAL_TXMAC_GEN_CFG1_LOSSY_SWITCH; else val64 &= ~VXGE_HAL_TXMAC_GEN_CFG1_LOSSY_SWITCH; } if (mac_config->tmac_lossy_switch_en != VXGE_HAL_MAC_TMAC_LOSSY_WIRE_DEFAULT) { if (mac_config->tmac_lossy_wire_en) val64 |= VXGE_HAL_TXMAC_GEN_CFG1_LOSSY_WIRE; else val64 &= ~VXGE_HAL_TXMAC_GEN_CFG1_LOSSY_WIRE; } if (mac_config->tmac_bcast_to_wire_dis != VXGE_HAL_MAC_TMAC_BCAST_TO_WIRE_DEFAULT) { if (mac_config->tmac_bcast_to_wire_dis) val64 |= VXGE_HAL_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_WIRE; else val64 &= ~VXGE_HAL_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_WIRE; } if (mac_config->tmac_bcast_to_wire_dis != VXGE_HAL_MAC_TMAC_BCAST_TO_SWITCH_DEFAULT) { if (mac_config->tmac_bcast_to_switch_dis) val64 |= VXGE_HAL_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_SWITCH; else val64 &= ~VXGE_HAL_TXMAC_GEN_CFG1_BLOCK_BCAST_TO_SWITCH; } if (mac_config->tmac_host_append_fcs_en != VXGE_HAL_MAC_TMAC_HOST_APPEND_FCS_DEFAULT) { if (mac_config->tmac_host_append_fcs_en) val64 |= 
VXGE_HAL_TXMAC_GEN_CFG1_HOST_APPEND_FCS; else val64 &= ~VXGE_HAL_TXMAC_GEN_CFG1_HOST_APPEND_FCS; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->txmac_gen_cfg1); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_rx_pa_cfg0); if (mac_config->rpa_ignore_frame_err != VXGE_HAL_MAC_RPA_IGNORE_FRAME_ERR_DEFAULT) { if (mac_config->rpa_ignore_frame_err) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG0_IGNORE_FRAME_ERR; else val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG0_IGNORE_FRAME_ERR; } if (mac_config->rpa_support_snap_ab_n != VXGE_HAL_MAC_RPA_SUPPORT_SNAP_AB_N_DEFAULT) { if (mac_config->rpa_support_snap_ab_n) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG0_SUPPORT_SNAP_AB_N; else val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG0_SUPPORT_SNAP_AB_N; } if (mac_config->rpa_search_for_hao != VXGE_HAL_MAC_RPA_SEARCH_FOR_HAO_DEFAULT) { if (mac_config->rpa_search_for_hao) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG0_SEARCH_FOR_HAO; else val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG0_SEARCH_FOR_HAO; } if (mac_config->rpa_support_ipv6_mobile_hdrs != VXGE_HAL_MAC_RPA_SUPPORT_IPV6_MOBILE_HDRS_DEFAULT) { if (mac_config->rpa_support_ipv6_mobile_hdrs) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG0_SUPPORT_MOBILE_IPV6_HDRS; else val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG0_SUPPORT_MOBILE_IPV6_HDRS; } if (mac_config->rpa_ipv6_stop_searching != VXGE_HAL_MAC_RPA_IPV6_STOP_SEARCHING_DEFAULT) { if (mac_config->rpa_ipv6_stop_searching) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG0_IPV6_STOP_SEARCHING; else val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG0_IPV6_STOP_SEARCHING; } if (mac_config->rpa_no_ps_if_unknown != VXGE_HAL_MAC_RPA_NO_PS_IF_UNKNOWN_DEFAULT) { if (mac_config->rpa_no_ps_if_unknown) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG0_NO_PS_IF_UNKNOWN; else val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG0_NO_PS_IF_UNKNOWN; } if (mac_config->rpa_search_for_etype != VXGE_HAL_MAC_RPA_SEARCH_FOR_ETYPE_DEFAULT) { if (mac_config->rpa_search_for_etype) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG0_SEARCH_FOR_ETYPE; else val64 &= 
~VXGE_HAL_RXMAC_RX_PA_CFG0_SEARCH_FOR_ETYPE; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_rx_pa_cfg0); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->fau_pa_cfg); if (mac_config->rpa_repl_l4_comp_csum != VXGE_HAL_MAC_RPA_REPL_l4_COMP_CSUM_DEFAULT) { if (mac_config->rpa_repl_l4_comp_csum) val64 |= VXGE_HAL_FAU_PA_CFG_REPL_L4_COMP_CSUM; else val64 &= ~VXGE_HAL_FAU_PA_CFG_REPL_L4_COMP_CSUM; } if (mac_config->rpa_repl_l3_incl_cf != VXGE_HAL_MAC_RPA_REPL_L3_INCL_CF_DEFAULT) { if (mac_config->rpa_repl_l3_incl_cf) val64 |= VXGE_HAL_FAU_PA_CFG_REPL_L3_INCL_CF; else val64 &= ~VXGE_HAL_FAU_PA_CFG_REPL_L3_INCL_CF; } if (mac_config->rpa_repl_l3_comp_csum != VXGE_HAL_MAC_RPA_REPL_l3_COMP_CSUM_DEFAULT) { if (mac_config->rpa_repl_l3_comp_csum) val64 |= VXGE_HAL_FAU_PA_CFG_REPL_L3_COMP_CSUM; else val64 &= ~VXGE_HAL_FAU_PA_CFG_REPL_L3_COMP_CSUM; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->fau_pa_cfg); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_rx_pa_cfg1); if (mac_config->rpa_repl_ipv4_tcp_incl_ph != VXGE_HAL_MAC_RPA_REPL_IPV4_TCP_INCL_PH_DEFAULT) { if (mac_config->rpa_repl_ipv4_tcp_incl_ph) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_IPV4_TCP_INCL_PH; else val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_IPV4_TCP_INCL_PH; } if (mac_config->rpa_repl_ipv6_tcp_incl_ph != VXGE_HAL_MAC_RPA_REPL_IPV6_TCP_INCL_PH_DEFAULT) { if (mac_config->rpa_repl_ipv6_tcp_incl_ph) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_IPV6_TCP_INCL_PH; else val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_IPV6_TCP_INCL_PH; } if (mac_config->rpa_repl_ipv4_udp_incl_ph != VXGE_HAL_MAC_RPA_REPL_IPV4_UDP_INCL_PH_DEFAULT) { if (mac_config->rpa_repl_ipv4_udp_incl_ph) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_IPV4_UDP_INCL_PH; else val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_IPV4_UDP_INCL_PH; } if (mac_config->rpa_repl_ipv6_udp_incl_ph != 
VXGE_HAL_MAC_RPA_REPL_IPV6_UDP_INCL_PH_DEFAULT) { if (mac_config->rpa_repl_ipv6_udp_incl_ph) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_IPV6_UDP_INCL_PH; else val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_IPV6_UDP_INCL_PH; } if (mac_config->rpa_repl_l4_incl_cf != VXGE_HAL_MAC_RPA_REPL_L4_INCL_CF_DEFAULT) { if (mac_config->rpa_repl_l4_incl_cf) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_L4_INCL_CF; else val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_L4_INCL_CF; } if (mac_config->rpa_repl_strip_vlan_tag != VXGE_HAL_MAC_RPA_REPL_STRIP_VLAN_TAG_DEFAULT) { if (mac_config->rpa_repl_strip_vlan_tag) val64 |= VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG; else val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_rx_pa_cfg1); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->xmac_gen_cfg); if (mac_config->network_stability_period != VXGE_HAL_MAC_DEF_NETWORK_STABILITY_PERIOD) { val64 &= ~(VXGE_HAL_XMAC_GEN_CFG_PERIOD_NTWK_DOWN(0xf) | VXGE_HAL_XMAC_GEN_CFG_PERIOD_NTWK_UP(0xf)); val64 |= VXGE_HAL_XMAC_GEN_CFG_PERIOD_NTWK_DOWN( mac_config->network_stability_period) | VXGE_HAL_XMAC_GEN_CFG_PERIOD_NTWK_UP( mac_config->network_stability_period); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->xmac_gen_cfg); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->tpa_global_cfg); if (mac_config->tpa_support_snap_ab_n != VXGE_HAL_MAC_TPA_SUPPORT_SNAP_AB_N_DEFAULT) { if (mac_config->tpa_support_snap_ab_n) val64 |= VXGE_HAL_TPA_GLOBAL_CFG_SUPPORT_SNAP_AB_N; else val64 &= ~VXGE_HAL_TPA_GLOBAL_CFG_SUPPORT_SNAP_AB_N; } if (mac_config->tpa_ecc_enable_n != VXGE_HAL_MAC_TPA_ECC_ENABLE_N_DEFAULT) { if (mac_config->tpa_ecc_enable_n) val64 |= VXGE_HAL_TPA_GLOBAL_CFG_ECC_ENABLE_N; else val64 &= ~VXGE_HAL_TPA_GLOBAL_CFG_ECC_ENABLE_N; } vxge_os_pio_mem_write64(hldev->header.pdev, 
hldev->header.regh0, val64, &hldev->mrpcim_reg->tpa_global_cfg); vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } /* * __hal_mrpcim_lag_configure - Initialize LAG registers * @hldev: hal device. * * Initializes LAG registers * */ vxge_hal_status_e __hal_mrpcim_lag_configure(__hal_device_t *hldev) { u64 val64; u64 mac_addr; u32 i, j; vxge_hal_status_e status = VXGE_HAL_OK; vxge_hal_lag_config_t *lag_config = &hldev->header.config.mrpcim_config.lag_config; vxge_assert(hldev != NULL); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim("hldev = 0x"VXGE_OS_STXFMT, (ptr_t) hldev); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_cfg); if (lag_config->lag_en == VXGE_HAL_LAG_LAG_EN_DEFAULT) { if (val64 & VXGE_HAL_LAG_CFG_EN) lag_config->lag_en = VXGE_HAL_LAG_LAG_EN_ENABLE; else lag_config->lag_en = VXGE_HAL_LAG_LAG_EN_DISABLE; } if (lag_config->lag_en == VXGE_HAL_LAG_LAG_EN_DISABLE) { if (val64 & VXGE_HAL_LAG_CFG_EN) { val64 &= ~VXGE_HAL_LAG_CFG_EN; vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_cfg); } vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = 0", __FILE__, __func__, __LINE__); return (VXGE_HAL_OK); } if (lag_config->lag_mode != VXGE_HAL_LAG_LAG_MODE_DEFAULT) { val64 &= ~VXGE_HAL_LAG_CFG_MODE(0x3); val64 |= VXGE_HAL_LAG_CFG_MODE(lag_config->lag_mode); } else { lag_config->lag_mode = (u32) VXGE_HAL_LAG_CFG_GET_MODE(val64); } if (lag_config->la_mode_config.tx_discard != VXGE_HAL_LAG_TX_DISCARD_DEFAULT) { if (lag_config->la_mode_config.tx_discard == VXGE_HAL_LAG_TX_DISCARD_ENABLE) val64 |= VXGE_HAL_LAG_CFG_TX_DISCARD_BEHAV; else val64 &= ~VXGE_HAL_LAG_CFG_TX_DISCARD_BEHAV; } if (lag_config->la_mode_config.rx_discard != VXGE_HAL_LAG_RX_DISCARD_DEFAULT) { if (lag_config->la_mode_config.rx_discard == VXGE_HAL_LAG_RX_DISCARD_ENABLE) val64 |= 
VXGE_HAL_LAG_CFG_RX_DISCARD_BEHAV; else val64 &= ~VXGE_HAL_LAG_CFG_RX_DISCARD_BEHAV; } if (lag_config->sl_mode_config.pref_indiv_port != VXGE_HAL_LAG_PREF_INDIV_PORT_DEFAULT) { if (lag_config->sl_mode_config.pref_indiv_port == VXGE_HAL_LAG_RX_DISCARD_ENABLE) val64 |= VXGE_HAL_LAG_CFG_PREF_INDIV_PORT_NUM; else val64 &= ~VXGE_HAL_LAG_CFG_PREF_INDIV_PORT_NUM; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_cfg); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_tx_cfg); if (lag_config->incr_tx_aggr_stats != VXGE_HAL_LAG_INCR_TX_AGGR_STATS_DEFAULT) { if (lag_config->incr_tx_aggr_stats == VXGE_HAL_LAG_INCR_TX_AGGR_STATS_ENABLE) val64 |= VXGE_HAL_LAG_TX_CFG_INCR_TX_AGGR_STATS; else val64 &= ~VXGE_HAL_LAG_TX_CFG_INCR_TX_AGGR_STATS; } if (lag_config->la_mode_config.distrib_alg_sel != VXGE_HAL_LAG_DISTRIB_ALG_SEL_DEFAULT) { val64 &= ~VXGE_HAL_LAG_TX_CFG_DISTRIB_ALG_SEL(0x3); val64 |= VXGE_HAL_LAG_TX_CFG_DISTRIB_ALG_SEL( lag_config->la_mode_config.distrib_alg_sel); vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, lag_config->la_mode_config.distrib_dest, &hldev->mrpcim_reg->lag_distrib_dest); } else { lag_config->la_mode_config.distrib_alg_sel = (u32) VXGE_HAL_LAG_TX_CFG_GET_DISTRIB_ALG_SEL(val64); lag_config->la_mode_config.distrib_dest = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_distrib_dest); } if (lag_config->la_mode_config.distrib_remap_if_fail != VXGE_HAL_LAG_DISTRIB_REMAP_IF_FAIL_DEFAULT) { if (lag_config->la_mode_config.distrib_remap_if_fail == VXGE_HAL_LAG_DISTRIB_REMAP_IF_FAIL_ENABLE) val64 |= VXGE_HAL_LAG_TX_CFG_DISTRIB_REMAP_IF_FAIL; else val64 &= ~VXGE_HAL_LAG_TX_CFG_DISTRIB_REMAP_IF_FAIL; } if (lag_config->la_mode_config.coll_max_delay != VXGE_HAL_LAG_DEF_COLL_MAX_DELAY) { val64 &= ~VXGE_HAL_LAG_TX_CFG_COLL_MAX_DELAY(0xffff); val64 |= VXGE_HAL_LAG_TX_CFG_DISTRIB_ALG_SEL( 
lag_config->la_mode_config.coll_max_delay); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_tx_cfg); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_active_passive_cfg); if (lag_config->ap_mode_config.hot_standby != VXGE_HAL_LAG_HOT_STANDBY_DEFAULT) { if (lag_config->ap_mode_config.hot_standby == VXGE_HAL_LAG_HOT_STANDBY_KEEP_UP_PORT) val64 |= VXGE_HAL_LAG_ACTIVE_PASSIVE_CFG_HOT_STANDBY; else val64 &= ~VXGE_HAL_LAG_ACTIVE_PASSIVE_CFG_HOT_STANDBY; } if (lag_config->ap_mode_config.lacp_decides != VXGE_HAL_LAG_LACP_DECIDES_DEFAULT) { if (lag_config->ap_mode_config.lacp_decides == VXGE_HAL_LAG_LACP_DECIDES_ENBALE) val64 |= VXGE_HAL_LAG_ACTIVE_PASSIVE_CFG_LACP_DECIDES; else val64 &= ~VXGE_HAL_LAG_ACTIVE_PASSIVE_CFG_LACP_DECIDES; } if (lag_config->ap_mode_config.pref_active_port != VXGE_HAL_LAG_PREF_ACTIVE_PORT_DEFAULT) { if (lag_config->ap_mode_config.pref_active_port == VXGE_HAL_LAG_PREF_ACTIVE_PORT_1) val64 |= VXGE_HAL_LAG_ACTIVE_PASSIVE_CFG_PREF_ACTIVE_PORT_NUM; else val64 &= ~VXGE_HAL_LAG_ACTIVE_PASSIVE_CFG_PREF_ACTIVE_PORT_NUM; } if (lag_config->ap_mode_config.auto_failback != VXGE_HAL_LAG_AUTO_FAILBACK_DEFAULT) { if (lag_config->ap_mode_config.auto_failback == VXGE_HAL_LAG_AUTO_FAILBACK_ENBALE) val64 |= VXGE_HAL_LAG_ACTIVE_PASSIVE_CFG_AUTO_FAILBACK; else val64 &= ~VXGE_HAL_LAG_ACTIVE_PASSIVE_CFG_AUTO_FAILBACK; } if (lag_config->ap_mode_config.failback_en != VXGE_HAL_LAG_FAILBACK_EN_DEFAULT) { if (lag_config->ap_mode_config.failback_en == VXGE_HAL_LAG_FAILBACK_EN_ENBALE) val64 |= VXGE_HAL_LAG_ACTIVE_PASSIVE_CFG_FAILBACK_EN; else val64 &= ~VXGE_HAL_LAG_ACTIVE_PASSIVE_CFG_FAILBACK_EN; } if (lag_config->ap_mode_config.cold_failover_timeout != VXGE_HAL_LAG_DEF_COLD_FAILOVER_TIMEOUT) { val64 &= ~VXGE_HAL_LAG_ACTIVE_PASSIVE_CFG_COLD_FAILOVER_TIMEOUT( 0xffff); val64 |= VXGE_HAL_LAG_ACTIVE_PASSIVE_CFG_COLD_FAILOVER_TIMEOUT( lag_config->ap_mode_config.cold_failover_timeout); } 
vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_active_passive_cfg); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_lacp_cfg); if (lag_config->lacp_config.lacp_en != VXGE_HAL_LAG_LACP_EN_DEFAULT) { if (lag_config->lacp_config.lacp_en == VXGE_HAL_LAG_LACP_EN_ENABLE) val64 |= VXGE_HAL_LAG_LACP_CFG_EN; else val64 &= ~VXGE_HAL_LAG_LACP_CFG_EN; } if (lag_config->lacp_config.lacp_begin != VXGE_HAL_LAG_LACP_BEGIN_DEFAULT) { if (lag_config->lacp_config.lacp_begin == VXGE_HAL_LAG_LACP_BEGIN_RESET) val64 |= VXGE_HAL_LAG_LACP_CFG_LACP_BEGIN; else val64 &= ~VXGE_HAL_LAG_LACP_CFG_LACP_BEGIN; } if (lag_config->lacp_config.discard_lacp != VXGE_HAL_LAG_DISCARD_LACP_DEFAULT) { if (lag_config->lacp_config.discard_lacp == VXGE_HAL_LAG_DISCARD_LACP_ENABLE) val64 |= VXGE_HAL_LAG_LACP_CFG_DISCARD_LACP; else val64 &= ~VXGE_HAL_LAG_LACP_CFG_DISCARD_LACP; } if (lag_config->lacp_config.liberal_len_chk != VXGE_HAL_LAG_LIBERAL_LEN_CHK_DEFAULT) { if (lag_config->lacp_config.liberal_len_chk == VXGE_HAL_LAG_LIBERAL_LEN_CHK_ENABLE) val64 |= VXGE_HAL_LAG_LACP_CFG_LIBERAL_LEN_CHK; else val64 &= ~VXGE_HAL_LAG_LACP_CFG_LIBERAL_LEN_CHK; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_lacp_cfg); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_marker_cfg); if (lag_config->lacp_config.marker_gen_recv_en != VXGE_HAL_LAG_MARKER_GEN_RECV_EN_DEFAULT) { if (lag_config->lacp_config.marker_gen_recv_en == VXGE_HAL_LAG_MARKER_GEN_RECV_EN_ENABLE) val64 |= VXGE_HAL_LAG_MARKER_CFG_GEN_RCVR_EN; else val64 &= ~VXGE_HAL_LAG_MARKER_CFG_GEN_RCVR_EN; } if (lag_config->lacp_config.marker_resp_en != VXGE_HAL_LAG_MARKER_RESP_EN_DEFAULT) { if (lag_config->lacp_config.marker_resp_en == VXGE_HAL_LAG_MARKER_RESP_EN_ENABLE) val64 |= VXGE_HAL_LAG_MARKER_CFG_RESP_EN; else val64 &= ~VXGE_HAL_LAG_MARKER_CFG_RESP_EN; } if 
(lag_config->lacp_config.marker_resp_timeout != VXGE_HAL_LAG_DEF_MARKER_RESP_TIMEOUT) { val64 &= ~VXGE_HAL_LAG_MARKER_CFG_RESP_TIMEOUT(0xffff); val64 |= VXGE_HAL_LAG_MARKER_CFG_RESP_TIMEOUT( lag_config->lacp_config.marker_resp_timeout); } if (lag_config->lacp_config.slow_proto_mrkr_min_interval != VXGE_HAL_LAG_DEF_SLOW_PROTO_MRKR_MIN_INTERVAL) { val64 &= ~VXGE_HAL_LAG_MARKER_CFG_SLOW_PROTO_MRKR_MIN_INTERVAL( 0xffff); val64 |= VXGE_HAL_LAG_MARKER_CFG_SLOW_PROTO_MRKR_MIN_INTERVAL( lag_config->lacp_config.slow_proto_mrkr_min_interval); } if (lag_config->lacp_config.throttle_mrkr_resp != VXGE_HAL_LAG_THROTTLE_MRKR_RESP_DEFAULT) { if (lag_config->lacp_config.throttle_mrkr_resp == VXGE_HAL_LAG_THROTTLE_MRKR_RESP_ENABLE) val64 |= VXGE_HAL_LAG_MARKER_CFG_THROTTLE_MRKR_RESP; else val64 &= ~VXGE_HAL_LAG_MARKER_CFG_THROTTLE_MRKR_RESP; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_marker_cfg); for (i = 0; i < VXGE_HAL_LAG_PORT_MAX_PORTS; i++) { val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_port_cfg[i]); if (lag_config->port_config[i].lag_en != VXGE_HAL_LAG_PORT_LAG_EN_DEFAULT) { if (lag_config->port_config[i].lag_en == VXGE_HAL_LAG_PORT_LAG_EN_ENABLE) val64 |= VXGE_HAL_LAG_PORT_CFG_EN; else val64 &= ~VXGE_HAL_LAG_PORT_CFG_EN; } if (lag_config->port_config[i].discard_slow_proto != VXGE_HAL_LAG_PORT_DISCARD_SLOW_PROTO_DEFAULT) { if (lag_config->port_config[i].discard_slow_proto == VXGE_HAL_LAG_PORT_DISCARD_SLOW_PROTO_ENABLE) val64 |= VXGE_HAL_LAG_PORT_CFG_DISCARD_SLOW_PROTO; else val64 &= ~VXGE_HAL_LAG_PORT_CFG_DISCARD_SLOW_PROTO; } if (lag_config->port_config[i].host_chosen_aggr != VXGE_HAL_LAG_PORT_HOST_CHOSEN_AGGR_DEFAULT) { if (lag_config->port_config[i].host_chosen_aggr == VXGE_HAL_LAG_PORT_HOST_CHOSEN_AGGR_1) val64 |= VXGE_HAL_LAG_PORT_CFG_HOST_CHOSEN_AGGR; else val64 &= ~VXGE_HAL_LAG_PORT_CFG_HOST_CHOSEN_AGGR; } if (lag_config->port_config[i].discard_unknown_slow_proto != 
VXGE_HAL_LAG_PORT_DISCARD_UNKNOWN_SLOW_PROTO_DEFAULT) { if (lag_config->port_config[i].discard_unknown_slow_proto == VXGE_HAL_LAG_PORT_DISCARD_UNKNOWN_SLOW_PROTO_ENABLE) val64 |= VXGE_HAL_LAG_PORT_CFG_DISCARD_UNKNOWN_SLOW_PROTO; else val64 &= ~VXGE_HAL_LAG_PORT_CFG_DISCARD_UNKNOWN_SLOW_PROTO; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_port_cfg[i]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_port_actor_admin_cfg[i]); if (lag_config->port_config[i].actor_port_num != VXGE_HAL_LAG_PORT_DEF_ACTOR_PORT_NUM) { val64 &= ~VXGE_HAL_LAG_PORT_ACTOR_ADMIN_CFG_PORT_NUM( 0xffff); val64 |= VXGE_HAL_LAG_PORT_ACTOR_ADMIN_CFG_PORT_NUM( lag_config->port_config[i].actor_port_num); } if (lag_config->port_config[i].actor_port_priority != VXGE_HAL_LAG_PORT_DEF_ACTOR_PORT_PRIORITY) { val64 &= ~VXGE_HAL_LAG_PORT_ACTOR_ADMIN_CFG_PORT_PRI( 0xffff); val64 |= VXGE_HAL_LAG_PORT_ACTOR_ADMIN_CFG_PORT_PRI( lag_config->port_config[i].actor_port_priority); } if (lag_config->port_config[i].actor_key_10g != VXGE_HAL_LAG_PORT_DEF_ACTOR_KEY_10G) { val64 &= ~VXGE_HAL_LAG_PORT_ACTOR_ADMIN_CFG_KEY_10G( 0xffff); val64 |= VXGE_HAL_LAG_PORT_ACTOR_ADMIN_CFG_KEY_10G( lag_config->port_config[i].actor_key_10g); } if (lag_config->port_config[i].actor_key_1g != VXGE_HAL_LAG_PORT_DEF_ACTOR_KEY_1G) { val64 &= ~VXGE_HAL_LAG_PORT_ACTOR_ADMIN_CFG_KEY_1G( 0xffff); val64 |= VXGE_HAL_LAG_PORT_ACTOR_ADMIN_CFG_KEY_1G( lag_config->port_config[i].actor_key_1g); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_port_actor_admin_cfg[i]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_port_actor_admin_state[i]); if (lag_config->port_config[i].actor_lacp_activity != VXGE_HAL_LAG_PORT_ACTOR_LACP_ACTIVITY_DEFAULT) { if (lag_config->port_config[i].actor_lacp_activity == VXGE_HAL_LAG_PORT_ACTOR_LACP_ACTIVITY_ACTIVE) val64 |= 
VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_LACP_ACTIVITY; else val64 &= ~VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_LACP_ACTIVITY; } if (lag_config->port_config[i].actor_lacp_timeout != VXGE_HAL_LAG_PORT_ACTOR_LACP_ACTIVITY_DEFAULT) { if (lag_config->port_config[i].actor_lacp_timeout == VXGE_HAL_LAG_PORT_ACTOR_LACP_TIMEOUT_SHORT) val64 |= VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_LACP_TIMEOUT; else val64 &= ~VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_LACP_TIMEOUT; } if (lag_config->port_config[i].actor_aggregation != VXGE_HAL_LAG_PORT_ACTOR_AGGREGATION_DEFAULT) { if (lag_config->port_config[i].actor_aggregation == VXGE_HAL_LAG_PORT_ACTOR_AGGREGATION_AGGREGATEABLE) val64 |= VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_AGGREGATION; else val64 &= ~VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_AGGREGATION; } if (lag_config->port_config[i].actor_synchronization != VXGE_HAL_LAG_PORT_ACTOR_SYNCHRONIZATION_DEFAULT) { if (lag_config->port_config[i].actor_aggregation == VXGE_HAL_LAG_PORT_ACTOR_SYNCHRONIZATION_IN_SYNC) val64 |= VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_SYNCHRONIZATION; else val64 &= ~VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_SYNCHRONIZATION; } if (lag_config->port_config[i].actor_collecting != VXGE_HAL_LAG_PORT_ACTOR_COLLECTING_DEFAULT) { if (lag_config->port_config[i].actor_collecting == VXGE_HAL_LAG_PORT_ACTOR_COLLECTING_ENABLE) val64 |= VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_COLLECTING; else val64 &= ~VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_COLLECTING; } if (lag_config->port_config[i].actor_distributing != VXGE_HAL_LAG_PORT_ACTOR_DISTRIBUTING_DEFAULT) { if (lag_config->port_config[i].actor_distributing == VXGE_HAL_LAG_PORT_ACTOR_DISTRIBUTING_ENABLE) val64 |= VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_DISTRIBUTING; else val64 &= ~VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_DISTRIBUTING; } if (lag_config->port_config[i].actor_defaulted != VXGE_HAL_LAG_PORT_ACTOR_DEFAULTED_DEFAULT) { if (lag_config->port_config[i].actor_defaulted == VXGE_HAL_LAG_PORT_ACTOR_NOT_DEFAULTED) val64 |= VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_DEFAULTED; else val64 
&= ~VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_DEFAULTED; } if (lag_config->port_config[i].actor_expired != VXGE_HAL_LAG_PORT_ACTOR_EXPIRED_DEFAULT) { if (lag_config->port_config[i].actor_expired == VXGE_HAL_LAG_PORT_ACTOR_NOT_EXPIRED) val64 |= VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_EXPIRED; else val64 &= ~VXGE_HAL_LAG_PORT_ACTOR_ADMIN_STATE_EXPIRED; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_port_actor_admin_state[i]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_port_partner_admin_cfg[i]); if (lag_config->port_config[i].partner_sys_pri != VXGE_HAL_LAG_PORT_DEF_PARTNER_SYS_PRI) { val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_CFG_SYS_PRI( 0xffff); val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_CFG_SYS_PRI( lag_config->port_config[i].partner_sys_pri); } if (lag_config->port_config[i].partner_key != VXGE_HAL_LAG_PORT_DEF_PARTNER_KEY) { val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_CFG_KEY( 0xffff); val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_CFG_KEY( lag_config->port_config[i].partner_key); } if (lag_config->port_config[i].partner_port_num != VXGE_HAL_LAG_PORT_DEF_PARTNER_PORT_NUM) { val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_CFG_PORT_NUM( 0xffff); val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_CFG_PORT_NUM( lag_config->port_config[i].partner_port_num); } if (lag_config->port_config[i].partner_port_priority != VXGE_HAL_LAG_PORT_DEF_PARTNER_PORT_PRIORITY) { val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_CFG_PORT_PRI( 0xffff); val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_CFG_PORT_PRI( lag_config->port_config[i].actor_port_priority); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_port_partner_admin_cfg[i]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_port_partner_admin_state[i]); if (lag_config->port_config[i].partner_lacp_activity != VXGE_HAL_LAG_PORT_PARTNER_LACP_ACTIVITY_DEFAULT) { if 
(lag_config->port_config[i].partner_lacp_activity == VXGE_HAL_LAG_PORT_PARTNER_LACP_ACTIVITY_ACTIVE) val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_LACP_ACTIVITY; else val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_LACP_ACTIVITY; } if (lag_config->port_config[i].partner_lacp_timeout != VXGE_HAL_LAG_PORT_PARTNER_LACP_ACTIVITY_DEFAULT) { if (lag_config->port_config[i].partner_lacp_timeout == VXGE_HAL_LAG_PORT_PARTNER_LACP_TIMEOUT_SHORT) val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_LACP_TIMEOUT; else val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_LACP_TIMEOUT; } if (lag_config->port_config[i].partner_aggregation != VXGE_HAL_LAG_PORT_PARTNER_AGGREGATION_DEFAULT) { if (lag_config->port_config[i].partner_aggregation == VXGE_HAL_LAG_PORT_PARTNER_AGGREGATION_AGGREGATEABLE) val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_AGGREGATION; else val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_AGGREGATION; } if (lag_config->port_config[i].partner_synchronization != VXGE_HAL_LAG_PORT_PARTNER_SYNCHRONIZATION_DEFAULT) { if (lag_config->port_config[i].partner_aggregation == VXGE_HAL_LAG_PORT_PARTNER_SYNCHRONIZATION_IN_SYNC) val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_SYNCHRONIZATION; else val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_SYNCHRONIZATION; } if (lag_config->port_config[i].partner_collecting != VXGE_HAL_LAG_PORT_PARTNER_COLLECTING_DEFAULT) { if (lag_config->port_config[i].partner_collecting == VXGE_HAL_LAG_PORT_PARTNER_COLLECTING_ENABLE) val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_COLLECTING; else val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_COLLECTING; } if (lag_config->port_config[i].partner_distributing != VXGE_HAL_LAG_PORT_PARTNER_DISTRIBUTING_DEFAULT) { if (lag_config->port_config[i].partner_distributing == VXGE_HAL_LAG_PORT_PARTNER_DISTRIBUTING_ENABLE) val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_DISTRIBUTING; else val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_DISTRIBUTING; } if (lag_config->port_config[i].partner_defaulted != 
VXGE_HAL_LAG_PORT_PARTNER_DEFAULTED_DEFAULT) { if (lag_config->port_config[i].partner_defaulted == VXGE_HAL_LAG_PORT_PARTNER_NOT_DEFAULTED) val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_DEFAULTED; else val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_DEFAULTED; } if (lag_config->port_config[i].partner_expired != VXGE_HAL_LAG_PORT_PARTNER_EXPIRED_DEFAULT) { if (lag_config->port_config[i].partner_expired == VXGE_HAL_LAG_PORT_PARTNER_NOT_EXPIRED) val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_EXPIRED; else val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_STATE_EXPIRED; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_port_partner_admin_state[i]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_port_partner_admin_sys_id[i]); mac_addr = 0; for (j = 0; j < VXGE_HAL_ETH_ALEN; j++) { mac_addr <<= 8; mac_addr |= (u8) lag_config->port_config[i].partner_mac_addr[j]; } if (mac_addr != 0xffffffffffffULL) { val64 &= ~VXGE_HAL_LAG_PORT_PARTNER_ADMIN_SYS_ID_ADDR( 0xffffffffffffULL); val64 |= VXGE_HAL_LAG_PORT_PARTNER_ADMIN_SYS_ID_ADDR( mac_addr); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_port_partner_admin_sys_id[i]); } for (i = 0; i < VXGE_HAL_LAG_AGGR_MAX_PORTS; i++) { val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_aggr_id_cfg[i]); val64 &= ~VXGE_HAL_LAG_AGGR_ID_CFG_ID(0xffff); val64 |= VXGE_HAL_LAG_AGGR_ID_CFG_ID( lag_config->aggr_config[i].aggr_id); vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_aggr_id_cfg[i]); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_aggr_addr_cfg[i]); mac_addr = 0; for (j = 0; j < VXGE_HAL_ETH_ALEN; j++) { mac_addr <<= 8; mac_addr |= (u8) lag_config->aggr_config[i].mac_addr[j]; } if (mac_addr != 0xffffffffffffULL) { val64 &= 
~VXGE_HAL_LAG_AGGR_ADDR_CFG_ADDR(0xffffffffffffULL); val64 |= VXGE_HAL_LAG_AGGR_ADDR_CFG_ADDR(mac_addr); } if (lag_config->aggr_config[i].use_port_mac_addr != VXGE_HAL_LAG_AGGR_USE_PORT_MAC_ADDR_DEFAULT) { if (lag_config->aggr_config[i].use_port_mac_addr == VXGE_HAL_LAG_AGGR_USE_PORT_MAC_ADDR_ENABLE) val64 |= VXGE_HAL_LAG_AGGR_ADDR_CFG_USE_PORT_ADDR; else val64 &= ~VXGE_HAL_LAG_AGGR_ADDR_CFG_USE_PORT_ADDR; } if (lag_config->aggr_config[i].mac_addr_sel != VXGE_HAL_LAG_AGGR_MAC_ADDR_SEL_DEFAULT) { if (lag_config->aggr_config[i].mac_addr_sel == VXGE_HAL_LAG_AGGR_MAC_ADDR_SEL_PORT_1) val64 |= VXGE_HAL_LAG_AGGR_ADDR_CFG_ADDR_SEL; else val64 &= ~VXGE_HAL_LAG_AGGR_ADDR_CFG_ADDR_SEL; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_aggr_addr_cfg[i]); if (lag_config->aggr_config[i].admin_key == VXGE_HAL_LAG_AGGR_DEF_ADMIN_KEY) { val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_aggr_admin_key[i]); val64 &= ~VXGE_HAL_LAG_AGGR_ADMIN_KEY_KEY(0xffff); val64 |= VXGE_HAL_LAG_AGGR_ADMIN_KEY_KEY( lag_config->aggr_config[i].admin_key); vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_aggr_admin_key[i]); } } if (lag_config->sys_pri != VXGE_HAL_LAG_DEF_SYS_PRI) { val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_sys_cfg); val64 &= ~VXGE_HAL_LAG_SYS_CFG_SYS_PRI(0xffff); val64 |= VXGE_HAL_LAG_SYS_CFG_SYS_PRI( lag_config->sys_pri); vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_sys_cfg); } val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_sys_id); mac_addr = 0; for (j = 0; j < VXGE_HAL_ETH_ALEN; j++) { mac_addr <<= 8; mac_addr |= (u8) lag_config->mac_addr[j]; } if (mac_addr != 0xffffffffffffULL) { val64 &= ~VXGE_HAL_LAG_SYS_ID_ADDR(0xffffffffffffULL); val64 |= VXGE_HAL_LAG_SYS_ID_ADDR(mac_addr); } if 
(lag_config->use_port_mac_addr != VXGE_HAL_LAG_USE_PORT_MAC_ADDR_DEFAULT) { if (lag_config->use_port_mac_addr == VXGE_HAL_LAG_USE_PORT_MAC_ADDR_ENABLE) val64 |= VXGE_HAL_LAG_SYS_ID_USE_PORT_ADDR; else val64 &= ~VXGE_HAL_LAG_SYS_ID_USE_PORT_ADDR; } if (lag_config->mac_addr_sel != VXGE_HAL_LAG_MAC_ADDR_SEL_DEFAULT) { if (lag_config->mac_addr_sel == VXGE_HAL_LAG_MAC_ADDR_SEL_PORT_1) val64 |= VXGE_HAL_LAG_SYS_ID_ADDR_SEL; else val64 &= ~VXGE_HAL_LAG_SYS_ID_ADDR_SEL; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_sys_id); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_aggr_alt_admin_key); if (lag_config->ap_mode_config.alt_admin_key != VXGE_HAL_LAG_DEF_ALT_ADMIN_KEY) { val64 &= ~VXGE_HAL_LAG_AGGR_ALT_ADMIN_KEY_KEY(0xffff); val64 |= VXGE_HAL_LAG_AGGR_ALT_ADMIN_KEY_KEY( lag_config->ap_mode_config.alt_admin_key); } if (lag_config->ap_mode_config.alt_aggr != VXGE_HAL_LAG_ALT_AGGR_DEFAULT) { if (lag_config->ap_mode_config.alt_aggr == VXGE_HAL_LAG_ALT_AGGR_1) val64 |= VXGE_HAL_LAG_AGGR_ALT_ADMIN_KEY_ALT_AGGR; else val64 &= ~VXGE_HAL_LAG_AGGR_ALT_ADMIN_KEY_ALT_AGGR; } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_aggr_alt_admin_key); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_timer_cfg_1); if (lag_config->fast_per_time != VXGE_HAL_LAG_DEF_FAST_PER_TIME) { val64 &= ~VXGE_HAL_LAG_TIMER_CFG_1_FAST_PER(0xffff); val64 |= VXGE_HAL_LAG_TIMER_CFG_1_FAST_PER( lag_config->fast_per_time); } if (lag_config->slow_per_time != VXGE_HAL_LAG_DEF_SLOW_PER_TIME) { val64 &= ~VXGE_HAL_LAG_TIMER_CFG_1_SLOW_PER(0xffff); val64 |= VXGE_HAL_LAG_TIMER_CFG_1_SLOW_PER( lag_config->slow_per_time); } if (lag_config->short_timeout != VXGE_HAL_LAG_DEF_SHORT_TIMEOUT) { val64 &= ~VXGE_HAL_LAG_TIMER_CFG_1_SHORT_TIMEOUT(0xffff); val64 |= VXGE_HAL_LAG_TIMER_CFG_1_SHORT_TIMEOUT( lag_config->short_timeout); } if 
(lag_config->long_timeout != VXGE_HAL_LAG_DEF_LONG_TIMEOUT) { val64 &= ~VXGE_HAL_LAG_TIMER_CFG_1_LONG_TIMEOUT(0xffff); val64 |= VXGE_HAL_LAG_TIMER_CFG_1_LONG_TIMEOUT( lag_config->short_timeout); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_timer_cfg_1); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->lag_timer_cfg_2); if (lag_config->churn_det_time != VXGE_HAL_LAG_DEF_CHURN_DET_TIME) { val64 &= ~VXGE_HAL_LAG_TIMER_CFG_2_CHURN_DET(0xffff); val64 |= VXGE_HAL_LAG_TIMER_CFG_2_CHURN_DET( lag_config->churn_det_time); } if (lag_config->aggr_wait_time != VXGE_HAL_LAG_DEF_AGGR_WAIT_TIME) { val64 &= ~VXGE_HAL_LAG_TIMER_CFG_2_AGGR_WAIT(0xffff); val64 |= VXGE_HAL_LAG_TIMER_CFG_2_AGGR_WAIT( lag_config->slow_per_time); } if (lag_config->short_timer_scale != VXGE_HAL_LAG_SHORT_TIMER_SCALE_DEFAULT) { val64 &= ~VXGE_HAL_LAG_TIMER_CFG_2_SHORT_TIMER_SCALE(0xffff); val64 |= VXGE_HAL_LAG_TIMER_CFG_2_SHORT_TIMER_SCALE( lag_config->short_timer_scale); } if (lag_config->long_timer_scale != VXGE_HAL_LAG_LONG_TIMER_SCALE_DEFAULT) { val64 &= ~VXGE_HAL_LAG_TIMER_CFG_2_LONG_TIMER_SCALE(0xffff); val64 |= VXGE_HAL_LAG_TIMER_CFG_2_LONG_TIMER_SCALE( lag_config->long_timer_scale); } vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->lag_timer_cfg_2); vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } /* * __hal_mrpcim_get_vpd_data - Getting vpd_data. * * @hldev: HAL device handle. 
* * Getting product name and serial number from vpd capabilites structure * */ void __hal_mrpcim_get_vpd_data(__hal_device_t *hldev) { u8 *vpd_data; u16 data; u32 data32; u32 i, j, count, fail = 0; u32 addr_offset, data_offset; u32 max_count = hldev->header.config.device_poll_millis * 10; vxge_assert(hldev); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim("hldev = 0x"VXGE_OS_STXFMT, (ptr_t) hldev); addr_offset = hldev->pci_caps.vpd_cap_offset + vxge_offsetof(vxge_hal_vpid_capability_le_t, vpd_address); data_offset = hldev->pci_caps.vpd_cap_offset + vxge_offsetof(vxge_hal_vpid_capability_le_t, vpd_data); vxge_os_strlcpy((char *) hldev->mrpcim->vpd_data.product_name, "10 Gigabit Ethernet Adapter", sizeof(hldev->mrpcim->vpd_data.product_name)); vxge_os_strlcpy((char *) hldev->mrpcim->vpd_data.serial_num, "not available", sizeof(hldev->mrpcim->vpd_data.serial_num)); if (hldev->func_id != 0) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return; } vpd_data = (u8 *) vxge_os_malloc(hldev->header.pdev, VXGE_HAL_VPD_BUFFER_SIZE + 16); - if (vpd_data == 0) + if (vpd_data == NULL) return; for (i = 0; i < VXGE_HAL_VPD_BUFFER_SIZE; i += 4) { vxge_os_pci_write16(hldev->header.pdev, hldev->header.cfgh, addr_offset, (u16) i); for (count = 0; count < max_count; count++) { vxge_os_udelay(100); (void) __hal_vpath_pci_read(hldev, hldev->first_vp_id, addr_offset, 2, &data); if (data & VXGE_HAL_PCI_VPID_COMPL_FALG) break; } if (count >= max_count) { vxge_hal_info_log_device("%s:ERR, \ Reading VPD data failed", __func__); fail = 1; break; } (void) __hal_vpath_pci_read(hldev, hldev->first_vp_id, data_offset, 4, &data32); for (j = 0; j < 4; j++) { vpd_data[i + j] = (u8) (data32 & 0xff); data32 >>= 8; } } if (!fail) { /* read serial number of adapter */ for (count = 0; count < VXGE_HAL_VPD_BUFFER_SIZE; count++) { if ((vpd_data[count] == 'S') && (vpd_data[count + 1] 
== 'N') && (vpd_data[count + 2] < VXGE_HAL_VPD_LENGTH)) { (void) vxge_os_memzero( hldev->mrpcim->vpd_data.serial_num, VXGE_HAL_VPD_LENGTH); (void) vxge_os_memcpy( hldev->mrpcim->vpd_data.serial_num, &vpd_data[count + 3], vpd_data[count + 2]); break; } } if (vpd_data[1] < VXGE_HAL_VPD_LENGTH) { (void) vxge_os_memzero( hldev->mrpcim->vpd_data.product_name, vpd_data[1]); (void) vxge_os_memcpy(hldev->mrpcim->vpd_data.product_name, &vpd_data[3], vpd_data[1]); } } vxge_os_free(hldev->header.pdev, vpd_data, VXGE_HAL_VPD_BUFFER_SIZE + 16); vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, fail); } /* * __hal_mrpcim_rts_table_access - Get/Set the entries from RTS access tables * @devh: Device handle. * @action: Write Enable. 0 - Read Operation; 1 - Write Operation * @rts_table: Data structure select. Identifies the RTS data structure * (i.e. lookup table) to access. * 0; DA; Destination Address * 1; VID; VLAN ID * 2; ETYPE; Ethertype * 3; PN; Layer 4 Port Number * 4; RANGE_PN; Range of Layer 4 Port Numbers * 5; RTH_GEN_CFG; Receive-Traffic Hashing General Configuration * 6; RTH_SOLO_IT; Receive-Traffic Hashing Indirection Table * (Single Bucket Programming) * 7; RTH_JHASH_CFG; Receive-Traffic Hashing Jenkins Hash Config * 8; RTH_MASK; Receive-Traffic Hashing Mask * 9; RTH_KEY; Receive-Traffic Hashing Key * 10; QOS; VLAN Quality of Service * 11; DS; IP Differentiated Services * @offset: Offset (into the data structure) to execute the command on. * @data1: Pointer to the data 1 to be read from the table * @data2: Pointer to the data 2 to be read from the table * @vpath_vector: Identifies the candidate VPATH(s) for the given entry. * These VPATH(s) determine the set of target destinations for * a frame that matches this steering entry. Any or all bits * can be set, which handles 16+1 virtual paths in an 'n-hot' * basis. VPATH 0 is the MSbit. 
* * Read from the RTS table * */ vxge_hal_status_e __hal_mrpcim_rts_table_access( vxge_hal_device_h devh, u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2, u64 *vpath_vector) { u64 val64; __hal_device_t *hldev; vxge_hal_status_e status = VXGE_HAL_OK; vxge_assert((devh != NULL) && (data1 != NULL) && (data2 != NULL) && (vpath_vector != NULL)); hldev = (__hal_device_t *) devh; vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim( "devh = 0x"VXGE_OS_STXFMT", action = %d, rts_table = %d, " "offset = %d, data1 = 0x"VXGE_OS_STXFMT", " "data2 = 0x"VXGE_OS_STXFMT", vpath_vector = 0x"VXGE_OS_STXFMT, (ptr_t) devh, action, rts_table, offset, (ptr_t) data1, (ptr_t) data2, (ptr_t) vpath_vector); val64 = VXGE_HAL_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL(rts_table) | VXGE_HAL_RTS_MGR_STEER_CTRL_STROBE | VXGE_HAL_RTS_MGR_STEER_CTRL_OFFSET(offset); if (action == VXGE_HAL_RTS_MGR_STEER_CTRL_WE_WRITE) val64 = VXGE_HAL_RTS_MGR_STEER_CTRL_WE; if ((rts_table == VXGE_HAL_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || (rts_table == VXGE_HAL_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || (rts_table == VXGE_HAL_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || (rts_table == VXGE_HAL_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { val64 |= VXGE_HAL_RTS_MGR_STEER_CTRL_TABLE_SEL; } vxge_hal_pio_mem_write32_lower(hldev->header.pdev, hldev->header.regh0, (u32) bVAL32(val64, 32), &hldev->mrpcim_reg->rts_mgr_steer_ctrl); vxge_os_wmb(); vxge_hal_pio_mem_write32_upper(hldev->header.pdev, hldev->header.regh0, (u32) bVAL32(val64, 0), &hldev->mrpcim_reg->rts_mgr_steer_ctrl); vxge_os_wmb(); status = vxge_hal_device_register_poll( hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rts_mgr_steer_ctrl, 0, VXGE_HAL_RTS_MGR_STEER_CTRL_STROBE, WAIT_FACTOR * hldev->header.config.device_poll_millis); if (status != VXGE_HAL_OK) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, status); return (status); } val64 
= vxge_os_pio_mem_read64( hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rts_mgr_steer_ctrl); if ((val64 & VXGE_HAL_RTS_MGR_STEER_CTRL_RMACJ_STATUS) && (action == VXGE_HAL_RTS_MGR_STEER_CTRL_WE_READ)) { *data1 = vxge_os_pio_mem_read64( hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rts_mgr_steer_data0); *data2 = vxge_os_pio_mem_read64( hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rts_mgr_steer_data1); *vpath_vector = vxge_os_pio_mem_read64( hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rts_mgr_steer_vpath_vector); status = VXGE_HAL_OK; } else { status = VXGE_HAL_FAIL; } vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, status); return (status); } /* * vxge_hal_mrpcim_mac_addr_add - Add the mac address entry * into MAC address table. * @devh: Device handle. * @offset: Index into the DA table to add the mac address. * @macaddr: MAC address to be added for this vpath into the list * @macaddr_mask: MAC address mask for macaddr * @vpath_vector: Bit mask specifying the vpaths to which * the mac address applies * @duplicate_mode: Duplicate MAC address add mode. 
Please see * vxge_hal_vpath_mac_addr_add_mode_e {} * * Adds the given mac address, mac address mask and vpath vector into the list * * see also: vxge_hal_mrpcim_mac_addr_get * */ vxge_hal_status_e vxge_hal_mrpcim_mac_addr_add( vxge_hal_device_h devh, u32 offset, macaddr_t macaddr, macaddr_t macaddr_mask, u64 vpath_vector, u32 duplicate_mode) { u32 i; u64 data1 = 0ULL; u64 data2 = 0ULL; __hal_device_t *hldev; vxge_hal_status_e status = VXGE_HAL_OK; vxge_assert(devh != NULL); hldev = (__hal_device_t *) devh; vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim( "devh = 0x"VXGE_OS_STXFMT", offset = %d, " "macaddr = %02x-%02x-%02x-%02x-%02x-%02x, " "macaddr_mask = %02x-%02x-%02x-%02x-%02x-%02x, " "vpath_vector = 0x"VXGE_OS_LLXFMT, (ptr_t) devh, offset, macaddr[0], macaddr[1], macaddr[2], macaddr[3], macaddr[4], macaddr[5], macaddr_mask[0], macaddr_mask[1], macaddr_mask[2], macaddr_mask[3], macaddr_mask[4], macaddr_mask[5], vpath_vector); for (i = 0; i < VXGE_HAL_ETH_ALEN; i++) { data1 <<= 8; data1 |= (u8) macaddr[i]; } data1 = VXGE_HAL_RTS_MGR_STEER_DATA0_DA_MAC_ADDR(data1); for (i = 0; i < VXGE_HAL_ETH_ALEN; i++) { data2 <<= 8; data2 |= (u8) macaddr_mask[i]; } switch (duplicate_mode) { case VXGE_HAL_VPATH_MAC_ADDR_ADD_DUPLICATE: i = 0; break; case VXGE_HAL_VPATH_MAC_ADDR_DISCARD_DUPLICATE: i = 1; break; case VXGE_HAL_VPATH_MAC_ADDR_REPLACE_DUPLICATE: i = 2; break; default: i = 0; break; } data2 = VXGE_HAL_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MASK(data2) | VXGE_HAL_RTS_MGR_STEER_DATA1_DA_MAC_ADDR_MODE(i); status = __hal_mrpcim_rts_table_access(devh, VXGE_HAL_RTS_MGR_STEER_CTRL_WE_WRITE, VXGE_HAL_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DA, offset, &data1, &data2, &vpath_vector); vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, status); return (status); } /* * vxge_hal_mrpcim_mac_addr_get - Read the mac address entry into * MAC address table. * @devh: Device handle. 
* @offset: Index into the DA table to execute the command on. * @macaddr: Buffer to return MAC address to be added for this vpath * into the list * @macaddr_mask: Buffer to return MAC address mask for macaddr * @vpath_vector: Buffer to return Bit mask specifying the vpaths * to which the mac address applies * * Reads the mac address, mac address mask and vpath vector from * the given offset * * see also: vxge_hal_mrpcim_mac_addr_add * */ vxge_hal_status_e vxge_hal_mrpcim_mac_addr_get( vxge_hal_device_h devh, u32 offset, macaddr_t macaddr, macaddr_t macaddr_mask, u64 *vpath_vector) { u32 i; u64 data1 = 0ULL; u64 data2 = 0ULL; __hal_device_t *hldev; vxge_hal_status_e status = VXGE_HAL_OK; vxge_assert(devh != NULL); hldev = (__hal_device_t *) devh; vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim("hldev = 0x"VXGE_OS_STXFMT, (ptr_t) hldev); status = __hal_mrpcim_rts_table_access(devh, VXGE_HAL_RTS_MGR_STEER_CTRL_WE_WRITE, VXGE_HAL_RTS_MGR_STEER_CTRL_DATA_STRUCT_SEL_DA, offset, &data1, &data2, vpath_vector); if (status != VXGE_HAL_OK) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, status); return (status); } data1 = VXGE_HAL_RTS_MGR_STEER_DATA0_GET_DA_MAC_ADDR(data1); data2 = VXGE_HAL_RTS_MGR_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2); for (i = VXGE_HAL_ETH_ALEN; i > 0; i--) { macaddr[i - 1] = (u8) (data1 & 0xFF); data1 >>= 8; } for (i = VXGE_HAL_ETH_ALEN; i > 0; i--) { macaddr_mask[i - 1] = (u8) (data2 & 0xFF); data2 >>= 8; } vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, status); return (status); } /* * vxge_hal_mrpcim_strip_repl_vlan_tag_enable - Enable strip Repl vlan tag. * @devh: Device handle. * * Enable X3100 strip Repl vlan tag. * Returns: VXGE_HAL_OK on success. 
* */ vxge_hal_status_e vxge_hal_mrpcim_strip_repl_vlan_tag_enable( vxge_hal_device_h devh) { u64 val64; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert(hldev != NULL); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim("devh = 0x"VXGE_OS_STXFMT, (ptr_t) devh); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } if (hldev->header.config.mrpcim_config.mac_config. rpa_repl_strip_vlan_tag == VXGE_HAL_MAC_RPA_REPL_STRIP_VLAN_TAG_ENABLE) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = 0", __FILE__, __func__, __LINE__); return (VXGE_HAL_OK); } val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_rx_pa_cfg1); val64 |= VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG; vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_rx_pa_cfg1); hldev->header.config.mrpcim_config.mac_config.rpa_repl_strip_vlan_tag = VXGE_HAL_MAC_RPA_REPL_STRIP_VLAN_TAG_ENABLE; vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = 0", __FILE__, __func__, __LINE__); return (VXGE_HAL_OK); } /* * vxge_hal_mrpcim_strip_repl_vlan_tag_disable - Disable strip Repl vlan tag. * @devh: Device handle. * * Disable X3100 strip Repl vlan tag. * Returns: VXGE_HAL_OK on success. 
* */ vxge_hal_status_e vxge_hal_mrpcim_strip_repl_vlan_tag_disable( vxge_hal_device_h devh) { u64 val64; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert(hldev != NULL); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim("devh = 0x"VXGE_OS_STXFMT, (ptr_t) devh); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } if (hldev->header.config.mrpcim_config.mac_config. rpa_repl_strip_vlan_tag == VXGE_HAL_MAC_RPA_REPL_STRIP_VLAN_TAG_DISABLE) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = 0", __FILE__, __func__, __LINE__); return (VXGE_HAL_OK); } val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_rx_pa_cfg1); val64 &= ~VXGE_HAL_RXMAC_RX_PA_CFG1_REPL_STRIP_VLAN_TAG; vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_rx_pa_cfg1); hldev->header.config.mrpcim_config.mac_config.rpa_repl_strip_vlan_tag = VXGE_HAL_MAC_RPA_REPL_STRIP_VLAN_TAG_DISABLE; vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = 0", __FILE__, __func__, __LINE__); return (VXGE_HAL_OK); } /* * vxge_hal_mrpcim_lag_config_get - Get the LAG config. * @devh: Device handle. * @lconfig: LAG Configuration * * Returns the current LAG configuration. * Returns: VXGE_HAL_OK on success. 
* */ vxge_hal_status_e vxge_hal_mrpcim_lag_config_get( vxge_hal_device_h devh, vxge_hal_lag_config_t *lconfig) { __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert(hldev != NULL); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim( "devh = 0x"VXGE_OS_STXFMT", lconfig = 0x"VXGE_OS_STXFMT, (ptr_t) devh, (ptr_t) lconfig); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } vxge_os_memcpy(lconfig, &hldev->header.config.mrpcim_config.lag_config, sizeof(vxge_hal_lag_config_t)); vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = 0", __FILE__, __func__, __LINE__); return (VXGE_HAL_OK); } /* * vxge_hal_mrpcim_lag_config_set - Set the LAG config. * @devh: Device handle. * @lconfig: LAG Configuration * * Sets the LAG configuration. * Returns: VXGE_HAL_OK on success. 
* */ vxge_hal_status_e vxge_hal_mrpcim_lag_config_set( vxge_hal_device_h devh, vxge_hal_lag_config_t *lconfig) { vxge_hal_status_e status; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert(hldev != NULL); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim( "devh = 0x"VXGE_OS_STXFMT", lconfig = 0x"VXGE_OS_STXFMT, (ptr_t) devh, (ptr_t) lconfig); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } status = __hal_device_lag_config_check(lconfig); if (status != VXGE_HAL_OK) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } vxge_os_memcpy(&hldev->header.config.mrpcim_config.lag_config, lconfig, sizeof(vxge_hal_lag_config_t)); status = __hal_mrpcim_lag_configure(hldev); if (status != VXGE_HAL_OK) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = 0", __FILE__, __func__, __LINE__); return (VXGE_HAL_OK); } /* * vxge_hal_mrpcim_getpause_data -Pause frame frame generation and reception. * @devh: HAL device handle. * @port : Port number 0, 1, or 2 * @tx : A field to return the pause generation capability of the NIC. * @rx : A field to return the pause reception capability of the NIC. * * Returns the Pause frame generation and reception capability of the NIC. 
* Return value: * status */ vxge_hal_status_e vxge_hal_mrpcim_getpause_data( vxge_hal_device_h devh, u32 port, u32 *tx, u32 *rx) { u64 val64; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert(devh != NULL); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim( "devh = 0x"VXGE_OS_STXFMT", port = %d, tx = 0x"VXGE_OS_STXFMT", " "rx = 0x"VXGE_OS_STXFMT, (ptr_t) devh, port, (ptr_t) tx, (ptr_t) rx); if (hldev->header.magic != VXGE_HAL_DEVICE_MAGIC) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_DEVICE); return (VXGE_HAL_ERR_INVALID_DEVICE); } if (port >= VXGE_HAL_MAC_MAX_PORTS) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_PORT); return (VXGE_HAL_ERR_INVALID_PORT); } if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); if (val64 & VXGE_HAL_RXMAC_PAUSE_CFG_PORT_GEN_EN) *tx = 1; if (val64 & VXGE_HAL_RXMAC_PAUSE_CFG_PORT_RCV_EN) *rx = 1; vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: 0", __FILE__, __func__, __LINE__); return (VXGE_HAL_OK); } /* * vxge_hal_mrpcim_setpause_data - set/reset pause frame generation. * @devh: HAL device handle. * @port : Port number 0, 1, or 2 * @tx: A field that indicates the pause generation capability to be * set on the NIC. * @rx: A field that indicates the pause reception capability to be * set on the NIC. * * It can be used to set or reset Pause frame generation or reception * support of the NIC. 
* Return value: * int, returns 0 on Success */ vxge_hal_status_e vxge_hal_mrpcim_setpause_data( vxge_hal_device_h devh, u32 port, u32 tx, u32 rx) { u64 val64; __hal_device_t *hldev = (__hal_device_t *) devh; vxge_assert(devh != NULL); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim( "devh = 0x"VXGE_OS_STXFMT", port = %d, tx = %d, rx = %d", (ptr_t) devh, port, tx, rx); if (hldev->header.magic != VXGE_HAL_DEVICE_MAGIC) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_DEVICE); return (VXGE_HAL_ERR_INVALID_DEVICE); } if (port >= VXGE_HAL_MAC_MAX_PORTS) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_PORT); return (VXGE_HAL_ERR_INVALID_PORT); } if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); if (tx) val64 |= VXGE_HAL_RXMAC_PAUSE_CFG_PORT_GEN_EN; else val64 &= ~VXGE_HAL_RXMAC_PAUSE_CFG_PORT_GEN_EN; if (rx) val64 |= VXGE_HAL_RXMAC_PAUSE_CFG_PORT_RCV_EN; else val64 &= ~VXGE_HAL_RXMAC_PAUSE_CFG_PORT_RCV_EN; vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]); vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: 0", __FILE__, __func__, __LINE__); return (VXGE_HAL_OK); } /* * vxge_hal_mrpcim_bist_test - invokes the MemBist test of the card . * @devh: HAL device handle. * vxge_nic structure. * @data:variable that returns the result of each of the test conducted by * the driver. * * This invokes the MemBist test of the card. We give around * 2 secs time for the Test to complete. 
If it's still not complete * within this peiod, we consider that the test failed. * Return value: * 0 on success and -1 on failure. */ vxge_hal_status_e vxge_hal_mrpcim_bist_test(vxge_hal_device_h devh, u64 *data) { __hal_device_t *hldev = (__hal_device_t *) devh; u8 bist = 0; int retry = 0; vxge_hal_status_e status = VXGE_HAL_FAIL; vxge_assert(devh != NULL); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim("devh = 0x"VXGE_OS_STXFMT, (ptr_t)devh); if (hldev->header.magic != VXGE_HAL_DEVICE_MAGIC) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_INVALID_DEVICE); return (VXGE_HAL_ERR_INVALID_DEVICE); } if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } (void) __hal_vpath_pci_read(hldev, hldev->first_vp_id, vxge_offsetof(vxge_hal_pci_config_le_t, bist), 1, &bist); bist |= 0x40; vxge_os_pci_write8(hldev->header.pdev, hldev->header.cfgh, vxge_offsetof(vxge_hal_pci_config_le_t, bist), bist); while (retry < 20) { (void) __hal_vpath_pci_read(hldev, hldev->first_vp_id, vxge_offsetof(vxge_hal_pci_config_le_t, bist), 1, &bist); if (!(bist & 0x40)) { *data = (bist & 0x0f); status = VXGE_HAL_OK; break; } vxge_os_mdelay(100); retry++; } vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result: %d", __FILE__, __func__, __LINE__, status); return (status); } /* * __hal_mrpcim_initialize - Initialize mrpcim * @hldev: hal device. 
* * Initializes mrpcim * * See also: __hal_mrpcim_terminate() */ vxge_hal_status_e __hal_mrpcim_initialize(__hal_device_t *hldev) { u64 val64; vxge_hal_status_e status = VXGE_HAL_OK; vxge_assert(hldev != NULL); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim("hldev = 0x"VXGE_OS_STXFMT, (ptr_t)hldev); if (!(hldev->access_rights & VXGE_HAL_DEVICE_ACCESS_RIGHT_MRPCIM)) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_PRIVILAGED_OPEARATION); return (VXGE_HAL_ERR_PRIVILAGED_OPEARATION); } hldev->mrpcim = (__hal_mrpcim_t *) vxge_os_malloc(hldev->header.pdev, sizeof(__hal_mrpcim_t)); if (hldev->mrpcim == NULL) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY); return (VXGE_HAL_ERR_OUT_OF_MEMORY); } vxge_os_memzero(hldev->mrpcim, sizeof(__hal_mrpcim_t)); __hal_mrpcim_get_vpd_data(hldev); hldev->mrpcim->mrpcim_stats_block = __hal_blockpool_block_allocate(hldev, VXGE_OS_HOST_PAGE_SIZE); if (hldev->mrpcim->mrpcim_stats_block == NULL) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, VXGE_HAL_ERR_OUT_OF_MEMORY); return (VXGE_HAL_ERR_OUT_OF_MEMORY); } hldev->mrpcim->mrpcim_stats = (vxge_hal_mrpcim_stats_hw_info_t *) hldev->mrpcim->mrpcim_stats_block->memblock; vxge_os_memzero(hldev->mrpcim->mrpcim_stats, sizeof(vxge_hal_mrpcim_stats_hw_info_t)); vxge_os_memzero(&hldev->mrpcim->mrpcim_stats_sav, sizeof(vxge_hal_mrpcim_stats_hw_info_t)); status = __hal_mrpcim_mac_configure(hldev); if (status != VXGE_HAL_OK) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } status = __hal_mrpcim_lag_configure(hldev); if (status != VXGE_HAL_OK) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, 
&hldev->mrpcim_reg->mdio_gen_cfg_port[0]); hldev->mrpcim->mdio_phy_prtad0 = (u32) VXGE_HAL_MDIO_GEN_CFG_PORT_GET_MDIO_PHY_PRTAD(val64); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->mdio_gen_cfg_port[1]); hldev->mrpcim->mdio_phy_prtad1 = (u32) VXGE_HAL_MDIO_GEN_CFG_PORT_GET_MDIO_PHY_PRTAD(val64); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->xgxs_static_cfg_port[0]); hldev->mrpcim->mdio_dte_prtad0 = (u32) VXGE_HAL_XGXS_STATIC_CFG_PORT_GET_MDIO_DTE_PRTAD(val64); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->xgxs_static_cfg_port[1]); hldev->mrpcim->mdio_dte_prtad1 = (u32) VXGE_HAL_XGXS_STATIC_CFG_PORT_GET_MDIO_DTE_PRTAD(val64); vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, hldev->mrpcim->mrpcim_stats_block->dma_addr, &hldev->mrpcim_reg->mrpcim_stats_start_host_addr); val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->mrpcim_general_cfg2); val64 &= ~VXGE_HAL_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_MAP_TO_VPATH(0x1f); val64 |= VXGE_HAL_MRPCIM_GENERAL_CFG2_MRPCIM_STATS_MAP_TO_VPATH( hldev->first_vp_id); vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->mrpcim_general_cfg2); vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, vBIT(0xFFFFFFFFFFFFFFFFULL, 0, VXGE_HAL_MAX_VIRTUAL_PATHS), &hldev->mrpcim_reg->rxmac_authorize_all_addr); vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, vBIT(0xFFFFFFFFFFFFFFFFULL, 0, VXGE_HAL_MAX_VIRTUAL_PATHS), &hldev->mrpcim_reg->rxmac_authorize_all_vid); if (hldev->header.config.intr_mode == VXGE_HAL_INTR_MODE_EMULATED_INTA) { val64 = vxge_os_pio_mem_read64(hldev->header.pdev, hldev->header.regh0, &hldev->mrpcim_reg->rdcrdtarb_cfg0); /* Set MOST to 8 for HP-ISS platform */ val64 &= ~VXGE_HAL_RDCRDTARB_CFG0_MAX_OUTSTANDING_RDS(0x3f); val64 |= 
VXGE_HAL_RDCRDTARB_CFG0_MAX_OUTSTANDING_RDS(8); vxge_os_pio_mem_write64(hldev->header.pdev, hldev->header.regh0, val64, &hldev->mrpcim_reg->rdcrdtarb_cfg0); } (void) __hal_ifmsg_wmsg_post(hldev, hldev->first_vp_id, VXGE_HAL_RTS_ACCESS_STEER_MSG_DEST_BROADCAST, VXGE_HAL_RTS_ACCESS_STEER_DATA0_MSG_TYPE_PRIV_DRIVER_UP, 0); vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } /* * __hal_mrpcim_terminate - Terminates mrpcim * @hldev: hal device. * * Terminates mrpcim. * * See also: __hal_mrpcim_initialize() */ vxge_hal_status_e __hal_mrpcim_terminate(__hal_device_t *hldev) { vxge_hal_device_h devh = (vxge_hal_device_h) hldev; vxge_hal_status_e status = VXGE_HAL_OK; vxge_assert((hldev != NULL) && (hldev->mrpcim != NULL)); vxge_hal_trace_log_mrpcim("==> %s:%s:%d", __FILE__, __func__, __LINE__); vxge_hal_trace_log_mrpcim("hldev = 0x"VXGE_OS_STXFMT, (ptr_t) hldev); if (hldev->mrpcim == NULL) { vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); } (void) __hal_ifmsg_wmsg_post(hldev, hldev->first_vp_id, VXGE_HAL_RTS_ACCESS_STEER_MSG_DEST_BROADCAST, VXGE_HAL_RTS_ACCESS_STEER_DATA0_MSG_TYPE_PRIV_DRIVER_DOWN, 0); if (hldev->mrpcim->mrpcim_stats_block != NULL) { __hal_blockpool_block_free(devh, hldev->mrpcim->mrpcim_stats_block); hldev->mrpcim->mrpcim_stats_block = NULL; } vxge_os_free(hldev->header.pdev, hldev->mrpcim, sizeof(__hal_mrpcim_t)); hldev->mrpcim = NULL; vxge_hal_trace_log_mrpcim("<== %s:%s:%d Result = %d", __FILE__, __func__, __LINE__, status); return (status); }