Index: head/sys/contrib/ncsw/user/env/xx.c =================================================================== --- head/sys/contrib/ncsw/user/env/xx.c (revision 303889) +++ head/sys/contrib/ncsw/user/env/xx.c (revision 303890) @@ -1,949 +1,949 @@ /*- * Copyright (c) 2011 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "error_ext.h" #include "std_ext.h" #include "list_ext.h" #include "mm_ext.h" /* Configuration */ /* Define the number of dTSEC ports active in system */ #define MALLOCSMART_DTSEC_IN_USE 4 /* * Calculate malloc's pool size for dTSEC's buffers. * We reserve 1MB pool for each dTSEC port. 
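 *
 * With these defaults the pool is 4 ports * 1MB = 4MB, carved into
 * half-page slices (2048 bytes when PAGE_SIZE is 4096, as assumed by
 * the "2kB" note below). A minimal worked example of the rounding
 * done by MALLOCSMART_SIZE_TO_SLICE() under that assumption:
 *
 *	MALLOCSMART_SIZE_TO_SLICE(1)    -> 1	(one 2kB slice)
 *	MALLOCSMART_SIZE_TO_SLICE(2048) -> 1
 *	MALLOCSMART_SIZE_TO_SLICE(2049) -> 2	(always rounds up)
 *
 * so MALLOCSMART_SLICES comes to 2048 map entries for the 4MB pool.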
*/ #define MALLOCSMART_POOL_SIZE \ (MALLOCSMART_DTSEC_IN_USE * 1024 * 1024) #define MALLOCSMART_SLICE_SIZE (PAGE_SIZE / 2) /* 2kB */ /* Defines */ #define MALLOCSMART_SIZE_TO_SLICE(x) \ (((x) + MALLOCSMART_SLICE_SIZE - 1) / MALLOCSMART_SLICE_SIZE) #define MALLOCSMART_SLICES \ MALLOCSMART_SIZE_TO_SLICE(MALLOCSMART_POOL_SIZE) /* Malloc Pool for NetCommSW */ MALLOC_DEFINE(M_NETCOMMSW, "NetCommSW", "NetCommSW software stack"); MALLOC_DEFINE(M_NETCOMMSW_MT, "NetCommSWTrack", "NetCommSW software allocation tracker"); /* MallocSmart data structures */ static void *XX_MallocSmartPool; static int XX_MallocSmartMap[MALLOCSMART_SLICES]; static struct mtx XX_MallocSmartLock; static struct mtx XX_MallocTrackLock; MTX_SYSINIT(XX_MallocSmartLockInit, &XX_MallocSmartLock, "NetCommSW MallocSmart Lock", MTX_DEF); MTX_SYSINIT(XX_MallocTrackLockInit, &XX_MallocTrackLock, "NetCommSW MallocTrack Lock", MTX_DEF); /* Interrupt info */ #define XX_INTR_FLAG_PREALLOCATED (1 << 0) #define XX_INTR_FLAG_BOUND (1 << 1) #define XX_INTR_FLAG_FMAN_FIX (1 << 2) struct XX_IntrInfo { driver_intr_t *handler; void *arg; int cpu; int flags; void *cookie; }; static struct XX_IntrInfo XX_IntrInfo[INTR_VECTORS]; /* Portal type identifiers */ enum XX_PortalIdent{ BM_PORTAL = 0, QM_PORTAL, }; /* Structure to store portals' properties */ struct XX_PortalInfo { vm_paddr_t portal_ce_pa[2][MAXCPU]; vm_paddr_t portal_ci_pa[2][MAXCPU]; uint32_t portal_ce_size[2][MAXCPU]; uint32_t portal_ci_size[2][MAXCPU]; vm_offset_t portal_ce_va[2]; vm_offset_t portal_ci_va[2]; uint32_t portal_intr[2][MAXCPU]; }; static struct XX_PortalInfo XX_PInfo; /* The lower 9 bits, through empirical testing, tend to be 0. */ #define XX_MALLOC_TRACK_SHIFT 9 typedef struct XX_MallocTrackStruct { LIST_ENTRY(XX_MallocTrackStruct) entries; physAddress_t pa; void *va; } XX_MallocTrackStruct; LIST_HEAD(XX_MallocTrackerList, XX_MallocTrackStruct) *XX_MallocTracker; u_long XX_MallocHashMask; static XX_MallocTrackStruct * XX_FindTracker(physAddress_t pa); void XX_Exit(int status) { panic("NetCommSW: Exit called with status %i", status); } void XX_Print(char *str, ...) { va_list ap; va_start(ap, str); vprintf(str, ap); va_end(ap); } void * XX_Malloc(uint32_t size) { void *p = (malloc(size, M_NETCOMMSW, M_NOWAIT)); return (p); } static int XX_MallocSmartMapCheck(unsigned int start, unsigned int slices) { unsigned int i; mtx_assert(&XX_MallocSmartLock, MA_OWNED); for (i = start; i < start + slices; i++) if (XX_MallocSmartMap[i]) return (FALSE); return (TRUE); } static void XX_MallocSmartMapSet(unsigned int start, unsigned int slices) { unsigned int i; mtx_assert(&XX_MallocSmartLock, MA_OWNED); for (i = start; i < start + slices; i++) XX_MallocSmartMap[i] = ((i == start) ?
slices : -1); } static void XX_MallocSmartMapClear(unsigned int start, unsigned int slices) { unsigned int i; mtx_assert(&XX_MallocSmartLock, MA_OWNED); for (i = start; i < start + slices; i++) XX_MallocSmartMap[i] = 0; } int XX_MallocSmartInit(void) { int error; error = E_OK; mtx_lock(&XX_MallocSmartLock); if (XX_MallocSmartPool) goto out; /* Allocate MallocSmart pool */ XX_MallocSmartPool = contigmalloc(MALLOCSMART_POOL_SIZE, M_NETCOMMSW, M_NOWAIT, 0, 0xFFFFFFFFFull, MALLOCSMART_POOL_SIZE, 0); if (!XX_MallocSmartPool) { error = E_NO_MEMORY; goto out; } out: mtx_unlock(&XX_MallocSmartLock); return (error); } void * XX_MallocSmart(uint32_t size, int memPartitionId, uint32_t alignment) { unsigned int i; vm_offset_t addr; addr = 0; /* Convert alignment and size to number of slices */ alignment = MALLOCSMART_SIZE_TO_SLICE(alignment); size = MALLOCSMART_SIZE_TO_SLICE(size); /* Lock resources */ mtx_lock(&XX_MallocSmartLock); /* Allocate region */ for (i = 0; i + size <= MALLOCSMART_SLICES; i += alignment) { if (XX_MallocSmartMapCheck(i, size)) { XX_MallocSmartMapSet(i, size); addr = (vm_offset_t)XX_MallocSmartPool + (i * MALLOCSMART_SLICE_SIZE); break; } } /* Unlock resources */ mtx_unlock(&XX_MallocSmartLock); return ((void *)addr); } void XX_FreeSmart(void *p) { unsigned int start, slices; /* Calculate first slice of region */ start = MALLOCSMART_SIZE_TO_SLICE((vm_offset_t)(p) - (vm_offset_t)XX_MallocSmartPool); /* Lock resources */ mtx_lock(&XX_MallocSmartLock); KASSERT(XX_MallocSmartMap[start] > 0, ("XX_FreeSmart: Double or mid-block free!\n")); XX_UntrackAddress(p); /* Free region */ slices = XX_MallocSmartMap[start]; XX_MallocSmartMapClear(start, slices); /* Unlock resources */ mtx_unlock(&XX_MallocSmartLock); } void XX_Free(void *p) { if (p != NULL) XX_UntrackAddress(p); free(p, M_NETCOMMSW); } uint32_t XX_DisableAllIntr(void) { return (intr_disable()); } void XX_RestoreAllIntr(uint32_t flags) { intr_restore(flags); } t_Error XX_Call(uint32_t qid, t_Error (* f)(t_Handle), t_Handle id, t_Handle appId, uint16_t flags ) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (E_OK); } static bool XX_IsPortalIntr(int irq) { int cpu, type; /* Check interrupt numbers of all available portals */ for (cpu = 0, type = 0; XX_PInfo.portal_intr[type][cpu] != 0; cpu++) { if (irq == XX_PInfo.portal_intr[type][cpu]) { /* Found it! */ return (1); } if (XX_PInfo.portal_intr[type][cpu + 1] == 0) { type++; cpu = 0; } } return (0); } void XX_FmanFixIntr(int irq) { XX_IntrInfo[irq].flags |= XX_INTR_FLAG_FMAN_FIX; } static bool XX_FmanNeedsIntrFix(int irq) { if (XX_IntrInfo[irq].flags & XX_INTR_FLAG_FMAN_FIX) return (1); return (0); } static void XX_Dispatch(void *arg) { struct XX_IntrInfo *info; info = arg; /* Bind this thread to proper CPU when SMP has been already started. 
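 *
 * sched_bind() below pins the executing interrupt thread to
 * info->cpu; the XX_INTR_FLAG_BOUND flag makes the binding a
 * one-time operation, and the smp_started test defers it until the
 * application processors are actually running, since binding to a
 * CPU that has not been started yet would be meaningless.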
*/ if ((info->flags & XX_INTR_FLAG_BOUND) == 0 && smp_started && info->cpu >= 0) { thread_lock(curthread); sched_bind(curthread, info->cpu); thread_unlock(curthread); info->flags |= XX_INTR_FLAG_BOUND; } if (info->handler == NULL) { printf("%s(): IRQ handler is NULL!\n", __func__); return; } info->handler(info->arg); } t_Error XX_PreallocAndBindIntr(int irq, unsigned int cpu) { struct resource *r; unsigned int inum; t_Error error; r = (struct resource *)irq; inum = rman_get_start(r); error = XX_SetIntr(irq, XX_Dispatch, &XX_IntrInfo[inum]); if (error != 0) return (error); XX_IntrInfo[inum].flags = XX_INTR_FLAG_PREALLOCATED; XX_IntrInfo[inum].cpu = cpu; return (E_OK); } t_Error XX_DeallocIntr(int irq) { struct resource *r; unsigned int inum; r = (struct resource *)irq; inum = rman_get_start(r); if ((XX_IntrInfo[inum].flags & XX_INTR_FLAG_PREALLOCATED) == 0) return (E_INVALID_STATE); XX_IntrInfo[inum].flags = 0; return (XX_FreeIntr(irq)); } t_Error XX_SetIntr(int irq, t_Isr *f_Isr, t_Handle handle) { - struct device *dev; + device_t dev; struct resource *r; unsigned int flags; int err; r = (struct resource *)irq; dev = rman_get_device(r); irq = rman_get_start(r); /* Handle preallocated interrupts */ if (XX_IntrInfo[irq].flags & XX_INTR_FLAG_PREALLOCATED) { if (XX_IntrInfo[irq].handler != NULL) return (E_BUSY); XX_IntrInfo[irq].handler = f_Isr; XX_IntrInfo[irq].arg = handle; return (E_OK); } flags = INTR_TYPE_NET | INTR_MPSAFE; /* BMAN/QMAN Portal interrupts must be exclusive */ if (XX_IsPortalIntr(irq)) flags |= INTR_EXCL; err = bus_setup_intr(dev, r, flags, NULL, f_Isr, handle, &XX_IntrInfo[irq].cookie); if (err) goto finish; /* * XXX: Bind FMan IRQ to CPU0. The current interrupt subsystem directs each * interrupt to all CPUs. A race between an interrupt assertion and * masking may occur and the interrupt handler may be called multiple times * per one interrupt. FMan doesn't support such a situation. The workaround * is to bind the FMan interrupt to CPU0 only.
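 *
 * A sketch of the failure mode being worked around (as described
 * above, not verified here): with the IRQ fanned out to every CPU,
 * two CPUs can both see the interrupt asserted before either one
 * masks it, so the handler runs twice for a single FMan event.
 * Routing the IRQ to CPU0 alone, via the powerpc_bind_intr() call
 * below, serializes the assert/mask sequence.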
*/ #ifdef SMP if (XX_FmanNeedsIntrFix(irq)) err = powerpc_bind_intr(irq, 0); #endif finish: return (err); } t_Error XX_FreeIntr(int irq) { - struct device *dev; + device_t dev; struct resource *r; r = (struct resource *)irq; dev = rman_get_device(r); irq = rman_get_start(r); /* Handle preallocated interrupts */ if (XX_IntrInfo[irq].flags & XX_INTR_FLAG_PREALLOCATED) { if (XX_IntrInfo[irq].handler == NULL) return (E_INVALID_STATE); XX_IntrInfo[irq].handler = NULL; XX_IntrInfo[irq].arg = NULL; return (E_OK); } return (bus_teardown_intr(dev, r, XX_IntrInfo[irq].cookie)); } t_Error XX_EnableIntr(int irq) { struct resource *r; r = (struct resource *)irq; irq = rman_get_start(r); powerpc_intr_unmask(irq); return (E_OK); } t_Error XX_DisableIntr(int irq) { struct resource *r; r = (struct resource *)irq; irq = rman_get_start(r); powerpc_intr_mask(irq); return (E_OK); } t_TaskletHandle XX_InitTasklet (void (*routine)(void *), void *data) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (NULL); } void XX_FreeTasklet (t_TaskletHandle h_Tasklet) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); } int XX_ScheduleTask(t_TaskletHandle h_Tasklet, int immediate) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (0); } void XX_FlushScheduledTasks(void) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); } int XX_TaskletIsQueued(t_TaskletHandle h_Tasklet) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (0); } void XX_SetTaskletData(t_TaskletHandle h_Tasklet, t_Handle data) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); } t_Handle XX_GetTaskletData(t_TaskletHandle h_Tasklet) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (NULL); } t_Handle XX_InitSpinlock(void) { struct mtx *m; m = malloc(sizeof(*m), M_NETCOMMSW, M_NOWAIT | M_ZERO); if (!m) return (0); mtx_init(m, "NetCommSW Lock", NULL, MTX_DEF | MTX_DUPOK); return (m); } void XX_FreeSpinlock(t_Handle h_Spinlock) { struct mtx *m; m = h_Spinlock; mtx_destroy(m); free(m, M_NETCOMMSW); } void XX_LockSpinlock(t_Handle h_Spinlock) { struct mtx *m; m = h_Spinlock; mtx_lock(m); } void XX_UnlockSpinlock(t_Handle h_Spinlock) { struct mtx *m; m = h_Spinlock; mtx_unlock(m); } uint32_t XX_LockIntrSpinlock(t_Handle h_Spinlock) { XX_LockSpinlock(h_Spinlock); return (0); } void XX_UnlockIntrSpinlock(t_Handle h_Spinlock, uint32_t intrFlags) { XX_UnlockSpinlock(h_Spinlock); } uint32_t XX_CurrentTime(void) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (0); } t_Handle XX_CreateTimer(void) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (NULL); } void XX_FreeTimer(t_Handle h_Timer) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); } void XX_StartTimer(t_Handle h_Timer, uint32_t msecs, bool periodic, void (*f_TimerExpired)(t_Handle), t_Handle h_Arg) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); } uint32_t XX_GetExpirationTime(t_Handle h_Timer) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (0); } void XX_StopTimer(t_Handle h_Timer) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() 
called!\n", __func__); } void XX_ModTimer(t_Handle h_Timer, uint32_t msecs) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); } int XX_TimerIsActive(t_Handle h_Timer) { /* Not referenced */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (0); } uint32_t XX_Sleep(uint32_t msecs) { XX_UDelay(1000 * msecs); return (0); } void XX_UDelay(uint32_t usecs) { DELAY(usecs); } t_Error XX_IpcRegisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH], t_IpcMsgHandler *f_MsgHandler, t_Handle h_Module, uint32_t replyLength) { /* * This function returns fake E_OK status and does nothing * as NetCommSW IPC is not used by FreeBSD drivers. */ return (E_OK); } t_Error XX_IpcUnregisterMsgHandler(char addr[XX_IPC_MAX_ADDR_NAME_LENGTH]) { /* * This function returns fake E_OK status and does nothing * as NetCommSW IPC is not used by FreeBSD drivers. */ return (E_OK); } t_Error XX_IpcSendMessage(t_Handle h_Session, uint8_t *p_Msg, uint32_t msgLength, uint8_t *p_Reply, uint32_t *p_ReplyLength, t_IpcMsgCompletion *f_Completion, t_Handle h_Arg) { /* Should not be called */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (E_OK); } t_Handle XX_IpcInitSession(char destAddr[XX_IPC_MAX_ADDR_NAME_LENGTH], char srcAddr[XX_IPC_MAX_ADDR_NAME_LENGTH]) { /* Should not be called */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (E_OK); } t_Error XX_IpcFreeSession(t_Handle h_Session) { /* Should not be called */ printf("NetCommSW: Unimplemented function %s() called!\n", __func__); return (E_OK); } extern void db_trace_self(void); physAddress_t XX_VirtToPhys(void *addr) { vm_paddr_t paddr; int cpu; cpu = PCPU_GET(cpuid); /* Handle NULL address */ if (addr == NULL) return (-1); /* Handle BMAN mappings */ if (((vm_offset_t)addr >= XX_PInfo.portal_ce_va[BM_PORTAL]) && ((vm_offset_t)addr < XX_PInfo.portal_ce_va[BM_PORTAL] + XX_PInfo.portal_ce_size[BM_PORTAL][cpu])) return (XX_PInfo.portal_ce_pa[BM_PORTAL][cpu] + (vm_offset_t)addr - XX_PInfo.portal_ce_va[BM_PORTAL]); if (((vm_offset_t)addr >= XX_PInfo.portal_ci_va[BM_PORTAL]) && ((vm_offset_t)addr < XX_PInfo.portal_ci_va[BM_PORTAL] + XX_PInfo.portal_ci_size[BM_PORTAL][cpu])) return (XX_PInfo.portal_ci_pa[BM_PORTAL][cpu] + (vm_offset_t)addr - XX_PInfo.portal_ci_va[BM_PORTAL]); /* Handle QMAN mappings */ if (((vm_offset_t)addr >= XX_PInfo.portal_ce_va[QM_PORTAL]) && ((vm_offset_t)addr < XX_PInfo.portal_ce_va[QM_PORTAL] + XX_PInfo.portal_ce_size[QM_PORTAL][cpu])) return (XX_PInfo.portal_ce_pa[QM_PORTAL][cpu] + (vm_offset_t)addr - XX_PInfo.portal_ce_va[QM_PORTAL]); if (((vm_offset_t)addr >= XX_PInfo.portal_ci_va[QM_PORTAL]) && ((vm_offset_t)addr < XX_PInfo.portal_ci_va[QM_PORTAL] + XX_PInfo.portal_ci_size[QM_PORTAL][cpu])) return (XX_PInfo.portal_ci_pa[QM_PORTAL][cpu] + (vm_offset_t)addr - XX_PInfo.portal_ci_va[QM_PORTAL]); paddr = pmap_kextract((vm_offset_t)addr); if (!paddr) printf("NetCommSW: " "Unable to translate virtual address 0x%08X!\n", addr); else XX_TrackAddress(addr); return (paddr); } void * XX_PhysToVirt(physAddress_t addr) { XX_MallocTrackStruct *ts; int cpu; cpu = PCPU_GET(cpuid); /* Handle BMAN mappings */ if ((addr >= XX_PInfo.portal_ce_pa[BM_PORTAL][cpu]) && (addr < XX_PInfo.portal_ce_pa[BM_PORTAL][cpu] + XX_PInfo.portal_ce_size[BM_PORTAL][cpu])) return ((void *)(XX_PInfo.portal_ci_va[BM_PORTAL] + (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[BM_PORTAL][cpu]))); if ((addr >= XX_PInfo.portal_ci_pa[BM_PORTAL][cpu]) && (addr < 
XX_PInfo.portal_ci_pa[BM_PORTAL][cpu] + XX_PInfo.portal_ci_size[BM_PORTAL][cpu])) return ((void *)(XX_PInfo.portal_ci_va[BM_PORTAL] + (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[BM_PORTAL][cpu]))); /* Handle QMAN mappings */ if ((addr >= XX_PInfo.portal_ce_pa[QM_PORTAL][cpu]) && (addr < XX_PInfo.portal_ce_pa[QM_PORTAL][cpu] + XX_PInfo.portal_ce_size[QM_PORTAL][cpu])) return ((void *)(XX_PInfo.portal_ce_va[QM_PORTAL] + (vm_offset_t)(addr - XX_PInfo.portal_ce_pa[QM_PORTAL][cpu]))); if ((addr >= XX_PInfo.portal_ci_pa[QM_PORTAL][cpu]) && (addr < XX_PInfo.portal_ci_pa[QM_PORTAL][cpu] + XX_PInfo.portal_ci_size[QM_PORTAL][cpu])) return ((void *)(XX_PInfo.portal_ci_va[QM_PORTAL] + (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[QM_PORTAL][cpu]))); mtx_lock(&XX_MallocTrackLock); ts = XX_FindTracker(addr); mtx_unlock(&XX_MallocTrackLock); if (ts != NULL) return ts->va; printf("NetCommSW: " "Unable to translate physical address 0x%08llX!\n", addr); return (NULL); } void XX_PortalSetInfo(device_t dev) { char *dev_name; struct dpaa_portals_softc *sc; int i, type, len; len = strlen("bman-portals"); dev_name = malloc(len + 1, M_TEMP, M_WAITOK | M_ZERO); strncpy(dev_name, device_get_name(dev), len); if (strncmp(dev_name, "bman-portals", len) && strncmp(dev_name, "qman-portals", len)) goto end; if (strncmp(dev_name, "bman-portals", len) == 0) type = BM_PORTAL; else type = QM_PORTAL; sc = device_get_softc(dev); for (i = 0; sc->sc_dp[i].dp_ce_pa != 0; i++) { XX_PInfo.portal_ce_pa[type][i] = sc->sc_dp[i].dp_ce_pa; XX_PInfo.portal_ci_pa[type][i] = sc->sc_dp[i].dp_ci_pa; XX_PInfo.portal_ce_size[type][i] = sc->sc_dp[i].dp_ce_size; XX_PInfo.portal_ci_size[type][i] = sc->sc_dp[i].dp_ci_size; XX_PInfo.portal_intr[type][i] = sc->sc_dp[i].dp_intr_num; } XX_PInfo.portal_ce_va[type] = rman_get_bushandle(sc->sc_rres[0]); XX_PInfo.portal_ci_va[type] = rman_get_bushandle(sc->sc_rres[1]); end: free(dev_name, M_TEMP); } static XX_MallocTrackStruct * XX_FindTracker(physAddress_t pa) { struct XX_MallocTrackerList *l; XX_MallocTrackStruct *tp; l = &XX_MallocTracker[(pa >> XX_MALLOC_TRACK_SHIFT) & XX_MallocHashMask]; LIST_FOREACH(tp, l, entries) { if (tp->pa == pa) return tp; } return NULL; } void XX_TrackInit(void) { if (XX_MallocTracker == NULL) { XX_MallocTracker = hashinit(64, M_NETCOMMSW_MT, &XX_MallocHashMask); } } void XX_TrackAddress(void *addr) { physAddress_t pa; struct XX_MallocTrackerList *l; XX_MallocTrackStruct *ts; pa = pmap_kextract((vm_offset_t)addr); ts = malloc(sizeof(*ts), M_NETCOMMSW_MT, M_NOWAIT); if (ts == NULL) return; ts->va = addr; ts->pa = pa; l = &XX_MallocTracker[(pa >> XX_MALLOC_TRACK_SHIFT) & XX_MallocHashMask]; mtx_lock(&XX_MallocTrackLock); if (XX_FindTracker(pa) != NULL) free(ts, M_NETCOMMSW_MT); else LIST_INSERT_HEAD(l, ts, entries); mtx_unlock(&XX_MallocTrackLock); } void XX_UntrackAddress(void *addr) { physAddress_t pa; XX_MallocTrackStruct *ts; pa = pmap_kextract((vm_offset_t)addr); KASSERT(XX_MallocTracker != NULL, ("Untracking an address before it's even initialized!\n")); mtx_lock(&XX_MallocTrackLock); ts = XX_FindTracker(pa); if (ts != NULL) LIST_REMOVE(ts, entries); mtx_unlock(&XX_MallocTrackLock); free(ts, M_NETCOMMSW_MT); } Index: head/sys/contrib/octeon-sdk/cvmx-twsi.c =================================================================== --- head/sys/contrib/octeon-sdk/cvmx-twsi.c (revision 303889) +++ head/sys/contrib/octeon-sdk/cvmx-twsi.c (revision 303890) @@ -1,559 +1,559 @@ /***********************license start*************** * Copyright (c) 2003-2010 Cavium Inc. (support@cavium.com).
All rights * reserved. * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of Cavium Inc. nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * This Software, including technical data, may be subject to U.S. export control * laws, including the U.S. Export Administration Act and its associated * regulations, and may be subject to export or import regulations in other * countries. * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" * AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE, * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR * PERFORMANCE OF THE SOFTWARE LIES WITH YOU. ***********************license end**************************************/ /** * @file * * Interface to the TWSI / I2C bus * *
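 * A minimal usage sketch (illustrative only: the bus number 0 and
 * the 7-bit device address 0x50 are assumptions, not taken from
 * this file):
 *
 *	uint64_t data;
 *	// read 2 bytes at internal address 0x0000, 2-byte addressing
 *	if (cvmx_twsix_read_ia(0, 0x50, 0x0000, 2, 2, &data) == 2)
 *		printf("EEPROM word: 0x%04x\n", (int)data);
 *
 *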
$Revision: 70030 $
* */ #ifdef CVMX_BUILD_FOR_LINUX_KERNEL #include #include #include #else #include "cvmx.h" #include "cvmx-twsi.h" #if !defined(CVMX_BUILD_FOR_FREEBSD_KERNEL) #include "cvmx-csr-db.h" #endif #endif //#define PRINT_TWSI_CONFIG #ifdef PRINT_TWSI_CONFIG #define twsi_printf printf #else #define twsi_printf(...) #define cvmx_csr_db_decode(...) #endif #ifdef CVMX_BUILD_FOR_LINUX_KERNEL # if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) static struct i2c_adapter *__cvmx_twsix_get_adapter(int twsi_id) { struct octeon_i2c { wait_queue_head_t queue; struct i2c_adapter adap; int irq; int twsi_freq; int sys_freq; resource_size_t twsi_phys; void __iomem *twsi_base; resource_size_t regsize; - struct device *dev; + device_t dev; int broken_irq_mode; }; struct i2c_adapter *adapter; struct octeon_i2c *i2c; adapter = i2c_get_adapter(0); if (adapter == NULL) return NULL; i2c = container_of(adapter, struct octeon_i2c, adap); return &i2c[twsi_id].adap; } #endif #endif /** * Do a twsi read from a 7 bit device address using an (optional) internal address. * Up to 8 bytes can be read at a time. * * @param twsi_id which Octeon TWSI bus to use * @param dev_addr Device address (7 bit) * @param internal_addr * Internal address. Can be 0, 1 or 2 bytes in width * @param num_bytes Number of data bytes to read * @param ia_width_bytes * Internal address size in bytes (0, 1, or 2) * @param data Pointer argument where the read data is returned. * * @return read data returned in 'data' argument * Number of bytes read on success * -1 on failure */ int cvmx_twsix_read_ia(int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes, uint64_t *data) { #ifdef CVMX_BUILD_FOR_LINUX_KERNEL # if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) struct i2c_adapter *adapter; u8 data_buf[8]; u8 addr_buf[8]; struct i2c_msg msg[2]; uint64_t r; int i, j; if (ia_width_bytes == 0) return cvmx_twsix_read(twsi_id, dev_addr, num_bytes, data); BUG_ON(ia_width_bytes > 2); BUG_ON(num_bytes > 8 || num_bytes < 1); adapter = __cvmx_twsix_get_adapter(twsi_id); if (adapter == NULL) return -1; for (j = 0, i = ia_width_bytes - 1; i >= 0; i--, j++) addr_buf[j] = (u8)(internal_addr >> (i * 8)); msg[0].addr = dev_addr; msg[0].flags = 0; msg[0].len = ia_width_bytes; msg[0].buf = addr_buf; msg[1].addr = dev_addr; msg[1].flags = I2C_M_RD; msg[1].len = num_bytes; msg[1].buf = data_buf; i = i2c_transfer(adapter, msg, 2); i2c_put_adapter(adapter); if (i == 2) { r = 0; for (i = 0; i < num_bytes; i++) r = (r << 8) | data_buf[i]; *data = r; return num_bytes; } else { return -1; } # else BUG(); /* The I2C driver is not compiled in */ # endif #else cvmx_mio_twsx_sw_twsi_t sw_twsi_val; cvmx_mio_twsx_sw_twsi_ext_t twsi_ext; int retry_limit = 5; if (num_bytes < 1 || num_bytes > 8 || !data || ia_width_bytes < 0 || ia_width_bytes > 2) return -1; retry: twsi_ext.u64 = 0; sw_twsi_val.u64 = 0; sw_twsi_val.s.v = 1; sw_twsi_val.s.r = 1; sw_twsi_val.s.sovr = 1; sw_twsi_val.s.size = num_bytes - 1; sw_twsi_val.s.a = dev_addr; if (ia_width_bytes > 0) { sw_twsi_val.s.op = 1; sw_twsi_val.s.ia = (internal_addr >> 3) & 0x1f; sw_twsi_val.s.eop_ia = internal_addr & 0x7; } if (ia_width_bytes == 2) { sw_twsi_val.s.eia = 1; twsi_ext.s.ia = internal_addr >> 8; cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI_EXT(twsi_id), twsi_ext.u64); } cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64); cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64); while (((cvmx_mio_twsx_sw_twsi_t)(sw_twsi_val.u64 = 
cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id)))).s.v) cvmx_wait(1000); twsi_printf("Results:\n"); cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64); if (!sw_twsi_val.s.r) { /* Check the reason for the failure. We may need to retry to handle multi-master ** configurations. ** Lost arbitration : 0x38, 0x68, 0xB0, 0x78 ** Core busy as slave: 0x80, 0x88, 0xA0, 0xA8, 0xB8, 0xC0, 0xC8 */ if (sw_twsi_val.s.d == 0x38 || sw_twsi_val.s.d == 0x68 || sw_twsi_val.s.d == 0xB0 || sw_twsi_val.s.d == 0x78 || sw_twsi_val.s.d == 0x80 || sw_twsi_val.s.d == 0x88 || sw_twsi_val.s.d == 0xA0 || sw_twsi_val.s.d == 0xA8 || sw_twsi_val.s.d == 0xB8 || sw_twsi_val.s.d == 0xC8) { if (retry_limit-- > 0) { cvmx_wait_usec(100); goto retry; } } /* For all other errors, return an error code */ return -1; } *data = (sw_twsi_val.s.d & (0xFFFFFFFF >> (32 - num_bytes*8))); if (num_bytes > 4) { twsi_ext.u64 = cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI_EXT(twsi_id)); *data |= ((unsigned long long)(twsi_ext.s.d & (0xFFFFFFFF >> (32 - num_bytes*8))) << 32); } return num_bytes; #endif } /** * Read from a TWSI device (7 bit device address only) without generating any * internal addresses. * Reads 1-8 bytes and returns them in the data pointer. * * @param twsi_id TWSI interface on Octeon to use * @param dev_addr TWSI device address (7 bit only) * @param num_bytes number of bytes to read * @param data Pointer to data read from TWSI device * * @return Number of bytes read on success * -1 on error */ int cvmx_twsix_read(int twsi_id, uint8_t dev_addr, int num_bytes, uint64_t *data) { #ifdef CVMX_BUILD_FOR_LINUX_KERNEL # if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) struct i2c_adapter *adapter; u8 data_buf[8]; struct i2c_msg msg[1]; uint64_t r; int i; BUG_ON(num_bytes > 8 || num_bytes < 1); adapter = __cvmx_twsix_get_adapter(twsi_id); if (adapter == NULL) return -1; msg[0].addr = dev_addr; msg[0].flags = I2C_M_RD; msg[0].len = num_bytes; msg[0].buf = data_buf; i = i2c_transfer(adapter, msg, 1); i2c_put_adapter(adapter); if (i == 1) { r = 0; for (i = 0; i < num_bytes; i++) r = (r << 8) | data_buf[i]; *data = r; return num_bytes; } else { return -1; } # else BUG(); /* The I2C driver is not compiled in */ # endif #else cvmx_mio_twsx_sw_twsi_t sw_twsi_val; cvmx_mio_twsx_sw_twsi_ext_t twsi_ext; int retry_limit = 5; if (num_bytes > 8 || num_bytes < 1) return -1; retry: sw_twsi_val.u64 = 0; sw_twsi_val.s.v = 1; sw_twsi_val.s.r = 1; sw_twsi_val.s.a = dev_addr; sw_twsi_val.s.sovr = 1; sw_twsi_val.s.size = num_bytes - 1; cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64); cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64); while (((cvmx_mio_twsx_sw_twsi_t)(sw_twsi_val.u64 = cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id)))).s.v) cvmx_wait(1000); twsi_printf("Results:\n"); cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64); if (!sw_twsi_val.s.r) { /* Check the reason for the failure. We may need to retry to handle multi-master ** configurations.
** Lost arbitration : 0x38, 0x68, 0xB0, 0x78 ** Core busy as slave: 0x80, 0x88, 0xA0, 0xA8, 0xB8, 0xC0, 0xC8 */ if (sw_twsi_val.s.d == 0x38 || sw_twsi_val.s.d == 0x68 || sw_twsi_val.s.d == 0xB0 || sw_twsi_val.s.d == 0x78 || sw_twsi_val.s.d == 0x80 || sw_twsi_val.s.d == 0x88 || sw_twsi_val.s.d == 0xA0 || sw_twsi_val.s.d == 0xA8 || sw_twsi_val.s.d == 0xB8 || sw_twsi_val.s.d == 0xC8) { if (retry_limit-- > 0) { cvmx_wait_usec(100); goto retry; } } /* For all other errors, return an error code */ return -1; } *data = (sw_twsi_val.s.d & (0xFFFFFFFF >> (32 - num_bytes*8))); if (num_bytes > 4) { twsi_ext.u64 = cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI_EXT(twsi_id)); *data |= ((unsigned long long)(twsi_ext.s.d & (0xFFFFFFFF >> (32 - num_bytes*8))) << 32); } return num_bytes; #endif } /** * Perform a twsi write operation to a 7 bit device address. * * Note that many eeprom devices have page restrictions regarding address boundaries * that can be crossed in one write operation. This is device dependent, and this routine * does nothing in this regard. * This command does not generate any internal addresses. * * @param twsi_id Octeon TWSI interface to use * @param dev_addr TWSI device address * @param num_bytes Number of bytes to write (between 1 and 8 inclusive) * @param data Data to write * * @return 0 on success * -1 on failure */ int cvmx_twsix_write(int twsi_id, uint8_t dev_addr, int num_bytes, uint64_t data) { #ifdef CVMX_BUILD_FOR_LINUX_KERNEL # if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) struct i2c_adapter *adapter; u8 data_buf[8]; struct i2c_msg msg[1]; int i, j; BUG_ON(num_bytes > 8 || num_bytes < 1); adapter = __cvmx_twsix_get_adapter(twsi_id); if (adapter == NULL) return -1; for (j = 0, i = num_bytes - 1; i >= 0; i--, j++) data_buf[j] = (u8)(data >> (i * 8)); msg[0].addr = dev_addr; msg[0].flags = 0; msg[0].len = num_bytes; msg[0].buf = data_buf; i = i2c_transfer(adapter, msg, 1); i2c_put_adapter(adapter); if (i == 1) return num_bytes; else return -1; # else BUG(); /* The I2C driver is not compiled in */ # endif #else cvmx_mio_twsx_sw_twsi_t sw_twsi_val; if (num_bytes > 8 || num_bytes < 1) return -1; sw_twsi_val.u64 = 0; sw_twsi_val.s.v = 1; sw_twsi_val.s.a = dev_addr; sw_twsi_val.s.d = data & 0xffffffff; sw_twsi_val.s.sovr = 1; sw_twsi_val.s.size = num_bytes - 1; if (num_bytes > 4) { /* Upper four bytes go into a separate register */ cvmx_mio_twsx_sw_twsi_ext_t twsi_ext; twsi_ext.u64 = 0; twsi_ext.s.d = data >> 32; cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI_EXT(twsi_id), twsi_ext.u64); } cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64); cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64); while (((cvmx_mio_twsx_sw_twsi_t)(sw_twsi_val.u64 = cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id)))).s.v) ; twsi_printf("Results:\n"); cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64); if (!sw_twsi_val.s.r) return -1; return 0; #endif } /** * Write 1-8 bytes to a TWSI device using an internal address. * * @param twsi_id which TWSI interface on Octeon to use * @param dev_addr TWSI device address (7 bit only) * @param internal_addr * TWSI internal address (0, 8, or 16 bits) * @param num_bytes Number of bytes to write (1-8) * @param ia_width_bytes * internal address width, in bytes (0, 1, 2) * @param data Data to write. Data is written MSB first on the twsi bus, and only the lower * num_bytes bytes of the argument are valid. (If a 2 byte write is done, only * the low 2 bytes of the argument are used.)
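 *
 * A hedged example (bus 0, device address 0x50 and internal offset
 * 0x0010 are illustrative assumptions): writing a 16-bit value with
 * 2-byte internal addressing would be
 *
 *	cvmx_twsix_write_ia(0, 0x50, 0x0010, 2, 2, 0xBEEF);
 *
 * which sends 0xBE then 0xEF after the two address bytes.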
* * @return Number of bytes written on success, * -1 on error */ int cvmx_twsix_write_ia(int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes, uint64_t data) { #ifdef CVMX_BUILD_FOR_LINUX_KERNEL # if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) struct i2c_adapter *adapter; u8 data_buf[8]; u8 addr_buf[8]; struct i2c_msg msg[2]; int i, j; if (ia_width_bytes == 0) return cvmx_twsix_write(twsi_id, dev_addr, num_bytes, data); BUG_ON(ia_width_bytes > 2); BUG_ON(num_bytes > 8 || num_bytes < 1); adapter = __cvmx_twsix_get_adapter(twsi_id); if (adapter == NULL) return -1; for (j = 0, i = ia_width_bytes - 1; i >= 0; i--, j++) addr_buf[j] = (u8)(internal_addr >> (i * 8)); for (j = 0, i = num_bytes - 1; i >= 0; i--, j++) data_buf[j] = (u8)(data >> (i * 8)); msg[0].addr = dev_addr; msg[0].flags = 0; msg[0].len = ia_width_bytes; msg[0].buf = addr_buf; msg[1].addr = dev_addr; msg[1].flags = 0; msg[1].len = num_bytes; msg[1].buf = data_buf; i = i2c_transfer(adapter, msg, 2); i2c_put_adapter(adapter); if (i == 2) { /* Poll until reads succeed, or polling times out */ int to = 100; while (to-- > 0) { uint64_t data; if (cvmx_twsix_read(twsi_id, dev_addr, 1, &data) >= 0) break; } } if (i == 2) return num_bytes; else return -1; # else BUG(); /* The I2C driver is not compiled in */ # endif #else cvmx_mio_twsx_sw_twsi_t sw_twsi_val; cvmx_mio_twsx_sw_twsi_ext_t twsi_ext; int to; if (num_bytes < 1 || num_bytes > 8 || ia_width_bytes < 0 || ia_width_bytes > 2) return -1; twsi_ext.u64 = 0; sw_twsi_val.u64 = 0; sw_twsi_val.s.v = 1; sw_twsi_val.s.sovr = 1; sw_twsi_val.s.size = num_bytes - 1; sw_twsi_val.s.a = dev_addr; sw_twsi_val.s.d = 0xFFFFFFFF & data; if (ia_width_bytes > 0) { sw_twsi_val.s.op = 1; sw_twsi_val.s.ia = (internal_addr >> 3) & 0x1f; sw_twsi_val.s.eop_ia = internal_addr & 0x7; } if (ia_width_bytes == 2) { sw_twsi_val.s.eia = 1; twsi_ext.s.ia = internal_addr >> 8; } if (num_bytes > 4) twsi_ext.s.d = data >> 32; twsi_printf("%s: twsi_id=%x, dev_addr=%x, internal_addr=%x\n\tnum_bytes=%d, ia_width_bytes=%d, data=%lx\n", __FUNCTION__, twsi_id, dev_addr, internal_addr, num_bytes, ia_width_bytes, data); cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI_EXT(twsi_id), twsi_ext.u64); cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI_EXT(twsi_id), twsi_ext.u64); cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64); cvmx_write_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64); while (((cvmx_mio_twsx_sw_twsi_t)(sw_twsi_val.u64 = cvmx_read_csr(CVMX_MIO_TWSX_SW_TWSI(twsi_id)))).s.v) ; twsi_printf("Results:\n"); cvmx_csr_db_decode(cvmx_get_proc_id(), CVMX_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u64); /* Poll until reads succeed, or polling times out */ to = 100; while (to-- > 0) { uint64_t data; if (cvmx_twsix_read(twsi_id, dev_addr, 1, &data) >= 0) break; } if (to <= 0) return -1; return num_bytes; #endif } Index: head/sys/dev/auxio/auxio.c =================================================================== --- head/sys/dev/auxio/auxio.c (revision 303889) +++ head/sys/dev/auxio/auxio.c (revision 303890) @@ -1,323 +1,323 @@ /*- * Copyright (c) 2004 Pyun YongHyeon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2.
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* $NetBSD: auxio.c,v 1.11 2003/07/15 03:36:04 lukem Exp $ */ /*- * Copyright (c) 2000, 2001 Matthew R. Green * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * AUXIO registers support on the SBus & EBus2, used for the floppy driver * and to control the system LED, for the BLINK option. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * On sun4u, auxio exists with one register (LED) on the SBus, and 5 * registers on the EBus2 (pci) (LED, PCIMODE, FREQUENCY, SCSI * OSCILLATOR, and TEMP SENSE).
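 *
 * The register indices below mirror that layout. Only AUXIO_PCIO_LED
 * is mapped on SBus (auxio_sbus_attach() sets sc_nauxio to 1), while
 * EBus2 attachments map all five. The access width differs as well:
 * the LED register is accessed 4 bytes wide on EBus2 and 1 byte wide
 * on SBus, as auxio_led_read()/auxio_led_write() show further down.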
*/ #define AUXIO_PCIO_LED 0 #define AUXIO_PCIO_PCI 1 #define AUXIO_PCIO_FREQ 2 #define AUXIO_PCIO_OSC 3 #define AUXIO_PCIO_TEMP 4 #define AUXIO_PCIO_NREG 5 struct auxio_softc { - struct device *sc_dev; + device_t sc_dev; int sc_nauxio; struct resource *sc_res[AUXIO_PCIO_NREG]; int sc_rid[AUXIO_PCIO_NREG]; bus_space_tag_t sc_regt[AUXIO_PCIO_NREG]; bus_space_handle_t sc_regh[AUXIO_PCIO_NREG]; struct cdev *sc_led_dev; u_int32_t sc_led_stat; int sc_flags; #define AUXIO_LEDONLY 0x1 #define AUXIO_EBUS 0x2 #define AUXIO_SBUS 0x4 struct mtx sc_lock; }; static void auxio_led_func(void *arg, int onoff); static int auxio_attach_common(struct auxio_softc *); static int auxio_bus_probe(device_t); static int auxio_sbus_attach(device_t); static int auxio_ebus_attach(device_t); static int auxio_bus_detach(device_t); static void auxio_free_resource(struct auxio_softc *); static __inline u_int32_t auxio_led_read(struct auxio_softc *); static __inline void auxio_led_write(struct auxio_softc *, u_int32_t); /* SBus */ static device_method_t auxio_sbus_methods[] = { DEVMETHOD(device_probe, auxio_bus_probe), DEVMETHOD(device_attach, auxio_sbus_attach), DEVMETHOD(device_detach, auxio_bus_detach), DEVMETHOD_END }; static driver_t auxio_sbus_driver = { "auxio", auxio_sbus_methods, sizeof(struct auxio_softc) }; static devclass_t auxio_devclass; /* The probe order is handled by sbus(4). */ EARLY_DRIVER_MODULE(auxio, sbus, auxio_sbus_driver, auxio_devclass, 0, 0, BUS_PASS_DEFAULT); MODULE_DEPEND(auxio, sbus, 1, 1, 1); /* EBus */ static device_method_t auxio_ebus_methods[] = { DEVMETHOD(device_probe, auxio_bus_probe), DEVMETHOD(device_attach, auxio_ebus_attach), DEVMETHOD(device_detach, auxio_bus_detach), DEVMETHOD_END }; static driver_t auxio_ebus_driver = { "auxio", auxio_ebus_methods, sizeof(struct auxio_softc) }; EARLY_DRIVER_MODULE(auxio, ebus, auxio_ebus_driver, auxio_devclass, 0, 0, BUS_PASS_DEFAULT); MODULE_DEPEND(auxio, ebus, 1, 1, 1); MODULE_VERSION(auxio, 1); #define AUXIO_LOCK_INIT(sc) \ mtx_init(&sc->sc_lock, "auxio mtx", NULL, MTX_DEF) #define AUXIO_LOCK(sc) mtx_lock(&sc->sc_lock) #define AUXIO_UNLOCK(sc) mtx_unlock(&sc->sc_lock) #define AUXIO_LOCK_DESTROY(sc) mtx_destroy(&sc->sc_lock) static __inline void auxio_led_write(struct auxio_softc *sc, u_int32_t v) { if (sc->sc_flags & AUXIO_EBUS) bus_space_write_4(sc->sc_regt[AUXIO_PCIO_LED], sc->sc_regh[AUXIO_PCIO_LED], 0, v); else bus_space_write_1(sc->sc_regt[AUXIO_PCIO_LED], sc->sc_regh[AUXIO_PCIO_LED], 0, v); } static __inline u_int32_t auxio_led_read(struct auxio_softc *sc) { u_int32_t led; if (sc->sc_flags & AUXIO_EBUS) led = bus_space_read_4(sc->sc_regt[AUXIO_PCIO_LED], sc->sc_regh[AUXIO_PCIO_LED], 0); else led = bus_space_read_1(sc->sc_regt[AUXIO_PCIO_LED], sc->sc_regh[AUXIO_PCIO_LED], 0); return (led); } static void auxio_led_func(void *arg, int onoff) { struct auxio_softc *sc; u_int32_t led; sc = (struct auxio_softc *)arg; AUXIO_LOCK(sc); /* * NB: We must not touch the other bits of the SBus AUXIO reg. 
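 *
 * Hence the read-modify-write under AUXIO_LOCK here: only the
 * AUXIO_LED_LED bit is set or cleared, and every other bit is
 * written back exactly as it was read.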
*/ led = auxio_led_read(sc); if (onoff) led |= AUXIO_LED_LED; else led &= ~AUXIO_LED_LED; auxio_led_write(sc, led); AUXIO_UNLOCK(sc); } static int auxio_bus_probe(device_t dev) { const char *name; name = ofw_bus_get_name(dev); if (strcmp("auxio", name) == 0) { device_set_desc(dev, "Sun Auxiliary I/O"); return (0); } return (ENXIO); } static int auxio_ebus_attach(device_t dev) { struct auxio_softc *sc; sc = device_get_softc(dev); sc->sc_dev = dev; AUXIO_LOCK_INIT(sc); sc->sc_nauxio = AUXIO_PCIO_NREG; sc->sc_flags = AUXIO_LEDONLY | AUXIO_EBUS; return(auxio_attach_common(sc)); } static int auxio_attach_common(struct auxio_softc *sc) { struct resource *res; int i; for (i = 0; i < sc->sc_nauxio; i++) { sc->sc_rid[i] = i; res = bus_alloc_resource_any(sc->sc_dev, SYS_RES_MEMORY, &sc->sc_rid[i], RF_ACTIVE); if (res == NULL) { device_printf(sc->sc_dev, "could not allocate resources\n"); goto attach_fail; } sc->sc_res[i] = res; sc->sc_regt[i] = rman_get_bustag(res); sc->sc_regh[i] = rman_get_bushandle(res); } sc->sc_led_stat = auxio_led_read(sc) & AUXIO_LED_LED; sc->sc_led_dev = led_create(auxio_led_func, sc, "auxioled"); /* turn on the LED */ auxio_led_func(sc, 1); return (0); attach_fail: auxio_free_resource(sc); return (ENXIO); } static int auxio_bus_detach(device_t dev) { struct auxio_softc *sc; sc = device_get_softc(dev); led_destroy(sc->sc_led_dev); auxio_led_func(sc, sc->sc_led_stat); auxio_free_resource(sc); return (0); } static void auxio_free_resource(struct auxio_softc *sc) { int i; for (i = 0; i < sc->sc_nauxio; i++) if (sc->sc_res[i]) bus_release_resource(sc->sc_dev, SYS_RES_MEMORY, sc->sc_rid[i], sc->sc_res[i]); AUXIO_LOCK_DESTROY(sc); } static int auxio_sbus_attach(device_t dev) { struct auxio_softc *sc; sc = device_get_softc(dev); sc->sc_dev = dev; AUXIO_LOCK_INIT(sc); sc->sc_nauxio = 1; sc->sc_flags = AUXIO_LEDONLY | AUXIO_SBUS; return (auxio_attach_common(sc)); } Index: head/sys/dev/bktr/bktr_os.c =================================================================== --- head/sys/dev/bktr/bktr_os.c (revision 303889) +++ head/sys/dev/bktr/bktr_os.c (revision 303890) @@ -1,1334 +1,1344 @@ /*- * 1. Redistributions of source code must retain the * Copyright (c) 1997 Amancio Hasty, 1999 Roger Hardiman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Amancio Hasty and * Roger Hardiman * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * This is part of the Driver for Video Capture Cards (Frame grabbers) * and TV Tuner cards using the Brooktree Bt848, Bt848A, Bt849A, Bt878, Bt879 * chipset. * Copyright Roger Hardiman and Amancio Hasty. * * bktr_os : This has all the Operating System dependent code, * probe/attach and open/close/ioctl/read/mmap * memory allocation * PCI bus interfacing */ #include "opt_bktr.h" /* include any kernel config options */ #define FIFO_RISC_DISABLED 0 #define ALL_INTS_DISABLED 0 /*******************/ /* *** FreeBSD *** */ /*******************/ #ifdef __FreeBSD__ #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 500014 #include #else #include #endif #include #include #include #include #include /* used by smbus and newbus */ #include /* used by bus space and newbus */ #include #include /* used by newbus */ #include /* used by newbus */ #if (__FreeBSD_version < 500000) #include /* for DELAY */ #include #include #else #include #include #endif #include int bt848_card = -1; int bt848_tuner = -1; int bt848_reverse_mute = -1; int bt848_format = -1; int bt848_slow_msp_audio = -1; #ifdef BKTR_NEW_MSP34XX_DRIVER int bt848_stereo_once = 0; /* no continuous stereo monitoring */ int bt848_amsound = 0; /* hard-wire AM sound at 6.5 MHz (France); the autoscan seems to work well only with FM... */ int bt848_dolby = 0; #endif static SYSCTL_NODE(_hw, OID_AUTO, bt848, CTLFLAG_RW, 0, "Bt848 Driver mgmt"); SYSCTL_INT(_hw_bt848, OID_AUTO, card, CTLFLAG_RW, &bt848_card, -1, ""); SYSCTL_INT(_hw_bt848, OID_AUTO, tuner, CTLFLAG_RW, &bt848_tuner, -1, ""); SYSCTL_INT(_hw_bt848, OID_AUTO, reverse_mute, CTLFLAG_RW, &bt848_reverse_mute, -1, ""); SYSCTL_INT(_hw_bt848, OID_AUTO, format, CTLFLAG_RW, &bt848_format, -1, ""); SYSCTL_INT(_hw_bt848, OID_AUTO, slow_msp_audio, CTLFLAG_RW, &bt848_slow_msp_audio, -1, ""); #ifdef BKTR_NEW_MSP34XX_DRIVER SYSCTL_INT(_hw_bt848, OID_AUTO, stereo_once, CTLFLAG_RW, &bt848_stereo_once, 0, ""); SYSCTL_INT(_hw_bt848, OID_AUTO, amsound, CTLFLAG_RW, &bt848_amsound, 0, ""); SYSCTL_INT(_hw_bt848, OID_AUTO, dolby, CTLFLAG_RW, &bt848_dolby, 0, ""); #endif #endif /* end freebsd section */ /****************/ /* *** BSDI *** */ /****************/ #ifdef __bsdi__ #endif /* __bsdi__ */ /**************************/ /* *** OpenBSD/NetBSD *** */ /**************************/ #if defined(__NetBSD__) || defined(__OpenBSD__) #include #include #include #include #include #include #include #include #include #include #include #include /* extensions to ioctl_meteor.h */ #ifndef __NetBSD__ #include #include #include #endif #include #include #include #include #define BKTR_DEBUG #ifdef BKTR_DEBUG int bktr_debug = 0; #define DPR(x) (bktr_debug ?
printf x : 0) #else #define DPR(x) #endif #endif /* __NetBSD__ || __OpenBSD__ */ #ifdef __NetBSD__ #include /* NetBSD location for .h files */ #include #include #include #include #include #include #else /* Traditional location for .h files */ #include #include /* extensions to ioctl_meteor.h */ #include #include #include #include #include #include #if defined(BKTR_USE_FREEBSD_SMBUS) #include #include "iicbb_if.h" #include "smbus_if.h" #endif #endif /****************************/ /* *** FreeBSD 4.x code *** */ /****************************/ static int bktr_probe( device_t dev ); static int bktr_attach( device_t dev ); static int bktr_detach( device_t dev ); static int bktr_shutdown( device_t dev ); static void bktr_intr(void *arg) { common_bktr_intr(arg); } static device_method_t bktr_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bktr_probe), DEVMETHOD(device_attach, bktr_attach), DEVMETHOD(device_detach, bktr_detach), DEVMETHOD(device_shutdown, bktr_shutdown), #if defined(BKTR_USE_FREEBSD_SMBUS) /* iicbb interface */ DEVMETHOD(iicbb_callback, bti2c_iic_callback), DEVMETHOD(iicbb_setsda, bti2c_iic_setsda), DEVMETHOD(iicbb_setscl, bti2c_iic_setscl), DEVMETHOD(iicbb_getsda, bti2c_iic_getsda), DEVMETHOD(iicbb_getscl, bti2c_iic_getscl), DEVMETHOD(iicbb_reset, bti2c_iic_reset), /* smbus interface */ DEVMETHOD(smbus_callback, bti2c_smb_callback), DEVMETHOD(smbus_writeb, bti2c_smb_writeb), DEVMETHOD(smbus_writew, bti2c_smb_writew), DEVMETHOD(smbus_readb, bti2c_smb_readb), #endif { 0, 0 } }; static driver_t bktr_driver = { "bktr", bktr_methods, sizeof(struct bktr_softc), }; static devclass_t bktr_devclass; static d_open_t bktr_open; static d_close_t bktr_close; static d_read_t bktr_read; static d_write_t bktr_write; static d_ioctl_t bktr_ioctl; static d_mmap_t bktr_mmap; static d_poll_t bktr_poll; static struct cdevsw bktr_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = bktr_open, .d_close = bktr_close, .d_read = bktr_read, .d_write = bktr_write, .d_ioctl = bktr_ioctl, .d_poll = bktr_poll, .d_mmap = bktr_mmap, .d_name = "bktr", }; #ifdef BKTR_USE_FREEBSD_SMBUS #include #include MODULE_DEPEND(bktr, iicbb, IICBB_MINVER, IICBB_MODVER, IICBB_MAXVER); MODULE_DEPEND(bktr, iicbus, IICBUS_MINVER, IICBUS_MODVER, IICBUS_MAXVER); MODULE_DEPEND(bktr, smbus, SMBUS_MINVER, SMBUS_MODVER, SMBUS_MAXVER); #endif DRIVER_MODULE(bktr, pci, bktr_driver, bktr_devclass, 0, 0); MODULE_DEPEND(bktr, bktr_mem, 1,1,1); MODULE_VERSION(bktr, 1); /* * the boot time probe routine. */ static int bktr_probe( device_t dev ) { unsigned int type = pci_get_devid(dev); unsigned int rev = pci_get_revid(dev); if (PCI_VENDOR(type) == PCI_VENDOR_BROOKTREE) { switch (PCI_PRODUCT(type)) { case PCI_PRODUCT_BROOKTREE_BT848: if (rev == 0x12) device_set_desc(dev, "BrookTree 848A"); else device_set_desc(dev, "BrookTree 848"); return BUS_PROBE_DEFAULT; case PCI_PRODUCT_BROOKTREE_BT849: device_set_desc(dev, "BrookTree 849A"); return BUS_PROBE_DEFAULT; case PCI_PRODUCT_BROOKTREE_BT878: device_set_desc(dev, "BrookTree 878"); return BUS_PROBE_DEFAULT; case PCI_PRODUCT_BROOKTREE_BT879: device_set_desc(dev, "BrookTree 879"); return BUS_PROBE_DEFAULT; } } return ENXIO; } /* * the attach routine. 
*/ static int bktr_attach( device_t dev ) { u_long latency; u_long fun; unsigned int rev; unsigned int unit; int error = 0; #ifdef BROOKTREE_IRQ u_long old_irq, new_irq; #endif struct bktr_softc *bktr = device_get_softc(dev); unit = device_get_unit(dev); /* build the device name for bktr_name() */ snprintf(bktr->bktr_xname, sizeof(bktr->bktr_xname), "bktr%d",unit); /* * Enable bus mastering and Memory Mapped device */ pci_enable_busmaster(dev); /* * Map control/status registers. */ bktr->mem_rid = PCIR_BAR(0); bktr->res_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &bktr->mem_rid, RF_ACTIVE); if (!bktr->res_mem) { device_printf(dev, "could not map memory\n"); error = ENXIO; goto fail; } bktr->memt = rman_get_bustag(bktr->res_mem); bktr->memh = rman_get_bushandle(bktr->res_mem); /* * Disable the brooktree device */ OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED); OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED); #ifdef BROOKTREE_IRQ /* from the configuration file */ old_irq = pci_conf_read(tag, PCI_INTERRUPT_REG); pci_conf_write(tag, PCI_INTERRUPT_REG, BROOKTREE_IRQ); new_irq = pci_conf_read(tag, PCI_INTERRUPT_REG); printf("bktr%d: attach: irq changed from %d to %d\n", unit, (old_irq & 0xff), (new_irq & 0xff)); #endif /* * Allocate our interrupt. */ bktr->irq_rid = 0; bktr->res_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &bktr->irq_rid, RF_SHAREABLE | RF_ACTIVE); if (bktr->res_irq == NULL) { device_printf(dev, "could not map interrupt\n"); error = ENXIO; goto fail; } error = bus_setup_intr(dev, bktr->res_irq, INTR_TYPE_TTY, NULL, bktr_intr, bktr, &bktr->res_ih); if (error) { device_printf(dev, "could not setup irq\n"); goto fail; } /* Update the Device Control Register */ /* on Bt878 and Bt879 cards */ fun = pci_read_config( dev, 0x40, 2); fun = fun | 1; /* Enable writes to the sub-system vendor ID */ #if defined( BKTR_430_FX_MODE ) if (bootverbose) printf("Using 430 FX chipset compatibility mode\n"); fun = fun | 2; /* Enable Intel 430 FX compatibility mode */ #endif #if defined( BKTR_SIS_VIA_MODE ) if (bootverbose) printf("Using SiS/VIA chipset compatibility mode\n"); fun = fun | 4; /* Enable SiS/VIA compatibility mode (useful for OPTi chipset motherboards too) */ #endif pci_write_config(dev, 0x40, fun, 2); #if defined(BKTR_USE_FREEBSD_SMBUS) if (bt848_i2c_attach(dev)) printf("bktr%d: i2c_attach: can't attach\n", unit); #endif /* * PCI latency timer. 32 is a good value for 4 bus mastering slots, if * you have more than four, then 16 would probably be a better value.
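 *
 * A note on the arithmetic below: the 4-byte config read at
 * PCI_LATENCY_TIMER fetches the dword whose second byte holds the
 * latency timer, which is why the value is extracted with
 * (latency >> 8) & 0xff and written back shifted left by 8. A value
 * of 0 would throttle bus mastering, so the default defined below is
 * installed in that case.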
*/ #ifndef BROOKTREE_DEF_LATENCY_VALUE #define BROOKTREE_DEF_LATENCY_VALUE 10 #endif latency = pci_read_config(dev, PCI_LATENCY_TIMER, 4); latency = (latency >> 8) & 0xff; if ( bootverbose ) { if (latency) printf("brooktree%d: PCI bus latency is", unit); else printf("brooktree%d: PCI bus latency was 0 changing to", unit); } if ( !latency ) { latency = BROOKTREE_DEF_LATENCY_VALUE; pci_write_config(dev, PCI_LATENCY_TIMER, latency<<8, 4); } if ( bootverbose ) { printf(" %d.\n", (int) latency); } /* read the pci device id and revision id */ fun = pci_get_devid(dev); rev = pci_get_revid(dev); /* call the common attach code */ common_bktr_attach( bktr, unit, fun, rev ); /* make the device entries */ bktr->bktrdev = make_dev(&bktr_cdevsw, unit, 0, 0, 0444, "bktr%d", unit); bktr->tunerdev= make_dev(&bktr_cdevsw, unit+16, 0, 0, 0444, "tuner%d", unit); bktr->vbidev = make_dev(&bktr_cdevsw, unit+32, 0, 0, 0444, "vbi%d" , unit); /* if this is unit 0 (/dev/bktr0, /dev/tuner0, /dev/vbi0) then make */ /* alias entries to /dev/bktr /dev/tuner and /dev/vbi */ #if (__FreeBSD_version >=500000) if (unit == 0) { bktr->bktrdev_alias = make_dev_alias(bktr->bktrdev, "bktr"); bktr->tunerdev_alias= make_dev_alias(bktr->tunerdev, "tuner"); bktr->vbidev_alias = make_dev_alias(bktr->vbidev, "vbi"); } #endif return 0; fail: if (bktr->res_irq) bus_release_resource(dev, SYS_RES_IRQ, bktr->irq_rid, bktr->res_irq); if (bktr->res_mem) bus_release_resource(dev, SYS_RES_MEMORY, bktr->mem_rid, bktr->res_mem); return error; } /* * the detach routine. */ static int bktr_detach( device_t dev ) { struct bktr_softc *bktr = device_get_softc(dev); #ifdef BKTR_NEW_MSP34XX_DRIVER /* Disable the soundchip and kernel thread */ if (bktr->msp3400c_info != NULL) msp_detach(bktr); #endif /* Disable the brooktree device */ OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED); OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED); #if defined(BKTR_USE_FREEBSD_SMBUS) if (bt848_i2c_detach(dev)) printf("bktr%d: i2c_attach: can't attach\n", device_get_unit(dev)); #endif #ifdef USE_VBIMUTEX mtx_destroy(&bktr->vbimutex); #endif /* Note: We do not free memory for RISC programs, grab buffer, vbi buffers */ /* The memory is retained by the bktr_mem module so we can unload and */ /* then reload the main bktr driver module */ /* Unregister the /dev/bktrN, tunerN and vbiN devices, * the aliases for unit 0 are automatically destroyed */ destroy_dev(bktr->vbidev); destroy_dev(bktr->tunerdev); destroy_dev(bktr->bktrdev); /* * Deallocate resources. */ bus_teardown_intr(dev, bktr->res_irq, bktr->res_ih); bus_release_resource(dev, SYS_RES_IRQ, bktr->irq_rid, bktr->res_irq); bus_release_resource(dev, SYS_RES_MEMORY, bktr->mem_rid, bktr->res_mem); return 0; } /* * the shutdown routine. 
*/ static int bktr_shutdown( device_t dev ) { struct bktr_softc *bktr = device_get_softc(dev); /* Disable the brooktree device */ OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED); OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED); return 0; } /* * Special Memory Allocation */ vm_offset_t get_bktr_mem( int unit, unsigned size ) { vm_offset_t addr = 0; addr = (vm_offset_t)contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, 1<<24, 0); if (addr == 0) addr = (vm_offset_t)contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (addr == 0) { printf("bktr%d: Unable to allocate %d bytes of memory.\n", unit, size); } return( addr ); } /*--------------------------------------------------------- ** ** BrookTree 848 character device driver routines ** **--------------------------------------------------------- */ #define VIDEO_DEV 0x00 #define TUNER_DEV 0x01 #define VBI_DEV 0x02 #define UNIT(x) ((x) & 0x0f) #define FUNCTION(x) (x >> 4) /* * */ static int bktr_open( struct cdev *dev, int flags, int fmt, struct thread *td ) { bktr_ptr_t bktr; int unit; int result; unit = UNIT( dev2unit(dev) ); /* Get the device data */ bktr = (struct bktr_softc*)devclass_get_softc(bktr_devclass, unit); if (bktr == NULL) { /* the device is no longer valid/functioning */ return (ENXIO); } if (!(bktr->flags & METEOR_INITALIZED)) /* device not found */ return( ENXIO ); /* Record that the device is now busy */ device_busy(devclass_get_device(bktr_devclass, unit)); if (bt848_card != -1) { if ((bt848_card >> 8 == unit ) && ( (bt848_card & 0xff) < Bt848_MAX_CARD )) { if ( bktr->bt848_card != (bt848_card & 0xff) ) { bktr->bt848_card = (bt848_card & 0xff); probeCard(bktr, FALSE, unit); } } } if (bt848_tuner != -1) { if ((bt848_tuner >> 8 == unit ) && ( (bt848_tuner & 0xff) < Bt848_MAX_TUNER )) { if ( bktr->bt848_tuner != (bt848_tuner & 0xff) ) { bktr->bt848_tuner = (bt848_tuner & 0xff); probeCard(bktr, FALSE, unit); } } } if (bt848_reverse_mute != -1) { if ((bt848_reverse_mute >> 8) == unit ) { bktr->reverse_mute = bt848_reverse_mute & 0xff; } } if (bt848_slow_msp_audio != -1) { if ((bt848_slow_msp_audio >> 8) == unit ) { bktr->slow_msp_audio = (bt848_slow_msp_audio & 0xff); } } #ifdef BKTR_NEW_MSP34XX_DRIVER if (bt848_stereo_once != 0) { if ((bt848_stereo_once >> 8) == unit ) { bktr->stereo_once = (bt848_stereo_once & 0xff); } } if (bt848_amsound != -1) { if ((bt848_amsound >> 8) == unit ) { bktr->amsound = (bt848_amsound & 0xff); } } if (bt848_dolby != -1) { if ((bt848_dolby >> 8) == unit ) { bktr->dolby = (bt848_dolby & 0xff); } } #endif switch ( FUNCTION( dev2unit(dev) ) ) { case VIDEO_DEV: result = video_open( bktr ); break; case TUNER_DEV: result = tuner_open( bktr ); break; case VBI_DEV: result = vbi_open( bktr ); break; default: result = ENXIO; break; } /* If there was an error opening the device, undo the busy status */ if (result != 0) device_unbusy(devclass_get_device(bktr_devclass, unit)); return( result ); } /* * */ static int bktr_close( struct cdev *dev, int flags, int fmt, struct thread *td ) { bktr_ptr_t bktr; int unit; int result; unit = UNIT( dev2unit(dev) ); /* Get the device data */ bktr = (struct bktr_softc*)devclass_get_softc(bktr_devclass, unit); if (bktr == NULL) { /* the device is no longer valid/functioning */ return (ENXIO); } switch ( FUNCTION( dev2unit(dev) ) ) { case VIDEO_DEV: result = video_close( bktr ); break; case TUNER_DEV: result = tuner_close( bktr ); break; case VBI_DEV: result = vbi_close( bktr ); break; default: return (ENXIO); break; } 
device_unbusy(devclass_get_device(bktr_devclass, unit)); return( result ); } /* * */ static int bktr_read( struct cdev *dev, struct uio *uio, int ioflag ) { bktr_ptr_t bktr; int unit; unit = UNIT(dev2unit(dev)); /* Get the device data */ bktr = (struct bktr_softc*)devclass_get_softc(bktr_devclass, unit); if (bktr == NULL) { /* the device is no longer valid/functioning */ return (ENXIO); } switch ( FUNCTION( dev2unit(dev) ) ) { case VIDEO_DEV: return( video_read( bktr, unit, dev, uio ) ); case VBI_DEV: return( vbi_read( bktr, uio, ioflag ) ); } return( ENXIO ); } /* * */ static int bktr_write( struct cdev *dev, struct uio *uio, int ioflag ) { return( EINVAL ); /* XXX or ENXIO ? */ } /* * */ static int bktr_ioctl( struct cdev *dev, ioctl_cmd_t cmd, caddr_t arg, int flag, struct thread *td ) { bktr_ptr_t bktr; int unit; unit = UNIT(dev2unit(dev)); /* Get the device data */ bktr = (struct bktr_softc*)devclass_get_softc(bktr_devclass, unit); if (bktr == NULL) { /* the device is no longer valid/functioning */ return (ENXIO); } #ifdef BKTR_GPIO_ACCESS if (bktr->bigbuf == 0 && cmd != BT848_GPIO_GET_EN && cmd != BT848_GPIO_SET_EN && cmd != BT848_GPIO_GET_DATA && cmd != BT848_GPIO_SET_DATA) /* no frame buffer allocated (ioctl failed) */ return( ENOMEM ); #else if (bktr->bigbuf == 0) /* no frame buffer allocated (ioctl failed) */ return( ENOMEM ); #endif switch ( FUNCTION( dev2unit(dev) ) ) { case VIDEO_DEV: return( video_ioctl( bktr, unit, cmd, arg, td ) ); case TUNER_DEV: return( tuner_ioctl( bktr, unit, cmd, arg, td ) ); } return( ENXIO ); } /* * */ static int bktr_mmap( struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr ) { int unit; bktr_ptr_t bktr; unit = UNIT(dev2unit(dev)); if (FUNCTION(dev2unit(dev)) > 0) /* only allow mmap on /dev/bktr[n] */ return( -1 ); /* Get the device data */ bktr = (struct bktr_softc*)devclass_get_softc(bktr_devclass, unit); if (bktr == NULL) { /* the device is no longer valid/functioning */ return (ENXIO); } if (nprot & PROT_EXEC) return( -1 ); if (offset < 0) return( -1 ); if (offset >= bktr->alloc_pages * PAGE_SIZE) return( -1 ); *paddr = vtophys(bktr->bigbuf) + offset; return( 0 ); } static int bktr_poll( struct cdev *dev, int events, struct thread *td) { int unit; bktr_ptr_t bktr; int revents = 0; DECLARE_INTR_MASK(s); unit = UNIT(dev2unit(dev)); /* Get the device data */ bktr = (struct bktr_softc*)devclass_get_softc(bktr_devclass, unit); if (bktr == NULL) { /* the device is no longer valid/functioning */ return (ENXIO); } LOCK_VBI(bktr); DISABLE_INTR(s); if (events & (POLLIN | POLLRDNORM)) { switch ( FUNCTION( dev2unit(dev) ) ) { case VBI_DEV: if(bktr->vbisize == 0) selrecord(td, &bktr->vbi_select); else revents |= events & (POLLIN | POLLRDNORM); break; } } ENABLE_INTR(s); UNLOCK_VBI(bktr); return (revents); } /*****************/ /* *** BSDI *** */ /*****************/ #if defined(__bsdi__) #endif /* __bsdi__ BSDI specific kernel interface routines */ /*****************************/ /* *** OpenBSD / NetBSD *** */ /*****************************/ #if defined(__NetBSD__) || defined(__OpenBSD__) #define IPL_VIDEO IPL_BIO /* XXX */ static int bktr_intr(void *arg) { return common_bktr_intr(arg); } #define bktr_open bktropen #define bktr_close bktrclose #define bktr_read bktrread #define bktr_write bktrwrite #define bktr_ioctl bktrioctl #define bktr_mmap bktrmmap vm_offset_t vm_page_alloc_contig(vm_offset_t, vm_offset_t, vm_offset_t, vm_offset_t); #if defined(__OpenBSD__) static int bktr_probe(struct device *, void *, void *); 
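/*
 * (Background on the prototype churn below: device_t is the opaque
 * autoconf/newbus handle, historically "typedef struct device *device_t;",
 * so spelling the parameters device_t keeps these prototypes valid on
 * systems that no longer expose the bare struct; OpenBSD has no such
 * typedef and keeps passing struct device * directly.)
 */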
+static void bktr_attach(struct device *, struct device *, void *); #else -static int bktr_probe(struct device *, struct cfdata *, void *); +static int bktr_probe(device_t, struct cfdata *, void *); +static void bktr_attach(device_t, device_t, void *); #endif -static void bktr_attach(struct device *, struct device *, void *); struct cfattach bktr_ca = { sizeof(struct bktr_softc), bktr_probe, bktr_attach }; #if defined(__NetBSD__) extern struct cfdriver bktr_cd; #else struct cfdriver bktr_cd = { NULL, "bktr", DV_DULL }; #endif int bktr_probe(parent, match, aux) - struct device *parent; #if defined(__OpenBSD__) + struct device *parent; void *match; #else + device_t parent; struct cfdata *match; #endif void *aux; { struct pci_attach_args *pa = aux; if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROOKTREE && (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROOKTREE_BT848 || PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROOKTREE_BT849 || PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROOKTREE_BT878 || PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROOKTREE_BT879)) return 1; return 0; } /* * the attach routine. */ static void -bktr_attach(struct device *parent, struct device *self, void *aux) +bktr_attach(parent, self, aux) +#if defined(__OpenBSD__) + struct device *parent; + struct device *self; +#else + device_t parent; + device_t self; +#endif + void *aux; { bktr_ptr_t bktr; u_long latency; u_long fun; unsigned int rev; #if defined(__OpenBSD__) struct pci_attach_args *pa = aux; pci_chipset_tag_t pc = pa->pa_pc; pci_intr_handle_t ih; const char *intrstr; int retval; int unit; bktr = (bktr_ptr_t)self; unit = bktr->bktr_dev.dv_unit; bktr->pc = pa->pa_pc; bktr->tag = pa->pa_tag; bktr->dmat = pa->pa_dmat; /* * map memory */ bktr->memt = pa->pa_memt; retval = pci_mem_find(pc, pa->pa_tag, PCI_MAPREG_START, &bktr->phys_base, &bktr->obmemsz, NULL); if (!retval) retval = bus_space_map(pa->pa_memt, bktr->phys_base, bktr->obmemsz, 0, &bktr->memh); if (retval) { printf(": couldn't map memory\n"); return; } /* * map interrupt */ if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin, pa->pa_intrline, &ih)) { printf(": couldn't map interrupt\n"); return; } intrstr = pci_intr_string(pa->pa_pc, ih); bktr->ih = pci_intr_establish(pa->pa_pc, ih, IPL_VIDEO, bktr_intr, bktr, bktr->bktr_dev.dv_xname); if (bktr->ih == NULL) { printf(": couldn't establish interrupt"); if (intrstr != NULL) printf(" at %s", intrstr); printf("\n"); return; } if (intrstr != NULL) printf(": %s\n", intrstr); #endif /* __OpenBSD__ */ #if defined(__NetBSD__) struct pci_attach_args *pa = aux; pci_intr_handle_t ih; const char *intrstr; int retval; int unit; bktr = (bktr_ptr_t)self; unit = bktr->bktr_dev.dv_unit; bktr->dmat = pa->pa_dmat; printf("\n"); /* * map memory */ retval = pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, &bktr->memt, &bktr->memh, NULL, &bktr->obmemsz); DPR(("pci_mapreg_map: memt %x, memh %x, size %x\n", bktr->memt, (u_int)bktr->memh, (u_int)bktr->obmemsz)); if (retval) { printf("%s: couldn't map memory\n", bktr_name(bktr)); return; } /* * Disable the brooktree device */ OUTL(bktr, BKTR_INT_MASK, ALL_INTS_DISABLED); OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED); /* * map interrupt */ if (pci_intr_map(pa->pa_pc, pa->pa_intrtag, pa->pa_intrpin, pa->pa_intrline, &ih)) { printf("%s: couldn't map interrupt\n", bktr_name(bktr)); return; } intrstr = pci_intr_string(pa->pa_pc, ih); bktr->ih = pci_intr_establish(pa->pa_pc, ih, IPL_VIDEO, bktr_intr, bktr); if (bktr->ih == NULL) { printf("%s: couldn't establish interrupt", 
bktr_name(bktr)); if (intrstr != NULL) printf(" at %s", intrstr); printf("\n"); return; } if (intrstr != NULL) printf("%s: interrupting at %s\n", bktr_name(bktr), intrstr); #endif /* __NetBSD__ */ /* * PCI latency timer. 32 is a good value for 4 bus mastering slots, if * you have more than four, then 16 would probably be a better value. */ #ifndef BROOKTREE_DEF_LATENCY_VALUE #define BROOKTREE_DEF_LATENCY_VALUE 10 #endif latency = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_LATENCY_TIMER); latency = (latency >> 8) & 0xff; if (!latency) { if (bootverbose) { printf("%s: PCI bus latency was 0, changing to %d", bktr_name(bktr), BROOKTREE_DEF_LATENCY_VALUE); } latency = BROOKTREE_DEF_LATENCY_VALUE; pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_LATENCY_TIMER, latency<<8); } /* Enable Bus Master XXX: check if all old DMA is stopped first (e.g. after warm boot) */ fun = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, fun | PCI_COMMAND_MASTER_ENABLE); /* read the pci id and determine the card type */ fun = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG); rev = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG) & 0x000000ff; common_bktr_attach(bktr, unit, fun, rev); } /* * Special Memory Allocation */ vm_offset_t get_bktr_mem(bktr, dmapp, size) bktr_ptr_t bktr; bus_dmamap_t *dmapp; unsigned int size; { bus_dma_tag_t dmat = bktr->dmat; bus_dma_segment_t seg; bus_size_t align; int rseg; caddr_t kva; /* * Allocate a DMA area */ align = 1 << 24; if (bus_dmamem_alloc(dmat, size, align, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { align = PAGE_SIZE; if (bus_dmamem_alloc(dmat, size, align, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { printf("%s: Unable to dmamem_alloc of %d bytes\n", bktr_name(bktr), size); return 0; } } if (bus_dmamem_map(dmat, &seg, rseg, size, &kva, BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) { printf("%s: Unable to dmamem_map of %d bytes\n", bktr_name(bktr), size); bus_dmamem_free(dmat, &seg, rseg); return 0; } #ifdef __OpenBSD__ bktr->dm_mapsize = size; #endif /* * Create and load the DMA map for the DMA area */ if (bus_dmamap_create(dmat, size, 1, size, 0, BUS_DMA_NOWAIT, dmapp)) { printf("%s: Unable to dmamap_create of %d bytes\n", bktr_name(bktr), size); bus_dmamem_unmap(dmat, kva, size); bus_dmamem_free(dmat, &seg, rseg); return 0; } if (bus_dmamap_load(dmat, *dmapp, kva, size, NULL, BUS_DMA_NOWAIT)) { printf("%s: Unable to dmamap_load of %d bytes\n", bktr_name(bktr), size); bus_dmamem_unmap(dmat, kva, size); bus_dmamem_free(dmat, &seg, rseg); bus_dmamap_destroy(dmat, *dmapp); return 0; } return (vm_offset_t)kva; } void free_bktr_mem(bktr, dmap, kva) bktr_ptr_t bktr; bus_dmamap_t dmap; vm_offset_t kva; { bus_dma_tag_t dmat = bktr->dmat; #ifdef __NetBSD__ bus_dmamem_unmap(dmat, (caddr_t)kva, dmap->dm_mapsize); #else bus_dmamem_unmap(dmat, (caddr_t)kva, bktr->dm_mapsize); #endif bus_dmamem_free(dmat, dmap->dm_segs, 1); bus_dmamap_destroy(dmat, dmap); } /*--------------------------------------------------------- ** ** BrookTree 848 character device driver routines ** **--------------------------------------------------------- */ #define VIDEO_DEV 0x00 #define TUNER_DEV 0x01 #define VBI_DEV 0x02 #define UNIT(x) (dev2unit((x) & 0x0f)) #define FUNCTION(x) (dev2unit((x >> 4) & 0x0f)) /* * */ int bktr_open(dev_t dev, int flags, int fmt, struct thread *td) { bktr_ptr_t bktr; int unit; unit = UNIT(dev); /* unit out of range */ if ((unit >= bktr_cd.cd_ndevs) || (bktr_cd.cd_devs[unit] == NULL)) return(ENXIO); bktr = bktr_cd.cd_devs[unit]; if
(!(bktr->flags & METEOR_INITALIZED)) /* device not found */ return(ENXIO); switch (FUNCTION(dev)) { case VIDEO_DEV: return(video_open(bktr)); case TUNER_DEV: return(tuner_open(bktr)); case VBI_DEV: return(vbi_open(bktr)); } return(ENXIO); } /* * */ int bktr_close(dev_t dev, int flags, int fmt, struct thread *td) { bktr_ptr_t bktr; int unit; unit = UNIT(dev); bktr = bktr_cd.cd_devs[unit]; switch (FUNCTION(dev)) { case VIDEO_DEV: return(video_close(bktr)); case TUNER_DEV: return(tuner_close(bktr)); case VBI_DEV: return(vbi_close(bktr)); } return(ENXIO); } /* * */ int bktr_read(dev_t dev, struct uio *uio, int ioflag) { bktr_ptr_t bktr; int unit; unit = UNIT(dev); bktr = bktr_cd.cd_devs[unit]; switch (FUNCTION(dev)) { case VIDEO_DEV: return(video_read(bktr, unit, dev, uio)); case VBI_DEV: return(vbi_read(bktr, uio, ioflag)); } return(ENXIO); } /* * */ int bktr_write(dev_t dev, struct uio *uio, int ioflag) { /* operation not supported */ return(EOPNOTSUPP); } /* * */ int bktr_ioctl(dev_t dev, ioctl_cmd_t cmd, caddr_t arg, int flag, struct thread *td) { bktr_ptr_t bktr; int unit; unit = UNIT(dev); bktr = bktr_cd.cd_devs[unit]; if (bktr->bigbuf == 0) /* no frame buffer allocated (ioctl failed) */ return(ENOMEM); switch (FUNCTION(dev)) { case VIDEO_DEV: return(video_ioctl(bktr, unit, cmd, arg, td)); case TUNER_DEV: return(tuner_ioctl(bktr, unit, cmd, arg, td)); } return(ENXIO); } /* * */ paddr_t bktr_mmap(dev_t dev, off_t offset, int nprot) { int unit; bktr_ptr_t bktr; unit = UNIT(dev); if (FUNCTION(dev) > 0) /* only allow mmap on /dev/bktr[n] */ return(-1); bktr = bktr_cd.cd_devs[unit]; if ((vaddr_t)offset < 0) return(-1); if ((vaddr_t)offset >= bktr->alloc_pages * PAGE_SIZE) return(-1); #ifdef __NetBSD__ return (bus_dmamem_mmap(bktr->dmat, bktr->dm_mem->dm_segs, 1, (vaddr_t)offset, nprot, BUS_DMA_WAITOK)); #else return(i386_btop(vtophys(bktr->bigbuf) + offset)); #endif } #endif /* __NetBSD__ || __OpenBSD__ */ Index: head/sys/dev/bktr/bktr_reg.h =================================================================== --- head/sys/dev/bktr/bktr_reg.h (revision 303889) +++ head/sys/dev/bktr/bktr_reg.h (revision 303890) @@ -1,728 +1,728 @@ /*- * $FreeBSD$ * * Copyright (c) 1999 Roger Hardiman * Copyright (c) 1998 Amancio Hasty * Copyright (c) 1995 Mark Tinguely and Jim Lowe * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Tinguely and Jim Lowe * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * */ #ifdef __NetBSD__ -#include /* struct device */ +#include /* device_t */ #include #include /* struct selinfo */ # ifdef DEBUG # define bootverbose 1 # else # define bootverbose 0 # endif #endif /* * The kernel options for the driver now all begin with BKTR. * Support the older kernel options on FreeBSD and OpenBSD. * */ #if defined(__FreeBSD__) || defined(__OpenBSD__) #if defined(BROOKTREE_ALLOC_PAGES) #define BKTR_ALLOC_PAGES BROOKTREE_ALLOC_PAGES #endif #if defined(BROOKTREE_SYSTEM_DEFAULT) #define BKTR_SYSTEM_DEFAULT BROOKTREE_SYSTEM_DEFAULT #endif #if defined(OVERRIDE_CARD) #define BKTR_OVERRIDE_CARD OVERRIDE_CARD #endif #if defined(OVERRIDE_TUNER) #define BKTR_OVERRIDE_TUNER OVERRIDE_TUNER #endif #if defined(OVERRIDE_DBX) #define BKTR_OVERRIDE_DBX OVERRIDE_DBX #endif #if defined(OVERRIDE_MSP) #define BKTR_OVERRIDE_MSP OVERRIDE_MSP #endif #endif #ifndef PCI_LATENCY_TIMER #define PCI_LATENCY_TIMER 0x0c /* pci timer register */ #endif /* * Definitions for the Brooktree 848/878 video capture to pci interface. */ #ifndef __NetBSD__ #define PCI_VENDOR_SHIFT 0 #define PCI_VENDOR_MASK 0xffff #define PCI_VENDOR(id) \ (((id) >> PCI_VENDOR_SHIFT) & PCI_VENDOR_MASK) #define PCI_PRODUCT_SHIFT 16 #define PCI_PRODUCT_MASK 0xffff #define PCI_PRODUCT(id) \ (((id) >> PCI_PRODUCT_SHIFT) & PCI_PRODUCT_MASK) /* PCI vendor ID */ #define PCI_VENDOR_BROOKTREE 0x109e /* Brooktree */ /* Brooktree products */ #define PCI_PRODUCT_BROOKTREE_BT848 0x0350 /* Bt848 Video Capture */ #define PCI_PRODUCT_BROOKTREE_BT849 0x0351 /* Bt849 Video Capture */ #define PCI_PRODUCT_BROOKTREE_BT878 0x036e /* Bt878 Video Capture */ #define PCI_PRODUCT_BROOKTREE_BT879 0x036f /* Bt879 Video Capture */ #endif #define BROOKTREE_848 1 #define BROOKTREE_848A 2 #define BROOKTREE_849A 3 #define BROOKTREE_878 4 #define BROOKTREE_879 5 typedef volatile u_int bregister_t; /* * if other persuasion endian, then compiler will probably require that * these next * macros be reversed */ #define BTBYTE(what) bregister_t what:8; int :24 #define BTWORD(what) bregister_t what:16; int: 16 #define BTLONG(what) bregister_t what:32 struct bt848_registers { BTBYTE (dstatus); /* 0, 1,2,3 */ #define BT848_DSTATUS_PRES (1<<7) #define BT848_DSTATUS_HLOC (1<<6) #define BT848_DSTATUS_FIELD (1<<5) #define BT848_DSTATUS_NUML (1<<4) #define BT848_DSTATUS_CSEL (1<<3) #define BT848_DSTATUS_PLOCK (1<<2) #define BT848_DSTATUS_LOF (1<<1) #define BT848_DSTATUS_COF (1<<0) BTBYTE (iform); /* 4, 5,6,7 */ #define BT848_IFORM_MUXSEL (0x3<<5) # define BT848_IFORM_M_MUX1 (0x03<<5) # define BT848_IFORM_M_MUX0 (0x02<<5) # define BT848_IFORM_M_MUX2 (0x01<<5) # define BT848_IFORM_M_MUX3 (0x0) # define BT848_IFORM_M_RSVD (0x00<<5) #define BT848_IFORM_XTSEL (0x3<<3) # define BT848_IFORM_X_AUTO (0x03<<3) # define BT848_IFORM_X_XT1 (0x02<<3) # define BT848_IFORM_X_XT0 (0x01<<3) # define BT848_IFORM_X_RSVD (0x00<<3) BTBYTE (tdec); /* 8, 9,a,b */ BTBYTE (e_crop); /* c, d,e,f */ BTBYTE (e_vdelay_lo); /* 10, 11,12,13 */ BTBYTE (e_vactive_lo); /* 14, 15,16,17 */ BTBYTE (e_delay_lo); /* 18, 
19,1a,1b */ BTBYTE (e_hactive_lo); /* 1c, 1d,1e,1f */ BTBYTE (e_hscale_hi); /* 20, 21,22,23 */ BTBYTE (e_hscale_lo); /* 24, 25,26,27 */ BTBYTE (bright); /* 28, 29,2a,2b */ BTBYTE (e_control); /* 2c, 2d,2e,2f */ #define BT848_E_CONTROL_LNOTCH (1<<7) #define BT848_E_CONTROL_COMP (1<<6) #define BT848_E_CONTROL_LDEC (1<<5) #define BT848_E_CONTROL_CBSENSE (1<<4) #define BT848_E_CONTROL_RSVD (1<<3) #define BT848_E_CONTROL_CON_MSB (1<<2) #define BT848_E_CONTROL_SAT_U_MSB (1<<1) #define BT848_E_CONTROL_SAT_V_MSB (1<<0) BTBYTE (contrast_lo); /* 30, 31,32,33 */ BTBYTE (sat_u_lo); /* 34, 35,36,37 */ BTBYTE (sat_v_lo); /* 38, 39,3a,3b */ BTBYTE (hue); /* 3c, 3d,3e,3f */ BTBYTE (e_scloop); /* 40, 41,42,43 */ #define BT848_E_SCLOOP_RSVD1 (1<<7) #define BT848_E_SCLOOP_CAGC (1<<6) #define BT848_E_SCLOOP_CKILL (1<<5) #define BT848_E_SCLOOP_HFILT (0x3<<3) # define BT848_E_SCLOOP_HFILT_ICON (0x3<<3) # define BT848_E_SCLOOP_HFILT_QCIF (0x2<<3) # define BT848_E_SCLOOP_HFILT_CIF (0x1<<3) # define BT848_E_SCLOOP_HFILT_AUTO (0x0<<3) #define BT848_E_SCLOOP_RSVD0 (0x7<<0) int :32; /* 44, 45,46,47 */ BTBYTE (oform); /* 48, 49,4a,4b */ BTBYTE (e_vscale_hi); /* 4c, 4d,4e,4f */ BTBYTE (e_vscale_lo); /* 50, 51,52,53 */ BTBYTE (test); /* 54, 55,56,57 */ int :32; /* 58, 59,5a,5b */ int :32; /* 5c, 5d,5e,5f */ BTLONG (adelay); /* 60, 61,62,63 */ BTBYTE (bdelay); /* 64, 65,66,67 */ BTBYTE (adc); /* 68, 69,6a,6b */ #define BT848_ADC_RESERVED (0x80) /* required pattern */ #define BT848_ADC_SYNC_T (1<<5) #define BT848_ADC_AGC_EN (1<<4) #define BT848_ADC_CLK_SLEEP (1<<3) #define BT848_ADC_Y_SLEEP (1<<2) #define BT848_ADC_C_SLEEP (1<<1) #define BT848_ADC_CRUSH (1<<0) BTBYTE (e_vtc); /* 6c, 6d,6e,6f */ int :32; /* 70, 71,72,73 */ int :32; /* 74, 75,76,77 */ int :32; /* 78, 79,7a,7b */ BTLONG (sreset); /* 7c, 7d,7e,7f */ u_char filler1[0x84-0x80]; BTBYTE (tgctrl); /* 84, 85,86,87 */ #define BT848_TGCTRL_TGCKI (3<<3) #define BT848_TGCTRL_TGCKI_XTAL (0<<3) #define BT848_TGCTRL_TGCKI_PLL (1<<3) #define BT848_TGCTRL_TGCKI_GPCLK (2<<3) #define BT848_TGCTRL_TGCKI_GPCLK_I (3<<3) u_char filler[0x8c-0x88]; BTBYTE (o_crop); /* 8c, 8d,8e,8f */ BTBYTE (o_vdelay_lo); /* 90, 91,92,93 */ BTBYTE (o_vactive_lo); /* 94, 95,96,97 */ BTBYTE (o_delay_lo); /* 98, 99,9a,9b */ BTBYTE (o_hactive_lo); /* 9c, 9d,9e,9f */ BTBYTE (o_hscale_hi); /* a0, a1,a2,a3 */ BTBYTE (o_hscale_lo); /* a4, a5,a6,a7 */ int :32; /* a8, a9,aa,ab */ BTBYTE (o_control); /* ac, ad,ae,af */ #define BT848_O_CONTROL_LNOTCH (1<<7) #define BT848_O_CONTROL_COMP (1<<6) #define BT848_O_CONTROL_LDEC (1<<5) #define BT848_O_CONTROL_CBSENSE (1<<4) #define BT848_O_CONTROL_RSVD (1<<3) #define BT848_O_CONTROL_CON_MSB (1<<2) #define BT848_O_CONTROL_SAT_U_MSB (1<<1) #define BT848_O_CONTROL_SAT_V_MSB (1<<0) u_char fillter4[16]; BTBYTE (o_scloop); /* c0, c1,c2,c3 */ #define BT848_O_SCLOOP_RSVD1 (1<<7) #define BT848_O_SCLOOP_CAGC (1<<6) #define BT848_O_SCLOOP_CKILL (1<<5) #define BT848_O_SCLOOP_HFILT (0x3<<3) #define BT848_O_SCLOOP_HFILT_ICON (0x3<<3) #define BT848_O_SCLOOP_HFILT_QCIF (0x2<<3) #define BT848_O_SCLOOP_HFILT_CIF (0x1<<3) #define BT848_O_SCLOOP_HFILT_AUTO (0x0<<3) #define BT848_O_SCLOOP_RSVD0 (0x7<<0) int :32; /* c4, c5,c6,c7 */ int :32; /* c8, c9,ca,cb */ BTBYTE (o_vscale_hi); /* cc, cd,ce,cf */ BTBYTE (o_vscale_lo); /* d0, d1,d2,d3 */ BTBYTE (color_fmt); /* d4, d5,d6,d7 */ bregister_t color_ctl_swap :4; /* d8 */ #define BT848_COLOR_CTL_WSWAP_ODD (1<<3) #define BT848_COLOR_CTL_WSWAP_EVEN (1<<2) #define BT848_COLOR_CTL_BSWAP_ODD (1<<1) #define BT848_COLOR_CTL_BSWAP_EVEN (1<<0) 
bregister_t color_ctl_gamma :1; bregister_t color_ctl_rgb_ded :1; bregister_t color_ctl_color_bars :1; bregister_t color_ctl_ext_frmrate :1; #define BT848_COLOR_CTL_GAMMA (1<<4) #define BT848_COLOR_CTL_RGB_DED (1<<5) #define BT848_COLOR_CTL_COLOR_BARS (1<<6) #define BT848_COLOR_CTL_EXT_FRMRATE (1<<7) int :24; /* d9,da,db */ BTBYTE (cap_ctl); /* dc, dd,de,df */ #define BT848_CAP_CTL_DITH_FRAME (1<<4) #define BT848_CAP_CTL_VBI_ODD (1<<3) #define BT848_CAP_CTL_VBI_EVEN (1<<2) #define BT848_CAP_CTL_ODD (1<<1) #define BT848_CAP_CTL_EVEN (1<<0) BTBYTE (vbi_pack_size); /* e0, e1,e2,e3 */ BTBYTE (vbi_pack_del); /* e4, e5,e6,e7 */ int :32; /* e8, e9,ea,eb */ BTBYTE (o_vtc); /* ec, ed,ee,ef */ BTBYTE (pll_f_lo); /* f0, f1,f2,f3 */ BTBYTE (pll_f_hi); /* f4, f5,f6,f7 */ BTBYTE (pll_f_xci); /* f8, f9,fa,fb */ #define BT848_PLL_F_C (1<<6) #define BT848_PLL_F_X (1<<7) u_char filler2[0x100-0xfc]; BTLONG (int_stat); /* 100, 101,102,103 */ BTLONG (int_mask); /* 104, 105,106,107 */ #define BT848_INT_RISCS (0xf<<28) #define BT848_INT_RISC_EN (1<<27) #define BT848_INT_RACK (1<<25) #define BT848_INT_FIELD (1<<24) #define BT848_INT_MYSTERYBIT (1<<23) #define BT848_INT_SCERR (1<<19) #define BT848_INT_OCERR (1<<18) #define BT848_INT_PABORT (1<<17) #define BT848_INT_RIPERR (1<<16) #define BT848_INT_PPERR (1<<15) #define BT848_INT_FDSR (1<<14) #define BT848_INT_FTRGT (1<<13) #define BT848_INT_FBUS (1<<12) #define BT848_INT_RISCI (1<<11) #define BT848_INT_GPINT (1<<9) #define BT848_INT_I2CDONE (1<<8) #define BT848_INT_RSV1 (1<<7) #define BT848_INT_RSV0 (1<<6) #define BT848_INT_VPRES (1<<5) #define BT848_INT_HLOCK (1<<4) #define BT848_INT_OFLOW (1<<3) #define BT848_INT_HSYNC (1<<2) #define BT848_INT_VSYNC (1<<1) #define BT848_INT_FMTCHG (1<<0) int :32; /* 108, 109,10a,10b */ BTWORD (gpio_dma_ctl); /* 10c, 10d,10e,10f */ #define BT848_DMA_CTL_PL23TP4 (0<<6) /* planar1 trigger 4 */ #define BT848_DMA_CTL_PL23TP8 (1<<6) /* planar1 trigger 8 */ #define BT848_DMA_CTL_PL23TP16 (2<<6) /* planar1 trigger 16 */ #define BT848_DMA_CTL_PL23TP32 (3<<6) /* planar1 trigger 32 */ #define BT848_DMA_CTL_PL1TP4 (0<<4) /* planar1 trigger 4 */ #define BT848_DMA_CTL_PL1TP8 (1<<4) /* planar1 trigger 8 */ #define BT848_DMA_CTL_PL1TP16 (2<<4) /* planar1 trigger 16 */ #define BT848_DMA_CTL_PL1TP32 (3<<4) /* planar1 trigger 32 */ #define BT848_DMA_CTL_PKTP4 (0<<2) /* packed trigger 4 */ #define BT848_DMA_CTL_PKTP8 (1<<2) /* packed trigger 8 */ #define BT848_DMA_CTL_PKTP16 (2<<2) /* packed trigger 16 */ #define BT848_DMA_CTL_PKTP32 (3<<2) /* packed trigger 32 */ #define BT848_DMA_CTL_RISC_EN (1<<1) #define BT848_DMA_CTL_FIFO_EN (1<<0) BTLONG (i2c_data_ctl); /* 110, 111,112,113 */ #define BT848_DATA_CTL_I2CDIV (0xf<<4) #define BT848_DATA_CTL_I2CSYNC (1<<3) #define BT848_DATA_CTL_I2CW3B (1<<2) #define BT848_DATA_CTL_I2CSCL (1<<1) #define BT848_DATA_CTL_I2CSDA (1<<0) BTLONG (risc_strt_add); /* 114, 115,116,117 */ BTLONG (gpio_out_en); /* 118, 119,11a,11b */ /* really 24 bits */ BTLONG (gpio_reg_inp); /* 11c, 11d,11e,11f */ /* really 24 bits */ BTLONG (risc_count); /* 120, 121,122,123 */ u_char filler3[0x200-0x124]; BTLONG (gpio_data); /* 200, 201,202,203 */ /* really 24 bits */ }; #define BKTR_DSTATUS 0x000 #define BKTR_IFORM 0x004 #define BKTR_TDEC 0x008 #define BKTR_E_CROP 0x00C #define BKTR_O_CROP 0x08C #define BKTR_E_VDELAY_LO 0x010 #define BKTR_O_VDELAY_LO 0x090 #define BKTR_E_VACTIVE_LO 0x014 #define BKTR_O_VACTIVE_LO 0x094 #define BKTR_E_DELAY_LO 0x018 #define BKTR_O_DELAY_LO 0x098 #define BKTR_E_HACTIVE_LO 0x01C #define BKTR_O_HACTIVE_LO 
0x09C #define BKTR_E_HSCALE_HI 0x020 #define BKTR_O_HSCALE_HI 0x0A0 #define BKTR_E_HSCALE_LO 0x024 #define BKTR_O_HSCALE_LO 0x0A4 #define BKTR_BRIGHT 0x028 #define BKTR_E_CONTROL 0x02C #define BKTR_O_CONTROL 0x0AC #define BKTR_CONTRAST_LO 0x030 #define BKTR_SAT_U_LO 0x034 #define BKTR_SAT_V_LO 0x038 #define BKTR_HUE 0x03C #define BKTR_E_SCLOOP 0x040 #define BKTR_O_SCLOOP 0x0C0 #define BKTR_OFORM 0x048 #define BKTR_E_VSCALE_HI 0x04C #define BKTR_O_VSCALE_HI 0x0CC #define BKTR_E_VSCALE_LO 0x050 #define BKTR_O_VSCALE_LO 0x0D0 #define BKTR_TEST 0x054 #define BKTR_ADELAY 0x060 #define BKTR_BDELAY 0x064 #define BKTR_ADC 0x068 #define BKTR_E_VTC 0x06C #define BKTR_O_VTC 0x0EC #define BKTR_SRESET 0x07C #define BKTR_COLOR_FMT 0x0D4 #define BKTR_COLOR_CTL 0x0D8 #define BKTR_CAP_CTL 0x0DC #define BKTR_VBI_PACK_SIZE 0x0E0 #define BKTR_VBI_PACK_DEL 0x0E4 #define BKTR_INT_STAT 0x100 #define BKTR_INT_MASK 0x104 #define BKTR_RISC_COUNT 0x120 #define BKTR_RISC_STRT_ADD 0x114 #define BKTR_GPIO_DMA_CTL 0x10C #define BKTR_GPIO_OUT_EN 0x118 #define BKTR_GPIO_REG_INP 0x11C #define BKTR_GPIO_DATA 0x200 #define BKTR_I2C_DATA_CTL 0x110 #define BKTR_TGCTRL 0x084 #define BKTR_PLL_F_LO 0x0F0 #define BKTR_PLL_F_HI 0x0F4 #define BKTR_PLL_F_XCI 0x0F8 /* * device support for onboard tv tuners */ /* description of the LOGICAL tuner */ struct TVTUNER { int frequency; u_char chnlset; u_char channel; u_char band; u_char afc; u_char radio_mode; /* current mode of the radio */ }; /* description of the PHYSICAL tuner */ struct TUNER { char* name; u_char type; u_char pllControl[4]; u_char bandLimits[ 2 ]; u_char bandAddrs[ 4 ]; /* 3 first for the 3 TV ** bands. Last for radio ** band (0x00=NoRadio). */ }; /* description of the card */ #define EEPROMBLOCKSIZE 32 struct CARDTYPE { unsigned int card_id; /* card id (from #define's) */ char* name; const struct TUNER* tuner; /* Tuner details */ u_char tuner_pllAddr; /* Tuner i2c address */ u_char dbx; /* Has DBX chip? */ u_char msp3400c; /* Has msp3400c chip? */ u_char dpl3518a; /* Has dpl3518a chip? */ u_char eepromAddr; u_char eepromSize; /* bytes / EEPROMBLOCKSIZE */ u_int audiomuxs[ 5 ]; /* tuner, ext (line-in) */ /* int/unused (radio) */ /* mute, present */ u_int gpio_mux_bits; /* GPIO mask for audio mux */ }; struct format_params { /* Total lines, lines before image, image lines */ int vtotal, vdelay, vactive; /* Total unscaled horizontal pixels, pixels before image, image pixels */ int htotal, hdelay, hactive; /* Scaled horizontal image pixels, Total Scaled horizontal pixels */ int scaled_hactive, scaled_htotal; /* frame rate. For NTSC this is 30 frames per second */ int frame_rate; /* A-delay and B-delay */ u_char adelay, bdelay; /* Iform XTSEL value */ int iform_xtsel; /* VBI number of lines per field, and number of samples per line */ int vbi_num_lines, vbi_num_samples; }; #if defined(BKTR_USE_FREEBSD_SMBUS) struct bktr_i2c_softc { int bus_owned; device_t iicbb; device_t smbus; }; #endif /* Bt848/878 register access * The registers can either be accessed via a memory mapped structure * or accessed via bus_space. * bus_space access allows cross platform support, whereas the * memory mapped structure method only works on 32 bit processors * with the right type of endianness.
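 * For example, with the bus_space flavour selected below,
 *     OUTW(bktr, BKTR_GPIO_DMA_CTL, FIFO_RISC_DISABLED)
 * expands to
 *     bus_space_write_2(bktr->memt, bktr->memh, 0x10C, FIFO_RISC_DISABLED)
 * so the BKTR_* offsets defined above are plain byte offsets into the
 * chip's register window.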
*/ #if defined(__NetBSD__) || defined(__FreeBSD__) #define INB(bktr,offset) bus_space_read_1((bktr)->memt,(bktr)->memh,(offset)) #define INW(bktr,offset) bus_space_read_2((bktr)->memt,(bktr)->memh,(offset)) #define INL(bktr,offset) bus_space_read_4((bktr)->memt,(bktr)->memh,(offset)) #define OUTB(bktr,offset,value) bus_space_write_1((bktr)->memt,(bktr)->memh,(offset),(value)) #define OUTW(bktr,offset,value) bus_space_write_2((bktr)->memt,(bktr)->memh,(offset),(value)) #define OUTL(bktr,offset,value) bus_space_write_4((bktr)->memt,(bktr)->memh,(offset),(value)) #else #define INB(bktr,offset) *(volatile unsigned char*) ((int)((bktr)->memh)+(offset)) #define INW(bktr,offset) *(volatile unsigned short*)((int)((bktr)->memh)+(offset)) #define INL(bktr,offset) *(volatile unsigned int*) ((int)((bktr)->memh)+(offset)) #define OUTB(bktr,offset,value) *(volatile unsigned char*) ((int)((bktr)->memh)+(offset)) = (value) #define OUTW(bktr,offset,value) *(volatile unsigned short*)((int)((bktr)->memh)+(offset)) = (value) #define OUTL(bktr,offset,value) *(volatile unsigned int*) ((int)((bktr)->memh)+(offset)) = (value) #endif typedef struct bktr_clip bktr_clip_t; /* * BrookTree 848 info structure, one per bt848 card installed. */ struct bktr_softc { #if defined (__bsdi__) struct device bktr_dev; /* base device */ struct isadev bktr_id; /* ISA device */ struct intrhand bktr_ih; /* interrupt vectoring */ #define pcici_t pci_devaddr_t #endif #if defined(__NetBSD__) struct device bktr_dev; /* base device */ bus_dma_tag_t dmat; /* DMA tag */ bus_space_tag_t memt; bus_space_handle_t memh; bus_size_t obmemsz; /* size of en card (bytes) */ void *ih; bus_dmamap_t dm_prog; bus_dmamap_t dm_oprog; bus_dmamap_t dm_mem; bus_dmamap_t dm_vbidata; bus_dmamap_t dm_vbibuffer; #endif #if defined(__OpenBSD__) struct device bktr_dev; /* base device */ bus_dma_tag_t dmat; /* DMA tag */ bus_space_tag_t memt; bus_space_handle_t memh; bus_size_t obmemsz; /* size of en card (bytes) */ void *ih; bus_dmamap_t dm_prog; bus_dmamap_t dm_oprog; bus_dmamap_t dm_mem; bus_dmamap_t dm_vbidata; bus_dmamap_t dm_vbibuffer; size_t dm_mapsize; pci_chipset_tag_t pc; /* Opaque PCI chipset tag */ pcitag_t tag; /* PCI tag, for doing PCI commands */ vm_offset_t phys_base; /* Bt848 register physical address */ #endif #if defined (__FreeBSD__) int mem_rid; /* 4.x resource id */ struct resource *res_mem; /* 4.x resource descriptor for registers */ int irq_rid; /* 4.x resource id */ struct resource *res_irq; /* 4.x resource descriptor for interrupt */ void *res_ih; /* 4.x newbus interrupt handler cookie */ struct cdev *bktrdev; /* 4.x device entry for /dev/bktrN */ struct cdev *tunerdev; /* 4.x device entry for /dev/tunerN */ struct cdev *vbidev; /* 4.x device entry for /dev/vbiN */ struct cdev *bktrdev_alias; /* alias /dev/bktr to /dev/bktr0 */ struct cdev *tunerdev_alias; /* alias /dev/tuner to /dev/tuner0 */ struct cdev *vbidev_alias; /* alias /dev/vbi to /dev/vbi0 */ #if (__FreeBSD_version >= 500000) struct mtx vbimutex; /* Mutex protecting vbi buffer */ #endif bus_space_tag_t memt; /* Bus space register access functions */ bus_space_handle_t memh; /* Bus space register access functions */ bus_size_t obmemsz;/* Size of card (bytes) */ #if defined(BKTR_USE_FREEBSD_SMBUS) struct bktr_i2c_softc i2c_sc; /* bt848_i2c device */ #endif char bktr_xname[7]; /* device name and unit number */ #endif /* The following definitions are for the contiguous memory */ #ifdef __NetBSD__ vaddr_t bigbuf; /* buffer that holds the captured image */ vaddr_t vbidata; /* RISC 
program puts VBI data from the current frame here */ vaddr_t vbibuffer; /* Circular buffer holding VBI data for the user */ vaddr_t dma_prog; /* RISC prog for single and/or even field capture*/ vaddr_t odd_dma_prog; /* RISC program for Odd field capture */ #else vm_offset_t bigbuf; /* buffer that holds the captured image */ vm_offset_t vbidata; /* RISC program puts VBI data from the current frame here */ vm_offset_t vbibuffer; /* Circular buffer holding VBI data for the user */ vm_offset_t dma_prog; /* RISC prog for single and/or even field capture*/ vm_offset_t odd_dma_prog;/* RISC program for Odd field capture */ #endif /* the following definitions are common over all platforms */ int alloc_pages; /* number of pages in bigbuf */ int vbiinsert; /* Position for next write into circular buffer */ int vbistart; /* Position of last read from circular buffer */ int vbisize; /* Number of bytes in the circular buffer */ uint32_t vbi_sequence_number; /* sequence number for VBI */ int vbi_read_blocked; /* user process blocked on read() from /dev/vbi */ struct selinfo vbi_select; /* Data used by select() on /dev/vbi */ struct proc *proc; /* process to receive raised signal */ int signal; /* signal to send to process */ int clr_on_start; /* clear cap buf on capture start? */ #define METEOR_SIG_MODE_MASK 0xffff0000 #define METEOR_SIG_FIELD_MODE 0x00010000 #define METEOR_SIG_FRAME_MODE 0x00000000 char dma_prog_loaded; struct meteor_mem *mem; /* used to control sync. multi-frame output */ u_long synch_wait; /* wait for free buffer before continuing */ short current; /* frame number in buffer (1-frames) */ short rows; /* number of rows in a frame */ short cols; /* number of columns in a frame */ int capture_area_x_offset; /* Usually the full 640x480(NTSC) image is */ int capture_area_y_offset; /* captured. The capture area allows for */ int capture_area_x_size; /* example 320x200 pixels from the centre */ int capture_area_y_size; /* of the video image to be captured. */ char capture_area_enabled; /* When TRUE use user's capture area. */ int pixfmt; /* active pixel format (idx into fmt tbl) */ int pixfmt_compat; /* Y/N - in meteor pix fmt compat mode */ u_long format; /* frame format rgb, yuv, etc.. */ short frames; /* number of frames allocated */ int frame_size; /* number of bytes in a frame */ u_long fifo_errors; /* number of fifo capture errors since open */ u_long dma_errors; /* number of DMA capture errors since open */ u_long frames_captured;/* number of frames captured since open */ u_long even_fields_captured; /* number of even fields captured */ u_long odd_fields_captured; /* number of odd fields captured */ u_long range_enable; /* enable range checking ?? 
*/ u_short capcontrol; /* reg 0xdc capture control */ u_short bktr_cap_ctl; volatile u_int flags; #define METEOR_INITALIZED 0x00000001 #define METEOR_OPEN 0x00000002 #define METEOR_MMAP 0x00000004 #define METEOR_INTR 0x00000008 #define METEOR_READ 0x00000010 /* XXX never gets referenced */ #define METEOR_SINGLE 0x00000020 /* get single frame */ #define METEOR_CONTIN 0x00000040 /* continuously get frames */ #define METEOR_SYNCAP 0x00000080 /* synchronously get frames */ #define METEOR_CAP_MASK 0x000000f0 #define METEOR_NTSC 0x00000100 #define METEOR_PAL 0x00000200 #define METEOR_SECAM 0x00000400 #define BROOKTREE_NTSC 0x00000100 /* used in video open() and */ #define BROOKTREE_PAL 0x00000200 /* in the kernel config */ #define BROOKTREE_SECAM 0x00000400 /* file */ #define METEOR_AUTOMODE 0x00000800 #define METEOR_FORM_MASK 0x00000f00 #define METEOR_DEV0 0x00001000 #define METEOR_DEV1 0x00002000 #define METEOR_DEV2 0x00004000 #define METEOR_DEV3 0x00008000 #define METEOR_DEV_SVIDEO 0x00006000 #define METEOR_DEV_RGB 0x0000a000 #define METEOR_DEV_MASK 0x0000f000 #define METEOR_RGB16 0x00010000 #define METEOR_RGB24 0x00020000 #define METEOR_YUV_PACKED 0x00040000 #define METEOR_YUV_PLANAR 0x00080000 #define METEOR_WANT_EVEN 0x00100000 /* want even frame */ #define METEOR_WANT_ODD 0x00200000 /* want odd frame */ #define METEOR_WANT_MASK 0x00300000 #define METEOR_ONLY_EVEN_FIELDS 0x01000000 #define METEOR_ONLY_ODD_FIELDS 0x02000000 #define METEOR_ONLY_FIELDS_MASK 0x03000000 #define METEOR_YUV_422 0x04000000 #define METEOR_OUTPUT_FMT_MASK 0x040f0000 #define METEOR_WANT_TS 0x08000000 /* time-stamp a frame */ #define METEOR_RGB 0x20000000 /* meteor rgb unit */ #define METEOR_FIELD_MODE 0x80000000 u_char tflags; /* Tuner flags (/dev/tuner) */ #define TUNER_INITALIZED 0x00000001 #define TUNER_OPEN 0x00000002 u_char vbiflags; /* VBI flags (/dev/vbi) */ #define VBI_INITALIZED 0x00000001 #define VBI_OPEN 0x00000002 #define VBI_CAPTURE 0x00000004 u_short fps; /* frames per second */ struct meteor_video video; struct TVTUNER tuner; struct CARDTYPE card; u_char audio_mux_select; /* current mode of the audio */ u_char audio_mute_state; /* mute state of the audio */ u_char format_params; u_long current_sol; u_long current_col; int clip_start; int line_length; int last_y; int y; int y2; int yclip; int yclip2; int max_clip_node; bktr_clip_t clip_list[100]; int reverse_mute; /* Swap the GPIO values for Mute and TV Audio */ int bt848_tuner; int bt848_card; u_long id; #define BT848_USE_XTALS 0 #define BT848_USE_PLL 1 int xtal_pll_mode; /* Use XTAL or PLL mode for PAL/SECAM */ int remote_control; /* remote control detected */ int remote_control_addr; /* remote control i2c address */ char msp_version_string[9]; /* MSP version string 34xxx-xx */ int msp_addr; /* MSP i2c address */ char dpl_version_string[9]; /* DPL version string 35xxx-xx */ int dpl_addr; /* DPL i2c address */ int slow_msp_audio; /* 0 = use fast MSP3410/3415 programming sequence */ /* 1 = use slow MSP3410/3415 programming sequence */ /* 2 = use Tuner's Mono audio output via the MSP chip */ int msp_use_mono_source; /* use Tuner's Mono audio output via the MSP chip */ int audio_mux_present; /* 1 = has audio mux on GPIO lines, 0 = no audio mux */ int msp_source_selected; /* 0 = TV source, 1 = Line In source, 2 = FM Radio Source */ #ifdef BKTR_NEW_MSP34XX_DRIVER /* msp3400c related data */ void * msp3400c_info; int stereo_once; int amsound; int mspsimple; int dolby; #endif }; typedef struct bktr_softc bktr_reg_t; typedef struct bktr_softc* bktr_ptr_t; 
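/*
 * Illustrative only (this helper is not part of the original header):
 * because the capture-state bits in bktr_softc.flags form a mask field,
 * state tests mask first and then test, e.g.:
 */
static __inline int
bktr_is_capturing(struct bktr_softc *bktr)
{
	/* METEOR_CAP_MASK covers the SINGLE/CONTIN/SYNCAP capture bits */
	return ((bktr->flags & METEOR_CAP_MASK) != 0);
}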
#define Bt848_MAX_SIGN 16 struct bt848_card_sig { int card; int tuner; u_char signature[Bt848_MAX_SIGN]; }; /***********************************************************/ /* ioctl_cmd_t int on old versions, u_long on new versions */ /***********************************************************/ #if defined(__FreeBSD__) typedef u_long ioctl_cmd_t; #endif #if defined(__NetBSD__) || defined(__OpenBSD__) typedef u_long ioctl_cmd_t; #endif Index: head/sys/dev/e1000/e1000_osdep.h =================================================================== --- head/sys/dev/e1000/e1000_osdep.h (revision 303889) +++ head/sys/dev/e1000/e1000_osdep.h (revision 303890) @@ -1,220 +1,220 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _FREEBSD_OS_H_ #define _FREEBSD_OS_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define ASSERT(x) if(!(x)) panic("EM: x") #define usec_delay(x) DELAY(x) #define usec_delay_irq(x) usec_delay(x) #define msec_delay(x) DELAY(1000*(x)) #define msec_delay_irq(x) DELAY(1000*(x)) /* Enable/disable debugging statements in shared code */ #define DBG 0 #define DEBUGOUT(...) \ do { if (DBG) printf(__VA_ARGS__); } while (0) #define DEBUGOUT1(...) DEBUGOUT(__VA_ARGS__) #define DEBUGOUT2(...) DEBUGOUT(__VA_ARGS__) #define DEBUGOUT3(...) DEBUGOUT(__VA_ARGS__) #define DEBUGOUT7(...) 
DEBUGOUT(__VA_ARGS__) #define DEBUGFUNC(F) DEBUGOUT(F "\n") #define STATIC static #define FALSE 0 #define TRUE 1 #define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */ #define PCI_COMMAND_REGISTER PCIR_COMMAND /* Mutex used in the shared code */ #define E1000_MUTEX struct mtx #define E1000_MUTEX_INIT(mutex) mtx_init((mutex), #mutex, \ MTX_NETWORK_LOCK, \ MTX_DEF | MTX_DUPOK) #define E1000_MUTEX_DESTROY(mutex) mtx_destroy(mutex) #define E1000_MUTEX_LOCK(mutex) mtx_lock(mutex) #define E1000_MUTEX_TRYLOCK(mutex) mtx_trylock(mutex) #define E1000_MUTEX_UNLOCK(mutex) mtx_unlock(mutex) typedef uint64_t u64; typedef uint32_t u32; typedef uint16_t u16; typedef uint8_t u8; typedef int64_t s64; typedef int32_t s32; typedef int16_t s16; typedef int8_t s8; #define __le16 u16 #define __le32 u32 #define __le64 u64 #if __FreeBSD_version < 800000 #if defined(__i386__) || defined(__amd64__) #define mb() __asm volatile("mfence" ::: "memory") #define wmb() __asm volatile("sfence" ::: "memory") #define rmb() __asm volatile("lfence" ::: "memory") #else #define mb() #define rmb() #define wmb() #endif #endif /*__FreeBSD_version < 800000 */ #if defined(__i386__) || defined(__amd64__) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } #else #define prefetch(x) #endif struct e1000_osdep { bus_space_tag_t mem_bus_space_tag; bus_space_handle_t mem_bus_space_handle; bus_space_tag_t io_bus_space_tag; bus_space_handle_t io_bus_space_handle; bus_space_tag_t flash_bus_space_tag; bus_space_handle_t flash_bus_space_handle; - struct device *dev; + device_t dev; }; #define E1000_REGISTER(hw, reg) (((hw)->mac.type >= e1000_82543) \ ? reg : e1000_translate_register_82542(reg)) #define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS) /* Read from an absolute offset in the adapter's memory space */ #define E1000_READ_OFFSET(hw, offset) \ bus_space_read_4(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, offset) /* Write to an absolute offset in the adapter's memory space */ #define E1000_WRITE_OFFSET(hw, offset, value) \ bus_space_write_4(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, offset, value) /* Register READ/WRITE macros */ #define E1000_READ_REG(hw, reg) \ bus_space_read_4(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg)) #define E1000_WRITE_REG(hw, reg, value) \ bus_space_write_4(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg), value) #define E1000_READ_REG_ARRAY(hw, reg, index) \ bus_space_read_4(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg) + ((index)<< 2)) #define E1000_WRITE_REG_ARRAY(hw, reg, index, value) \ bus_space_write_4(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg) + ((index)<< 2), value) #define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY #define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY #define E1000_READ_REG_ARRAY_BYTE(hw, reg, index) \ bus_space_read_1(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg) + index) #define E1000_WRITE_REG_ARRAY_BYTE(hw, reg, 
index, value) \ bus_space_write_1(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg) + index, value) #define E1000_WRITE_REG_ARRAY_WORD(hw, reg, index, value) \ bus_space_write_2(((struct e1000_osdep *)(hw)->back)->mem_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->mem_bus_space_handle, \ E1000_REGISTER(hw, reg) + (index << 1), value) #define E1000_WRITE_REG_IO(hw, reg, value) do {\ bus_space_write_4(((struct e1000_osdep *)(hw)->back)->io_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->io_bus_space_handle, \ (hw)->io_base, reg); \ bus_space_write_4(((struct e1000_osdep *)(hw)->back)->io_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->io_bus_space_handle, \ (hw)->io_base + 4, value); } while (0) #define E1000_READ_FLASH_REG(hw, reg) \ bus_space_read_4(((struct e1000_osdep *)(hw)->back)->flash_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->flash_bus_space_handle, reg) #define E1000_READ_FLASH_REG16(hw, reg) \ bus_space_read_2(((struct e1000_osdep *)(hw)->back)->flash_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->flash_bus_space_handle, reg) #define E1000_WRITE_FLASH_REG(hw, reg, value) \ bus_space_write_4(((struct e1000_osdep *)(hw)->back)->flash_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->flash_bus_space_handle, reg, value) #define E1000_WRITE_FLASH_REG16(hw, reg, value) \ bus_space_write_2(((struct e1000_osdep *)(hw)->back)->flash_bus_space_tag, \ ((struct e1000_osdep *)(hw)->back)->flash_bus_space_handle, reg, value) #endif /* _FREEBSD_OS_H_ */ Index: head/sys/dev/e1000/if_em.h =================================================================== --- head/sys/dev/e1000/if_em.h (revision 303889) +++ head/sys/dev/e1000/if_em.h (revision 303890) @@ -1,558 +1,558 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #ifndef _EM_H_DEFINED_ #define _EM_H_DEFINED_ /* Tunables */ /* * EM_TXD: Maximum number of Transmit Descriptors * Valid Range: 80-256 for 82542 and 82543-based adapters * 80-4096 for others * Default Value: 256 * This value is the number of transmit descriptors allocated by the driver. * Increasing this value allows the driver to queue more transmits. Each * descriptor is 16 bytes. * Since TDLEN should be a multiple of 128 bytes, the number of transmit * descriptors should meet the following condition: * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0 */ #define EM_MIN_TXD 80 #define EM_MAX_TXD 4096 #ifdef EM_MULTIQUEUE #define EM_DEFAULT_TXD 4096 #else #define EM_DEFAULT_TXD 1024 #endif /* * EM_RXD - Maximum number of Receive Descriptors * Valid Range: 80-256 for 82542 and 82543-based adapters * 80-4096 for others * Default Value: 256 * This value is the number of receive descriptors allocated by the driver. * Increasing this value allows the driver to buffer more incoming packets. * Each descriptor is 16 bytes. A receive buffer is also allocated for each * descriptor. The maximum MTU size is 16110. * Since RDLEN should be a multiple of 128 bytes, the number of receive * descriptors should meet the following condition: * (num_rx_desc * sizeof(union e1000_rx_desc_extended)) % 128 == 0 */ #define EM_MIN_RXD 80 #define EM_MAX_RXD 4096 #ifdef EM_MULTIQUEUE #define EM_DEFAULT_RXD 4096 #else #define EM_DEFAULT_RXD 1024 #endif /* * EM_TIDV - Transmit Interrupt Delay Value * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value delays the generation of transmit interrupts in units of * 1.024 microseconds. Transmit interrupt reduction can improve CPU * efficiency if properly tuned for specific network traffic. If the * system is reporting dropped transmits, this value may be set too high * causing the driver to run out of available transmit descriptors. */ #define EM_TIDV 64 /* * EM_TADV - Transmit Absolute Interrupt Delay Value * (Not valid for 82542/82543/82544) * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * transmit interrupt is generated. Useful only if EM_TIDV is non-zero, * this value ensures that an interrupt is generated after the initial * packet is sent on the wire within the set amount of time. Proper tuning, * along with EM_TIDV, may improve traffic throughput in specific * network conditions. */ #define EM_TADV 64 /* * EM_RDTR - Receive Interrupt Delay Timer (Packet Timer) * Valid Range: 0-65535 (0=off) * Default Value: 0 * This value delays the generation of receive interrupts in units of 1.024 * microseconds. Receive interrupt reduction can improve CPU efficiency if * properly tuned for specific network traffic. Increasing this value adds * extra latency to frame reception and can end up decreasing the throughput * of TCP traffic. If the system is reporting dropped receives, this value * may be set too high, causing the driver to run out of available receive * descriptors. * * CAUTION: When setting EM_RDTR to a value other than 0, adapters * may hang (stop transmitting) under certain network conditions. * If this occurs a WATCHDOG message is logged in the system * event log. In addition, the controller is automatically reset, * restoring the network connection. To eliminate the potential * for the hang ensure that EM_RDTR is set to 0.
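 * * (For scale: these delay timers count 1.024 us units, so the
 * multiqueue default of EM_RDTR = 64 below corresponds to roughly
 * 64 * 1.024 = 65.5 us of added interrupt-coalescing latency.)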
*/ #ifdef EM_MULTIQUEUE #define EM_RDTR 64 #else #define EM_RDTR 0 #endif /* * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544) * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * receive interrupt is generated. Useful only if EM_RDTR is non-zero, * this value ensures that an interrupt is generated after the initial * packet is received within the set amount of time. Proper tuning, * along with EM_RDTR, may improve traffic throughput in specific network * conditions. */ #ifdef EM_MULTIQUEUE #define EM_RADV 128 #else #define EM_RADV 64 #endif /* * This parameter controls the maximum duration of the transmit watchdog. */ #define EM_WATCHDOG (10 * hz) /* * This parameter controls when the driver calls the routine to reclaim * transmit descriptors. */ #define EM_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8) /* * This parameter controls whether or not autonegotiation is enabled. * 0 - Disable autonegotiation * 1 - Enable autonegotiation */ #define DO_AUTO_NEG 1 /* * This parameter controls whether or not the driver will wait for * autonegotiation to complete. * 1 - Wait for autonegotiation to complete * 0 - Don't wait for autonegotiation to complete */ #define WAIT_FOR_AUTO_NEG_DEFAULT 0 /* Tunables -- End */ #define AUTONEG_ADV_DEFAULT (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ ADVERTISE_1000_FULL) #define AUTO_ALL_MODES 0 /* PHY master/slave setting */ #define EM_MASTER_SLAVE e1000_ms_hw_default /* * Miscellaneous constants */ #define EM_VENDOR_ID 0x8086 #define EM_FLASH 0x0014 #define EM_JUMBO_PBA 0x00000028 #define EM_DEFAULT_PBA 0x00000030 #define EM_SMARTSPEED_DOWNSHIFT 3 #define EM_SMARTSPEED_MAX 15 #define EM_MAX_LOOP 10 #define MAX_NUM_MULTICAST_ADDRESSES 128 #define PCI_ANY_ID (~0U) #define ETHER_ALIGN 2 #define EM_FC_PAUSE_TIME 0x0680 #define EM_EEPROM_APME 0x400 #define EM_82544_APME 0x0004 /* * Driver state logic for the detection of a hung state * in hardware. Set TX_HUNG whenever a TX packet is used * (data is sent) and clear it when txeof() is invoked if * any descriptors from the ring are cleaned/reclaimed. * Increment internal counter if no descriptors are cleaned * and compare to TX_MAXTRIES. When counter > TX_MAXTRIES, * reset adapter. */ #define EM_TX_IDLE 0x00000000 #define EM_TX_BUSY 0x00000001 #define EM_TX_HUNG 0x80000000 #define EM_TX_MAXTRIES 10 #define PCICFG_DESC_RING_STATUS 0xe4 #define FLUSH_DESC_REQUIRED 0x100 /* * TDBA/RDBA need only be aligned on a 16 byte boundary, but TDLEN/RDLEN must * be a multiple of 128 bytes, so we align TDBA/RDBA on a 128 byte boundary. * This also optimizes the cache line size effect. H/W supports cache line * sizes up to 128.
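 * (Worked example: the default ring of EM_DEFAULT_TXD = 1024 descriptors
 * occupies 1024 * 16 = 16384 bytes, a multiple of 128, so the TDLEN
 * constraint above is met; an allocator can enforce it in general with
 * roundup2(size, EM_DBA_ALIGN).)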
*/ #define EM_DBA_ALIGN 128 /* * See Intel 82574 Driver Programming Interface Manual, Section 10.2.6.9 */ #define TARC_COMPENSATION_MODE (1 << 7) /* Compensation Mode */ #define TARC_SPEED_MODE_BIT (1 << 21) /* On PCI-E MACs only */ #define TARC_MQ_FIX (1 << 23) | \ (1 << 24) | \ (1 << 25) /* Handle errata in MQ mode */ #define TARC_ERRATA_BIT (1 << 26) /* Note from errata on 82574 */ /* PCI Config defines */ #define EM_BAR_TYPE(v) ((v) & EM_BAR_TYPE_MASK) #define EM_BAR_TYPE_MASK 0x00000001 #define EM_BAR_TYPE_MMEM 0x00000000 #define EM_BAR_TYPE_FLASH 0x0014 #define EM_BAR_MEM_TYPE(v) ((v) & EM_BAR_MEM_TYPE_MASK) #define EM_BAR_MEM_TYPE_MASK 0x00000006 #define EM_BAR_MEM_TYPE_32BIT 0x00000000 #define EM_BAR_MEM_TYPE_64BIT 0x00000004 #define EM_MSIX_BAR 3 /* On 82575 */ /* More backward compatibility */ #if __FreeBSD_version < 900000 #define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD #endif /* Defines for printing debug information */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) #define EM_MAX_SCATTER 40 #define EM_VFTA_SIZE 128 #define EM_TSO_SIZE (65535 + sizeof(struct ether_vlan_header)) #define EM_TSO_SEG_SIZE 4096 /* Max dma segment size */ #define EM_MSIX_MASK 0x01F00000 /* For 82574 use */ #define EM_MSIX_LINK 0x01000000 /* For 82574 use */ #define ETH_ZLEN 60 #define ETH_ADDR_LEN 6 #define CSUM_OFFLOAD 7 /* Offload bits in mbuf flag */ /* * 82574 has a nonstandard address for EIAC * and since its only used in MSIX, and in * the em driver only 82574 uses MSIX we can * solve it just using this define. */ #define EM_EIAC 0x000DC /* * 82574 only reports 3 MSI-X vectors by default; * defines assisting with making it report 5 are * located here. */ #define EM_NVM_PCIE_CTRL 0x1B #define EM_NVM_MSIX_N_MASK (0x7 << EM_NVM_MSIX_N_SHIFT) #define EM_NVM_MSIX_N_SHIFT 7 /* * Bus dma allocation structure used by * e1000_dma_malloc and e1000_dma_free. 
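 * A rough sketch of the usual fill sequence (e1000_dma_malloc() itself
 * lives in if_em.c, not in this header; error unwinding between the
 * steps is omitted here):
 *
 *	bus_dma_tag_create(bus_get_dma_tag(dev), EM_DBA_ALIGN, 0,
 *	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
 *	    size, 1, size, 0, NULL, NULL, &dma->dma_tag);
 *	bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
 *	    BUS_DMA_NOWAIT, &dma->dma_map);
 *	bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
 *	    size, em_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT);
 *
 * where em_dmamap_cb stands for a callback that stores the loaded bus
 * address into dma_paddr.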
*/ struct em_dma_alloc { bus_addr_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; int dma_nseg; }; struct adapter; struct em_int_delay_info { struct adapter *adapter; /* Back-pointer to the adapter struct */ int offset; /* Register offset to read/write */ int value; /* Current value in usecs */ }; /* * The transmit ring, one per tx queue */ struct tx_ring { struct adapter *adapter; struct mtx tx_mtx; char mtx_name[16]; u32 me; u32 msix; u32 ims; int busy; struct em_dma_alloc txdma; struct e1000_tx_desc *tx_base; struct task tx_task; struct taskqueue *tq; u32 next_avail_desc; u32 next_to_clean; struct em_txbuffer *tx_buffers; volatile u16 tx_avail; u32 tx_tso; /* last tx was tso */ u16 last_hw_offload; u8 last_hw_ipcso; u8 last_hw_ipcss; u8 last_hw_tucso; u8 last_hw_tucss; #if __FreeBSD_version >= 800000 struct buf_ring *br; #endif /* Interrupt resources */ bus_dma_tag_t txtag; void *tag; struct resource *res; unsigned long tx_irq; unsigned long no_desc_avail; }; /* * The Receive ring, one per rx queue */ struct rx_ring { struct adapter *adapter; u32 me; u32 msix; u32 ims; struct mtx rx_mtx; char mtx_name[16]; u32 payload; struct task rx_task; struct taskqueue *tq; union e1000_rx_desc_extended *rx_base; struct em_dma_alloc rxdma; u32 next_to_refresh; u32 next_to_check; struct em_rxbuffer *rx_buffers; struct mbuf *fmp; struct mbuf *lmp; /* Interrupt resources */ void *tag; struct resource *res; bus_dma_tag_t rxtag; bool discard; /* Soft stats */ unsigned long rx_irq; unsigned long rx_discarded; unsigned long rx_packets; unsigned long rx_bytes; }; /* Our adapter structure */ struct adapter { if_t ifp; struct e1000_hw hw; /* FreeBSD operating-system-specific structures. */ struct e1000_osdep osdep; - struct device *dev; + device_t dev; struct cdev *led_dev; struct resource *memory; struct resource *flash; struct resource *msix_mem; struct resource *res; void *tag; u32 linkvec; u32 ivars; struct ifmedia media; struct callout timer; int msix; int if_flags; int max_frame_size; int min_frame_size; struct mtx core_mtx; int em_insert_vlan_header; u32 ims; bool in_detach; /* Task for FAST handling */ struct task link_task; struct task que_task; struct taskqueue *tq; /* private task queue */ eventhandler_tag vlan_attach; eventhandler_tag vlan_detach; u16 num_vlans; u8 num_queues; /* * Transmit rings: * Allocated at run time, an array of rings. */ struct tx_ring *tx_rings; int num_tx_desc; u32 txd_cmd; /* * Receive rings: * Allocated at run time, an array of rings. */ struct rx_ring *rx_rings; int num_rx_desc; u32 rx_process_limit; u32 rx_mbuf_sz; /* Management and WOL features */ u32 wol; bool has_manage; bool has_amt; /* Multicast array memory */ u8 *mta; /* ** Shadow VFTA table, this is needed because ** the real vlan filter table gets cleared during ** a soft reset and the driver needs to be able ** to repopulate it. 
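** For scale: each u32 word holds 32 VLAN ids, so a repopulate loop is
** roughly shadow_vfta[(vid >> 5) & 0x7F] |= 1 << (vid & 0x1F);
** and 128 words * 32 bits covers all 4096 possible VLAN ids.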
*/ u32 shadow_vfta[EM_VFTA_SIZE]; /* Info about the interface */ u16 link_active; u16 fc; u16 link_speed; u16 link_duplex; u32 smartspeed; struct em_int_delay_info tx_int_delay; struct em_int_delay_info tx_abs_int_delay; struct em_int_delay_info rx_int_delay; struct em_int_delay_info rx_abs_int_delay; struct em_int_delay_info tx_itr; /* Misc stats maintained by the driver */ unsigned long dropped_pkts; unsigned long link_irq; unsigned long mbuf_defrag_failed; unsigned long no_tx_dma_setup; unsigned long no_tx_map_avail; unsigned long rx_overruns; unsigned long watchdog_events; struct e1000_hw_stats stats; }; /******************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. * ********************************************************************************/ typedef struct _em_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } em_vendor_info_t; struct em_txbuffer { int next_eop; /* Index of the desc to watch */ struct mbuf *m_head; bus_dmamap_t map; /* bus_dma map for packet */ }; struct em_rxbuffer { int next_eop; /* Index of the desc to watch */ struct mbuf *m_head; bus_dmamap_t map; /* bus_dma map for packet */ bus_addr_t paddr; }; /* ** Find the number of unrefreshed RX descriptors */ static inline u16 e1000_rx_unrefreshed(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; if (rxr->next_to_check > rxr->next_to_refresh) return (rxr->next_to_check - rxr->next_to_refresh - 1); else return ((adapter->num_rx_desc + rxr->next_to_check) - rxr->next_to_refresh - 1); } #define EM_CORE_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->core_mtx, _name, "EM Core Lock", MTX_DEF) #define EM_TX_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->tx_mtx, _name, "EM TX Lock", MTX_DEF) #define EM_RX_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->rx_mtx, _name, "EM RX Lock", MTX_DEF) #define EM_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) #define EM_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) #define EM_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) #define EM_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) #define EM_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) #define EM_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) #define EM_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) #define EM_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) #define EM_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) #define EM_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) #define EM_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) #define EM_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) #define EM_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_mtx, MA_OWNED) #define EM_RSSRK_SIZE 4 #define EM_RSSRK_VAL(key, i) (key[(i) * EM_RSSRK_SIZE] | \ key[(i) * EM_RSSRK_SIZE + 1] << 8 | \ key[(i) * EM_RSSRK_SIZE + 2] << 16 | \ key[(i) * EM_RSSRK_SIZE + 3] << 24) #endif /* _EM_H_DEFINED_ */ Index: head/sys/dev/e1000/if_igb.h =================================================================== --- head/sys/dev/e1000/if_igb.h (revision 303889) +++ head/sys/dev/e1000/if_igb.h (revision 303890) @@ -1,634 +1,634 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. 
Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IF_IGB_H_ #define _IF_IGB_H_ #ifdef ALTQ #define IGB_LEGACY_TX #endif #include #include #ifndef IGB_LEGACY_TX #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include "e1000_api.h" #include "e1000_82575.h" /* Tunables */ /* * IGB_TXD: Maximum number of Transmit Descriptors * * This value is the number of transmit descriptors allocated by the driver. * Increasing this value allows the driver to queue more transmits. Each * descriptor is 16 bytes. * Since TDLEN must be a multiple of 128 bytes, the number of transmit * descriptors must satisfy the following condition. * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0 */ #define IGB_MIN_TXD 256 #define IGB_DEFAULT_TXD 1024 #define IGB_MAX_TXD 4096 /* * IGB_RXD: Maximum number of Receive Descriptors * * This value is the number of receive descriptors allocated by the driver. * Increasing this value allows the driver to buffer more incoming packets. * Each descriptor is 16 bytes. A receive buffer is also allocated for each * descriptor. The maximum MTU size is 16110. * Since RDLEN must likewise be a multiple of 128 bytes, the number of receive * descriptors must satisfy the following condition. * (num_rx_desc * sizeof(union e1000_adv_rx_desc)) % 128 == 0 */ #define IGB_MIN_RXD 256 #define IGB_DEFAULT_RXD 1024 #define IGB_MAX_RXD 4096 /* * IGB_TIDV - Transmit Interrupt Delay Value * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value delays the generation of transmit interrupts in units of * 1.024 microseconds. Transmit interrupt reduction can improve CPU * efficiency if properly tuned for specific network traffic. If the * system is reporting dropped transmits, this value may be set too high * causing the driver to run out of available transmit descriptors.
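 */

/*
 * Hedged aside (the helper name is illustrative, not part of the
 * driver): the delay tunables below are written to hardware registers
 * that count in 1.024 microsecond units, so a plain-microsecond value
 * in the supported 0-65535 range would be scaled roughly as follows.
 */
static inline uint32_t
example_usecs_to_delay_units(uint32_t usecs)
{
	/* One register unit is 1.024 us, i.e. 1024 ns. */
	return ((usecs * 1000) / 1024);
}

/*
 * Default transmit interrupt delay, in 1.024 usec units: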
*/ #define IGB_TIDV 64 /* * IGB_TADV - Transmit Absolute Interrupt Delay Value * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * transmit interrupt is generated. Useful only if IGB_TIDV is non-zero, * this value ensures that an interrupt is generated after the initial * packet is sent on the wire within the set amount of time. Proper tuning, * along with IGB_TIDV, may improve traffic throughput in specific * network conditions. */ #define IGB_TADV 64 /* * IGB_RDTR - Receive Interrupt Delay Timer (Packet Timer) * Valid Range: 0-65535 (0=off) * Default Value: 0 * This value delays the generation of receive interrupts in units of 1.024 * microseconds. Receive interrupt reduction can improve CPU efficiency if * properly tuned for specific network traffic. Increasing this value adds * extra latency to frame reception and can end up decreasing the throughput * of TCP traffic. If the system is reporting dropped receives, this value * may be set too high, causing the driver to run out of available receive * descriptors. * * CAUTION: When setting IGB_RDTR to a value other than 0, adapters * may hang (stop transmitting) under certain network conditions. * If this occurs a WATCHDOG message is logged in the system * event log. In addition, the controller is automatically reset, * restoring the network connection. To eliminate the potential * for the hang ensure that IGB_RDTR is set to 0. */ #define IGB_RDTR 0 /* * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544) * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * receive interrupt is generated. Useful only if IGB_RDTR is non-zero, * this value ensures that an interrupt is generated after the initial * packet is received within the set amount of time. Proper tuning, * along with IGB_RDTR, may improve traffic throughput in specific network * conditions. */ #define IGB_RADV 64 /* * This parameter controls the duration of the transmit watchdog timer. */ #define IGB_WATCHDOG (10 * hz) /* * This parameter controls when the driver calls the routine to reclaim * transmit descriptors. Cleaning earlier seems a win. */ #define IGB_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 2) /* * This parameter controls whether or not autonegotiation is enabled. * 0 - Disable autonegotiation * 1 - Enable autonegotiation */ #define DO_AUTO_NEG 1 /* * This parameter controls whether or not the driver will wait for * autonegotiation to complete. * 1 - Wait for autonegotiation to complete * 0 - Don't wait for autonegotiation to complete */ #define WAIT_FOR_AUTO_NEG_DEFAULT 0 /* Tunables -- End */ #define AUTONEG_ADV_DEFAULT (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ ADVERTISE_1000_FULL) #define AUTO_ALL_MODES 0 /* PHY master/slave setting */ #define IGB_MASTER_SLAVE e1000_ms_hw_default /* Support AutoMediaDetect for Marvell M88 PHY in i354 */ #define IGB_MEDIA_RESET (1 << 0) /* * Miscellaneous constants */ #define IGB_INTEL_VENDOR_ID 0x8086 #define IGB_JUMBO_PBA 0x00000028 #define IGB_DEFAULT_PBA 0x00000030 #define IGB_SMARTSPEED_DOWNSHIFT 3 #define IGB_SMARTSPEED_MAX 15 #define IGB_MAX_LOOP 10 #define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : \ ((hw->mac.type <= e1000_82576) ? 16 : 8)) #define IGB_RX_HTHRESH 8 #define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \ adapter->msix_mem) ? 1 : 4) #define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ?
20 : 8) #define IGB_TX_HTHRESH 1 #define IGB_TX_WTHRESH ((hw->mac.type != e1000_82575 && \ adapter->msix_mem) ? 1 : 16) #define MAX_NUM_MULTICAST_ADDRESSES 128 #define PCI_ANY_ID (~0U) #define ETHER_ALIGN 2 #define IGB_TX_BUFFER_SIZE ((uint32_t) 1514) #define IGB_FC_PAUSE_TIME 0x0680 #define IGB_EEPROM_APME 0x400 /* Queue minimum free for use */ #define IGB_QUEUE_THRESHOLD (adapter->num_tx_desc / 8) /* * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be * a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary. This * also optimizes for cache line size; the hardware supports cache line sizes * up to 128. */ #define IGB_DBA_ALIGN 128 #define SPEED_MODE_BIT (1<<21) /* On PCI-E MACs only */ /* PCI Config defines */ #define IGB_MSIX_BAR 3 /* Defines for printing debug information */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) #define IGB_MAX_SCATTER 40 #define IGB_VFTA_SIZE 128 #define IGB_BR_SIZE 4096 /* ring buf size */ #define IGB_TSO_SIZE (65535 + sizeof(struct ether_vlan_header)) #define IGB_TSO_SEG_SIZE 4096 /* Max dma segment size */ #define IGB_TXPBSIZE 20408 #define IGB_HDR_BUF 128 #define IGB_PKTTYPE_MASK 0x0000FFF0 #define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coalesce Flush */ #define ETH_ZLEN 60 #define ETH_ADDR_LEN 6 /* Offload bits in mbuf flag */ #if __FreeBSD_version >= 1000000 #define CSUM_OFFLOAD_IPV4 (CSUM_IP|CSUM_IP_TCP|CSUM_IP_UDP|CSUM_IP_SCTP) #define CSUM_OFFLOAD_IPV6 (CSUM_IP6_TCP|CSUM_IP6_UDP|CSUM_IP6_SCTP) #define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6) #elif __FreeBSD_version >= 800000 #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) #else #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP) #endif /* Define the starting Interrupt rate per Queue */ #define IGB_INTS_PER_SEC 8000 #define IGB_DEFAULT_ITR ((1000000/IGB_INTS_PER_SEC) << 2) #define IGB_LINK_ITR 2000 #define I210_LINK_DELAY 1000 /* Precision Time Sync (IEEE 1588) defines */ #define ETHERTYPE_IEEE1588 0x88F7 #define PICOSECS_PER_TICK 20833 #define TSYNC_PORT 319 /* UDP port for the protocol */ /* * Bus dma allocation structure used by * e1000_dma_malloc and e1000_dma_free. */ struct igb_dma_alloc { bus_addr_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; int dma_nseg; }; /* ** Driver queue struct: this is the interrupt container ** for the associated tx and rx ring.
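 */

/*
 * Hedged sketch (the names are illustrative; the real MSI-X handlers
 * live in if_igb.c): the typical fast-interrupt pattern for a per-queue
 * container like the one declared below is to count the interrupt and
 * defer the actual TX/RX processing to the queue's taskqueue.  A
 * stand-in context struct keeps the example self-contained.
 */
struct example_queue_ctx {
	struct taskqueue	*tq;
	struct task		task;
	uint64_t		irqs;
};

static int
example_queue_filter(void *arg)
{
	struct example_queue_ctx *q = arg;

	q->irqs++;
	taskqueue_enqueue(q->tq, &q->task);
	return (FILTER_HANDLED);
}

/*
 * The driver's per-queue interrupt container: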
*/ struct igb_queue { struct adapter *adapter; u32 msix; /* This queue's MSIX vector */ u32 eims; /* This queue's EIMS bit */ u32 eitr_setting; struct resource *res; void *tag; struct tx_ring *txr; struct rx_ring *rxr; struct task que_task; struct taskqueue *tq; u64 irqs; }; /* * The transmit ring, one per queue */ struct tx_ring { struct adapter *adapter; struct mtx tx_mtx; u32 me; int watchdog_time; union e1000_adv_tx_desc *tx_base; struct igb_tx_buf *tx_buffers; struct igb_dma_alloc txdma; volatile u16 tx_avail; u16 next_avail_desc; u16 next_to_clean; u16 num_desc; enum { IGB_QUEUE_IDLE = 1, IGB_QUEUE_WORKING = 2, IGB_QUEUE_HUNG = 4, IGB_QUEUE_DEPLETED = 8, } queue_status; u32 txd_cmd; bus_dma_tag_t txtag; char mtx_name[16]; #ifndef IGB_LEGACY_TX struct buf_ring *br; struct task txq_task; #endif u32 bytes; /* used for AIM */ u32 packets; /* Soft Stats */ unsigned long tso_tx; unsigned long no_tx_map_avail; unsigned long no_tx_dma_setup; u64 no_desc_avail; u64 total_packets; }; /* * Receive ring: one per queue */ struct rx_ring { struct adapter *adapter; u32 me; struct igb_dma_alloc rxdma; union e1000_adv_rx_desc *rx_base; struct lro_ctrl lro; bool lro_enabled; bool hdr_split; struct mtx rx_mtx; char mtx_name[16]; u32 next_to_refresh; u32 next_to_check; struct igb_rx_buf *rx_buffers; bus_dma_tag_t htag; /* dma tag for rx head */ bus_dma_tag_t ptag; /* dma tag for rx packet */ /* * First/last mbuf pointers, for * collecting multisegment RX packets. */ struct mbuf *fmp; struct mbuf *lmp; u32 bytes; u32 packets; int rdt; int rdh; /* Soft stats */ u64 rx_split_packets; u64 rx_discarded; u64 rx_packets; u64 rx_bytes; }; struct adapter { struct ifnet *ifp; struct e1000_hw hw; struct e1000_osdep osdep; - struct device *dev; + device_t dev; struct cdev *led_dev; struct resource *pci_mem; struct resource *msix_mem; int memrid; /* * Interrupt resources: this set is * either used for legacy, or for Link * when doing MSIX */ void *tag; struct resource *res; struct ifmedia media; struct callout timer; int msix; int if_flags; int pause_frames; struct mtx core_mtx; eventhandler_tag vlan_attach; eventhandler_tag vlan_detach; u16 num_vlans; u16 num_queues; /* ** Shadow VFTA table, this is needed because ** the real vlan filter table gets cleared during ** a soft reset and the driver needs to be able ** to repopulate it. */ u32 shadow_vfta[IGB_VFTA_SIZE]; /* Info about the interface */ u32 optics; u32 fc; /* local flow ctrl setting */ int advertise; /* link speeds */ bool link_active; u16 max_frame_size; u16 num_segs; u16 link_speed; bool link_up; u32 linkvec; u16 link_duplex; u32 dmac; int link_mask; /* Flags */ u32 flags; /* Mbuf cluster size */ u32 rx_mbuf_sz; /* Support for pluggable optics */ bool sfp_probe; struct task link_task; /* Link tasklet */ struct task mod_task; /* SFP tasklet */ struct task msf_task; /* Multispeed Fiber */ struct taskqueue *tq; /* ** Queues: ** This is the irq holder, it has ** and RX/TX pair or rings associated ** with it. */ struct igb_queue *queues; /* * Transmit rings: * Allocated at run time, an array of rings. */ struct tx_ring *tx_rings; u32 num_tx_desc; /* * Receive rings: * Allocated at run time, an array of rings. 
*/ struct rx_ring *rx_rings; u64 que_mask; u32 num_rx_desc; /* Multicast array memory */ u8 *mta; /* Misc stats maintained by the driver */ unsigned long device_control; unsigned long dropped_pkts; unsigned long eint_mask; unsigned long int_mask; unsigned long link_irq; unsigned long mbuf_defrag_failed; unsigned long no_tx_dma_setup; unsigned long packet_buf_alloc_rx; unsigned long packet_buf_alloc_tx; unsigned long rx_control; unsigned long rx_overruns; unsigned long watchdog_events; /* Used in pf and vf */ void *stats; int enable_aim; int has_manage; int wol; int rx_process_limit; int tx_process_limit; u16 vf_ifp; /* a VF interface */ bool in_detach; /* Used only in igb_ioctl */ }; /* ****************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. * * ******************************************************************************/ typedef struct _igb_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } igb_vendor_info_t; struct igb_tx_buf { union e1000_adv_tx_desc *eop; struct mbuf *m_head; bus_dmamap_t map; }; struct igb_rx_buf { struct mbuf *m_head; struct mbuf *m_pack; bus_dmamap_t hmap; /* bus_dma map for header */ bus_dmamap_t pmap; /* bus_dma map for packet */ }; /* ** Find the number of unrefreshed RX descriptors */ static inline u16 igb_rx_unrefreshed(struct rx_ring *rxr) { struct adapter *adapter = rxr->adapter; if (rxr->next_to_check > rxr->next_to_refresh) return (rxr->next_to_check - rxr->next_to_refresh - 1); else return ((adapter->num_rx_desc + rxr->next_to_check) - rxr->next_to_refresh - 1); } #define IGB_CORE_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->core_mtx, _name, "IGB Core Lock", MTX_DEF) #define IGB_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) #define IGB_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) #define IGB_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) #define IGB_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) #define IGB_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) #define IGB_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) #define IGB_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) #define IGB_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) #define IGB_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) #define IGB_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) #define IGB_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) #define IGB_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) #define IGB_RX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->rx_mtx, MA_OWNED) #define UPDATE_VF_REG(reg, last, cur) \ { \ u32 new = E1000_READ_REG(hw, reg); \ if (new < last) \ cur += 0x100000000LL; \ last = new; \ cur &= 0xFFFFFFFF00000000LL; \ cur |= new; \ } #if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504 static __inline int drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br) { #ifdef ALTQ if (ALTQ_IS_ENABLED(&ifp->if_snd)) return (1); #endif return (!buf_ring_empty(br)); } #endif #endif /* _IF_IGB_H_ */ Index: head/sys/dev/e1000/if_lem.h =================================================================== --- head/sys/dev/e1000/if_lem.h (revision 303889) +++ head/sys/dev/e1000/if_lem.h (revision 303890) @@ -1,519 +1,519 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _LEM_H_DEFINED_ #define _LEM_H_DEFINED_ /* Tunables */ /* * EM_TXD: Maximum number of Transmit Descriptors * Valid Range: 80-256 for 82542 and 82543-based adapters * 80-4096 for others * Default Value: 256 * This value is the number of transmit descriptors allocated by the driver. * Increasing this value allows the driver to queue more transmits. Each * descriptor is 16 bytes. * Since TDLEN must be a multiple of 128 bytes, the number of transmit * descriptors must satisfy the following condition. * (num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0 */ #define EM_MIN_TXD 80 #define EM_MAX_TXD_82543 256 #define EM_MAX_TXD 4096 #define EM_DEFAULT_TXD EM_MAX_TXD_82543 /* * EM_RXD - Maximum number of receive Descriptors * Valid Range: 80-256 for 82542 and 82543-based adapters * 80-4096 for others * Default Value: 256 * This value is the number of receive descriptors allocated by the driver. * Increasing this value allows the driver to buffer more incoming packets. * Each descriptor is 16 bytes. A receive buffer is also allocated for each * descriptor. The maximum MTU size is 16110. * Since RDLEN must likewise be a multiple of 128 bytes, the number of receive * descriptors must satisfy the following condition. * (num_rx_desc * sizeof(struct e1000_rx_desc)) % 128 == 0 */ #define EM_MIN_RXD 80 #define EM_MAX_RXD_82543 256 #define EM_MAX_RXD 4096 #define EM_DEFAULT_RXD EM_MAX_RXD_82543 /* * EM_TIDV - Transmit Interrupt Delay Value * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value delays the generation of transmit interrupts in units of * 1.024 microseconds. Transmit interrupt reduction can improve CPU * efficiency if properly tuned for specific network traffic. If the * system is reporting dropped transmits, this value may be set too high * causing the driver to run out of available transmit descriptors.
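 */

/*
 * Hedged sketch (assumed helper name): each legacy descriptor is 16
 * bytes, so the "(num_tx_desc * sizeof(struct e1000_tx_desc)) % 128 == 0"
 * condition above is equivalent to requiring a multiple of 8
 * descriptors.  A tunable cleanup along these lines enforces both the
 * valid range and the alignment rule.
 */
static int
example_clamp_txd(int requested)
{
	int ntxd = requested & ~7;	/* multiple of 8 => TDLEN % 128 == 0 */

	if (ntxd < EM_MIN_TXD || ntxd > EM_MAX_TXD)
		ntxd = EM_DEFAULT_TXD;
	return (ntxd);
}

/*
 * Default transmit interrupt delay, in 1.024 usec units: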
*/ #define EM_TIDV 64 /* * EM_TADV - Transmit Absolute Interrupt Delay Value * (Not valid for 82542/82543/82544) * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * transmit interrupt is generated. Useful only if EM_TIDV is non-zero, * this value ensures that an interrupt is generated after the initial * packet is sent on the wire within the set amount of time. Proper tuning, * along with EM_TIDV, may improve traffic throughput in specific * network conditions. */ #define EM_TADV 64 /* * EM_RDTR - Receive Interrupt Delay Timer (Packet Timer) * Valid Range: 0-65535 (0=off) * Default Value: 0 * This value delays the generation of receive interrupts in units of 1.024 * microseconds. Receive interrupt reduction can improve CPU efficiency if * properly tuned for specific network traffic. Increasing this value adds * extra latency to frame reception and can end up decreasing the throughput * of TCP traffic. If the system is reporting dropped receives, this value * may be set too high, causing the driver to run out of available receive * descriptors. * * CAUTION: When setting EM_RDTR to a value other than 0, adapters * may hang (stop transmitting) under certain network conditions. * If this occurs a WATCHDOG message is logged in the system * event log. In addition, the controller is automatically reset, * restoring the network connection. To eliminate the potential * for the hang ensure that EM_RDTR is set to 0. */ #define EM_RDTR 0 /* * Receive Interrupt Absolute Delay Timer (Not valid for 82542/82543/82544) * Valid Range: 0-65535 (0=off) * Default Value: 64 * This value, in units of 1.024 microseconds, limits the delay in which a * receive interrupt is generated. Useful only if EM_RDTR is non-zero, * this value ensures that an interrupt is generated after the initial * packet is received within the set amount of time. Proper tuning, * along with EM_RDTR, may improve traffic throughput in specific network * conditions. */ #define EM_RADV 64 /* * This parameter controls the maximum duration of the transmit watchdog. */ #define EM_WATCHDOG (10 * hz) /* * This parameter controls when the driver calls the routine to reclaim * transmit descriptors. */ #define EM_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8) #define EM_TX_OP_THRESHOLD (adapter->num_tx_desc / 32) /* * This parameter controls whether or not autonegotiation is enabled. * 0 - Disable autonegotiation * 1 - Enable autonegotiation */ #define DO_AUTO_NEG 1 /* * This parameter controls whether or not the driver will wait for * autonegotiation to complete.
* 1 - Wait for autonegotiation to complete * 0 - Don't wait for autonegotiation to complete */ #define WAIT_FOR_AUTO_NEG_DEFAULT 0 /* Tunables -- End */ #define AUTONEG_ADV_DEFAULT (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \ ADVERTISE_100_HALF | ADVERTISE_100_FULL | \ ADVERTISE_1000_FULL) #define AUTO_ALL_MODES 0 /* PHY master/slave setting */ #define EM_MASTER_SLAVE e1000_ms_hw_default /* * Miscellaneous constants */ #define EM_VENDOR_ID 0x8086 #define EM_FLASH 0x0014 #define EM_JUMBO_PBA 0x00000028 #define EM_DEFAULT_PBA 0x00000030 #define EM_SMARTSPEED_DOWNSHIFT 3 #define EM_SMARTSPEED_MAX 15 #define EM_MAX_LOOP 10 #define MAX_NUM_MULTICAST_ADDRESSES 128 #define PCI_ANY_ID (~0U) #define ETHER_ALIGN 2 #define EM_FC_PAUSE_TIME 0x0680 #define EM_EEPROM_APME 0x400 #define EM_82544_APME 0x0004 /* Code compatibility between FreeBSD 6 and 7 */ #ifndef ETHER_BPF_MTAP #define ETHER_BPF_MTAP BPF_MTAP #endif /* * TDBA/RDBA should be aligned on a 16-byte boundary, but TDLEN/RDLEN must be * a multiple of 128 bytes, so we align TDBA/RDBA on a 128-byte boundary. This * also optimizes for cache line size; the hardware supports cache line sizes * up to 128. */ #define EM_DBA_ALIGN 128 #define SPEED_MODE_BIT (1<<21) /* On PCI-E MACs only */ /* PCI Config defines */ #define EM_BAR_TYPE(v) ((v) & EM_BAR_TYPE_MASK) #define EM_BAR_TYPE_MASK 0x00000001 #define EM_BAR_TYPE_MMEM 0x00000000 #define EM_BAR_TYPE_IO 0x00000001 #define EM_BAR_TYPE_FLASH 0x0014 #define EM_BAR_MEM_TYPE(v) ((v) & EM_BAR_MEM_TYPE_MASK) #define EM_BAR_MEM_TYPE_MASK 0x00000006 #define EM_BAR_MEM_TYPE_32BIT 0x00000000 #define EM_BAR_MEM_TYPE_64BIT 0x00000004 #define EM_MSIX_BAR 3 /* On 82575 */ #if __FreeBSD_version < 900000 #define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD #endif /* Defines for printing debug information */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) #define EM_MAX_SCATTER 40 #define EM_VFTA_SIZE 128 #define EM_MSIX_MASK 0x01F00000 /* For 82574 use */ #define ETH_ZLEN 60 #define ETH_ADDR_LEN 6 #define CSUM_OFFLOAD 7 /* Offload bits in mbuf flag */ /* * The 82574 has a nonstandard address for EIAC; since EIAC is only used * with MSI-X, and in the em driver only the 82574 uses MSI-X, this * single define is sufficient. */ #define EM_EIAC 0x000DC /* Used in the 82547 10Mb half-duplex workaround */ #define EM_PBA_BYTES_SHIFT 0xA #define EM_TX_HEAD_ADDR_SHIFT 7 #define EM_PBA_TX_MASK 0xFFFF0000 #define EM_FIFO_HDR 0x10 #define EM_82547_PKT_THRESH 0x3e0 /* Precision Time Sync (IEEE 1588) defines */ #define ETHERTYPE_IEEE1588 0x88F7 #define PICOSECS_PER_TICK 20833 #define TSYNC_PORT 319 /* UDP port for the protocol */ #ifdef NIC_PARAVIRT #define E1000_PARA_SUBDEV 0x1101 /* special id */ #define E1000_CSBAL 0x02830 /* csb phys. addr. low */ #define E1000_CSBAH 0x02834 /* csb phys. addr. hi */ #include #endif /* NIC_PARAVIRT */ /* * Bus dma allocation structure used by * e1000_dma_malloc and e1000_dma_free.
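 */

/*
 * Hedged sketch (assumed helper name): how the EM_BAR_* accessors
 * defined above are typically applied to a raw BAR register read when
 * deciding how to map the device; a 64-bit memory BAR occupies two
 * configuration registers.
 */
static int
example_bar_is_64bit(device_t dev, int bar)
{
	uint32_t val = pci_read_config(dev, PCIR_BAR(bar), 4);

	return (EM_BAR_TYPE(val) == EM_BAR_TYPE_MMEM &&
	    EM_BAR_MEM_TYPE(val) == EM_BAR_MEM_TYPE_64BIT);
}

/*
 * Bookkeeping for one e1000_dma_malloc allocation: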
*/ struct em_dma_alloc { bus_addr_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; int dma_nseg; }; struct adapter; struct em_int_delay_info { struct adapter *adapter; /* Back-pointer to the adapter struct */ int offset; /* Register offset to read/write */ int value; /* Current value in usecs */ }; /* Our adapter structure */ struct adapter { if_t ifp; struct e1000_hw hw; /* FreeBSD operating-system-specific structures. */ struct e1000_osdep osdep; - struct device *dev; + device_t dev; struct cdev *led_dev; struct resource *memory; struct resource *flash; struct resource *msix; struct resource *ioport; int io_rid; /* 82574 may use 3 int vectors */ struct resource *res[3]; void *tag[3]; int rid[3]; struct ifmedia media; struct callout timer; struct callout tx_fifo_timer; bool watchdog_check; int watchdog_time; int msi; int if_flags; int max_frame_size; int min_frame_size; struct mtx core_mtx; struct mtx tx_mtx; struct mtx rx_mtx; int em_insert_vlan_header; /* Task for FAST handling */ struct task link_task; struct task rxtx_task; struct task rx_task; struct task tx_task; struct taskqueue *tq; /* private task queue */ eventhandler_tag vlan_attach; eventhandler_tag vlan_detach; u32 num_vlans; /* Management and WOL features */ u32 wol; bool has_manage; bool has_amt; /* Multicast array memory */ u8 *mta; /* ** Shadow VFTA table, this is needed because ** the real vlan filter table gets cleared during ** a soft reset and the driver needs to be able ** to repopulate it. */ u32 shadow_vfta[EM_VFTA_SIZE]; /* Info about the interface */ uint8_t link_active; uint16_t link_speed; uint16_t link_duplex; uint32_t smartspeed; uint32_t fc_setting; struct em_int_delay_info tx_int_delay; struct em_int_delay_info tx_abs_int_delay; struct em_int_delay_info rx_int_delay; struct em_int_delay_info rx_abs_int_delay; struct em_int_delay_info tx_itr; /* * Transmit definitions * * We have an array of num_tx_desc descriptors (handled * by the controller) paired with an array of tx_buffers * (at tx_buffer_area). * The index of the next available descriptor is next_avail_tx_desc. * The number of remaining tx_desc is num_tx_desc_avail. */ struct em_dma_alloc txdma; /* bus_dma glue for tx desc */ struct e1000_tx_desc *tx_desc_base; uint32_t next_avail_tx_desc; uint32_t next_tx_to_clean; volatile uint16_t num_tx_desc_avail; uint16_t num_tx_desc; uint16_t last_hw_offload; uint32_t txd_cmd; struct em_buffer *tx_buffer_area; bus_dma_tag_t txtag; /* dma tag for tx */ uint32_t tx_tso; /* last tx was tso */ /* * Receive definitions * * we have an array of num_rx_desc rx_desc (handled by the * controller), and paired with an array of rx_buffers * (at rx_buffer_area). * The next pair to check on receive is at offset next_rx_desc_to_check */ struct em_dma_alloc rxdma; /* bus_dma glue for rx desc */ struct e1000_rx_desc *rx_desc_base; uint32_t next_rx_desc_to_check; uint32_t rx_buffer_len; uint16_t num_rx_desc; int rx_process_limit; struct em_buffer *rx_buffer_area; bus_dma_tag_t rxtag; bus_dmamap_t rx_sparemap; /* * First/last mbuf pointers, for * collecting multisegment RX packets. 
*/ struct mbuf *fmp; struct mbuf *lmp; /* Misc stats maintained by the driver */ unsigned long dropped_pkts; unsigned long link_irq; unsigned long mbuf_cluster_failed; unsigned long mbuf_defrag_failed; unsigned long no_tx_desc_avail1; unsigned long no_tx_desc_avail2; unsigned long no_tx_dma_setup; unsigned long no_tx_map_avail; unsigned long watchdog_events; unsigned long rx_irq; unsigned long rx_overruns; unsigned long tx_irq; /* 82547 workaround */ uint32_t tx_fifo_size; uint32_t tx_fifo_head; uint32_t tx_fifo_head_addr; uint64_t tx_fifo_reset_cnt; uint64_t tx_fifo_wrk_cnt; uint32_t tx_head_addr; /* For 82544 PCIX Workaround */ boolean_t pcix_82544; boolean_t in_detach; #ifdef NIC_SEND_COMBINING /* 0 = idle; 1xxxx int-pending; 3xxxx int + d pending + tdt */ #define MIT_PENDING_INT 0x10000 /* pending interrupt */ #define MIT_PENDING_TDT 0x30000 /* both intr and tdt write are pending */ uint32_t shadow_tdt; uint32_t sc_enable; #endif /* NIC_SEND_COMBINING */ #ifdef BATCH_DISPATCH uint32_t batch_enable; #endif /* BATCH_DISPATCH */ #ifdef NIC_PARAVIRT struct em_dma_alloc csb_mem; /* phys address */ struct paravirt_csb *csb; /* virtual addr */ uint32_t rx_retries; /* optimize rx loop */ uint32_t tdt_csb_count;// XXX stat uint32_t tdt_reg_count;// XXX stat uint32_t tdt_int_count;// XXX stat uint32_t guest_need_kick_count;// XXX stat #endif /* NIC_PARAVIRT */ struct e1000_hw_stats stats; }; /* ****************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. * * ******************************************************************************/ typedef struct _em_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } em_vendor_info_t; struct em_buffer { int next_eop; /* Index of the desc to watch */ struct mbuf *m_head; bus_dmamap_t map; /* bus_dma map for packet */ }; /* For 82544 PCIX Workaround */ typedef struct _ADDRESS_LENGTH_PAIR { uint64_t address; uint32_t length; } ADDRESS_LENGTH_PAIR, *PADDRESS_LENGTH_PAIR; typedef struct _DESCRIPTOR_PAIR { ADDRESS_LENGTH_PAIR descriptor[4]; uint32_t elements; } DESC_ARRAY, *PDESC_ARRAY; #define EM_CORE_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->core_mtx, _name, "EM Core Lock", MTX_DEF) #define EM_TX_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->tx_mtx, _name, "EM TX Lock", MTX_DEF) #define EM_RX_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->rx_mtx, _name, "EM RX Lock", MTX_DEF) #define EM_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) #define EM_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) #define EM_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) #define EM_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) #define EM_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) #define EM_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) #define EM_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) #define EM_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) #define EM_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) #define EM_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) #define EM_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) #define EM_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) #endif /* _LEM_H_DEFINED_ */ Index: head/sys/dev/ixgb/if_ixgb.h =================================================================== --- head/sys/dev/ixgb/if_ixgb.h (revision 303889) +++ head/sys/dev/ixgb/if_ixgb.h (revision 303890) @@ -1,383 +1,383 @@ 
/******************************************************************************* Copyright (c) 2001-2004, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ***************************************************************************/ /*$FreeBSD$*/ #ifndef _IXGB_H_DEFINED_ #define _IXGB_H_DEFINED_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 502000 #include #include #else #include #include #endif #include #include #include #include #include #include /* Tunables */ /* * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the * number of transmit descriptors allocated by the driver. Increasing this * value allows the driver to queue more transmits. Each descriptor is 16 * bytes. */ #define IXGB_MAX_TXD 256 /* * RxDescriptors Valid Range: 64-4096 Default Value: 1024 This value is the * number of receive descriptors allocated by the driver. Increasing this * value allows the driver to buffer more incoming packets. Each descriptor * is 16 bytes. A receive buffer is also allocated for each descriptor. The * maximum MTU size is 16110. * */ #define IXGB_MAX_RXD 1024 /* * TxIntDelay Valid Range: 0-65535 (0=off) Default Value: 32 This value * delays the generation of transmit interrupts in units of 1.024 * microseconds. Transmit interrupt reduction can improve CPU efficiency if * properly tuned for specific network traffic. If the system is reporting * dropped transmits, this value may be set too high causing the driver to * run out of available transmit descriptors. */ #define TIDV 32 /* * RxIntDelay Valid Range: 0-65535 (0=off) Default Value: 72 This value * delays the generation of receive interrupts in units of 1.024 * microseconds. Receive interrupt reduction can improve CPU efficiency if * properly tuned for specific network traffic. Increasing this value adds * extra latency to frame reception and can end up decreasing the throughput * of TCP traffic. 
If the system is reporting dropped receives, this value * may be set too high, causing the driver to run out of available receive * descriptors. * */ #define RDTR 72 /* * This parameter controls the maximum no of times the driver will loop in * the isr. Minimum Value = 1 */ #define IXGB_MAX_INTR 3 /* * Inform the stack about transmit checksum offload capabilities. */ #define IXGB_CHECKSUM_FEATURES (CSUM_TCP | CSUM_UDP) /* * This parameter controls the duration of transmit watchdog timer. */ #define IXGB_TX_TIMEOUT 5 /* set to 5 seconds */ /* * This parameter controls when the driver calls the routine to reclaim * transmit descriptors. */ #define IXGB_TX_CLEANUP_THRESHOLD IXGB_MAX_TXD / 8 /* * Flow Control Types. * 1. ixgb_fc_none - Flow Control Disabled * 2. ixgb_fc_rx_pause - Flow Control Receive Only * 3. ixgb_fc_tx_pause - Flow Control Transmit Only * 4. ixgb_fc_full - Flow Control Enabled */ #define FLOW_CONTROL_NONE ixgb_fc_none #define FLOW_CONTROL_RX_PAUSE ixgb_fc_rx_pause #define FLOW_CONTROL_TX_PAUSE ixgb_fc_tx_pause #define FLOW_CONTROL_FULL ixgb_fc_full /* * Set the flow control type. Assign one of the above flow control types to be enabled. * Default Value: FLOW_CONTROL_FULL */ #define FLOW_CONTROL FLOW_CONTROL_FULL /* * Receive Flow control low threshold (when we send a resume frame) (FCRTL) * Valid Range: 64 - 262,136 (0x40 - 0x3FFF8, 8 byte granularity) must be * less than high threshold by at least 8 bytes Default Value: 163,840 * (0x28000) */ #define FCRTL 0x28000 /* * Receive Flow control high threshold (when we send a pause frame) (FCRTH) * Valid Range: 1,536 - 262,136 (0x600 - 0x3FFF8, 8 byte granularity) Default * Value: 196,608 (0x30000) */ #define FCRTH 0x30000 /* * Flow control request timeout (how long to pause the link partner's tx) * (PAP 15:0) Valid Range: 1 - 65535 Default Value: 256 (0x100) */ #define FCPAUSE 0x100 /* Tunables -- End */ #define IXGB_VENDOR_ID 0x8086 #define IXGB_MMBA 0x0010 /* Mem base address */ #define IXGB_ROUNDUP(size, unit) (((size) + (unit) - 1) & ~((unit) - 1)) #define IOCTL_CMD_TYPE u_long #define MAX_NUM_MULTICAST_ADDRESSES 128 #define PCI_ANY_ID (~0U) #define ETHER_ALIGN 2 /* Defines for printing debug information */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define _SV_ 0 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) /* Supported RX Buffer Sizes */ #define IXGB_RXBUFFER_2048 2048 #define IXGB_RXBUFFER_4096 4096 #define IXGB_RXBUFFER_8192 8192 #define IXGB_RXBUFFER_16384 16384 #define IXGB_MAX_SCATTER 100 /* * ****************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. 
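 */

/*
 * Hedged sketch (assumed helper name; the real table and probe loop
 * live in if_ixgb.c): the match test such a table supports, with
 * PCI_ANY_ID entries acting as wildcards.
 */
static int
example_probe_match(device_t dev, unsigned int vendor_id,
    unsigned int device_id)
{
	return (pci_get_vendor(dev) == vendor_id &&
	    (pci_get_device(dev) == device_id || device_id == PCI_ANY_ID));
}

/*
 * *****************************************************************************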
* ***************************************************************************** */ typedef struct _ixgb_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } ixgb_vendor_info_t; struct ixgb_buffer { struct mbuf *m_head; bus_dmamap_t map; /* bus_dma map for packet */ }; /* * Bus dma allocation structure used by ixgb_dma_malloc and ixgb_dma_free. */ struct ixgb_dma_alloc { bus_addr_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; bus_size_t dma_size; int dma_nseg; }; typedef enum _XSUM_CONTEXT_T { OFFLOAD_NONE, OFFLOAD_TCP_IP, OFFLOAD_UDP_IP } XSUM_CONTEXT_T; /* Our adapter structure */ struct adapter { struct ifnet *ifp; struct adapter *next; struct adapter *prev; struct ixgb_hw hw; /* FreeBSD operating-system-specific structures */ struct ixgb_osdep osdep; - struct device *dev; + device_t dev; struct resource *res_memory; struct resource *res_ioport; struct resource *res_interrupt; void *int_handler_tag; struct ifmedia media; struct callout timer; int io_rid; int tx_timer; struct mtx mtx; /* Info about the board itself */ u_int32_t part_num; u_int8_t link_active; u_int16_t link_speed; u_int16_t link_duplex; u_int32_t tx_int_delay; u_int32_t tx_abs_int_delay; u_int32_t rx_int_delay; u_int32_t rx_abs_int_delay; int raidc; XSUM_CONTEXT_T active_checksum_context; /* * Transmit definitions * * We have an array of num_tx_desc descriptors (handled by the * controller) paired with an array of tx_buffers (at * tx_buffer_area). The index of the next available descriptor is * next_avail_tx_desc. The number of remaining tx_desc is * num_tx_desc_avail. */ struct ixgb_dma_alloc txdma; /* bus_dma glue for tx desc */ struct ixgb_tx_desc *tx_desc_base; u_int32_t next_avail_tx_desc; u_int32_t oldest_used_tx_desc; volatile u_int16_t num_tx_desc_avail; u_int16_t num_tx_desc; u_int32_t txd_cmd; struct ixgb_buffer *tx_buffer_area; bus_dma_tag_t txtag; /* dma tag for tx */ /* * Receive definitions * * we have an array of num_rx_desc rx_desc (handled by the controller), * and paired with an array of rx_buffers (at rx_buffer_area). 
The * next pair to check on receive is at offset next_rx_desc_to_check */ struct ixgb_dma_alloc rxdma; /* bus_dma glue for rx desc */ struct ixgb_rx_desc *rx_desc_base; u_int32_t next_rx_desc_to_check; u_int16_t num_rx_desc; u_int32_t rx_buffer_len; struct ixgb_buffer *rx_buffer_area; bus_dma_tag_t rxtag; /* dma tag for Rx */ u_int32_t next_rx_desc_to_use; /* Jumbo frame */ struct mbuf *fmp; struct mbuf *lmp; struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; /* Multicast array memory */ u_int8_t *mta; /* Misc stats maintained by the driver */ unsigned long dropped_pkts; unsigned long mbuf_alloc_failed; unsigned long mbuf_cluster_failed; unsigned long no_tx_desc_avail1; unsigned long no_tx_desc_avail2; unsigned long no_tx_map_avail; unsigned long no_tx_dma_setup; boolean_t in_detach; /* Board specific private data */ #ifdef _SV_ struct ixgb_sv_stats { uint64_t icr_rxdmt0; uint64_t icr_rxo; uint64_t icr_rxt0; uint64_t icr_TXDW; } sv_stats; unsigned long no_pkts_avail; unsigned long clean_tx_interrupts; #endif struct ixgb_hw_stats stats; }; #define IXGB_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->mtx, _name, MTX_NETWORK_LOCK, MTX_DEF) #define IXGB_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) #define IXGB_LOCK(_sc) mtx_lock(&(_sc)->mtx) #define IXGB_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define IXGB_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED) #endif /* _IXGB_H_DEFINED_ */ Index: head/sys/dev/ixgb/if_ixgb_osdep.h =================================================================== --- head/sys/dev/ixgb/if_ixgb_osdep.h (revision 303889) +++ head/sys/dev/ixgb/if_ixgb_osdep.h (revision 303890) @@ -1,120 +1,120 @@ /******************************************************************************* Copyright (c) 2001-2004, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***************************************************************************/ /*$FreeBSD$*/ #ifndef _FREEBSD_OS_H_ #define _FREEBSD_OS_H_ #include #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 502000 #include #include #else #include #include #endif #define ASSERT(x) if(!(x)) panic("IXGB: x") /* The happy-fun DELAY macro is defined in /usr/src/sys/i386/include/clock.h */ #define usec_delay(x) DELAY(x) #define msec_delay(x) DELAY(1000*(x)) #define DBG 0 #define MSGOUT(S, A, B) printf(S "\n", A, B) #define DEBUGFUNC(F) DEBUGOUT(F); #if DBG #define DEBUGOUT(S) printf(S "\n") #define DEBUGOUT1(S,A) printf(S "\n",A) #define DEBUGOUT2(S,A,B) printf(S "\n",A,B) #define DEBUGOUT3(S,A,B,C) printf(S "\n",A,B,C) #define DEBUGOUT7(S,A,B,C,D,E,F,G) printf(S "\n",A,B,C,D,E,F,G) #else #define DEBUGOUT(S) #define DEBUGOUT1(S,A) #define DEBUGOUT2(S,A,B) #define DEBUGOUT3(S,A,B,C) #define DEBUGOUT7(S,A,B,C,D,E,F,G) #endif #define FALSE 0 #define TRUE 1 #define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */ #define PCI_COMMAND_REGISTER PCIR_COMMAND #define le16_to_cpu struct ixgb_osdep { bus_space_tag_t mem_bus_space_tag; bus_space_handle_t mem_bus_space_handle; - struct device *dev; + device_t dev; }; #define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS) #define IXGB_READ_REG(a, reg) (\ bus_space_read_4( ((struct ixgb_osdep *)(a)->back)->mem_bus_space_tag, \ ((struct ixgb_osdep *)(a)->back)->mem_bus_space_handle, \ IXGB_##reg)) #define IXGB_WRITE_REG(a, reg, value) (\ bus_space_write_4( ((struct ixgb_osdep *)(a)->back)->mem_bus_space_tag, \ ((struct ixgb_osdep *)(a)->back)->mem_bus_space_handle, \ IXGB_##reg, value)) #define IXGB_READ_REG_ARRAY(a, reg, offset) (\ bus_space_read_4( ((struct ixgb_osdep *)(a)->back)->mem_bus_space_tag, \ ((struct ixgb_osdep *)(a)->back)->mem_bus_space_handle, \ (IXGB_##reg + ((offset) << 2)))) #define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) (\ bus_space_write_4( ((struct ixgb_osdep *)(a)->back)->mem_bus_space_tag, \ ((struct ixgb_osdep *)(a)->back)->mem_bus_space_handle, \ (IXGB_##reg + ((offset) << 2)), value)) #endif /* _FREEBSD_OS_H_ */ Index: head/sys/dev/ixgbe/ixgbe.h =================================================================== --- head/sys/dev/ixgbe/ixgbe.h (revision 303889) +++ head/sys/dev/ixgbe/ixgbe.h (revision 303890) @@ -1,900 +1,900 @@ /****************************************************************************** Copyright (c) 2001-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXGBE_H_ #define _IXGBE_H_ #include #include #ifndef IXGBE_LEGACY_TX #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef PCI_IOV #include #include #include #endif #include "ixgbe_api.h" #include "ixgbe_common.h" #include "ixgbe_phy.h" #include "ixgbe_vf.h" #ifdef PCI_IOV #include "ixgbe_common.h" #include "ixgbe_mbx.h" #endif /* Tunables */ /* * TxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the * number of transmit descriptors allocated by the driver. Increasing this * value allows the driver to queue more transmits. Each descriptor is 16 * bytes. Performance tests have shown the 2K value to be optimal for top * performance. */ #define DEFAULT_TXD 1024 #define PERFORM_TXD 2048 #define MAX_TXD 4096 #define MIN_TXD 64 /* * RxDescriptors Valid Range: 64-4096 Default Value: 256 This value is the * number of receive descriptors allocated for each RX queue. Increasing this * value allows the driver to buffer more incoming packets. Each descriptor * is 16 bytes. A receive buffer is also allocated for each descriptor. * * Note: with 8 rings and a dual port card, it is possible to bump up * against the system mbuf pool limit; tune nmbclusters * to adjust for this. */ #define DEFAULT_RXD 1024 #define PERFORM_RXD 2048 #define MAX_RXD 4096 #define MIN_RXD 64 /* Alignment for rings */ #define DBA_ALIGN 128 /* * This is the maximum watchdog interval, i.e. the longest time that may * pass between two TX clean operations; such cleaning only happens while * the TX hardware is functioning. */ #define IXGBE_WATCHDOG (10 * hz) /* * These parameters control when the driver calls the routine to reclaim * transmit descriptors. */ #define IXGBE_TX_CLEANUP_THRESHOLD (adapter->num_tx_desc / 8) #define IXGBE_TX_OP_THRESHOLD (adapter->num_tx_desc / 32) /* These defines are used in MTU calculations */ #define IXGBE_MAX_FRAME_SIZE 9728 #define IXGBE_MTU_HDR (ETHER_HDR_LEN + ETHER_CRC_LEN) #define IXGBE_MTU_HDR_VLAN (ETHER_HDR_LEN + ETHER_CRC_LEN + \ ETHER_VLAN_ENCAP_LEN) #define IXGBE_MAX_MTU (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR) #define IXGBE_MAX_MTU_VLAN (IXGBE_MAX_FRAME_SIZE - IXGBE_MTU_HDR_VLAN) /* Flow control constants */ #define IXGBE_FC_PAUSE 0xFFFF #define IXGBE_FC_HI 0x20000 #define IXGBE_FC_LO 0x10000 /* * Used for optimizing small rx mbufs. Effort is made to keep the copy * small and aligned for the CPU L1 cache. * * MHLEN is typically 168 bytes, giving us 8-byte alignment. Getting * the 32-byte alignment needed for the fast bcopy results in 8 bytes being * wasted.
Getting 64 byte alignment, which _should_ be ideal for * modern Intel CPUs, results in 40 bytes wasted and a significant drop * in observed efficiency of the optimization, 97.9% -> 81.8%. */ #if __FreeBSD_version < 1002000 #define MPKTHSIZE (sizeof(struct m_hdr) + sizeof(struct pkthdr)) #endif #define IXGBE_RX_COPY_HDR_PADDED ((((MPKTHSIZE - 1) / 32) + 1) * 32) #define IXGBE_RX_COPY_LEN (MSIZE - IXGBE_RX_COPY_HDR_PADDED) #define IXGBE_RX_COPY_ALIGN (IXGBE_RX_COPY_HDR_PADDED - MPKTHSIZE) /* Keep older OS drivers building... */ #if !defined(SYSCTL_ADD_UQUAD) #define SYSCTL_ADD_UQUAD SYSCTL_ADD_QUAD #endif /* Defines for printing debug information */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define INIT_DEBUGOUT(S) if (DEBUG_INIT) printf(S "\n") #define INIT_DEBUGOUT1(S, A) if (DEBUG_INIT) printf(S "\n", A) #define INIT_DEBUGOUT2(S, A, B) if (DEBUG_INIT) printf(S "\n", A, B) #define IOCTL_DEBUGOUT(S) if (DEBUG_IOCTL) printf(S "\n") #define IOCTL_DEBUGOUT1(S, A) if (DEBUG_IOCTL) printf(S "\n", A) #define IOCTL_DEBUGOUT2(S, A, B) if (DEBUG_IOCTL) printf(S "\n", A, B) #define HW_DEBUGOUT(S) if (DEBUG_HW) printf(S "\n") #define HW_DEBUGOUT1(S, A) if (DEBUG_HW) printf(S "\n", A) #define HW_DEBUGOUT2(S, A, B) if (DEBUG_HW) printf(S "\n", A, B) #define MAX_NUM_MULTICAST_ADDRESSES 128 #define IXGBE_82598_SCATTER 100 #define IXGBE_82599_SCATTER 32 #define MSIX_82598_BAR 3 #define MSIX_82599_BAR 4 #define IXGBE_TSO_SIZE 262140 #define IXGBE_RX_HDR 128 #define IXGBE_VFTA_SIZE 128 #define IXGBE_BR_SIZE 4096 #define IXGBE_QUEUE_MIN_FREE 32 #define IXGBE_MAX_TX_BUSY 10 #define IXGBE_QUEUE_HUNG 0x80000000 #define IXV_EITR_DEFAULT 128 /* Supported offload bits in mbuf flag */ #if __FreeBSD_version >= 1000000 #define CSUM_OFFLOAD (CSUM_IP_TSO|CSUM_IP6_TSO|CSUM_IP| \ CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP| \ CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP) #elif __FreeBSD_version >= 800000 #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) #else #define CSUM_OFFLOAD (CSUM_IP|CSUM_TCP|CSUM_UDP) #endif /* Backward compatibility items for very old versions */ #ifndef pci_find_cap #define pci_find_cap pci_find_extcap #endif #ifndef DEVMETHOD_END #define DEVMETHOD_END { NULL, NULL } #endif /* * Interrupt Moderation parameters */ #define IXGBE_LOW_LATENCY 128 #define IXGBE_AVE_LATENCY 400 #define IXGBE_BULK_LATENCY 1200 /* Using 1FF (the max value), the interval is ~1.05ms */ #define IXGBE_LINK_ITR_QUANTA 0x1FF #define IXGBE_LINK_ITR ((IXGBE_LINK_ITR_QUANTA << 3) & \ IXGBE_EITR_ITR_INT_MASK) /* MAC type macros */ #define IXGBE_IS_X550VF(_adapter) \ ((_adapter->hw.mac.type == ixgbe_mac_X550_vf) || \ (_adapter->hw.mac.type == ixgbe_mac_X550EM_x_vf)) #define IXGBE_IS_VF(_adapter) \ (IXGBE_IS_X550VF(_adapter) || \ (_adapter->hw.mac.type == ixgbe_mac_X540_vf) || \ (_adapter->hw.mac.type == ixgbe_mac_82599_vf)) #ifdef PCI_IOV #define IXGBE_VF_INDEX(vmdq) ((vmdq) / 32) #define IXGBE_VF_BIT(vmdq) (1 << ((vmdq) % 32)) #define IXGBE_VT_MSG_MASK 0xFFFF #define IXGBE_VT_MSGINFO(msg) \ (((msg) & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT) #define IXGBE_VF_GET_QUEUES_RESP_LEN 5 #define IXGBE_API_VER_1_0 0 #define IXGBE_API_VER_2_0 1 /* Solaris API. Not supported. 
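 */

/*
 * Hedged sketch (assumed helper names): the IXGBE_VF_INDEX/IXGBE_VF_BIT
 * pair above addresses one VF's bit inside a register array that packs
 * 32 VFs per 32-bit word (e.g. the per-VF enable/reset-event arrays).
 */
static inline void
example_vf_bit_set(uint32_t *words, u_int vmdq)
{
	words[IXGBE_VF_INDEX(vmdq)] |= IXGBE_VF_BIT(vmdq);
}

static inline int
example_vf_bit_test(const uint32_t *words, u_int vmdq)
{
	return ((words[IXGBE_VF_INDEX(vmdq)] & IXGBE_VF_BIT(vmdq)) != 0);
}

/*
 * Mailbox API versions, continued: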
*/ #define IXGBE_API_VER_1_1 2 #define IXGBE_API_VER_UNKNOWN UINT16_MAX enum ixgbe_iov_mode { IXGBE_64_VM, IXGBE_32_VM, IXGBE_NO_VM }; #endif /* PCI_IOV */ /* ***************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. * ***************************************************************************** */ typedef struct _ixgbe_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } ixgbe_vendor_info_t; struct ixgbe_tx_buf { union ixgbe_adv_tx_desc *eop; struct mbuf *m_head; bus_dmamap_t map; }; struct ixgbe_rx_buf { struct mbuf *buf; struct mbuf *fmp; bus_dmamap_t pmap; u_int flags; #define IXGBE_RX_COPY 0x01 uint64_t addr; }; /* * Bus dma allocation structure used by ixgbe_dma_malloc and ixgbe_dma_free. */ struct ixgbe_dma_alloc { bus_addr_t dma_paddr; caddr_t dma_vaddr; bus_dma_tag_t dma_tag; bus_dmamap_t dma_map; bus_dma_segment_t dma_seg; bus_size_t dma_size; int dma_nseg; }; struct ixgbe_mc_addr { u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; u32 vmdq; }; /* ** Driver queue struct: this is the interrupt container ** for the associated tx and rx ring. */ struct ix_queue { struct adapter *adapter; u32 msix; /* This queue's MSIX vector */ u32 eims; /* This queue's EIMS bit */ u32 eitr_setting; u32 me; struct resource *res; void *tag; int busy; struct tx_ring *txr; struct rx_ring *rxr; struct task que_task; struct taskqueue *tq; u64 irqs; }; /* * The transmit ring, one per queue */ struct tx_ring { struct adapter *adapter; struct mtx tx_mtx; u32 me; u32 tail; int busy; union ixgbe_adv_tx_desc *tx_base; struct ixgbe_tx_buf *tx_buffers; struct ixgbe_dma_alloc txdma; volatile u16 tx_avail; u16 next_avail_desc; u16 next_to_clean; u16 num_desc; u32 txd_cmd; bus_dma_tag_t txtag; char mtx_name[16]; #ifndef IXGBE_LEGACY_TX struct buf_ring *br; struct task txq_task; #endif #ifdef IXGBE_FDIR u16 atr_sample; u16 atr_count; #endif u32 bytes; /* used for AIM */ u32 packets; /* Soft Stats */ unsigned long tso_tx; unsigned long no_tx_map_avail; unsigned long no_tx_dma_setup; u64 no_desc_avail; u64 total_packets; }; /* * The Receive ring, one per rx queue */ struct rx_ring { struct adapter *adapter; struct mtx rx_mtx; u32 me; u32 tail; union ixgbe_adv_rx_desc *rx_base; struct ixgbe_dma_alloc rxdma; struct lro_ctrl lro; bool lro_enabled; bool hw_rsc; bool vtag_strip; u16 next_to_refresh; u16 next_to_check; u16 num_desc; u16 mbuf_sz; char mtx_name[16]; struct ixgbe_rx_buf *rx_buffers; bus_dma_tag_t ptag; u32 bytes; /* Used for AIM calc */ u32 packets; /* Soft stats */ u64 rx_irq; u64 rx_copies; u64 rx_packets; u64 rx_bytes; u64 rx_discarded; u64 rsc_num; #ifdef IXGBE_FDIR u64 flm; #endif }; #ifdef PCI_IOV #define IXGBE_VF_CTS (1 << 0) /* VF is clear to send. */ #define IXGBE_VF_CAP_MAC (1 << 1) /* VF is permitted to change MAC. */ #define IXGBE_VF_CAP_VLAN (1 << 2) /* VF is permitted to join vlans. */ #define IXGBE_VF_ACTIVE (1 << 3) /* VF is active. 
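/*
 * An illustrative sketch, not from the original header, of how a
 * vendor_info_array is conventionally walked at probe time in this
 * driver family; ex_vendor_info_array and ex_probe_match are
 * hypothetical names, a zero subvendor/subdevice entry acts as a
 * wildcard, and the all-zero last row terminates the walk.
 */
#include <stddef.h>

struct ex_vendor_info {		/* mirrors ixgbe_vendor_info_t above */
	unsigned int vendor_id;
	unsigned int device_id;
	unsigned int subvendor_id;
	unsigned int subdevice_id;
	unsigned int index;
};

static const struct ex_vendor_info ex_vendor_info_array[] = {
	{ 0x8086, 0x10FB, 0, 0, 0 },	/* e.g. 82599 SFP+ (illustrative) */
	{ 0, 0, 0, 0, 0 }		/* end-of-table marker */
};

static const struct ex_vendor_info *
ex_probe_match(unsigned int vendor, unsigned int device,
    unsigned int subvendor, unsigned int subdevice)
{
	const struct ex_vendor_info *ent;

	for (ent = ex_vendor_info_array; ent->vendor_id != 0; ent++) {
		if (vendor == ent->vendor_id && device == ent->device_id &&
		    (subvendor == ent->subvendor_id ||
		    ent->subvendor_id == 0) &&
		    (subdevice == ent->subdevice_id ||
		    ent->subdevice_id == 0))
			return (ent);
	}
	return (NULL);
}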
*/ #define IXGBE_MAX_VF_MC 30 /* Max number of multicast entries */ struct ixgbe_vf { u_int pool; u_int rar_index; u_int max_frame_size; uint32_t flags; uint8_t ether_addr[ETHER_ADDR_LEN]; uint16_t mc_hash[IXGBE_MAX_VF_MC]; uint16_t num_mc_hashes; uint16_t default_vlan; uint16_t vlan_tag; uint16_t api_ver; }; #endif /* PCI_IOV */ /* Our adapter structure */ struct adapter { struct ixgbe_hw hw; struct ixgbe_osdep osdep; - struct device *dev; + device_t dev; struct ifnet *ifp; struct resource *pci_mem; struct resource *msix_mem; /* * Interrupt resources: this set is * either used for legacy, or for Link * when doing MSIX */ void *tag; struct resource *res; struct ifmedia media; struct callout timer; int msix; int if_flags; struct mtx core_mtx; eventhandler_tag vlan_attach; eventhandler_tag vlan_detach; u16 num_vlans; u16 num_queues; /* ** Shadow VFTA table; this is needed because ** the real vlan filter table gets cleared during ** a soft reset and the driver needs to be able ** to repopulate it. */ u32 shadow_vfta[IXGBE_VFTA_SIZE]; /* Info about the interface */ u32 optics; u32 fc; /* local flow ctrl setting */ int advertise; /* link speeds */ bool enable_aim; /* adaptive interrupt moderation */ bool link_active; u16 max_frame_size; u16 num_segs; u32 link_speed; bool link_up; u32 vector; u16 dmac; bool eee_enabled; u32 phy_layer; /* Power management-related */ bool wol_support; u32 wufc; /* Mbuf cluster size */ u32 rx_mbuf_sz; /* Support for pluggable optics */ bool sfp_probe; struct task link_task; /* Link tasklet */ struct task mod_task; /* SFP tasklet */ struct task msf_task; /* Multispeed Fiber */ #ifdef PCI_IOV struct task mbx_task; /* VF -> PF mailbox interrupt */ #endif /* PCI_IOV */ #ifdef IXGBE_FDIR int fdir_reinit; struct task fdir_task; #endif struct task phy_task; /* PHY intr tasklet */ struct taskqueue *tq; /* ** Queues: ** This is the irq holder; it has ** an RX/TX pair of rings associated ** with it. */ struct ix_queue *queues; /* * Transmit rings: * Allocated at run time, an array of rings. */ struct tx_ring *tx_rings; u32 num_tx_desc; u32 tx_process_limit; /* * Receive rings: * Allocated at run time, an array of rings.
*/ struct rx_ring *rx_rings; u64 active_queues; u32 num_rx_desc; u32 rx_process_limit; /* Multicast array memory */ struct ixgbe_mc_addr *mta; int num_vfs; int pool; #ifdef PCI_IOV struct ixgbe_vf *vfs; #endif #ifdef DEV_NETMAP void (*init_locked)(struct adapter *); void (*stop_locked)(void *); #endif /* Misc stats maintained by the driver */ unsigned long dropped_pkts; unsigned long mbuf_defrag_failed; unsigned long mbuf_header_failed; unsigned long mbuf_packet_failed; unsigned long watchdog_events; unsigned long link_irq; union { struct ixgbe_hw_stats pf; struct ixgbevf_hw_stats vf; } stats; #if __FreeBSD_version >= 1100036 /* counter(9) stats */ u64 ipackets; u64 ierrors; u64 opackets; u64 oerrors; u64 ibytes; u64 obytes; u64 imcasts; u64 omcasts; u64 iqdrops; u64 noproto; #endif }; /* Precision Time Sync (IEEE 1588) defines */ #define ETHERTYPE_IEEE1588 0x88F7 #define PICOSECS_PER_TICK 20833 #define TSYNC_UDP_PORT 319 /* UDP port for the protocol */ #define IXGBE_ADVTXD_TSTAMP 0x00080000 #define IXGBE_CORE_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->core_mtx, _name, "IXGBE Core Lock", MTX_DEF) #define IXGBE_CORE_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->core_mtx) #define IXGBE_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->tx_mtx) #define IXGBE_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->rx_mtx) #define IXGBE_CORE_LOCK(_sc) mtx_lock(&(_sc)->core_mtx) #define IXGBE_TX_LOCK(_sc) mtx_lock(&(_sc)->tx_mtx) #define IXGBE_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->tx_mtx) #define IXGBE_RX_LOCK(_sc) mtx_lock(&(_sc)->rx_mtx) #define IXGBE_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->core_mtx) #define IXGBE_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->tx_mtx) #define IXGBE_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->rx_mtx) #define IXGBE_CORE_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->core_mtx, MA_OWNED) #define IXGBE_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->tx_mtx, MA_OWNED) /* For backward compatibility */ #if !defined(PCIER_LINK_STA) #define PCIER_LINK_STA PCIR_EXPRESS_LINK_STA #endif /* Stats macros */ #if __FreeBSD_version >= 1100036 #define IXGBE_SET_IPACKETS(sc, count) (sc)->ipackets = (count) #define IXGBE_SET_IERRORS(sc, count) (sc)->ierrors = (count) #define IXGBE_SET_OPACKETS(sc, count) (sc)->opackets = (count) #define IXGBE_SET_OERRORS(sc, count) (sc)->oerrors = (count) #define IXGBE_SET_COLLISIONS(sc, count) #define IXGBE_SET_IBYTES(sc, count) (sc)->ibytes = (count) #define IXGBE_SET_OBYTES(sc, count) (sc)->obytes = (count) #define IXGBE_SET_IMCASTS(sc, count) (sc)->imcasts = (count) #define IXGBE_SET_OMCASTS(sc, count) (sc)->omcasts = (count) #define IXGBE_SET_IQDROPS(sc, count) (sc)->iqdrops = (count) #else #define IXGBE_SET_IPACKETS(sc, count) (sc)->ifp->if_ipackets = (count) #define IXGBE_SET_IERRORS(sc, count) (sc)->ifp->if_ierrors = (count) #define IXGBE_SET_OPACKETS(sc, count) (sc)->ifp->if_opackets = (count) #define IXGBE_SET_OERRORS(sc, count) (sc)->ifp->if_oerrors = (count) #define IXGBE_SET_COLLISIONS(sc, count) (sc)->ifp->if_collisions = (count) #define IXGBE_SET_IBYTES(sc, count) (sc)->ifp->if_ibytes = (count) #define IXGBE_SET_OBYTES(sc, count) (sc)->ifp->if_obytes = (count) #define IXGBE_SET_IMCASTS(sc, count) (sc)->ifp->if_imcasts = (count) #define IXGBE_SET_OMCASTS(sc, count) (sc)->ifp->if_omcasts = (count) #define IXGBE_SET_IQDROPS(sc, count) (sc)->ifp->if_iqdrops = (count) #endif /* External PHY register addresses */ #define IXGBE_PHY_CURRENT_TEMP 0xC820 #define IXGBE_PHY_OVERTEMP_STATUS 0xC830 /* Sysctl help messages; displayed with sysctl -d */ #define IXGBE_SYSCTL_DESC_ADV_SPEED \ "\nControl advertised link speed using 
these flags:\n" \ "\t0x1 - advertise 100M\n" \ "\t0x2 - advertise 1G\n" \ "\t0x4 - advertise 10G\n\n" \ "\t100M is only supported on certain 10GBaseT adapters.\n" #define IXGBE_SYSCTL_DESC_SET_FC \ "\nSet flow control mode using these values:\n" \ "\t0 - off\n" \ "\t1 - rx pause\n" \ "\t2 - tx pause\n" \ "\t3 - tx and rx pause" static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) { switch (hw->phy.type) { case ixgbe_phy_sfp_avago: case ixgbe_phy_sfp_ftl: case ixgbe_phy_sfp_intel: case ixgbe_phy_sfp_unknown: case ixgbe_phy_sfp_passive_tyco: case ixgbe_phy_sfp_passive_unknown: case ixgbe_phy_qsfp_passive_unknown: case ixgbe_phy_qsfp_active_unknown: case ixgbe_phy_qsfp_intel: case ixgbe_phy_qsfp_unknown: return TRUE; default: return FALSE; } } /* Workaround to make 8.0 buildable */ #if __FreeBSD_version >= 800000 && __FreeBSD_version < 800504 static __inline int drbr_needs_enqueue(struct ifnet *ifp, struct buf_ring *br) { #ifdef ALTQ if (ALTQ_IS_ENABLED(&ifp->if_snd)) return (1); #endif return (!buf_ring_empty(br)); } #endif /* ** Find the number of unrefreshed RX descriptors */ static inline u16 ixgbe_rx_unrefreshed(struct rx_ring *rxr) { if (rxr->next_to_check > rxr->next_to_refresh) return (rxr->next_to_check - rxr->next_to_refresh - 1); else return ((rxr->num_desc + rxr->next_to_check) - rxr->next_to_refresh - 1); } /* ** This checks for a zero MAC address, which is likely ** unless the admin on the host has assigned one. */ static inline bool ixv_check_ether_addr(u8 *addr) { bool status = TRUE; if ((addr[0] == 0 && addr[1] == 0 && addr[2] == 0 && addr[3] == 0 && addr[4] == 0 && addr[5] == 0)) status = FALSE; return (status); } /* Shared Prototypes */ #ifdef IXGBE_LEGACY_TX void ixgbe_start(struct ifnet *); void ixgbe_start_locked(struct tx_ring *, struct ifnet *); #else /* !
IXGBE_LEGACY_TX */ int ixgbe_mq_start(struct ifnet *, struct mbuf *); int ixgbe_mq_start_locked(struct ifnet *, struct tx_ring *); void ixgbe_qflush(struct ifnet *); void ixgbe_deferred_mq_start(void *, int); #endif /* IXGBE_LEGACY_TX */ int ixgbe_allocate_queues(struct adapter *); int ixgbe_allocate_transmit_buffers(struct tx_ring *); int ixgbe_setup_transmit_structures(struct adapter *); void ixgbe_free_transmit_structures(struct adapter *); int ixgbe_allocate_receive_buffers(struct rx_ring *); int ixgbe_setup_receive_structures(struct adapter *); void ixgbe_free_receive_structures(struct adapter *); void ixgbe_txeof(struct tx_ring *); bool ixgbe_rxeof(struct ix_queue *); int ixgbe_dma_malloc(struct adapter *, bus_size_t, struct ixgbe_dma_alloc *, int); void ixgbe_dma_free(struct adapter *, struct ixgbe_dma_alloc *); #ifdef PCI_IOV static inline boolean_t ixgbe_vf_mac_changed(struct ixgbe_vf *vf, const uint8_t *mac) { return (bcmp(mac, vf->ether_addr, ETHER_ADDR_LEN) != 0); } static inline void ixgbe_send_vf_msg(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) { if (vf->flags & IXGBE_VF_CTS) msg |= IXGBE_VT_MSGTYPE_CTS; ixgbe_write_mbx(&adapter->hw, &msg, 1, vf->pool); } static inline void ixgbe_send_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) { msg &= IXGBE_VT_MSG_MASK; ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_ACK); } static inline void ixgbe_send_vf_nack(struct adapter *adapter, struct ixgbe_vf *vf, u32 msg) { msg &= IXGBE_VT_MSG_MASK; ixgbe_send_vf_msg(adapter, vf, msg | IXGBE_VT_MSGTYPE_NACK); } static inline void ixgbe_process_vf_ack(struct adapter *adapter, struct ixgbe_vf *vf) { if (!(vf->flags & IXGBE_VF_CTS)) ixgbe_send_vf_nack(adapter, vf, 0); } static inline enum ixgbe_iov_mode ixgbe_get_iov_mode(struct adapter *adapter) { if (adapter->num_vfs == 0) return (IXGBE_NO_VM); if (adapter->num_queues <= 2) return (IXGBE_64_VM); else if (adapter->num_queues <= 4) return (IXGBE_32_VM); else return (IXGBE_NO_VM); } static inline u16 ixgbe_max_vfs(enum ixgbe_iov_mode mode) { /* * We return odd numbers below because we * reserve 1 VM's worth of queues for the PF. 
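/*
 * A standalone check, not from the original header, of the circular
 * index arithmetic in ixgbe_rx_unrefreshed() earlier in this header:
 * one slot is deliberately left out of the count so that a completely
 * full ring remains distinguishable from an empty one.
 */
#include <stdio.h>
#include <stdint.h>

static uint16_t
ex_rx_unrefreshed(uint16_t num_desc, uint16_t check, uint16_t refresh)
{
	if (check > refresh)
		return (check - refresh - 1);
	return ((num_desc + check) - refresh - 1);
}

int
main(void)
{
	/* no wrap: descriptors 101..499 still await refresh */
	printf("%u\n", ex_rx_unrefreshed(1024, 500, 100));	/* 399 */
	/* wrapped: descriptors 1001..1023 plus 0..9 */
	printf("%u\n", ex_rx_unrefreshed(1024, 10, 1000));	/* 33 */
	return (0);
}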
*/ switch (mode) { case IXGBE_64_VM: return (63); case IXGBE_32_VM: return (31); case IXGBE_NO_VM: default: return (0); } } static inline int ixgbe_vf_queues(enum ixgbe_iov_mode mode) { switch (mode) { case IXGBE_64_VM: return (2); case IXGBE_32_VM: return (4); case IXGBE_NO_VM: default: return (0); } } static inline int ixgbe_vf_que_index(enum ixgbe_iov_mode mode, u32 vfnum, int num) { return ((vfnum * ixgbe_vf_queues(mode)) + num); } static inline int ixgbe_pf_que_index(enum ixgbe_iov_mode mode, int num) { return (ixgbe_vf_que_index(mode, ixgbe_max_vfs(mode), num)); } static inline void ixgbe_update_max_frame(struct adapter * adapter, int max_frame) { if (adapter->max_frame_size < max_frame) adapter->max_frame_size = max_frame; } static inline u32 ixgbe_get_mrqc(enum ixgbe_iov_mode mode) { u32 mrqc = 0; switch (mode) { case IXGBE_64_VM: mrqc = IXGBE_MRQC_VMDQRSS64EN; break; case IXGBE_32_VM: mrqc = IXGBE_MRQC_VMDQRSS32EN; break; case IXGBE_NO_VM: mrqc = 0; break; default: panic("Unexpected SR-IOV mode %d", mode); } return(mrqc); } static inline u32 ixgbe_get_mtqc(enum ixgbe_iov_mode mode) { uint32_t mtqc = 0; switch (mode) { case IXGBE_64_VM: mtqc |= IXGBE_MTQC_64VF | IXGBE_MTQC_VT_ENA; break; case IXGBE_32_VM: mtqc |= IXGBE_MTQC_32VF | IXGBE_MTQC_VT_ENA; break; case IXGBE_NO_VM: mtqc = IXGBE_MTQC_64Q_1PB; break; default: panic("Unexpected SR-IOV mode %d", mode); } return(mtqc); } #endif /* PCI_IOV */ #endif /* _IXGBE_H_ */ Index: head/sys/dev/ixl/i40e_osdep.h =================================================================== --- head/sys/dev/ixl/i40e_osdep.h (revision 303889) +++ head/sys/dev/ixl/i40e_osdep.h (revision 303890) @@ -1,239 +1,239 @@ /****************************************************************************** Copyright (c) 2013-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
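/*
 * A standalone sketch, not from the original header, of the queue
 * partitioning implied by the SR-IOV helpers at the end of ixgbe.h
 * above: one pool's worth of queues is held back for the PF, so 64-pool
 * mode exposes 63 VFs with 2 queues each and 32-pool mode exposes 31
 * VFs with 4 queues each; an absolute queue number is pool * queues +
 * offset. The ex_ names are local stand-ins for the driver's inlines.
 */
#include <stdio.h>

enum ex_iov_mode { EX_64_VM, EX_32_VM, EX_NO_VM };

static int
ex_max_vfs(enum ex_iov_mode mode)
{
	return (mode == EX_64_VM ? 63 : (mode == EX_32_VM ? 31 : 0));
}

static int
ex_vf_queues(enum ex_iov_mode mode)
{
	return (mode == EX_64_VM ? 2 : (mode == EX_32_VM ? 4 : 0));
}

static int
ex_vf_que_index(enum ex_iov_mode mode, int vfnum, int num)
{
	return ((vfnum * ex_vf_queues(mode)) + num);
}

int
main(void)
{
	/* VF 5, queue 1, 32-pool mode: 5 * 4 + 1 = queue 21 */
	printf("vf5/q1 -> %d\n", ex_vf_que_index(EX_32_VM, 5, 1));
	/* PF queues start after the last VF: 31 * 4 = queue 124 */
	printf("pf/q0 -> %d\n",
	    ex_vf_que_index(EX_32_VM, ex_max_vfs(EX_32_VM), 0));
	return (0);
}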
******************************************************************************/ /*$FreeBSD$*/ #ifndef _I40E_OSDEP_H_ #define _I40E_OSDEP_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define i40e_usec_delay(x) DELAY(x) #define i40e_msec_delay(x) DELAY(1000 * (x)) #define DBG 0 #define MSGOUT(S, A, B) printf(S "\n", A, B) #define DEBUGFUNC(F) DEBUGOUT(F); #if DBG #define DEBUGOUT(S) printf(S "\n") #define DEBUGOUT1(S,A) printf(S "\n",A) #define DEBUGOUT2(S,A,B) printf(S "\n",A,B) #define DEBUGOUT3(S,A,B,C) printf(S "\n",A,B,C) #define DEBUGOUT7(S,A,B,C,D,E,F,G) printf(S "\n",A,B,C,D,E,F,G) #else #define DEBUGOUT(S) #define DEBUGOUT1(S,A) #define DEBUGOUT2(S,A,B) #define DEBUGOUT3(S,A,B,C) #define DEBUGOUT6(S,A,B,C,D,E,F) #define DEBUGOUT7(S,A,B,C,D,E,F,G) #endif /* Remove unused shared code macros */ #define UNREFERENCED_PARAMETER(_p) #define UNREFERENCED_1PARAMETER(_p) #define UNREFERENCED_2PARAMETER(_p, _q) #define UNREFERENCED_3PARAMETER(_p, _q, _r) #define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) #define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t) #define STATIC static #define INLINE inline #define FALSE 0 #define false 0 /* shared code requires this */ #define TRUE 1 #define true 1 #define CMD_MEM_WRT_INVALIDATE 0x0010 /* BIT_4 */ #define PCI_COMMAND_REGISTER PCIR_COMMAND #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0])) #define i40e_memset(a, b, c, d) memset((a), (b), (c)) #define i40e_memcpy(a, b, c, d) memcpy((a), (b), (c)) #define CPU_TO_LE16(o) htole16(o) #define CPU_TO_LE32(s) htole32(s) #define CPU_TO_LE64(h) htole64(h) #define LE16_TO_CPU(a) le16toh(a) #define LE32_TO_CPU(c) le32toh(c) #define LE64_TO_CPU(k) le64toh(k) #define I40E_NTOHS(a) ntohs(a) #define I40E_NTOHL(a) ntohl(a) #define I40E_HTONS(a) htons(a) #define I40E_HTONL(a) htonl(a) #define FIELD_SIZEOF(x, y) (sizeof(((x*)0)->y)) typedef uint8_t u8; typedef int8_t s8; typedef uint16_t u16; typedef int16_t s16; typedef uint32_t u32; typedef int32_t s32; typedef uint64_t u64; /* long string relief */ typedef enum i40e_status_code i40e_status; #define __le16 u16 #define __le32 u32 #define __le64 u64 #define __be16 u16 #define __be32 u32 #define __be64 u64 /* SW spinlock */ struct i40e_spinlock { struct mtx mutex; }; #define le16_to_cpu #if defined(__amd64__) || defined(i386) static __inline void prefetch(void *x) { __asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x)); } #else #define prefetch(x) #endif struct i40e_osdep { bus_space_tag_t mem_bus_space_tag; bus_space_handle_t mem_bus_space_handle; bus_size_t mem_bus_space_size; uint32_t flush_reg; - struct device *dev; + device_t dev; }; struct i40e_dma_mem { void *va; u64 pa; bus_dma_tag_t tag; bus_dmamap_t map; bus_dma_segment_t seg; bus_size_t size; int nseg; int flags; }; struct i40e_virt_mem { void *va; u32 size; }; struct i40e_hw; /* forward decl */ u16 i40e_read_pci_cfg(struct i40e_hw *, u32); void i40e_write_pci_cfg(struct i40e_hw *, u32, u16); /* ** i40e_debug - OS dependent version of shared code debug printing */ enum i40e_debug_mask; #define i40e_debug(h, m, s, ...) i40e_debug_shared(h, m, s, ##__VA_ARGS__) extern void i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt_str, ...); /* Non-busy-wait that uses kern_yield() */ void i40e_msec_pause(int); /* ** This hardware supports either 16 or 32 byte rx descriptors; ** the driver only uses the 32 byte kind. 
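/*
 * A minimal standalone illustration, not from the original header, of
 * what the CPU_TO_LE32() and LE32_TO_CPU() wrappers above resolve to on
 * FreeBSD: descriptor fields are little-endian on the wire, so values
 * pass through htole32() on the way out and le32toh() on the way back,
 * both from <sys/endian.h>; on little-endian hosts these are no-ops.
 */
#include <sys/endian.h>
#include <stdio.h>
#include <stdint.h>

#define EX_CPU_TO_LE32(s)	htole32(s)
#define EX_LE32_TO_CPU(c)	le32toh(c)

int
main(void)
{
	uint32_t wire = EX_CPU_TO_LE32(0x12345678);	/* as stored in a descriptor */

	/* round-trips to the original host value on any endianness */
	printf("0x%08x\n", EX_LE32_TO_CPU(wire));
	return (0);
}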
*/ #define i40e_rx_desc i40e_32byte_rx_desc static __inline uint32_t rd32_osdep(struct i40e_osdep *osdep, uint32_t reg) { KASSERT(reg < osdep->mem_bus_space_size, ("ixl: register offset %#jx too large (max is %#jx)", (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size)); return (bus_space_read_4(osdep->mem_bus_space_tag, osdep->mem_bus_space_handle, reg)); } static __inline void wr32_osdep(struct i40e_osdep *osdep, uint32_t reg, uint32_t value) { KASSERT(reg < osdep->mem_bus_space_size, ("ixl: register offset %#jx too large (max is %#jx)", (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size)); bus_space_write_4(osdep->mem_bus_space_tag, osdep->mem_bus_space_handle, reg, value); } static __inline void ixl_flush_osdep(struct i40e_osdep *osdep) { rd32_osdep(osdep, osdep->flush_reg); } #define rd32(a, reg) rd32_osdep((a)->back, (reg)) #define wr32(a, reg, value) wr32_osdep((a)->back, (reg), (value)) #define rd64(a, reg) (\ bus_space_read_8( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \ ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \ reg)) #define wr64(a, reg, value) (\ bus_space_write_8( ((struct i40e_osdep *)(a)->back)->mem_bus_space_tag, \ ((struct i40e_osdep *)(a)->back)->mem_bus_space_handle, \ reg, value)) #define ixl_flush(a) ixl_flush_osdep((a)->back) #endif /* _I40E_OSDEP_H_ */ Index: head/sys/dev/ixl/ixl.h =================================================================== --- head/sys/dev/ixl/ixl.h (revision 303889) +++ head/sys/dev/ixl/ixl.h (revision 303890) @@ -1,676 +1,676 @@ /****************************************************************************** Copyright (c) 2013-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
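/*
 * An illustrative sketch, not from the original file, of the typical
 * read-modify-write pattern through the bounds-checked register
 * accessors defined at the end of i40e_osdep.h above. EX_REG_CTL and
 * EX_CTL_ENABLE are hypothetical values, and "hw" stands for any
 * object whose ->back field points at a struct i40e_osdep, which is
 * what the rd32()/wr32() macros expect.
 */
#define EX_REG_CTL	0x00100		/* hypothetical register offset */
#define EX_CTL_ENABLE	0x00000001	/* hypothetical enable bit */

static void
ex_enable_block(struct i40e_hw *hw)
{
	uint32_t v;

	v = rd32(hw, EX_REG_CTL);	/* KASSERT validates the offset */
	v |= EX_CTL_ENABLE;		/* set the enable bit */
	wr32(hw, EX_REG_CTL, v);	/* write it back ... */
	ixl_flush(hw);			/* ... and flush posted writes */
}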
******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_H_ #define _IXL_H_ #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef RSS #include #include #endif #include "i40e_type.h" #include "i40e_prototype.h" #define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x" #define MAC_FORMAT_ARGS(mac_addr) \ (mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \ (mac_addr)[4], (mac_addr)[5] #define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off") #ifdef IXL_DEBUG #define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__) #define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__) #define _IF_DBG_PRINTF(ifp, S, ...) if_printf(ifp, "%s: " S "\n", __func__, ##__VA_ARGS__) /* Defines for printing generic debug information */ #define DPRINTF(...) _DBG_PRINTF(__VA_ARGS__) #define DDPRINTF(...) _DEV_DBG_PRINTF(__VA_ARGS__) #define IDPRINTF(...) _IF_DBG_PRINTF(__VA_ARGS__) /* Defines for printing specific debug information */ #define DEBUG_INIT 1 #define DEBUG_IOCTL 1 #define DEBUG_HW 1 #define INIT_DEBUGOUT(...) if (DEBUG_INIT) _DBG_PRINTF(__VA_ARGS__) #define INIT_DBG_DEV(...) if (DEBUG_INIT) _DEV_DBG_PRINTF(__VA_ARGS__) #define INIT_DBG_IF(...) if (DEBUG_INIT) _IF_DBG_PRINTF(__VA_ARGS__) #define IOCTL_DEBUGOUT(...) if (DEBUG_IOCTL) _DBG_PRINTF(__VA_ARGS__) #define IOCTL_DBG_IF2(ifp, S, ...) if (DEBUG_IOCTL) \ if_printf(ifp, S "\n", ##__VA_ARGS__) #define IOCTL_DBG_IF(...) if (DEBUG_IOCTL) _IF_DBG_PRINTF(__VA_ARGS__) #define HW_DEBUGOUT(...) if (DEBUG_HW) _DBG_PRINTF(__VA_ARGS__) #else /* no IXL_DEBUG */ #define DEBUG_INIT 0 #define DEBUG_IOCTL 0 #define DEBUG_HW 0 #define DPRINTF(...) #define DDPRINTF(...) #define IDPRINTF(...) #define INIT_DEBUGOUT(...) #define INIT_DBG_DEV(...) #define INIT_DBG_IF(...) #define IOCTL_DEBUGOUT(...) #define IOCTL_DBG_IF2(...) #define IOCTL_DBG_IF(...) #define HW_DEBUGOUT(...) #endif /* IXL_DEBUG */ enum ixl_dbg_mask { IXL_DBG_INFO = 0x00000001, IXL_DBG_EN_DIS = 0x00000002, IXL_DBG_AQ = 0x00000004, IXL_DBG_NVMUPD = 0x00000008, IXL_DBG_IOCTL_KNOWN = 0x00000010, IXL_DBG_IOCTL_UNKNOWN = 0x00000020, IXL_DBG_IOCTL_ALL = 0x00000030, I40E_DEBUG_RSS = 0x00000100, IXL_DBG_IOV = 0x00001000, IXL_DBG_IOV_VC = 0x00002000, IXL_DBG_SWITCH_INFO = 0x00010000, IXL_DBG_ALL = 0xFFFFFFFF }; /* Tunables */ /* * Ring Descriptors Valid Range: 32-4096 Default Value: 1024 This value is the * number of tx/rx descriptors allocated by the driver. Increasing this * value allows the driver to queue more operations. * * Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes. * The driver currently always uses 32 byte Rx descriptors. */ #define DEFAULT_RING 1024 #define IXL_MAX_RING 8160 #define IXL_MIN_RING 32 #define IXL_RING_INCREMENT 32 #define IXL_AQ_LEN 256 #define IXL_AQ_LEN_MAX 1024 /* ** Default number of entries in Tx queue buf_ring. */ #define DEFAULT_TXBRSZ 4096 /* Alignment for rings */ #define DBA_ALIGN 128 /* * This is the max watchdog interval, ie. 
the time that can * pass between any two TX clean operations; such cleaning only happens * when the TX hardware is functioning. */ #define IXL_WATCHDOG (10 * hz) /* * These parameters control when the driver calls the routine to reclaim * transmit descriptors. */ #define IXL_TX_CLEANUP_THRESHOLD (que->num_desc / 8) #define IXL_TX_OP_THRESHOLD (que->num_desc / 32) #define MAX_MULTICAST_ADDR 128 #define IXL_BAR 3 #define IXL_ADM_LIMIT 2 #define IXL_TSO_SIZE 65535 #define IXL_AQ_BUF_SZ ((u32) 4096) #define IXL_RX_HDR 128 #define IXL_RX_LIMIT 512 #define IXL_RX_ITR 0 #define IXL_TX_ITR 1 #define IXL_ITR_NONE 3 #define IXL_QUEUE_EOL 0x7FF #define IXL_MAX_FRAME 9728 #define IXL_MAX_TX_SEGS 8 #define IXL_MAX_TSO_SEGS 128 #define IXL_SPARSE_CHAIN 6 #define IXL_QUEUE_HUNG 0x80000000 #define IXL_RSS_KEY_SIZE_REG 13 #define IXL_RSS_KEY_SIZE (IXL_RSS_KEY_SIZE_REG * 4) #define IXL_RSS_VSI_LUT_SIZE 64 /* X722 -> VSI, X710 -> VF */ #define IXL_RSS_VSI_LUT_ENTRY_MASK 0x3F #define IXL_RSS_VF_LUT_ENTRY_MASK 0xF #define IXL_VF_MAX_BUFFER 0x3F80 #define IXL_VF_MAX_HDR_BUFFER 0x840 #define IXL_VF_MAX_FRAME 0x3FFF /* ERJ: hardware can support ~2k (SW5+) filters between all functions */ #define IXL_MAX_FILTERS 256 #define IXL_MAX_TX_BUSY 10 #define IXL_NVM_VERSION_LO_SHIFT 0 #define IXL_NVM_VERSION_LO_MASK (0xff << IXL_NVM_VERSION_LO_SHIFT) #define IXL_NVM_VERSION_HI_SHIFT 12 #define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT) /* * Interrupt Moderation parameters */ #define IXL_MAX_ITR 0x07FF #define IXL_ITR_100K 0x0005 #define IXL_ITR_20K 0x0019 #define IXL_ITR_8K 0x003E #define IXL_ITR_4K 0x007A #define IXL_ITR_DYNAMIC 0x8000 #define IXL_LOW_LATENCY 0 #define IXL_AVE_LATENCY 1 #define IXL_BULK_LATENCY 2 /* MacVlan Flags */ #define IXL_FILTER_USED (u16)(1 << 0) #define IXL_FILTER_VLAN (u16)(1 << 1) #define IXL_FILTER_ADD (u16)(1 << 2) #define IXL_FILTER_DEL (u16)(1 << 3) #define IXL_FILTER_MC (u16)(1 << 4) /* used in the vlan field of the filter when not a vlan */ #define IXL_VLAN_ANY -1 #define CSUM_OFFLOAD_IPV4 (CSUM_IP|CSUM_TCP|CSUM_UDP|CSUM_SCTP) #define CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6) #define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO) /* Misc flags for ixl_vsi.flags */ #define IXL_FLAGS_KEEP_TSO4 (1 << 0) #define IXL_FLAGS_KEEP_TSO6 (1 << 1) #define IXL_VF_RESET_TIMEOUT 100 #define IXL_VSI_DATA_PORT 0x01 #define IXLV_MAX_QUEUES 16 #define IXL_MAX_VSI_QUEUES (2 * (I40E_VSILAN_QTABLE_MAX_INDEX + 1)) #define IXL_RX_CTX_BASE_UNITS 128 #define IXL_TX_CTX_BASE_UNITS 128 #define IXL_VPINT_LNKLSTN_REG(hw, vector, vf_num) \ I40E_VPINT_LNKLSTN(((vector) - 1) + \ (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num))) #define IXL_VFINT_DYN_CTLN_REG(hw, vector, vf_num) \ I40E_VFINT_DYN_CTLN(((vector) - 1) + \ (((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num))) #define IXL_PF_PCI_CIAA_VF_DEVICE_STATUS 0xAA #define IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK 0x20 #define IXL_GLGEN_VFLRSTAT_INDEX(glb_vf) ((glb_vf) / 32) #define IXL_GLGEN_VFLRSTAT_MASK(glb_vf) (1 << ((glb_vf) % 32)) #define IXL_MAX_ITR_IDX 3 #define IXL_END_OF_INTR_LNKLST 0x7FF #define IXL_DEFAULT_RSS_HENA (\ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD)) #define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx) #define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) #define IXL_TX_TRYLOCK(_sc) mtx_trylock(&(_sc)->mtx) #define IXL_TX_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->mtx, MA_OWNED) #define IXL_RX_LOCK(_sc) mtx_lock(&(_sc)->mtx) #define IXL_RX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx) #define IXL_RX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx) /* Pre-11 counter(9) compatibility */ #if __FreeBSD_version >= 1100036 #define IXL_SET_IPACKETS(vsi, count) (vsi)->ipackets = (count) #define IXL_SET_IERRORS(vsi, count) (vsi)->ierrors = (count) #define IXL_SET_OPACKETS(vsi, count) (vsi)->opackets = (count) #define IXL_SET_OERRORS(vsi, count) (vsi)->oerrors = (count) #define IXL_SET_COLLISIONS(vsi, count) /* Do nothing; collisions is always 0. */ #define IXL_SET_IBYTES(vsi, count) (vsi)->ibytes = (count) #define IXL_SET_OBYTES(vsi, count) (vsi)->obytes = (count) #define IXL_SET_IMCASTS(vsi, count) (vsi)->imcasts = (count) #define IXL_SET_OMCASTS(vsi, count) (vsi)->omcasts = (count) #define IXL_SET_IQDROPS(vsi, count) (vsi)->iqdrops = (count) #define IXL_SET_OQDROPS(vsi, count) (vsi)->oqdrops = (count) #define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count) #else #define IXL_SET_IPACKETS(vsi, count) (vsi)->ifp->if_ipackets = (count) #define IXL_SET_IERRORS(vsi, count) (vsi)->ifp->if_ierrors = (count) #define IXL_SET_OPACKETS(vsi, count) (vsi)->ifp->if_opackets = (count) #define IXL_SET_OERRORS(vsi, count) (vsi)->ifp->if_oerrors = (count) #define IXL_SET_COLLISIONS(vsi, count) (vsi)->ifp->if_collisions = (count) #define IXL_SET_IBYTES(vsi, count) (vsi)->ifp->if_ibytes = (count) #define IXL_SET_OBYTES(vsi, count) (vsi)->ifp->if_obytes = (count) #define IXL_SET_IMCASTS(vsi, count) (vsi)->ifp->if_imcasts = (count) #define IXL_SET_OMCASTS(vsi, count) (vsi)->ifp->if_omcasts = (count) #define IXL_SET_IQDROPS(vsi, count) (vsi)->ifp->if_iqdrops = (count) #define IXL_SET_OQDROPS(vsi, odrops) (vsi)->ifp->if_snd.ifq_drops = (odrops) #define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count) #endif /* ***************************************************************************** * vendor_info_array * * This array contains the list of Subvendor/Subdevice IDs on which the driver * should load. * ***************************************************************************** */ typedef struct _ixl_vendor_info_t { unsigned int vendor_id; unsigned int device_id; unsigned int subvendor_id; unsigned int subdevice_id; unsigned int index; } ixl_vendor_info_t; struct ixl_tx_buf { u32 eop_index; struct mbuf *m_head; bus_dmamap_t map; bus_dma_tag_t tag; }; struct ixl_rx_buf { struct mbuf *m_head; struct mbuf *m_pack; struct mbuf *fmp; bus_dmamap_t hmap; bus_dmamap_t pmap; }; /* ** This struct has multiple uses, multicast ** addresses, vlans, and mac filters all use it. 
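/*
 * A standalone sketch, not from the original header, of how the
 * IXL_DEFAULT_RSS_HENA mask above is assembled: each
 * I40E_FILTER_PCTYPE value names a bit position in the 64-bit
 * hash-enable (HENA) register, and the default simply ORs one BIT_ULL()
 * term per packet type the driver wants hashed. The two pctype numbers
 * below are illustrative stand-ins, not the hardware encodings.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_BIT_ULL(nr)		(1ULL << (nr))
#define EX_PCTYPE_IPV4_UDP	31	/* stand-in bit position */
#define EX_PCTYPE_IPV4_TCP	33	/* stand-in bit position */

int
main(void)
{
	uint64_t hena;

	hena = EX_BIT_ULL(EX_PCTYPE_IPV4_UDP) |
	    EX_BIT_ULL(EX_PCTYPE_IPV4_TCP);
	printf("hena = 0x%016jx\n", (uintmax_t)hena);	/* bits 31 and 33 set */
	return (0);
}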
*/ struct ixl_mac_filter { SLIST_ENTRY(ixl_mac_filter) next; u8 macaddr[ETHER_ADDR_LEN]; s16 vlan; u16 flags; }; /* * The Transmit ring control struct */ struct tx_ring { struct ixl_queue *que; struct mtx mtx; u32 tail; struct i40e_tx_desc *base; struct i40e_dma_mem dma; u16 next_avail; u16 next_to_clean; u16 atr_rate; u16 atr_count; u32 itr; u32 latency; struct ixl_tx_buf *buffers; volatile u16 avail; u32 cmd; bus_dma_tag_t tx_tag; bus_dma_tag_t tso_tag; char mtx_name[16]; struct buf_ring *br; /* Used for Dynamic ITR calculation */ u32 packets; u32 bytes; /* Soft Stats */ u64 tx_bytes; u64 no_desc; u64 total_packets; }; /* * The Receive ring control struct */ struct rx_ring { struct ixl_queue *que; struct mtx mtx; union i40e_rx_desc *base; struct i40e_dma_mem dma; struct lro_ctrl lro; bool lro_enabled; bool hdr_split; bool discard; u32 next_refresh; u32 next_check; u32 itr; u32 latency; char mtx_name[16]; struct ixl_rx_buf *buffers; u32 mbuf_sz; u32 tail; bus_dma_tag_t htag; bus_dma_tag_t ptag; /* Used for Dynamic ITR calculation */ u32 packets; u32 bytes; /* Soft stats */ u64 split; u64 rx_packets; u64 rx_bytes; u64 desc_errs; u64 not_done; }; /* ** Driver queue struct: this is the interrupt container ** for the associated tx and rx ring pair. */ struct ixl_queue { struct ixl_vsi *vsi; u32 me; u32 msix; /* This queue's MSIX vector */ u32 eims; /* This queue's EIMS bit */ struct resource *res; void *tag; int num_desc; /* both tx and rx */ int busy; struct tx_ring txr; struct rx_ring rxr; struct task task; struct task tx_task; struct taskqueue *tq; /* Queue stats */ u64 irqs; u64 tso; u64 mbuf_defrag_failed; u64 mbuf_hdr_failed; u64 mbuf_pkt_failed; u64 tx_dmamap_failed; u64 dropped_pkts; }; /* ** Virtual Station Interface */ SLIST_HEAD(ixl_ftl_head, ixl_mac_filter); struct ixl_vsi { void *back; struct ifnet *ifp; - struct device *dev; + device_t dev; struct i40e_hw *hw; struct ifmedia media; enum i40e_vsi_type type; int id; u16 num_queues; u32 rx_itr_setting; u32 tx_itr_setting; u16 max_frame_size; struct ixl_queue *queues; /* head of queues */ u16 vsi_num; bool link_active; u16 seid; u16 uplink_seid; u16 downlink_seid; /* MAC/VLAN Filter list */ struct ixl_ftl_head ftl; u16 num_macs; /* Contains readylist & stat counter id */ struct i40e_aqc_vsi_properties_data info; eventhandler_tag vlan_attach; eventhandler_tag vlan_detach; u16 num_vlans; /* Per-VSI stats from hardware */ struct i40e_eth_stats eth_stats; struct i40e_eth_stats eth_stats_offsets; bool stat_offsets_loaded; /* VSI stat counters */ u64 ipackets; u64 ierrors; u64 opackets; u64 oerrors; u64 ibytes; u64 obytes; u64 imcasts; u64 omcasts; u64 iqdrops; u64 oqdrops; u64 noproto; /* Driver statistics */ u64 hw_filters_del; u64 hw_filters_add; /* Misc. 
*/ u64 active_queues; u64 flags; struct sysctl_oid *vsi_node; }; /* ** Find the number of unrefreshed RX descriptors */ static inline u16 ixl_rx_unrefreshed(struct ixl_queue *que) { struct rx_ring *rxr = &que->rxr; if (rxr->next_check > rxr->next_refresh) return (rxr->next_check - rxr->next_refresh - 1); else return ((que->num_desc + rxr->next_check) - rxr->next_refresh - 1); } /* ** Find the next available unused filter */ static inline struct ixl_mac_filter * ixl_get_filter(struct ixl_vsi *vsi) { struct ixl_mac_filter *f; /* create a new empty filter */ f = malloc(sizeof(struct ixl_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); if (f) SLIST_INSERT_HEAD(&vsi->ftl, f, next); return (f); } /* ** Compare two ethernet addresses */ static inline bool cmp_etheraddr(const u8 *ea1, const u8 *ea2) { bool cmp = FALSE; if ((ea1[0] == ea2[0]) && (ea1[1] == ea2[1]) && (ea1[2] == ea2[2]) && (ea1[3] == ea2[3]) && (ea1[4] == ea2[4]) && (ea1[5] == ea2[5])) cmp = TRUE; return (cmp); } /* * Return next largest power of 2, unsigned * * Public domain, from Bit Twiddling Hacks */ static inline u32 next_power_of_two(u32 n) { n--; n |= n >> 1; n |= n >> 2; n |= n >> 4; n |= n >> 8; n |= n >> 16; n++; /* Next power of two > 0 is 1 */ n += (n == 0); return (n); } /* * Info for stats sysctls */ struct ixl_sysctl_info { u64 *stat; char *name; char *description; }; static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; /********************************************************************* * TXRX Function prototypes *********************************************************************/ int ixl_allocate_tx_data(struct ixl_queue *); int ixl_allocate_rx_data(struct ixl_queue *); void ixl_init_tx_ring(struct ixl_queue *); int ixl_init_rx_ring(struct ixl_queue *); bool ixl_rxeof(struct ixl_queue *, int); bool ixl_txeof(struct ixl_queue *); void ixl_free_que_tx(struct ixl_queue *); void ixl_free_que_rx(struct ixl_queue *); int ixl_mq_start(struct ifnet *, struct mbuf *); int ixl_mq_start_locked(struct ifnet *, struct tx_ring *); void ixl_deferred_mq_start(void *, int); void ixl_free_vsi(struct ixl_vsi *); void ixl_qflush(struct ifnet *); /* Common function prototypes between PF/VF driver */ #if __FreeBSD_version >= 1100000 uint64_t ixl_get_counter(if_t ifp, ift_counter cnt); #endif void ixl_get_default_rss_key(u32 *); #endif /* _IXL_H_ */ Index: head/sys/dev/ixl/ixl_pf.h =================================================================== --- head/sys/dev/ixl/ixl_pf.h (revision 303889) +++ head/sys/dev/ixl/ixl_pf.h (revision 303890) @@ -1,322 +1,322 @@ /****************************************************************************** Copyright (c) 2013-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
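/*
 * A standalone check, not from the original diff, of the bit-smearing
 * trick inside next_power_of_two() earlier in ixl.h: the OR-shift
 * cascade copies the highest set bit of n - 1 into every lower
 * position, leaving all ones below that bit, so the final increment
 * lands exactly on the next power of two.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t
ex_next_power_of_two(uint32_t n)
{
	n--;
	n |= n >> 1;
	n |= n >> 2;
	n |= n >> 4;
	n |= n >> 8;
	n |= n >> 16;
	n++;
	n += (n == 0);	/* next power of two > 0 is 1 */
	return (n);
}

int
main(void)
{
	printf("%u %u %u %u\n", ex_next_power_of_two(1),
	    ex_next_power_of_two(33), ex_next_power_of_two(64),
	    ex_next_power_of_two(0));	/* prints: 1 64 64 1 */
	return (0);
}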
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXL_PF_H_ #define _IXL_PF_H_ #include "ixl.h" #include "ixl_pf_qmgr.h" #define VF_FLAG_ENABLED 0x01 #define VF_FLAG_SET_MAC_CAP 0x02 #define VF_FLAG_VLAN_CAP 0x04 #define VF_FLAG_PROMISC_CAP 0x08 #define VF_FLAG_MAC_ANTI_SPOOF 0x10 #define IXL_PF_STATE_EMPR_RESETTING (1 << 0) struct ixl_vf { struct ixl_vsi vsi; uint32_t vf_flags; uint8_t mac[ETHER_ADDR_LEN]; uint16_t vf_num; uint32_t version; struct ixl_pf_qtag qtag; struct sysctl_ctx_list ctx; }; /* Physical controller structure */ struct ixl_pf { struct i40e_hw hw; struct i40e_osdep osdep; - struct device *dev; + device_t dev; struct ixl_vsi vsi; struct resource *pci_mem; struct resource *msix_mem; /* * Interrupt resources: this set is * either used for legacy, or for Link * when doing MSIX */ void *tag; struct resource *res; struct callout timer; int msix; int if_flags; int state; struct ixl_pf_qmgr qmgr; struct ixl_pf_qtag qtag; /* Tunable values */ bool enable_msix; int max_queues; int ringsz; bool enable_tx_fc_filter; int dynamic_rx_itr; int dynamic_tx_itr; int tx_itr; int rx_itr; struct mtx pf_mtx; u32 qbase; u32 admvec; struct task adminq; struct taskqueue *tq; bool link_up; u32 link_speed; int advertised_speed; int fc; /* link flow ctrl setting */ enum ixl_dbg_mask dbg_mask; /* Misc stats maintained by the driver */ u64 watchdog_events; u64 admin_irq; /* Statistics from hw */ struct i40e_hw_port_stats stats; struct i40e_hw_port_stats stats_offsets; bool stat_offsets_loaded; /* SR-IOV */ struct ixl_vf *vfs; int num_vfs; uint16_t veb_seid; struct task vflr_task; int vc_debug_lvl; }; /* * Defines used for NVM update ioctls. * This value is used in the Solaris tool, too. */ #define I40E_NVM_ACCESS \ (((((((('E' << 4) + '1') << 4) + 'K') << 4) + 'G') << 4) | 5) #define IXL_DEFAULT_PHY_INT_MASK \ ((~(I40E_AQ_EVENT_LINK_UPDOWN | I40E_AQ_EVENT_MODULE_QUAL_FAIL \ | I40E_AQ_EVENT_MEDIA_NA)) & 0x3FF) /*** Sysctl help messages; displayed with "sysctl -d" ***/ #define IXL_SYSCTL_HELP_SET_ADVERTISE \ "\nControl advertised link speed.\n" \ "Flags:\n" \ "\t 0x1 - advertise 100M\n" \ "\t 0x2 - advertise 1G\n" \ "\t 0x4 - advertise 10G\n" \ "\t 0x8 - advertise 20G\n" \ "\t0x10 - advertise 40G\n\n" \ "Set to 0 to disable link." #define IXL_SYSCTL_HELP_FC \ "\nSet flow control mode using the values below.\n" \ "\t0 - off\n" \ "\t1 - rx pause\n" \ "\t2 - tx pause\n" \ "\t3 - tx and rx pause" #define IXL_SYSCTL_HELP_LINK_STATUS \ "\nExecutes a \"Get Link Status\" command on the Admin Queue, and displays" \ " the response." 
\ static char *ixl_fc_string[6] = { "None", "Rx", "Tx", "Full", "Priority", "Default" }; static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations"); /*** Functions / Macros ***/ #define I40E_VC_DEBUG(pf, level, ...) \ do { \ if ((pf)->vc_debug_lvl >= (level)) \ device_printf((pf)->dev, __VA_ARGS__); \ } while (0) #define i40e_send_vf_nack(pf, vf, op, st) \ ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__) #define IXL_PF_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->pf_mtx, _name, "IXL PF Lock", MTX_DEF) #define IXL_PF_LOCK(_sc) mtx_lock(&(_sc)->pf_mtx) #define IXL_PF_UNLOCK(_sc) mtx_unlock(&(_sc)->pf_mtx) #define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx) #define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED) /* For stats sysctl naming */ #define QUEUE_NAME_LEN 32 /* * PF-only function declarations */ void ixl_set_busmaster(device_t); int ixl_setup_interface(device_t, struct ixl_vsi *); void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *); void ixl_handle_que(void *context, int pending); void ixl_init(void *); void ixl_local_timer(void *); void ixl_register_vlan(void *, struct ifnet *, u16); void ixl_unregister_vlan(void *, struct ifnet *, u16); void ixl_intr(void *); void ixl_msix_que(void *); void ixl_msix_adminq(void *); void ixl_do_adminq(void *, int); int ixl_res_alloc_cmp(const void *, const void *); char * ixl_switch_res_type_string(u8); char * ixl_switch_element_string(struct sbuf *, struct i40e_aqc_switch_config_element_resp *); void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct i40e_hw_port_stats *); void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct i40e_eth_stats *); void ixl_media_status(struct ifnet *, struct ifmediareq *); int ixl_media_change(struct ifnet *); int ixl_ioctl(struct ifnet *, u_long, caddr_t); void ixl_enable_adminq(struct i40e_hw *); void ixl_get_bus_info(struct i40e_hw *, device_t); void ixl_disable_adminq(struct i40e_hw *); void ixl_enable_queue(struct i40e_hw *, int); void ixl_disable_queue(struct i40e_hw *, int); void ixl_enable_legacy(struct i40e_hw *); void ixl_disable_legacy(struct i40e_hw *); void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf); void ixl_stat_update48(struct i40e_hw *, u32, u32, bool, u64 *, u64 *); void ixl_stat_update32(struct i40e_hw *, u32, bool, u64 *, u64 *); void ixl_stop(struct ixl_pf *); void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name); int ixl_get_hw_capabilities(struct ixl_pf *); void ixl_update_link_status(struct ixl_pf *); int ixl_allocate_pci_resources(struct ixl_pf *); int ixl_setup_stations(struct ixl_pf *); int ixl_switch_config(struct ixl_pf *); void ixl_stop_locked(struct ixl_pf *); int ixl_teardown_hw_structs(struct ixl_pf *); int ixl_reset(struct ixl_pf *); void ixl_init_locked(struct ixl_pf *); void ixl_set_rss_key(struct ixl_pf *); void ixl_set_rss_pctypes(struct ixl_pf *); void ixl_set_rss_hlut(struct ixl_pf *); int ixl_setup_adminq_msix(struct ixl_pf *); int ixl_setup_adminq_tq(struct ixl_pf *); int ixl_teardown_adminq_msix(struct ixl_pf *); void ixl_configure_intr0_msix(struct ixl_pf *); void ixl_configure_queue_intr_msix(struct ixl_pf *); void ixl_free_adminq_tq(struct ixl_pf *); int ixl_assign_vsi_legacy(struct ixl_pf *); int ixl_init_msix(struct ixl_pf *); void ixl_configure_itr(struct ixl_pf *); void ixl_configure_legacy(struct ixl_pf *); void ixl_free_pci_resources(struct ixl_pf *); void ixl_link_event(struct 
ixl_pf *, struct i40e_arq_event_info *); void ixl_config_rss(struct ixl_pf *); int ixl_set_advertised_speeds(struct ixl_pf *, int); void ixl_get_initial_advertised_speeds(struct ixl_pf *); void ixl_print_nvm_version(struct ixl_pf *pf); void ixl_add_device_sysctls(struct ixl_pf *); void ixl_handle_mdd_event(struct ixl_pf *); void ixl_add_hw_stats(struct ixl_pf *); void ixl_update_stats_counters(struct ixl_pf *); void ixl_pf_reset_stats(struct ixl_pf *); void ixl_dbg(struct ixl_pf *, enum ixl_dbg_mask, char *, ...); int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *); void ixl_handle_empr_reset(struct ixl_pf *); int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *); void ixl_set_queue_rx_itr(struct ixl_queue *); void ixl_set_queue_tx_itr(struct ixl_queue *); void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan); void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan); void ixl_reconfigure_filters(struct ixl_vsi *vsi); int ixl_disable_rings(struct ixl_vsi *); int ixl_disable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_disable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16); int ixl_enable_rings(struct ixl_vsi *); int ixl_enable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_enable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16); int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16); void ixl_update_eth_stats(struct ixl_vsi *); void ixl_disable_intr(struct ixl_vsi *); void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int); int ixl_initialize_vsi(struct ixl_vsi *); void ixl_add_ifmedia(struct ixl_vsi *, u32); int ixl_setup_queue_msix(struct ixl_vsi *); int ixl_setup_queue_tqs(struct ixl_vsi *); int ixl_teardown_queue_msix(struct ixl_vsi *); void ixl_free_queue_tqs(struct ixl_vsi *); void ixl_enable_intr(struct ixl_vsi *); void ixl_disable_rings_intr(struct ixl_vsi *); void ixl_set_promisc(struct ixl_vsi *); void ixl_add_multi(struct ixl_vsi *); void ixl_del_multi(struct ixl_vsi *); void ixl_setup_vlan_filters(struct ixl_vsi *); void ixl_init_filters(struct ixl_vsi *); void ixl_add_hw_filters(struct ixl_vsi *, int, int); void ixl_del_hw_filters(struct ixl_vsi *, int); struct ixl_mac_filter * ixl_find_filter(struct ixl_vsi *, u8 *, s16); void ixl_add_mc_filter(struct ixl_vsi *, u8 *); void ixl_free_mac_filters(struct ixl_vsi *vsi); void ixl_update_vsi_stats(struct ixl_vsi *); void ixl_vsi_reset_stats(struct ixl_vsi *); #endif /* _IXL_PF_H_ */ Index: head/sys/dev/ixl/ixlv.h =================================================================== --- head/sys/dev/ixl/ixlv.h (revision 303889) +++ head/sys/dev/ixl/ixlv.h (revision 303890) @@ -1,236 +1,236 @@ /****************************************************************************** Copyright (c) 2013-2015, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #ifndef _IXLV_H_ #define _IXLV_H_ #include "ixlv_vc_mgr.h" #define IXLV_AQ_MAX_ERR 200 #define IXLV_MAX_FILTERS 128 #define IXLV_MAX_QUEUES 16 #define IXLV_AQ_TIMEOUT (1 * hz) #define IXLV_CALLOUT_TIMO (hz / 50) /* 20 msec */ #define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1 << 0) #define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1) #define IXLV_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2) #define IXLV_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3) #define IXLV_FLAG_AQ_DEL_MAC_FILTER (u32)(1 << 4) #define IXLV_FLAG_AQ_DEL_VLAN_FILTER (u32)(1 << 5) #define IXLV_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6) #define IXLV_FLAG_AQ_MAP_VECTORS (u32)(1 << 7) #define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8) #define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9) #define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10) #define IXLV_FLAG_AQ_CONFIG_RSS_KEY (u32)(1 << 11) #define IXLV_FLAG_AQ_SET_RSS_HENA (u32)(1 << 12) #define IXLV_FLAG_AQ_GET_RSS_HENA_CAPS (u32)(1 << 13) #define IXLV_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14) /* printf %b arg */ #define IXLV_FLAGS \ "\20\1ENABLE_QUEUES\2DISABLE_QUEUES\3ADD_MAC_FILTER" \ "\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \ "\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \ "\12CONFIGURE_PROMISC\13GET_STATS" #define IXLV_PRINTF_VF_OFFLOAD_FLAGS \ "\20\1I40E_VIRTCHNL_VF_OFFLOAD_L2" \ "\2I40E_VIRTCHNL_VF_OFFLOAD_IWARP" \ "\3I40E_VIRTCHNL_VF_OFFLOAD_FCOE" \ "\4I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ" \ "\5I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG" \ "\6I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR" \ "\21I40E_VIRTCHNL_VF_OFFLOAD_VLAN" \ "\22I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING" \ "\23I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2" \ "\24I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF" /* Driver state */ enum ixlv_state_t { IXLV_START, IXLV_FAILED, IXLV_RESET_REQUIRED, IXLV_RESET_PENDING, IXLV_VERSION_CHECK, IXLV_GET_RESOURCES, IXLV_INIT_READY, IXLV_INIT_START, IXLV_INIT_CONFIG, IXLV_INIT_MAPPING, IXLV_INIT_ENABLE, IXLV_INIT_COMPLETE, IXLV_RUNNING, }; /* Structs */ struct ixlv_mac_filter { SLIST_ENTRY(ixlv_mac_filter) next; u8 macaddr[ETHER_ADDR_LEN]; u16 flags; }; SLIST_HEAD(mac_list, ixlv_mac_filter); struct ixlv_vlan_filter { SLIST_ENTRY(ixlv_vlan_filter) next; u16 vlan; u16 flags; }; SLIST_HEAD(vlan_list, ixlv_vlan_filter); /* Software controller structure */ struct ixlv_sc { struct i40e_hw hw; struct i40e_osdep osdep; - struct device *dev; + device_t dev; struct resource *pci_mem; struct resource *msix_mem; enum ixlv_state_t init_state; int init_in_progress; /* * Interrupt resources */ void *tag; struct resource *res; /* For the AQ */ struct ifmedia media; struct callout timer; int msix; int pf_version; int if_flags; bool link_up; u32 link_speed; struct mtx mtx; u32 qbase; u32 admvec; struct timeout_task timeout; struct 
task aq_irq; struct task aq_sched; struct taskqueue *tq; struct ixl_vsi vsi; /* Filter lists */ struct mac_list *mac_filters; struct vlan_list *vlan_filters; /* Promiscuous mode */ u32 promiscuous_flags; /* Admin queue task flags */ u32 aq_wait_count; struct ixl_vc_mgr vc_mgr; struct ixl_vc_cmd add_mac_cmd; struct ixl_vc_cmd del_mac_cmd; struct ixl_vc_cmd config_queues_cmd; struct ixl_vc_cmd map_vectors_cmd; struct ixl_vc_cmd enable_queues_cmd; struct ixl_vc_cmd add_vlan_cmd; struct ixl_vc_cmd del_vlan_cmd; struct ixl_vc_cmd add_multi_cmd; struct ixl_vc_cmd del_multi_cmd; struct ixl_vc_cmd config_rss_key_cmd; struct ixl_vc_cmd get_rss_hena_caps_cmd; struct ixl_vc_cmd set_rss_hena_cmd; struct ixl_vc_cmd config_rss_lut_cmd; /* Virtual comm channel */ struct i40e_virtchnl_vf_resource *vf_res; struct i40e_virtchnl_vsi_resource *vsi_res; /* Misc stats maintained by the driver */ u64 watchdog_events; u64 admin_irq; u8 aq_buffer[IXL_AQ_BUF_SZ]; }; #define IXLV_CORE_LOCK_ASSERT(sc) mtx_assert(&(sc)->mtx, MA_OWNED) /* ** This checks for a zero MAC address, which is likely ** unless the admin on the host has assigned one. */ static inline bool ixlv_check_ether_addr(u8 *addr) { bool status = TRUE; if ((addr[0] == 0 && addr[1] == 0 && addr[2] == 0 && addr[3] == 0 && addr[4] == 0 && addr[5] == 0)) status = FALSE; return (status); } /* ** VF Common function prototypes */ int ixlv_send_api_ver(struct ixlv_sc *); int ixlv_verify_api_ver(struct ixlv_sc *); int ixlv_send_vf_config_msg(struct ixlv_sc *); int ixlv_get_vf_config(struct ixlv_sc *); void ixlv_init(void *); int ixlv_reinit_locked(struct ixlv_sc *); void ixlv_configure_queues(struct ixlv_sc *); void ixlv_enable_queues(struct ixlv_sc *); void ixlv_disable_queues(struct ixlv_sc *); void ixlv_map_queues(struct ixlv_sc *); void ixlv_enable_intr(struct ixl_vsi *); void ixlv_disable_intr(struct ixl_vsi *); void ixlv_add_ether_filters(struct ixlv_sc *); void ixlv_del_ether_filters(struct ixlv_sc *); void ixlv_request_stats(struct ixlv_sc *); void ixlv_request_reset(struct ixlv_sc *); void ixlv_vc_completion(struct ixlv_sc *, enum i40e_virtchnl_ops, i40e_status, u8 *, u16); void ixlv_add_ether_filter(struct ixlv_sc *); void ixlv_add_vlans(struct ixlv_sc *); void ixlv_del_vlans(struct ixlv_sc *); void ixlv_update_stats_counters(struct ixlv_sc *, struct i40e_eth_stats *); void ixlv_update_link_status(struct ixlv_sc *); void ixlv_get_default_rss_key(u32 *, bool); void ixlv_config_rss_key(struct ixlv_sc *); void ixlv_set_rss_hena(struct ixlv_sc *); void ixlv_config_rss_lut(struct ixlv_sc *); #endif /* _IXLV_H_ */ Index: head/sys/dev/netmap/netmap_mem2.c =================================================================== --- head/sys/dev/netmap/netmap_mem2.c (revision 303889) +++ head/sys/dev/netmap/netmap_mem2.c (revision 303890) @@ -1,1638 +1,1638 @@ /* * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #ifdef linux #include "bsd_glue.h" #endif /* linux */ #ifdef __APPLE__ #include "osx_glue.h" #endif /* __APPLE__ */ #ifdef __FreeBSD__ #include /* prerequisite */ __FBSDID("$FreeBSD$"); #include #include #include #include /* vtophys */ #include /* vtophys */ #include /* sockaddrs */ #include #include #include #include #include #include /* bus_dmamap_* */ #endif /* __FreeBSD__ */ #include #include #include "netmap_mem2.h" #define NETMAP_BUF_MAX_NUM 20*4096*2 /* large machine */ #define NETMAP_POOL_MAX_NAMSZ 32 enum { NETMAP_IF_POOL = 0, NETMAP_RING_POOL, NETMAP_BUF_POOL, NETMAP_POOLS_NR }; struct netmap_obj_params { u_int size; u_int num; }; struct netmap_obj_pool { char name[NETMAP_POOL_MAX_NAMSZ]; /* name of the allocator */ /* ---------------------------------------------------*/ /* these are only meaningful if the pool is finalized */ /* (see 'finalized' field in netmap_mem_d) */ u_int objtotal; /* actual total number of objects. */ u_int memtotal; /* actual total memory space */ u_int numclusters; /* actual number of clusters */ u_int objfree; /* number of free objects. 
*/ struct lut_entry *lut; /* virt,phys addresses, objtotal entries */ uint32_t *bitmap; /* one bit per buffer, 1 means free */ uint32_t bitmap_slots; /* number of uint32 entries in bitmap */ /* ---------------------------------------------------*/ /* limits */ u_int objminsize; /* minimum object size */ u_int objmaxsize; /* maximum object size */ u_int nummin; /* minimum number of objects */ u_int nummax; /* maximum number of objects */ /* these are changed only by config */ u_int _objtotal; /* total number of objects */ u_int _objsize; /* object size */ u_int _clustsize; /* cluster size */ u_int _clustentries; /* objects per cluster */ u_int _numclusters; /* number of clusters */ /* requested values */ u_int r_objtotal; u_int r_objsize; }; #define NMA_LOCK_T NM_MTX_T struct netmap_mem_ops { void (*nmd_get_lut)(struct netmap_mem_d *, struct netmap_lut*); int (*nmd_get_info)(struct netmap_mem_d *, u_int *size, u_int *memflags, uint16_t *id); vm_paddr_t (*nmd_ofstophys)(struct netmap_mem_d *, vm_ooffset_t); int (*nmd_config)(struct netmap_mem_d *); int (*nmd_finalize)(struct netmap_mem_d *); void (*nmd_deref)(struct netmap_mem_d *); ssize_t (*nmd_if_offset)(struct netmap_mem_d *, const void *vaddr); void (*nmd_delete)(struct netmap_mem_d *); struct netmap_if * (*nmd_if_new)(struct netmap_adapter *); void (*nmd_if_delete)(struct netmap_adapter *, struct netmap_if *); int (*nmd_rings_create)(struct netmap_adapter *); void (*nmd_rings_delete)(struct netmap_adapter *); }; typedef uint16_t nm_memid_t; struct netmap_mem_d { NMA_LOCK_T nm_mtx; /* protect the allocator */ u_int nm_totalsize; /* shorthand */ u_int flags; #define NETMAP_MEM_FINALIZED 0x1 /* preallocation done */ int lasterr; /* last error for curr config */ int active; /* active users */ int refcount; /* the three allocators */ struct netmap_obj_pool pools[NETMAP_POOLS_NR]; nm_memid_t nm_id; /* allocator identifier */ int nm_grp; /* iommu groupd id */ /* list of all existing allocators, sorted by nm_id */ struct netmap_mem_d *prev, *next; struct netmap_mem_ops *ops; }; #define NMD_DEFCB(t0, name) \ t0 \ netmap_mem_##name(struct netmap_mem_d *nmd) \ { \ return nmd->ops->nmd_##name(nmd); \ } #define NMD_DEFCB1(t0, name, t1) \ t0 \ netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1) \ { \ return nmd->ops->nmd_##name(nmd, a1); \ } #define NMD_DEFCB3(t0, name, t1, t2, t3) \ t0 \ netmap_mem_##name(struct netmap_mem_d *nmd, t1 a1, t2 a2, t3 a3) \ { \ return nmd->ops->nmd_##name(nmd, a1, a2, a3); \ } #define NMD_DEFNACB(t0, name) \ t0 \ netmap_mem_##name(struct netmap_adapter *na) \ { \ return na->nm_mem->ops->nmd_##name(na); \ } #define NMD_DEFNACB1(t0, name, t1) \ t0 \ netmap_mem_##name(struct netmap_adapter *na, t1 a1) \ { \ return na->nm_mem->ops->nmd_##name(na, a1); \ } NMD_DEFCB1(void, get_lut, struct netmap_lut *); NMD_DEFCB3(int, get_info, u_int *, u_int *, uint16_t *); NMD_DEFCB1(vm_paddr_t, ofstophys, vm_ooffset_t); static int netmap_mem_config(struct netmap_mem_d *); NMD_DEFCB(int, config); NMD_DEFCB1(ssize_t, if_offset, const void *); NMD_DEFCB(void, delete); NMD_DEFNACB(struct netmap_if *, if_new); NMD_DEFNACB1(void, if_delete, struct netmap_if *); NMD_DEFNACB(int, rings_create); NMD_DEFNACB(void, rings_delete); static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *); static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *); -static int nm_mem_assign_group(struct netmap_mem_d *, struct device *); +static int nm_mem_assign_group(struct netmap_mem_d *, device_t); #define NMA_LOCK_INIT(n) 
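/*
 * The NMD_DEFCB* macros above generate thin public wrappers that
 * dispatch through the per-allocator ops vector, so callers never
 * test the allocator type.  For example, NMD_DEFCB(int, config)
 * expands to:
 *
 *	int
 *	netmap_mem_config(struct netmap_mem_d *nmd)
 *	{
 *		return nmd->ops->nmd_config(nmd);
 *	}
 *
 * Global and private allocators then differ only in the ops table
 * they install (netmap_mem_global_ops vs. netmap_mem_private_ops).
 */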
NM_MTX_INIT((n)->nm_mtx) #define NMA_LOCK_DESTROY(n) NM_MTX_DESTROY((n)->nm_mtx) #define NMA_LOCK(n) NM_MTX_LOCK((n)->nm_mtx) #define NMA_UNLOCK(n) NM_MTX_UNLOCK((n)->nm_mtx) #ifdef NM_DEBUG_MEM_PUTGET #define NM_DBG_REFC(nmd, func, line) \ printf("%s:%d mem[%d] -> %d\n", func, line, (nmd)->nm_id, (nmd)->refcount); #else #define NM_DBG_REFC(nmd, func, line) #endif #ifdef NM_DEBUG_MEM_PUTGET void __netmap_mem_get(struct netmap_mem_d *nmd, const char *func, int line) #else void netmap_mem_get(struct netmap_mem_d *nmd) #endif { NMA_LOCK(nmd); nmd->refcount++; NM_DBG_REFC(nmd, func, line); NMA_UNLOCK(nmd); } #ifdef NM_DEBUG_MEM_PUTGET void __netmap_mem_put(struct netmap_mem_d *nmd, const char *func, int line) #else void netmap_mem_put(struct netmap_mem_d *nmd) #endif { int last; NMA_LOCK(nmd); last = (--nmd->refcount == 0); NM_DBG_REFC(nmd, func, line); NMA_UNLOCK(nmd); if (last) netmap_mem_delete(nmd); } int netmap_mem_finalize(struct netmap_mem_d *nmd, struct netmap_adapter *na) { if (nm_mem_assign_group(nmd, na->pdev) < 0) { return ENOMEM; } else { nmd->ops->nmd_finalize(nmd); } if (!nmd->lasterr && na->pdev) netmap_mem_map(&nmd->pools[NETMAP_BUF_POOL], na); return nmd->lasterr; } void netmap_mem_deref(struct netmap_mem_d *nmd, struct netmap_adapter *na) { NMA_LOCK(nmd); netmap_mem_unmap(&nmd->pools[NETMAP_BUF_POOL], na); NMA_UNLOCK(nmd); return nmd->ops->nmd_deref(nmd); } /* accessor functions */ static void netmap_mem2_get_lut(struct netmap_mem_d *nmd, struct netmap_lut *lut) { lut->lut = nmd->pools[NETMAP_BUF_POOL].lut; lut->objtotal = nmd->pools[NETMAP_BUF_POOL].objtotal; lut->objsize = nmd->pools[NETMAP_BUF_POOL]._objsize; } struct netmap_obj_params netmap_params[NETMAP_POOLS_NR] = { [NETMAP_IF_POOL] = { .size = 1024, .num = 100, }, [NETMAP_RING_POOL] = { .size = 9*PAGE_SIZE, .num = 200, }, [NETMAP_BUF_POOL] = { .size = 2048, .num = NETMAP_BUF_MAX_NUM, }, }; struct netmap_obj_params netmap_min_priv_params[NETMAP_POOLS_NR] = { [NETMAP_IF_POOL] = { .size = 1024, .num = 1, }, [NETMAP_RING_POOL] = { .size = 5*PAGE_SIZE, .num = 4, }, [NETMAP_BUF_POOL] = { .size = 2048, .num = 4098, }, }; /* * nm_mem is the memory allocator used for all physical interfaces * running in netmap mode. * Virtual (VALE) ports will have each its own allocator. */ extern struct netmap_mem_ops netmap_mem_global_ops; /* forward */ struct netmap_mem_d nm_mem = { /* Our memory allocator. */ .pools = { [NETMAP_IF_POOL] = { .name = "netmap_if", .objminsize = sizeof(struct netmap_if), .objmaxsize = 4096, .nummin = 10, /* don't be stingy */ .nummax = 10000, /* XXX very large */ }, [NETMAP_RING_POOL] = { .name = "netmap_ring", .objminsize = sizeof(struct netmap_ring), .objmaxsize = 32*PAGE_SIZE, .nummin = 2, .nummax = 1024, }, [NETMAP_BUF_POOL] = { .name = "netmap_buf", .objminsize = 64, .objmaxsize = 65536, .nummin = 4, .nummax = 1000000, /* one million! 
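-- a hard upper bound on the buffer pool.
*
* A lifetime note on netmap_mem_get()/netmap_mem_put() defined
* above: they implement plain reference counting under the allocator
* lock, and the final put calls netmap_mem_delete(), which in turn
* goes through the ops vector.  The usual pairing in any holder of
* an allocator pointer is simply:
*
*	netmap_mem_get(nmd);
*	... use nmd ...
*	netmap_mem_put(nmd);	the last put may destroy nmd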
*/ }, }, .nm_id = 1, .nm_grp = -1, .prev = &nm_mem, .next = &nm_mem, .ops = &netmap_mem_global_ops }; struct netmap_mem_d *netmap_last_mem_d = &nm_mem; /* blueprint for the private memory allocators */ extern struct netmap_mem_ops netmap_mem_private_ops; /* forward */ const struct netmap_mem_d nm_blueprint = { .pools = { [NETMAP_IF_POOL] = { .name = "%s_if", .objminsize = sizeof(struct netmap_if), .objmaxsize = 4096, .nummin = 1, .nummax = 100, }, [NETMAP_RING_POOL] = { .name = "%s_ring", .objminsize = sizeof(struct netmap_ring), .objmaxsize = 32*PAGE_SIZE, .nummin = 2, .nummax = 1024, }, [NETMAP_BUF_POOL] = { .name = "%s_buf", .objminsize = 64, .objmaxsize = 65536, .nummin = 4, .nummax = 1000000, /* one million! */ }, }, .flags = NETMAP_MEM_PRIVATE, .ops = &netmap_mem_private_ops }; /* memory allocator related sysctls */ #define STRINGIFY(x) #x #define DECLARE_SYSCTLS(id, name) \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_size, \ CTLFLAG_RW, &netmap_params[id].size, 0, "Requested size of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_size, \ CTLFLAG_RD, &nm_mem.pools[id]._objsize, 0, "Current size of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_num, \ CTLFLAG_RW, &netmap_params[id].num, 0, "Requested number of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, name##_curr_num, \ CTLFLAG_RD, &nm_mem.pools[id].objtotal, 0, "Current number of netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_size, \ CTLFLAG_RW, &netmap_min_priv_params[id].size, 0, \ "Default size of private netmap " STRINGIFY(name) "s"); \ SYSCTL_INT(_dev_netmap, OID_AUTO, priv_##name##_num, \ CTLFLAG_RW, &netmap_min_priv_params[id].num, 0, \ "Default number of private netmap " STRINGIFY(name) "s") SYSCTL_DECL(_dev_netmap); DECLARE_SYSCTLS(NETMAP_IF_POOL, if); DECLARE_SYSCTLS(NETMAP_RING_POOL, ring); DECLARE_SYSCTLS(NETMAP_BUF_POOL, buf); static int nm_mem_assign_id(struct netmap_mem_d *nmd) { nm_memid_t id; struct netmap_mem_d *scan = netmap_last_mem_d; int error = ENOMEM; NMA_LOCK(&nm_mem); do { /* we rely on unsigned wrap around */ id = scan->nm_id + 1; if (id == 0) /* reserve 0 as error value */ id = 1; scan = scan->next; if (id != scan->nm_id) { nmd->nm_id = id; nmd->prev = scan->prev; nmd->next = scan; scan->prev->next = nmd; scan->prev = nmd; netmap_last_mem_d = nmd; error = 0; break; } } while (scan != netmap_last_mem_d); NMA_UNLOCK(&nm_mem); return error; } static void nm_mem_release_id(struct netmap_mem_d *nmd) { NMA_LOCK(&nm_mem); nmd->prev->next = nmd->next; nmd->next->prev = nmd->prev; if (netmap_last_mem_d == nmd) netmap_last_mem_d = nmd->prev; nmd->prev = nmd->next = NULL; NMA_UNLOCK(&nm_mem); } static int -nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev) +nm_mem_assign_group(struct netmap_mem_d *nmd, device_t dev) { int err = 0, id; id = nm_iommu_group_id(dev); if (netmap_verbose) D("iommu_group %d", id); NMA_LOCK(nmd); if (nmd->nm_grp < 0) nmd->nm_grp = id; if (nmd->nm_grp != id) nmd->lasterr = err = ENOMEM; NMA_UNLOCK(nmd); return err; } /* * First, find the allocator that contains the requested offset, * then locate the cluster through a lookup table. 
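* A worked example with hypothetical pool sizes: if the IF pool
* spans 100 KB and the RING pool 400 KB, then offset 520 KB falls in
* the BUF pool at relative offset 20 KB; with 2 KB buffers that is
* object 10 with remainder 0, so the function returns
*
*	vtophys(buf_pool.lut[10].vaddr) + 0
*
* i.e. the cluster lookup plus the remainder within the object.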
*/ static vm_paddr_t netmap_mem2_ofstophys(struct netmap_mem_d* nmd, vm_ooffset_t offset) { int i; vm_ooffset_t o = offset; vm_paddr_t pa; struct netmap_obj_pool *p; NMA_LOCK(nmd); p = nmd->pools; for (i = 0; i < NETMAP_POOLS_NR; offset -= p[i].memtotal, i++) { if (offset >= p[i].memtotal) continue; // now lookup the cluster's address pa = vtophys(p[i].lut[offset / p[i]._objsize].vaddr) + offset % p[i]._objsize; NMA_UNLOCK(nmd); return pa; } /* this is only in case of errors */ D("invalid ofs 0x%x out of 0x%x 0x%x 0x%x", (u_int)o, p[NETMAP_IF_POOL].memtotal, p[NETMAP_IF_POOL].memtotal + p[NETMAP_RING_POOL].memtotal, p[NETMAP_IF_POOL].memtotal + p[NETMAP_RING_POOL].memtotal + p[NETMAP_BUF_POOL].memtotal); NMA_UNLOCK(nmd); return 0; // XXX bad address } static int netmap_mem2_get_info(struct netmap_mem_d* nmd, u_int* size, u_int *memflags, nm_memid_t *id) { int error = 0; NMA_LOCK(nmd); error = netmap_mem_config(nmd); if (error) goto out; if (size) { if (nmd->flags & NETMAP_MEM_FINALIZED) { *size = nmd->nm_totalsize; } else { int i; *size = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { struct netmap_obj_pool *p = nmd->pools + i; *size += (p->_numclusters * p->_clustsize); } } } if (memflags) *memflags = nmd->flags; if (id) *id = nmd->nm_id; out: NMA_UNLOCK(nmd); return error; } /* * we store objects by kernel address, need to find the offset * within the pool to export the value to userspace. * Algorithm: scan until we find the cluster, then add the * actual offset in the cluster */ static ssize_t netmap_obj_offset(struct netmap_obj_pool *p, const void *vaddr) { int i, k = p->_clustentries, n = p->objtotal; ssize_t ofs = 0; for (i = 0; i < n; i += k, ofs += p->_clustsize) { const char *base = p->lut[i].vaddr; ssize_t relofs = (const char *) vaddr - base; if (relofs < 0 || relofs >= p->_clustsize) continue; ofs = ofs + relofs; ND("%s: return offset %d (cluster %d) for pointer %p", p->name, ofs, i, vaddr); return ofs; } D("address %p is not contained inside any cluster (%s)", vaddr, p->name); return 0; /* An error occurred */ } /* Helper functions which convert virtual addresses to offsets */ #define netmap_if_offset(n, v) \ netmap_obj_offset(&(n)->pools[NETMAP_IF_POOL], (v)) #define netmap_ring_offset(n, v) \ ((n)->pools[NETMAP_IF_POOL].memtotal + \ netmap_obj_offset(&(n)->pools[NETMAP_RING_POOL], (v))) #define netmap_buf_offset(n, v) \ ((n)->pools[NETMAP_IF_POOL].memtotal + \ (n)->pools[NETMAP_RING_POOL].memtotal + \ netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v))) static ssize_t netmap_mem2_if_offset(struct netmap_mem_d *nmd, const void *addr) { ssize_t v; NMA_LOCK(nmd); v = netmap_if_offset(nmd, addr); NMA_UNLOCK(nmd); return v; } /* * report the index, and use start position as a hint, * otherwise buffer allocation becomes terribly expensive. 
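* Within a 32-bit bitmap word the allocator below locates the lowest
* set (free) bit with a shift-and-test loop.  An equivalent
* formulation using ffs() from sys/libkern.h, shown only as a sketch
* of what that loop computes:
*
*	uint32_t cur = p->bitmap[i];
*	if (cur != 0) {
*		j = ffs(cur) - 1;
*		p->bitmap[i] &= ~(1U << j);
*		p->objfree--;
*		vaddr = p->lut[i * 32 + j].vaddr;
*	}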
*/ static void * netmap_obj_malloc(struct netmap_obj_pool *p, u_int len, uint32_t *start, uint32_t *index) { uint32_t i = 0; /* index in the bitmap */ uint32_t mask, j; /* slot counter */ void *vaddr = NULL; if (len > p->_objsize) { D("%s request size %d too large", p->name, len); // XXX cannot reduce the size return NULL; } if (p->objfree == 0) { D("no more %s objects", p->name); return NULL; } if (start) i = *start; /* termination is guaranteed by p->free, but better check bounds on i */ while (vaddr == NULL && i < p->bitmap_slots) { uint32_t cur = p->bitmap[i]; if (cur == 0) { /* bitmask is fully used */ i++; continue; } /* locate a slot */ for (j = 0, mask = 1; (cur & mask) == 0; j++, mask <<= 1) ; p->bitmap[i] &= ~mask; /* mark object as in use */ p->objfree--; vaddr = p->lut[i * 32 + j].vaddr; if (index) *index = i * 32 + j; } ND("%s allocator: allocated object @ [%d][%d]: vaddr %p", i, j, vaddr); if (start) *start = i; return vaddr; } /* * free by index, not by address. * XXX should we also cleanup the content ? */ static int netmap_obj_free(struct netmap_obj_pool *p, uint32_t j) { uint32_t *ptr, mask; if (j >= p->objtotal) { D("invalid index %u, max %u", j, p->objtotal); return 1; } ptr = &p->bitmap[j / 32]; mask = (1 << (j % 32)); if (*ptr & mask) { D("ouch, double free on buffer %d", j); return 1; } else { *ptr |= mask; p->objfree++; return 0; } } /* * free by address. This is slow but is only used for a few * objects (rings, nifp) */ static void netmap_obj_free_va(struct netmap_obj_pool *p, void *vaddr) { u_int i, j, n = p->numclusters; for (i = 0, j = 0; i < n; i++, j += p->_clustentries) { void *base = p->lut[i * p->_clustentries].vaddr; ssize_t relofs = (ssize_t) vaddr - (ssize_t) base; /* Given address, is out of the scope of the current cluster.*/ if (vaddr < base || relofs >= p->_clustsize) continue; j = j + relofs / p->_objsize; /* KASSERT(j != 0, ("Cannot free object 0")); */ netmap_obj_free(p, j); return; } D("address %p is not contained inside any cluster (%s)", vaddr, p->name); } #define netmap_mem_bufsize(n) \ ((n)->pools[NETMAP_BUF_POOL]._objsize) #define netmap_if_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_IF_POOL], len, NULL, NULL) #define netmap_if_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_IF_POOL], (v)) #define netmap_ring_malloc(n, len) netmap_obj_malloc(&(n)->pools[NETMAP_RING_POOL], len, NULL, NULL) #define netmap_ring_free(n, v) netmap_obj_free_va(&(n)->pools[NETMAP_RING_POOL], (v)) #define netmap_buf_malloc(n, _pos, _index) \ netmap_obj_malloc(&(n)->pools[NETMAP_BUF_POOL], netmap_mem_bufsize(n), _pos, _index) #if 0 // XXX unused /* Return the index associated to the given packet buffer */ #define netmap_buf_index(n, v) \ (netmap_obj_offset(&(n)->pools[NETMAP_BUF_POOL], (v)) / NETMAP_BDG_BUF_SIZE(n)) #endif /* * allocate extra buffers in a linked list. * returns the actual number. 
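* The list is threaded through the buffers themselves: *head holds
* the index of the first extra buffer and the first 32 bits of each
* buffer hold the index of the next one, so no extra metadata is
* needed.  Indices 0 and 1 are reserved, which is why a walk (as in
* netmap_extra_free() below) stops when the index drops below 2:
*
*	for (i = *head; i >= 2 && i < p->objtotal;
*	    i = *(uint32_t *)lut[i].vaddr)
*		... visit extra buffer i ...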
*/ uint32_t netmap_extra_alloc(struct netmap_adapter *na, uint32_t *head, uint32_t n) { struct netmap_mem_d *nmd = na->nm_mem; uint32_t i, pos = 0; /* opaque, scan position in the bitmap */ NMA_LOCK(nmd); *head = 0; /* default, 'null' index ie empty list */ for (i = 0 ; i < n; i++) { uint32_t cur = *head; /* save current head */ uint32_t *p = netmap_buf_malloc(nmd, &pos, head); if (p == NULL) { D("no more buffers after %d of %d", i, n); *head = cur; /* restore */ break; } RD(5, "allocate buffer %d -> %d", *head, cur); *p = cur; /* link to previous head */ } NMA_UNLOCK(nmd); return i; } static void netmap_extra_free(struct netmap_adapter *na, uint32_t head) { struct lut_entry *lut = na->na_lut.lut; struct netmap_mem_d *nmd = na->nm_mem; struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; uint32_t i, cur, *buf; D("freeing the extra list"); for (i = 0; head >=2 && head < p->objtotal; i++) { cur = head; buf = lut[head].vaddr; head = *buf; *buf = 0; if (netmap_obj_free(p, cur)) break; } if (head != 0) D("breaking with head %d", head); D("freed %d buffers", i); } /* Return nonzero on error */ static int netmap_new_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) { struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; u_int i = 0; /* slot counter */ uint32_t pos = 0; /* slot in p->bitmap */ uint32_t index = 0; /* buffer index */ for (i = 0; i < n; i++) { void *vaddr = netmap_buf_malloc(nmd, &pos, &index); if (vaddr == NULL) { D("no more buffers after %d of %d", i, n); goto cleanup; } slot[i].buf_idx = index; slot[i].len = p->_objsize; slot[i].flags = 0; } ND("allocated %d buffers, %d available, first at %d", n, p->objfree, pos); return (0); cleanup: while (i > 0) { i--; netmap_obj_free(p, slot[i].buf_idx); } bzero(slot, n * sizeof(slot[0])); return (ENOMEM); } static void netmap_mem_set_ring(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n, uint32_t index) { struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; u_int i; for (i = 0; i < n; i++) { slot[i].buf_idx = index; slot[i].len = p->_objsize; slot[i].flags = 0; } } static void netmap_free_buf(struct netmap_mem_d *nmd, uint32_t i) { struct netmap_obj_pool *p = &nmd->pools[NETMAP_BUF_POOL]; if (i < 2 || i >= p->objtotal) { D("Cannot free buf#%d: should be in [2, %d[", i, p->objtotal); return; } netmap_obj_free(p, i); } static void netmap_free_bufs(struct netmap_mem_d *nmd, struct netmap_slot *slot, u_int n) { u_int i; for (i = 0; i < n; i++) { if (slot[i].buf_idx > 2) netmap_free_buf(nmd, slot[i].buf_idx); } } static void netmap_reset_obj_allocator(struct netmap_obj_pool *p) { if (p == NULL) return; if (p->bitmap) free(p->bitmap, M_NETMAP); p->bitmap = NULL; if (p->lut) { u_int i; size_t sz = p->_clustsize; /* * Free each cluster allocated in * netmap_finalize_obj_allocator(). The cluster start * addresses are stored at multiples of p->_clusterentries * in the lut. */ for (i = 0; i < p->objtotal; i += p->_clustentries) { if (p->lut[i].vaddr) contigfree(p->lut[i].vaddr, sz, M_NETMAP); } bzero(p->lut, sizeof(struct lut_entry) * p->objtotal); #ifdef linux vfree(p->lut); #else free(p->lut, M_NETMAP); #endif } p->lut = NULL; p->objtotal = 0; p->memtotal = 0; p->numclusters = 0; p->objfree = 0; } /* * Free all resources related to an allocator. */ static void netmap_destroy_obj_allocator(struct netmap_obj_pool *p) { if (p == NULL) return; netmap_reset_obj_allocator(p); } /* * We receive a request for objtotal objects, of size objsize each. 
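* (Concretely: with PAGE_SIZE 4096 the sizing loop below settles on
* clustentries = 2 for 2048-byte objects -- a single 4 KB cluster --
* and clustentries = 4 for 1024-byte objects, i.e. always the
* smallest object count whose total lands exactly on a page
* boundary.)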
* Internally we may round up both numbers, as we allocate objects * in small clusters multiple of the page size. * We need to keep track of objtotal and clustentries, * as they are needed when freeing memory. * * XXX note -- userspace needs the buffers to be contiguous, * so we cannot afford gaps at the end of a cluster. */ /* call with NMA_LOCK held */ static int netmap_config_obj_allocator(struct netmap_obj_pool *p, u_int objtotal, u_int objsize) { int i; u_int clustsize; /* the cluster size, multiple of page size */ u_int clustentries; /* how many objects per entry */ /* we store the current request, so we can * detect configuration changes later */ p->r_objtotal = objtotal; p->r_objsize = objsize; #define MAX_CLUSTSIZE (1<<22) // 4 MB #define LINE_ROUND NM_CACHE_ALIGN // 64 if (objsize >= MAX_CLUSTSIZE) { /* we could do it but there is no point */ D("unsupported allocation for %d bytes", objsize); return EINVAL; } /* make sure objsize is a multiple of LINE_ROUND */ i = (objsize & (LINE_ROUND - 1)); if (i) { D("XXX aligning object by %d bytes", LINE_ROUND - i); objsize += LINE_ROUND - i; } if (objsize < p->objminsize || objsize > p->objmaxsize) { D("requested objsize %d out of range [%d, %d]", objsize, p->objminsize, p->objmaxsize); return EINVAL; } if (objtotal < p->nummin || objtotal > p->nummax) { D("requested objtotal %d out of range [%d, %d]", objtotal, p->nummin, p->nummax); return EINVAL; } /* * Compute number of objects using a brute-force approach: * given a max cluster size, * we try to fill it with objects keeping track of the * wasted space to the next page boundary. */ for (clustentries = 0, i = 1;; i++) { u_int delta, used = i * objsize; if (used > MAX_CLUSTSIZE) break; delta = used % PAGE_SIZE; if (delta == 0) { // exact solution clustentries = i; break; } } /* exact solution not found */ if (clustentries == 0) { D("unsupported allocation for %d bytes", objsize); return EINVAL; } /* compute clustsize */ clustsize = clustentries * objsize; if (netmap_verbose) D("objsize %d clustsize %d objects %d", objsize, clustsize, clustentries); /* * The number of clusters is n = ceil(objtotal/clustentries) * objtotal' = n * clustentries */ p->_clustentries = clustentries; p->_clustsize = clustsize; p->_numclusters = (objtotal + clustentries - 1) / clustentries; /* actual values (may be larger than requested) */ p->_objsize = objsize; p->_objtotal = p->_numclusters * clustentries; return 0; } /* call with NMA_LOCK held */ static int netmap_finalize_obj_allocator(struct netmap_obj_pool *p) { int i; /* must be signed */ size_t n; /* optimistically assume we have enough memory */ p->numclusters = p->_numclusters; p->objtotal = p->_objtotal; n = sizeof(struct lut_entry) * p->objtotal; #ifdef linux p->lut = vmalloc(n); #else p->lut = malloc(n, M_NETMAP, M_NOWAIT | M_ZERO); #endif if (p->lut == NULL) { D("Unable to create lookup table (%d bytes) for '%s'", (int)n, p->name); goto clean; } /* Allocate the bitmap */ n = (p->objtotal + 31) / 32; p->bitmap = malloc(sizeof(uint32_t) * n, M_NETMAP, M_NOWAIT | M_ZERO); if (p->bitmap == NULL) { D("Unable to create bitmap (%d entries) for allocator '%s'", (int)n, p->name); goto clean; } p->bitmap_slots = n; /* * Allocate clusters, init pointers and bitmap */ n = p->_clustsize; for (i = 0; i < (int)p->objtotal;) { int lim = i + p->_clustentries; char *clust; clust = contigmalloc(n, M_NETMAP, M_NOWAIT | M_ZERO, (size_t)0, -1UL, PAGE_SIZE, 0); if (clust == NULL) { /* * If we get here, there is a severe memory shortage, * so halve the allocated memory to 
reclaim some. */ D("Unable to create cluster at %d for '%s' allocator", i, p->name); if (i < 2) /* nothing to halve */ goto out; lim = i / 2; for (i--; i >= lim; i--) { p->bitmap[ (i>>5) ] &= ~( 1 << (i & 31) ); if (i % p->_clustentries == 0 && p->lut[i].vaddr) contigfree(p->lut[i].vaddr, n, M_NETMAP); p->lut[i].vaddr = NULL; } out: p->objtotal = i; /* we may have stopped in the middle of a cluster */ p->numclusters = (i + p->_clustentries - 1) / p->_clustentries; break; } /* * Set bitmap and lut state for all buffers in the current * cluster. * * [i, lim) is the set of buffer indexes that cover the * current cluster. * * 'clust' is really the address of the current buffer in * the current cluster as we index through it with a stride * of p->_objsize. */ for (; i < lim; i++, clust += p->_objsize) { p->bitmap[ (i>>5) ] |= ( 1 << (i & 31) ); p->lut[i].vaddr = clust; p->lut[i].paddr = vtophys(clust); } } p->objfree = p->objtotal; p->memtotal = p->numclusters * p->_clustsize; if (p->objfree == 0) goto clean; if (netmap_verbose) D("Pre-allocated %d clusters (%d/%dKB) for '%s'", p->numclusters, p->_clustsize >> 10, p->memtotal >> 10, p->name); return 0; clean: netmap_reset_obj_allocator(p); return ENOMEM; } /* call with lock held */ static int netmap_memory_config_changed(struct netmap_mem_d *nmd) { int i; for (i = 0; i < NETMAP_POOLS_NR; i++) { if (nmd->pools[i].r_objsize != netmap_params[i].size || nmd->pools[i].r_objtotal != netmap_params[i].num) return 1; } return 0; } static void netmap_mem_reset_all(struct netmap_mem_d *nmd) { int i; if (netmap_verbose) D("resetting %p", nmd); for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_reset_obj_allocator(&nmd->pools[i]); } nmd->flags &= ~NETMAP_MEM_FINALIZED; } static int netmap_mem_unmap(struct netmap_obj_pool *p, struct netmap_adapter *na) { int i, lim = p->_objtotal; if (na->pdev == NULL) return 0; #ifdef __FreeBSD__ (void)i; (void)lim; D("unsupported on FreeBSD"); #else /* linux */ for (i = 2; i < lim; i++) { netmap_unload_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr); } #endif /* linux */ return 0; } static int netmap_mem_map(struct netmap_obj_pool *p, struct netmap_adapter *na) { #ifdef __FreeBSD__ D("unsupported on FreeBSD"); #else /* linux */ int i, lim = p->_objtotal; if (na->pdev == NULL) return 0; for (i = 2; i < lim; i++) { netmap_load_map(na, (bus_dma_tag_t) na->pdev, &p->lut[i].paddr, p->lut[i].vaddr); } #endif /* linux */ return 0; } static int netmap_mem_finalize_all(struct netmap_mem_d *nmd) { int i; if (nmd->flags & NETMAP_MEM_FINALIZED) return 0; nmd->lasterr = 0; nmd->nm_totalsize = 0; for (i = 0; i < NETMAP_POOLS_NR; i++) { nmd->lasterr = netmap_finalize_obj_allocator(&nmd->pools[i]); if (nmd->lasterr) goto error; nmd->nm_totalsize += nmd->pools[i].memtotal; } /* buffers 0 and 1 are reserved */ nmd->pools[NETMAP_BUF_POOL].objfree -= 2; nmd->pools[NETMAP_BUF_POOL].bitmap[0] = ~3; nmd->flags |= NETMAP_MEM_FINALIZED; if (netmap_verbose) D("interfaces %d KB, rings %d KB, buffers %d MB", nmd->pools[NETMAP_IF_POOL].memtotal >> 10, nmd->pools[NETMAP_RING_POOL].memtotal >> 10, nmd->pools[NETMAP_BUF_POOL].memtotal >> 20); if (netmap_verbose) D("Free buffers: %d", nmd->pools[NETMAP_BUF_POOL].objfree); return 0; error: netmap_mem_reset_all(nmd); return nmd->lasterr; } static void netmap_mem_private_delete(struct netmap_mem_d *nmd) { if (nmd == NULL) return; if (netmap_verbose) D("deleting %p", nmd); if (nmd->active > 0) D("bug: deleting mem allocator with active=%d!", nmd->active); nm_mem_release_id(nmd); if (netmap_verbose) D("done 
deleting %p", nmd); NMA_LOCK_DESTROY(nmd); free(nmd, M_DEVBUF); } static int netmap_mem_private_config(struct netmap_mem_d *nmd) { /* nothing to do, we are configured on creation * and configuration never changes thereafter */ return 0; } static int netmap_mem_private_finalize(struct netmap_mem_d *nmd) { int err; NMA_LOCK(nmd); nmd->active++; err = netmap_mem_finalize_all(nmd); NMA_UNLOCK(nmd); return err; } static void netmap_mem_private_deref(struct netmap_mem_d *nmd) { NMA_LOCK(nmd); if (--nmd->active <= 0) netmap_mem_reset_all(nmd); NMA_UNLOCK(nmd); } /* * allocator for private memory */ struct netmap_mem_d * netmap_mem_private_new(const char *name, u_int txr, u_int txd, u_int rxr, u_int rxd, u_int extra_bufs, u_int npipes, int *perr) { struct netmap_mem_d *d = NULL; struct netmap_obj_params p[NETMAP_POOLS_NR]; int i, err; u_int v, maxd; d = malloc(sizeof(struct netmap_mem_d), M_DEVBUF, M_NOWAIT | M_ZERO); if (d == NULL) { err = ENOMEM; goto error; } *d = nm_blueprint; err = nm_mem_assign_id(d); if (err) goto error; /* account for the fake host rings */ txr++; rxr++; /* copy the min values */ for (i = 0; i < NETMAP_POOLS_NR; i++) { p[i] = netmap_min_priv_params[i]; } /* possibly increase them to fit user request */ v = sizeof(struct netmap_if) + sizeof(ssize_t) * (txr + rxr); if (p[NETMAP_IF_POOL].size < v) p[NETMAP_IF_POOL].size = v; v = 2 + 4 * npipes; if (p[NETMAP_IF_POOL].num < v) p[NETMAP_IF_POOL].num = v; maxd = (txd > rxd) ? txd : rxd; v = sizeof(struct netmap_ring) + sizeof(struct netmap_slot) * maxd; if (p[NETMAP_RING_POOL].size < v) p[NETMAP_RING_POOL].size = v; /* each pipe endpoint needs two tx rings (1 normal + 1 host, fake) * and two rx rings (again, 1 normal and 1 fake host) */ v = txr + rxr + 8 * npipes; if (p[NETMAP_RING_POOL].num < v) p[NETMAP_RING_POOL].num = v; /* for each pipe we only need the buffers for the 4 "real" rings. * On the other end, the pipe ring dimension may be different from * the parent port ring dimension. 
As a compromise, we allocate twice the * space actually needed if the pipe rings were the same size as the parent rings */ v = (4 * npipes + rxr) * rxd + (4 * npipes + txr) * txd + 2 + extra_bufs; /* the +2 is for the tx and rx fake buffers (indices 0 and 1) */ if (p[NETMAP_BUF_POOL].num < v) p[NETMAP_BUF_POOL].num = v; if (netmap_verbose) D("req if %d*%d ring %d*%d buf %d*%d", p[NETMAP_IF_POOL].num, p[NETMAP_IF_POOL].size, p[NETMAP_RING_POOL].num, p[NETMAP_RING_POOL].size, p[NETMAP_BUF_POOL].num, p[NETMAP_BUF_POOL].size); for (i = 0; i < NETMAP_POOLS_NR; i++) { snprintf(d->pools[i].name, NETMAP_POOL_MAX_NAMSZ, nm_blueprint.pools[i].name, name); err = netmap_config_obj_allocator(&d->pools[i], p[i].num, p[i].size); if (err) goto error; } d->flags &= ~NETMAP_MEM_FINALIZED; NMA_LOCK_INIT(d); return d; error: netmap_mem_private_delete(d); if (perr) *perr = err; return NULL; } /* call with lock held */ static int netmap_mem_global_config(struct netmap_mem_d *nmd) { int i; if (nmd->active) /* already in use, we cannot change the configuration */ goto out; if (!netmap_memory_config_changed(nmd)) goto out; ND("reconfiguring"); if (nmd->flags & NETMAP_MEM_FINALIZED) { /* reset previous allocation */ for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_reset_obj_allocator(&nmd->pools[i]); } nmd->flags &= ~NETMAP_MEM_FINALIZED; } for (i = 0; i < NETMAP_POOLS_NR; i++) { nmd->lasterr = netmap_config_obj_allocator(&nmd->pools[i], netmap_params[i].num, netmap_params[i].size); if (nmd->lasterr) goto out; } out: return nmd->lasterr; } static int netmap_mem_global_finalize(struct netmap_mem_d *nmd) { int err; /* update configuration if changed */ if (netmap_mem_global_config(nmd)) goto out; nmd->active++; if (nmd->flags & NETMAP_MEM_FINALIZED) { /* may happen if config is not changed */ ND("nothing to do"); goto out; } if (netmap_mem_finalize_all(nmd)) goto out; nmd->lasterr = 0; out: if (nmd->lasterr) nmd->active--; err = nmd->lasterr; return err; } static void netmap_mem_global_delete(struct netmap_mem_d *nmd) { int i; for (i = 0; i < NETMAP_POOLS_NR; i++) { netmap_destroy_obj_allocator(&nm_mem.pools[i]); } NMA_LOCK_DESTROY(&nm_mem); } int netmap_mem_init(void) { NMA_LOCK_INIT(&nm_mem); netmap_mem_get(&nm_mem); return (0); } void netmap_mem_fini(void) { netmap_mem_put(&nm_mem); } static void netmap_free_rings(struct netmap_adapter *na) { enum txrx t; for_rx_tx(t) { u_int i; for (i = 0; i < netmap_real_rings(na, t); i++) { struct netmap_kring *kring = &NMR(na, t)[i]; struct netmap_ring *ring = kring->ring; if (ring == NULL) continue; netmap_free_bufs(na->nm_mem, ring->slot, kring->nkr_num_slots); netmap_ring_free(na->nm_mem, ring); kring->ring = NULL; } } } /* call with NMA_LOCK held * * * Allocate netmap rings and buffers for this card * The rings are contiguous, but have variable size. * The kring array must follow the layout described * in netmap_krings_create(). 
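* Each ring object is a struct netmap_ring header followed in the
* same allocation by its slot array, which is why the function below
* sizes it as
*
*	len = sizeof(struct netmap_ring) +
*	    ndesc * sizeof(struct netmap_slot);
*
* and why the RING pool's object size must accommodate the largest
* ring requested.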
*/ static int netmap_mem2_rings_create(struct netmap_adapter *na) { enum txrx t; NMA_LOCK(na->nm_mem); for_rx_tx(t) { u_int i; for (i = 0; i <= nma_get_nrings(na, t); i++) { struct netmap_kring *kring = &NMR(na, t)[i]; struct netmap_ring *ring = kring->ring; u_int len, ndesc; if (ring) { ND("%s already created", kring->name); continue; /* already created by somebody else */ } ndesc = kring->nkr_num_slots; len = sizeof(struct netmap_ring) + ndesc * sizeof(struct netmap_slot); ring = netmap_ring_malloc(na->nm_mem, len); if (ring == NULL) { D("Cannot allocate %s_ring", nm_txrx2str(t)); goto cleanup; } ND("txring at %p", ring); kring->ring = ring; *(uint32_t *)(uintptr_t)&ring->num_slots = ndesc; *(int64_t *)(uintptr_t)&ring->buf_ofs = (na->nm_mem->pools[NETMAP_IF_POOL].memtotal + na->nm_mem->pools[NETMAP_RING_POOL].memtotal) - netmap_ring_offset(na->nm_mem, ring); /* copy values from kring */ ring->head = kring->rhead; ring->cur = kring->rcur; ring->tail = kring->rtail; *(uint16_t *)(uintptr_t)&ring->nr_buf_size = netmap_mem_bufsize(na->nm_mem); ND("%s h %d c %d t %d", kring->name, ring->head, ring->cur, ring->tail); ND("initializing slots for %s_ring", nm_txrx2str(txrx)); if (i != nma_get_nrings(na, t) || (na->na_flags & NAF_HOST_RINGS)) { /* this is a real ring */ if (netmap_new_bufs(na->nm_mem, ring->slot, ndesc)) { D("Cannot allocate buffers for %s_ring", nm_txrx2str(t)); goto cleanup; } } else { /* this is a fake ring, set all indices to 0 */ netmap_mem_set_ring(na->nm_mem, ring->slot, ndesc, 0); } /* ring info */ *(uint16_t *)(uintptr_t)&ring->ringid = kring->ring_id; *(uint16_t *)(uintptr_t)&ring->dir = kring->tx; } } NMA_UNLOCK(na->nm_mem); return 0; cleanup: netmap_free_rings(na); NMA_UNLOCK(na->nm_mem); return ENOMEM; } static void netmap_mem2_rings_delete(struct netmap_adapter *na) { /* last instance, release bufs and rings */ NMA_LOCK(na->nm_mem); netmap_free_rings(na); NMA_UNLOCK(na->nm_mem); } /* call with NMA_LOCK held */ /* * Allocate the per-fd structure netmap_if. * * We assume that the configuration stored in na * (number of tx/rx rings and descs) does not change while * the interface is in netmap mode. */ static struct netmap_if * netmap_mem2_if_new(struct netmap_adapter *na) { struct netmap_if *nifp; ssize_t base; /* handy for relative offsets between rings and nifp */ u_int i, len, n[NR_TXRX], ntot; enum txrx t; ntot = 0; for_rx_tx(t) { /* account for the (eventually fake) host rings */ n[t] = nma_get_nrings(na, t) + 1; ntot += n[t]; } /* * the descriptor is followed inline by an array of offsets * to the tx and rx rings in the shared memory region. */ NMA_LOCK(na->nm_mem); len = sizeof(struct netmap_if) + (ntot * sizeof(ssize_t)); nifp = netmap_if_malloc(na->nm_mem, len); if (nifp == NULL) { NMA_UNLOCK(na->nm_mem); return NULL; } /* initialize base fields -- override const */ *(u_int *)(uintptr_t)&nifp->ni_tx_rings = na->num_tx_rings; *(u_int *)(uintptr_t)&nifp->ni_rx_rings = na->num_rx_rings; strncpy(nifp->ni_name, na->name, (size_t)IFNAMSIZ); /* * fill the slots for the rx and tx rings. They contain the offset * between the ring and nifp, so the information is usable in * userspace to reach the ring from the nifp. 
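* Userspace then recovers a ring with pointer arithmetic only, no
* kernel addresses involved -- this is what the NETMAP_TXRING() and
* NETMAP_RXRING() macros in net/netmap_user.h reduce to:
*
*	struct netmap_ring *ring = (struct netmap_ring *)
*	    ((char *)nifp + nifp->ring_ofs[i]);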
*/ base = netmap_if_offset(na->nm_mem, nifp); for (i = 0; i < n[NR_TX]; i++) { *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i] = netmap_ring_offset(na->nm_mem, na->tx_rings[i].ring) - base; } for (i = 0; i < n[NR_RX]; i++) { *(ssize_t *)(uintptr_t)&nifp->ring_ofs[i+n[NR_TX]] = netmap_ring_offset(na->nm_mem, na->rx_rings[i].ring) - base; } NMA_UNLOCK(na->nm_mem); return (nifp); } static void netmap_mem2_if_delete(struct netmap_adapter *na, struct netmap_if *nifp) { if (nifp == NULL) /* nothing to do */ return; NMA_LOCK(na->nm_mem); if (nifp->ni_bufs_head) netmap_extra_free(na, nifp->ni_bufs_head); netmap_if_free(na->nm_mem, nifp); NMA_UNLOCK(na->nm_mem); } static void netmap_mem_global_deref(struct netmap_mem_d *nmd) { nmd->active--; if (!nmd->active) nmd->nm_grp = -1; if (netmap_verbose) D("active = %d", nmd->active); } struct netmap_mem_ops netmap_mem_global_ops = { .nmd_get_lut = netmap_mem2_get_lut, .nmd_get_info = netmap_mem2_get_info, .nmd_ofstophys = netmap_mem2_ofstophys, .nmd_config = netmap_mem_global_config, .nmd_finalize = netmap_mem_global_finalize, .nmd_deref = netmap_mem_global_deref, .nmd_delete = netmap_mem_global_delete, .nmd_if_offset = netmap_mem2_if_offset, .nmd_if_new = netmap_mem2_if_new, .nmd_if_delete = netmap_mem2_if_delete, .nmd_rings_create = netmap_mem2_rings_create, .nmd_rings_delete = netmap_mem2_rings_delete }; struct netmap_mem_ops netmap_mem_private_ops = { .nmd_get_lut = netmap_mem2_get_lut, .nmd_get_info = netmap_mem2_get_info, .nmd_ofstophys = netmap_mem2_ofstophys, .nmd_config = netmap_mem_private_config, .nmd_finalize = netmap_mem_private_finalize, .nmd_deref = netmap_mem_private_deref, .nmd_if_offset = netmap_mem2_if_offset, .nmd_delete = netmap_mem_private_delete, .nmd_if_new = netmap_mem2_if_new, .nmd_if_delete = netmap_mem2_if_delete, .nmd_rings_create = netmap_mem2_rings_create, .nmd_rings_delete = netmap_mem2_rings_delete }; Index: head/sys/dev/pci/pcivar.h =================================================================== --- head/sys/dev/pci/pcivar.h (revision 303889) +++ head/sys/dev/pci/pcivar.h (revision 303890) @@ -1,634 +1,634 @@ /*- * Copyright (c) 1997, Stefan Esser * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ * */ #ifndef _PCIVAR_H_ #define _PCIVAR_H_ #include /* some PCI bus constants */ #define PCI_MAXMAPS_0 6 /* max. no. 
of memory/port maps */ #define PCI_MAXMAPS_1 2 /* max. no. of maps for PCI to PCI bridge */ #define PCI_MAXMAPS_2 1 /* max. no. of maps for CardBus bridge */ typedef uint64_t pci_addr_t; /* Config registers for PCI-PCI and PCI-Cardbus bridges. */ struct pcicfg_bridge { uint8_t br_seclat; uint8_t br_subbus; uint8_t br_secbus; uint8_t br_pribus; uint16_t br_control; }; /* Interesting values for PCI power management */ struct pcicfg_pp { uint16_t pp_cap; /* PCI power management capabilities */ uint8_t pp_status; /* conf. space addr. of PM control/status reg */ uint8_t pp_bse; /* conf. space addr. of PM BSE reg */ uint8_t pp_data; /* conf. space addr. of PM data reg */ }; struct pci_map { pci_addr_t pm_value; /* Raw BAR value */ pci_addr_t pm_size; uint16_t pm_reg; STAILQ_ENTRY(pci_map) pm_link; }; struct vpd_readonly { char keyword[2]; char *value; int len; }; struct vpd_write { char keyword[2]; char *value; int start; int len; }; struct pcicfg_vpd { uint8_t vpd_reg; /* base register, + 2 for addr, + 4 data */ char vpd_cached; char *vpd_ident; /* string identifier */ int vpd_rocnt; struct vpd_readonly *vpd_ros; int vpd_wcnt; struct vpd_write *vpd_w; }; /* Interesting values for PCI MSI */ struct pcicfg_msi { uint16_t msi_ctrl; /* Message Control */ uint8_t msi_location; /* Offset of MSI capability registers. */ uint8_t msi_msgnum; /* Number of messages */ int msi_alloc; /* Number of allocated messages. */ uint64_t msi_addr; /* Contents of address register. */ uint16_t msi_data; /* Contents of data register. */ u_int msi_handlers; }; /* Interesting values for PCI MSI-X */ struct msix_vector { uint64_t mv_address; /* Contents of address register. */ uint32_t mv_data; /* Contents of data register. */ int mv_irq; }; struct msix_table_entry { u_int mte_vector; /* 1-based index into msix_vectors array. */ u_int mte_handlers; }; struct pcicfg_msix { uint16_t msix_ctrl; /* Message Control */ uint16_t msix_msgnum; /* Number of messages */ uint8_t msix_location; /* Offset of MSI-X capability registers. */ uint8_t msix_table_bar; /* BAR containing vector table. */ uint8_t msix_pba_bar; /* BAR containing PBA. */ uint32_t msix_table_offset; uint32_t msix_pba_offset; int msix_alloc; /* Number of allocated vectors. */ int msix_table_len; /* Length of virtual table. */ struct msix_table_entry *msix_table; /* Virtual table. */ struct msix_vector *msix_vectors; /* Array of allocated vectors. */ struct resource *msix_table_res; /* Resource containing vector table. */ struct resource *msix_pba_res; /* Resource containing PBA. */ }; /* Interesting values for HyperTransport */ struct pcicfg_ht { uint8_t ht_slave; /* Non-zero if device is an HT slave. */ uint8_t ht_msimap; /* Offset of MSI mapping cap registers. */ uint16_t ht_msictrl; /* MSI mapping control */ uint64_t ht_msiaddr; /* MSI mapping base address */ }; /* Interesting values for PCI-express */ struct pcicfg_pcie { uint8_t pcie_location; /* Offset of PCI-e capability registers. */ uint8_t pcie_type; /* Device type. */ uint16_t pcie_flags; /* Device capabilities register. */ uint16_t pcie_device_ctl; /* Device control register. */ uint16_t pcie_link_ctl; /* Link control register. */ uint16_t pcie_slot_ctl; /* Slot control register. */ uint16_t pcie_root_ctl; /* Root control register. */ uint16_t pcie_device_ctl2; /* Second device control register. */ uint16_t pcie_link_ctl2; /* Second link control register. */ uint16_t pcie_slot_ctl2; /* Second slot control register. 
*/ }; struct pcicfg_pcix { uint16_t pcix_command; uint8_t pcix_location; /* Offset of PCI-X capability registers. */ }; struct pcicfg_vf { int index; }; struct pci_ea_entry { int eae_bei; uint32_t eae_flags; uint64_t eae_base; uint64_t eae_max_offset; uint32_t eae_cfg_offset; STAILQ_ENTRY(pci_ea_entry) eae_link; }; struct pcicfg_ea { int ea_location; /* Structure offset in Configuration Header */ STAILQ_HEAD(, pci_ea_entry) ea_entries; /* EA entries */ }; #define PCICFG_VF 0x0001 /* Device is an SR-IOV Virtual Function */ /* config header information common to all header types */ typedef struct pcicfg { - struct device *dev; /* device which owns this */ + device_t dev; /* device which owns this */ STAILQ_HEAD(, pci_map) maps; /* BARs */ uint16_t subvendor; /* card vendor ID */ uint16_t subdevice; /* card device ID, assigned by card vendor */ uint16_t vendor; /* chip vendor ID */ uint16_t device; /* chip device ID, assigned by chip vendor */ uint16_t cmdreg; /* disable/enable chip and PCI options */ uint16_t statreg; /* supported PCI features and error state */ uint8_t baseclass; /* chip PCI class */ uint8_t subclass; /* chip PCI subclass */ uint8_t progif; /* chip PCI programming interface */ uint8_t revid; /* chip revision ID */ uint8_t hdrtype; /* chip config header type */ uint8_t cachelnsz; /* cache line size in 4byte units */ uint8_t intpin; /* PCI interrupt pin */ uint8_t intline; /* interrupt line (IRQ for PC arch) */ uint8_t mingnt; /* min. useful bus grant time in 250ns units */ uint8_t maxlat; /* max. tolerated bus grant latency in 250ns */ uint8_t lattimer; /* latency timer in units of 30ns bus cycles */ uint8_t mfdev; /* multi-function device (from hdrtype reg) */ uint8_t nummaps; /* actual number of PCI maps used */ uint32_t domain; /* PCI domain */ uint8_t bus; /* config space bus address */ uint8_t slot; /* config space slot address */ uint8_t func; /* config space function number */ uint32_t flags; /* flags defined above */ struct pcicfg_bridge bridge; /* Bridges */ struct pcicfg_pp pp; /* Power management */ struct pcicfg_vpd vpd; /* Vital product data */ struct pcicfg_msi msi; /* PCI MSI */ struct pcicfg_msix msix; /* PCI MSI-X */ struct pcicfg_ht ht; /* HyperTransport */ struct pcicfg_pcie pcie; /* PCI Express */ struct pcicfg_pcix pcix; /* PCI-X */ struct pcicfg_iov *iov; /* SR-IOV */ struct pcicfg_vf vf; /* SR-IOV Virtual Function */ struct pcicfg_ea ea; /* Enhanced Allocation */ } pcicfgregs; /* additional type 1 device config header information (PCI to PCI bridge) */ typedef struct { pci_addr_t pmembase; /* base address of prefetchable memory */ pci_addr_t pmemlimit; /* topmost address of prefetchable memory */ uint32_t membase; /* base address of memory window */ uint32_t memlimit; /* topmost address of memory window */ uint32_t iobase; /* base address of port window */ uint32_t iolimit; /* topmost address of port window */ uint16_t secstat; /* secondary bus status register */ uint16_t bridgectl; /* bridge control register */ uint8_t seclat; /* CardBus latency timer */ } pcih1cfgregs; /* additional type 2 device config header information (CardBus bridge) */ typedef struct { uint32_t membase0; /* base address of memory window */ uint32_t memlimit0; /* topmost address of memory window */ uint32_t membase1; /* base address of memory window */ uint32_t memlimit1; /* topmost address of memory window */ uint32_t iobase0; /* base address of port window */ uint32_t iolimit0; /* topmost address of port window */ uint32_t iobase1; /* base address of port window */ uint32_t 
iolimit1; /* topmost address of port window */ uint32_t pccardif; /* PC Card 16bit IF legacy more base addr. */ uint16_t secstat; /* secondary bus status register */ uint16_t bridgectl; /* bridge control register */ uint8_t seclat; /* CardBus latency timer */ } pcih2cfgregs; extern uint32_t pci_numdevs; /* Only if the prerequisites are present */ #if defined(_SYS_BUS_H_) && defined(_SYS_PCIIO_H_) struct pci_devinfo { STAILQ_ENTRY(pci_devinfo) pci_links; struct resource_list resources; pcicfgregs cfg; struct pci_conf conf; }; #endif #ifdef _SYS_BUS_H_ #include "pci_if.h" enum pci_device_ivars { PCI_IVAR_SUBVENDOR, PCI_IVAR_SUBDEVICE, PCI_IVAR_VENDOR, PCI_IVAR_DEVICE, PCI_IVAR_DEVID, PCI_IVAR_CLASS, PCI_IVAR_SUBCLASS, PCI_IVAR_PROGIF, PCI_IVAR_REVID, PCI_IVAR_INTPIN, PCI_IVAR_IRQ, PCI_IVAR_DOMAIN, PCI_IVAR_BUS, PCI_IVAR_SLOT, PCI_IVAR_FUNCTION, PCI_IVAR_ETHADDR, PCI_IVAR_CMDREG, PCI_IVAR_CACHELNSZ, PCI_IVAR_MINGNT, PCI_IVAR_MAXLAT, PCI_IVAR_LATTIMER }; /* * Simplified accessors for pci devices */ #define PCI_ACCESSOR(var, ivar, type) \ __BUS_ACCESSOR(pci, var, PCI, ivar, type) PCI_ACCESSOR(subvendor, SUBVENDOR, uint16_t) PCI_ACCESSOR(subdevice, SUBDEVICE, uint16_t) PCI_ACCESSOR(vendor, VENDOR, uint16_t) PCI_ACCESSOR(device, DEVICE, uint16_t) PCI_ACCESSOR(devid, DEVID, uint32_t) PCI_ACCESSOR(class, CLASS, uint8_t) PCI_ACCESSOR(subclass, SUBCLASS, uint8_t) PCI_ACCESSOR(progif, PROGIF, uint8_t) PCI_ACCESSOR(revid, REVID, uint8_t) PCI_ACCESSOR(intpin, INTPIN, uint8_t) PCI_ACCESSOR(irq, IRQ, uint8_t) PCI_ACCESSOR(domain, DOMAIN, uint32_t) PCI_ACCESSOR(bus, BUS, uint8_t) PCI_ACCESSOR(slot, SLOT, uint8_t) PCI_ACCESSOR(function, FUNCTION, uint8_t) PCI_ACCESSOR(ether, ETHADDR, uint8_t *) PCI_ACCESSOR(cmdreg, CMDREG, uint8_t) PCI_ACCESSOR(cachelnsz, CACHELNSZ, uint8_t) PCI_ACCESSOR(mingnt, MINGNT, uint8_t) PCI_ACCESSOR(maxlat, MAXLAT, uint8_t) PCI_ACCESSOR(lattimer, LATTIMER, uint8_t) #undef PCI_ACCESSOR /* * Operations on configuration space. */ static __inline uint32_t pci_read_config(device_t dev, int reg, int width) { return PCI_READ_CONFIG(device_get_parent(dev), dev, reg, width); } static __inline void pci_write_config(device_t dev, int reg, uint32_t val, int width) { PCI_WRITE_CONFIG(device_get_parent(dev), dev, reg, val, width); } /* * Ivars for pci bridges. */ /*typedef enum pci_device_ivars pcib_device_ivars;*/ enum pcib_device_ivars { PCIB_IVAR_DOMAIN, PCIB_IVAR_BUS }; #define PCIB_ACCESSOR(var, ivar, type) \ __BUS_ACCESSOR(pcib, var, PCIB, ivar, type) PCIB_ACCESSOR(domain, DOMAIN, uint32_t) PCIB_ACCESSOR(bus, BUS, uint32_t) #undef PCIB_ACCESSOR /* * PCI interrupt validation. Invalid interrupt values such as 0 or 128 * on i386 or other platforms should be mapped out in the MD pcireadconf * code and not here, since the only MI invalid IRQ is 255. */ #define PCI_INVALID_IRQ 255 #define PCI_INTERRUPT_VALID(x) ((x) != PCI_INVALID_IRQ) /* * Convenience functions. * * These should be used in preference to manually manipulating * configuration space. 
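* For instance, a driver attach routine would enable bus mastering
* and probe for a capability through these wrappers instead of
* poking the command register by hand (a sketch, error handling
* omitted; PCIY_EXPRESS comes from pcireg.h):
*
*	int capreg;
*
*	pci_enable_busmaster(dev);
*	if (pci_find_cap(dev, PCIY_EXPRESS, &capreg) == 0)
*		... PCIe capability found at offset capreg ...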
*/ static __inline int pci_enable_busmaster(device_t dev) { return(PCI_ENABLE_BUSMASTER(device_get_parent(dev), dev)); } static __inline int pci_disable_busmaster(device_t dev) { return(PCI_DISABLE_BUSMASTER(device_get_parent(dev), dev)); } static __inline int pci_enable_io(device_t dev, int space) { return(PCI_ENABLE_IO(device_get_parent(dev), dev, space)); } static __inline int pci_disable_io(device_t dev, int space) { return(PCI_DISABLE_IO(device_get_parent(dev), dev, space)); } static __inline int pci_get_vpd_ident(device_t dev, const char **identptr) { return(PCI_GET_VPD_IDENT(device_get_parent(dev), dev, identptr)); } static __inline int pci_get_vpd_readonly(device_t dev, const char *kw, const char **vptr) { return(PCI_GET_VPD_READONLY(device_get_parent(dev), dev, kw, vptr)); } /* * Check if the address range falls within the VGA defined address range(s) */ static __inline int pci_is_vga_ioport_range(rman_res_t start, rman_res_t end) { return (((start >= 0x3b0 && end <= 0x3bb) || (start >= 0x3c0 && end <= 0x3df)) ? 1 : 0); } static __inline int pci_is_vga_memory_range(rman_res_t start, rman_res_t end) { return ((start >= 0xa0000 && end <= 0xbffff) ? 1 : 0); } /* * PCI power states are as defined by ACPI: * * D0 State in which device is on and running. It is receiving full * power from the system and delivering full functionality to the user. * D1 Class-specific low-power state in which device context may or may not * be lost. Buses in D1 cannot do anything to the bus that would force * devices on that bus to lose context. * D2 Class-specific low-power state in which device context may or may * not be lost. Attains greater power savings than D1. Buses in D2 * can cause devices on that bus to lose some context. Devices in D2 * must be prepared for the bus to be in D2 or higher. * D3 State in which the device is off and not running. Device context is * lost. Power can be removed from the device. 
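* A typical suspend/resume pair built on these states, as a sketch
* (the wrappers and the PCI_POWERSTATE_* constants are defined just
* below; pci_save_state()/pci_restore_state() near the end of this
* header):
*
*	pci_save_state(dev);
*	pci_set_powerstate(dev, PCI_POWERSTATE_D3);
*	...
*	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
*	pci_restore_state(dev);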
*/ #define PCI_POWERSTATE_D0 0 #define PCI_POWERSTATE_D1 1 #define PCI_POWERSTATE_D2 2 #define PCI_POWERSTATE_D3 3 #define PCI_POWERSTATE_UNKNOWN -1 static __inline int pci_set_powerstate(device_t dev, int state) { return PCI_SET_POWERSTATE(device_get_parent(dev), dev, state); } static __inline int pci_get_powerstate(device_t dev) { return PCI_GET_POWERSTATE(device_get_parent(dev), dev); } static __inline int pci_find_cap(device_t dev, int capability, int *capreg) { return (PCI_FIND_CAP(device_get_parent(dev), dev, capability, capreg)); } static __inline int pci_find_extcap(device_t dev, int capability, int *capreg) { return (PCI_FIND_EXTCAP(device_get_parent(dev), dev, capability, capreg)); } static __inline int pci_find_htcap(device_t dev, int capability, int *capreg) { return (PCI_FIND_HTCAP(device_get_parent(dev), dev, capability, capreg)); } static __inline int pci_alloc_msi(device_t dev, int *count) { return (PCI_ALLOC_MSI(device_get_parent(dev), dev, count)); } static __inline int pci_alloc_msix(device_t dev, int *count) { return (PCI_ALLOC_MSIX(device_get_parent(dev), dev, count)); } static __inline void pci_enable_msi(device_t dev, uint64_t address, uint16_t data) { PCI_ENABLE_MSI(device_get_parent(dev), dev, address, data); } static __inline void pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data) { PCI_ENABLE_MSIX(device_get_parent(dev), dev, index, address, data); } static __inline void pci_disable_msi(device_t dev) { PCI_DISABLE_MSI(device_get_parent(dev), dev); } static __inline int pci_remap_msix(device_t dev, int count, const u_int *vectors) { return (PCI_REMAP_MSIX(device_get_parent(dev), dev, count, vectors)); } static __inline int pci_release_msi(device_t dev) { return (PCI_RELEASE_MSI(device_get_parent(dev), dev)); } static __inline int pci_msi_count(device_t dev) { return (PCI_MSI_COUNT(device_get_parent(dev), dev)); } static __inline int pci_msix_count(device_t dev) { return (PCI_MSIX_COUNT(device_get_parent(dev), dev)); } static __inline int pci_msix_pba_bar(device_t dev) { return (PCI_MSIX_PBA_BAR(device_get_parent(dev), dev)); } static __inline int pci_msix_table_bar(device_t dev) { return (PCI_MSIX_TABLE_BAR(device_get_parent(dev), dev)); } static __inline int pci_get_id(device_t dev, enum pci_id_type type, uintptr_t *id) { return (PCI_GET_ID(device_get_parent(dev), dev, type, id)); } /* * This is the deprecated interface, there is no way to tell the difference * between a failure and a valid value that happens to be the same as the * failure value. */ static __inline uint16_t pci_get_rid(device_t dev) { uintptr_t rid; if (pci_get_id(dev, PCI_ID_RID, &rid) != 0) return (0); return (rid); } static __inline void pci_child_added(device_t dev) { return (PCI_CHILD_ADDED(device_get_parent(dev), dev)); } device_t pci_find_bsf(uint8_t, uint8_t, uint8_t); device_t pci_find_dbsf(uint32_t, uint8_t, uint8_t, uint8_t); device_t pci_find_device(uint16_t, uint16_t); device_t pci_find_class(uint8_t class, uint8_t subclass); /* Can be used by drivers to manage the MSI-X table. 
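* Vector allocation with the wrappers above typically looks like the
* following sketch (a real driver falls back to MSI or INTx when
* this fails):
*
*	int count = pci_msix_count(dev);
*	if (count > 0 && pci_alloc_msix(dev, &count) == 0)
*		... count now holds the number actually granted ...
*	...
*	pci_release_msi(dev);	also releases MSI-X allocations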
*/ int pci_pending_msix(device_t dev, u_int index); int pci_msi_device_blacklisted(device_t dev); int pci_msix_device_blacklisted(device_t dev); void pci_ht_map_msi(device_t dev, uint64_t addr); device_t pci_find_pcie_root_port(device_t dev); int pci_get_max_payload(device_t dev); int pci_get_max_read_req(device_t dev); void pci_restore_state(device_t dev); void pci_save_state(device_t dev); int pci_set_max_read_req(device_t dev, int size); uint32_t pcie_read_config(device_t dev, int reg, int width); void pcie_write_config(device_t dev, int reg, uint32_t value, int width); uint32_t pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value, int width); #ifdef BUS_SPACE_MAXADDR #if (BUS_SPACE_MAXADDR > 0xFFFFFFFF) #define PCI_DMA_BOUNDARY 0x100000000 #else #define PCI_DMA_BOUNDARY 0 #endif #endif #endif /* _SYS_BUS_H_ */ /* * cdev switch for control device, initialised in generic PCI code */ extern struct cdevsw pcicdev; /* * List of all PCI devices, generation count for the list. */ STAILQ_HEAD(devlist, pci_devinfo); extern struct devlist pci_devq; extern uint32_t pci_generation; struct pci_map *pci_find_bar(device_t dev, int reg); int pci_bar_enabled(device_t dev, struct pci_map *pm); struct pcicfg_vpd *pci_fetch_vpd_list(device_t dev); #define VGA_PCI_BIOS_SHADOW_ADDR 0xC0000 #define VGA_PCI_BIOS_SHADOW_SIZE 131072 int vga_pci_is_boot_display(device_t dev); void * vga_pci_map_bios(device_t dev, size_t *size); void vga_pci_unmap_bios(device_t dev, void *bios); int vga_pci_repost(device_t dev); #endif /* _PCIVAR_H_ */ Index: head/sys/dev/sound/sbus/cs4231.c =================================================================== --- head/sys/dev/sound/sbus/cs4231.c (revision 303889) +++ head/sys/dev/sound/sbus/cs4231.c (revision 303890) @@ -1,1564 +1,1564 @@ /*- * Copyright (c) 1999 Jason L. Wright (jason@thought.net) * Copyright (c) 2004 Pyun YongHyeon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * Effort sponsored in part by the Defense Advanced Research Projects * Agency (DARPA) and Air Force Research Laboratory, Air Force * Materiel Command, USAF, under agreement number F30602-01-2-0537. * * from: OpenBSD: cs4231.c,v 1.21 2003/07/03 20:36:07 jason Exp */ /* * Driver for CS4231 based audio found in some sun4m systems (cs4231) * based on ideas from the S/Linux project and the NetBSD project. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include "mixer_if.h" /* * The driver supports CS4231A audio chips found on Sbus/Ebus based * UltraSPARCs. Though, CS4231A says it supports full-duplex mode, I * doubt it due to the lack of independent sampling frequency register * for playback/capture. * Since I couldn't find any documentation for APCDMA programming * information, I guessed the usage of APCDMA from that of OpenBSD's * driver. The EBDMA information of PCIO can be obtained from * http://solutions.sun.com/embedded/databook/web/microprocessors/pcio.html * And CS4231A datasheet can also be obtained from * ftp://ftp.alsa-project.org/pub/manuals/cirrus/4231a.pdf * * Audio capture(recording) was not tested at all and may have bugs. * Sorry, I don't have microphone. Don't try to use full-duplex mode. * It wouldn't work. */ #define CS_TIMEOUT 90000 #define CS4231_MIN_BUF_SZ (16*1024) #define CS4231_DEFAULT_BUF_SZ (32*1024) #define CS4231_MAX_BUF_SZ (64*1024) #define CS4231_MAX_BLK_SZ (8*1024) #define CS4231_MAX_APC_DMA_SZ (8*1024) #undef CS4231_DEBUG #ifdef CS4231_DEBUG #define DPRINTF(x) printf x #else #define DPRINTF(x) #endif #define CS4231_AUTO_CALIBRATION struct cs4231_softc; struct cs4231_channel { struct cs4231_softc *parent; struct pcm_channel *channel; struct snd_dbuf *buffer; u_int32_t format; u_int32_t speed; u_int32_t nextaddr; u_int32_t togo; int dir; int locked; }; #define CS4231_RES_MEM_MAX 4 #define CS4231_RES_IRQ_MAX 2 struct cs4231_softc { - struct device *sc_dev; + device_t sc_dev; int sc_rid[CS4231_RES_MEM_MAX]; struct resource *sc_res[CS4231_RES_MEM_MAX]; bus_space_handle_t sc_regh[CS4231_RES_MEM_MAX]; bus_space_tag_t sc_regt[CS4231_RES_MEM_MAX]; int sc_irqrid[CS4231_RES_IRQ_MAX]; struct resource *sc_irqres[CS4231_RES_IRQ_MAX]; void *sc_ih[CS4231_RES_IRQ_MAX]; bus_dma_tag_t sc_dmat[CS4231_RES_IRQ_MAX]; int sc_burst; u_int32_t sc_bufsz; struct cs4231_channel sc_pch; struct cs4231_channel sc_rch; int sc_enabled; int sc_nmres; int sc_nires; int sc_codecv; int sc_chipvid; int sc_flags; #define CS4231_SBUS 0x01 #define CS4231_EBUS 0x02 struct mtx *sc_lock; }; struct mix_table { u_int32_t reg:8; u_int32_t bits:8; u_int32_t mute:8; u_int32_t shift:4; u_int32_t neg:1; u_int32_t avail:1; u_int32_t recdev:1; }; static int cs4231_bus_probe(device_t); static int cs4231_sbus_attach(device_t); static int cs4231_ebus_attach(device_t); static int cs4231_attach_common(struct cs4231_softc *); static int cs4231_bus_detach(device_t); static int cs4231_bus_suspend(device_t); static int cs4231_bus_resume(device_t); static void cs4231_getversion(struct cs4231_softc *); static void cs4231_free_resource(struct cs4231_softc *); static void cs4231_ebdma_reset(struct cs4231_softc *); static void cs4231_power_reset(struct cs4231_softc *, int); static int cs4231_enable(struct cs4231_softc *, int); static void cs4231_disable(struct cs4231_softc *); static void cs4231_write(struct cs4231_softc *, u_int8_t, u_int8_t); static u_int8_t cs4231_read(struct cs4231_softc *, u_int8_t); static void cs4231_sbus_intr(void *); static void cs4231_ebus_pintr(void *arg); static void cs4231_ebus_cintr(void *arg); static int cs4231_mixer_init(struct snd_mixer *); static void cs4231_mixer_set_value(struct cs4231_softc *, const struct mix_table *, u_int8_t); static int cs4231_mixer_set(struct snd_mixer *, u_int32_t, u_int32_t, u_int32_t); 
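/*
 * On the sc_dev change above: device_t is the canonical newbus
 * handle, declared in sys/bus.h as an opaque pointer typedef,
 *
 *	typedef struct device *device_t;
 *
 * with the structure definition private to the bus code, so storing
 * device_t in the softc (as this revision does tree-wide) is the
 * idiomatic spelling.
 */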
static u_int32_t cs4231_mixer_setrecsrc(struct snd_mixer *, u_int32_t); static void *cs4231_chan_init(kobj_t, void *, struct snd_dbuf *, struct pcm_channel *, int); static int cs4231_chan_setformat(kobj_t, void *, u_int32_t); static u_int32_t cs4231_chan_setspeed(kobj_t, void *, u_int32_t); static void cs4231_chan_fs(struct cs4231_softc *, int, u_int8_t); static u_int32_t cs4231_chan_setblocksize(kobj_t, void *, u_int32_t); static int cs4231_chan_trigger(kobj_t, void *, int); static u_int32_t cs4231_chan_getptr(kobj_t, void *); static struct pcmchan_caps * cs4231_chan_getcaps(kobj_t, void *); static void cs4231_trigger(struct cs4231_channel *); static void cs4231_apcdma_trigger(struct cs4231_softc *, struct cs4231_channel *); static void cs4231_ebdma_trigger(struct cs4231_softc *, struct cs4231_channel *); static void cs4231_halt(struct cs4231_channel *); #define CS4231_LOCK(sc) snd_mtxlock(sc->sc_lock) #define CS4231_UNLOCK(sc) snd_mtxunlock(sc->sc_lock) #define CS4231_LOCK_ASSERT(sc) snd_mtxassert(sc->sc_lock) #define CS_WRITE(sc,r,v) \ bus_space_write_1((sc)->sc_regt[0], (sc)->sc_regh[0], (r) << 2, (v)) #define CS_READ(sc,r) \ bus_space_read_1((sc)->sc_regt[0], (sc)->sc_regh[0], (r) << 2) #define APC_WRITE(sc,r,v) \ bus_space_write_4(sc->sc_regt[0], sc->sc_regh[0], r, v) #define APC_READ(sc,r) \ bus_space_read_4(sc->sc_regt[0], sc->sc_regh[0], r) #define EBDMA_P_WRITE(sc,r,v) \ bus_space_write_4((sc)->sc_regt[1], (sc)->sc_regh[1], (r), (v)) #define EBDMA_P_READ(sc,r) \ bus_space_read_4((sc)->sc_regt[1], (sc)->sc_regh[1], (r)) #define EBDMA_C_WRITE(sc,r,v) \ bus_space_write_4((sc)->sc_regt[2], (sc)->sc_regh[2], (r), (v)) #define EBDMA_C_READ(sc,r) \ bus_space_read_4((sc)->sc_regt[2], (sc)->sc_regh[2], (r)) #define AUXIO_CODEC 0x00 #define AUXIO_WRITE(sc,r,v) \ bus_space_write_4((sc)->sc_regt[3], (sc)->sc_regh[3], (r), (v)) #define AUXIO_READ(sc,r) \ bus_space_read_4((sc)->sc_regt[3], (sc)->sc_regh[3], (r)) #define CODEC_WARM_RESET 0 #define CODEC_COLD_RESET 1 /* SBus */ static device_method_t cs4231_sbus_methods[] = { DEVMETHOD(device_probe, cs4231_bus_probe), DEVMETHOD(device_attach, cs4231_sbus_attach), DEVMETHOD(device_detach, cs4231_bus_detach), DEVMETHOD(device_suspend, cs4231_bus_suspend), DEVMETHOD(device_resume, cs4231_bus_resume), DEVMETHOD_END }; static driver_t cs4231_sbus_driver = { "pcm", cs4231_sbus_methods, PCM_SOFTC_SIZE }; DRIVER_MODULE(snd_audiocs, sbus, cs4231_sbus_driver, pcm_devclass, 0, 0); /* EBus */ static device_method_t cs4231_ebus_methods[] = { DEVMETHOD(device_probe, cs4231_bus_probe), DEVMETHOD(device_attach, cs4231_ebus_attach), DEVMETHOD(device_detach, cs4231_bus_detach), DEVMETHOD(device_suspend, cs4231_bus_suspend), DEVMETHOD(device_resume, cs4231_bus_resume), DEVMETHOD_END }; static driver_t cs4231_ebus_driver = { "pcm", cs4231_ebus_methods, PCM_SOFTC_SIZE }; DRIVER_MODULE(snd_audiocs, ebus, cs4231_ebus_driver, pcm_devclass, 0, 0); MODULE_DEPEND(snd_audiocs, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_audiocs, 1); static u_int32_t cs4231_fmt[] = { SND_FORMAT(AFMT_U8, 1, 0), SND_FORMAT(AFMT_U8, 2, 0), SND_FORMAT(AFMT_MU_LAW, 1, 0), SND_FORMAT(AFMT_MU_LAW, 2, 0), SND_FORMAT(AFMT_A_LAW, 1, 0), SND_FORMAT(AFMT_A_LAW, 2, 0), SND_FORMAT(AFMT_IMA_ADPCM, 1, 0), SND_FORMAT(AFMT_IMA_ADPCM, 2, 0), SND_FORMAT(AFMT_S16_LE, 1, 0), SND_FORMAT(AFMT_S16_LE, 2, 0), SND_FORMAT(AFMT_S16_BE, 1, 0), SND_FORMAT(AFMT_S16_BE, 2, 0), 0 }; static struct pcmchan_caps cs4231_caps = {5510, 48000, cs4231_fmt, 0}; /* * sound(4) channel interface */ static 
kobj_method_t cs4231_chan_methods[] = { KOBJMETHOD(channel_init, cs4231_chan_init), KOBJMETHOD(channel_setformat, cs4231_chan_setformat), KOBJMETHOD(channel_setspeed, cs4231_chan_setspeed), KOBJMETHOD(channel_setblocksize, cs4231_chan_setblocksize), KOBJMETHOD(channel_trigger, cs4231_chan_trigger), KOBJMETHOD(channel_getptr, cs4231_chan_getptr), KOBJMETHOD(channel_getcaps, cs4231_chan_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(cs4231_chan); /* * sound(4) mixer interface */ static kobj_method_t cs4231_mixer_methods[] = { KOBJMETHOD(mixer_init, cs4231_mixer_init), KOBJMETHOD(mixer_set, cs4231_mixer_set), KOBJMETHOD(mixer_setrecsrc, cs4231_mixer_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(cs4231_mixer); static int cs4231_bus_probe(device_t dev) { const char *compat, *name; compat = ofw_bus_get_compat(dev); name = ofw_bus_get_name(dev); if (strcmp("SUNW,CS4231", name) == 0 || (compat != NULL && strcmp("SUNW,CS4231", compat) == 0)) { device_set_desc(dev, "Sun Audiocs"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int cs4231_sbus_attach(device_t dev) { struct cs4231_softc *sc; int burst; sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO); sc->sc_dev = dev; /* * XXX * No public documentation exists on programming burst size of APCDMA. */ burst = sbus_get_burstsz(sc->sc_dev); if ((burst & SBUS_BURST_64)) sc->sc_burst = 64; else if ((burst & SBUS_BURST_32)) sc->sc_burst = 32; else if ((burst & SBUS_BURST_16)) sc->sc_burst = 16; else sc->sc_burst = 0; sc->sc_flags = CS4231_SBUS; sc->sc_nmres = 1; sc->sc_nires = 1; return cs4231_attach_common(sc); } static int cs4231_ebus_attach(device_t dev) { struct cs4231_softc *sc; sc = malloc(sizeof(struct cs4231_softc), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc == NULL) { device_printf(dev, "cannot allocate softc\n"); return (ENOMEM); } sc->sc_dev = dev; sc->sc_burst = EBDCSR_BURST_1; sc->sc_nmres = CS4231_RES_MEM_MAX; sc->sc_nires = CS4231_RES_IRQ_MAX; sc->sc_flags = CS4231_EBUS; return cs4231_attach_common(sc); } static int cs4231_attach_common(struct cs4231_softc *sc) { char status[SND_STATUSLEN]; driver_intr_t *ihandler; int i; sc->sc_lock = snd_mtxcreate(device_get_nameunit(sc->sc_dev), "snd_cs4231 softc"); for (i = 0; i < sc->sc_nmres; i++) { sc->sc_rid[i] = i; if ((sc->sc_res[i] = bus_alloc_resource_any(sc->sc_dev, SYS_RES_MEMORY, &sc->sc_rid[i], RF_ACTIVE)) == NULL) { device_printf(sc->sc_dev, "cannot map register %d\n", i); goto fail; } sc->sc_regt[i] = rman_get_bustag(sc->sc_res[i]); sc->sc_regh[i] = rman_get_bushandle(sc->sc_res[i]); } for (i = 0; i < sc->sc_nires; i++) { sc->sc_irqrid[i] = i; if ((sc->sc_irqres[i] = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, &sc->sc_irqrid[i], RF_SHAREABLE | RF_ACTIVE)) == NULL) { if ((sc->sc_flags & CS4231_SBUS) != 0) device_printf(sc->sc_dev, "cannot allocate interrupt\n"); else device_printf(sc->sc_dev, "cannot allocate %s " "interrupt\n", i == 0 ? "capture" : "playback"); goto fail; } } ihandler = cs4231_sbus_intr; for (i = 0; i < sc->sc_nires; i++) { if ((sc->sc_flags & CS4231_EBUS) != 0) { if (i == 0) ihandler = cs4231_ebus_cintr; else ihandler = cs4231_ebus_pintr; } if (snd_setup_intr(sc->sc_dev, sc->sc_irqres[i], INTR_MPSAFE, ihandler, sc, &sc->sc_ih[i])) { if ((sc->sc_flags & CS4231_SBUS) != 0) device_printf(sc->sc_dev, "cannot set up interrupt\n"); else device_printf(sc->sc_dev, "cannot set up %s " " interrupt\n", i == 0 ? 
"capture" : "playback"); goto fail; } } sc->sc_bufsz = pcm_getbuffersize(sc->sc_dev, CS4231_MIN_BUF_SZ, CS4231_DEFAULT_BUF_SZ, CS4231_MAX_BUF_SZ); for (i = 0; i < sc->sc_nires; i++) { if (bus_dma_tag_create( bus_get_dma_tag(sc->sc_dev),/* parent */ 64, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ sc->sc_bufsz, /* maxsize */ 1, /* nsegments */ sc->sc_bufsz, /* maxsegsz */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &sc->sc_dmat[i])) { if ((sc->sc_flags & CS4231_SBUS) != 0) device_printf(sc->sc_dev, "cannot allocate DMA tag\n"); else device_printf(sc->sc_dev, "cannot allocate %s " "DMA tag\n", i == 0 ? "capture" : "playback"); goto fail; } } cs4231_enable(sc, CODEC_WARM_RESET); cs4231_getversion(sc); if (mixer_init(sc->sc_dev, &cs4231_mixer_class, sc) != 0) goto fail; if (pcm_register(sc->sc_dev, sc, 1, 1)) { device_printf(sc->sc_dev, "cannot register to pcm\n"); goto fail; } if (pcm_addchan(sc->sc_dev, PCMDIR_REC, &cs4231_chan_class, sc) != 0) goto chan_fail; if (pcm_addchan(sc->sc_dev, PCMDIR_PLAY, &cs4231_chan_class, sc) != 0) goto chan_fail; if ((sc->sc_flags & CS4231_SBUS) != 0) snprintf(status, SND_STATUSLEN, "at mem 0x%lx irq %ld bufsz %u", rman_get_start(sc->sc_res[0]), rman_get_start(sc->sc_irqres[0]), sc->sc_bufsz); else snprintf(status, SND_STATUSLEN, "at io 0x%lx 0x%lx 0x%lx 0x%lx " "irq %ld %ld bufsz %u", rman_get_start(sc->sc_res[0]), rman_get_start(sc->sc_res[1]), rman_get_start(sc->sc_res[2]), rman_get_start(sc->sc_res[3]), rman_get_start(sc->sc_irqres[0]), rman_get_start(sc->sc_irqres[1]), sc->sc_bufsz); pcm_setstatus(sc->sc_dev, status); return (0); chan_fail: pcm_unregister(sc->sc_dev); fail: cs4231_free_resource(sc); return (ENXIO); } static int cs4231_bus_detach(device_t dev) { struct cs4231_softc *sc; struct cs4231_channel *pch, *rch; int error; sc = pcm_getdevinfo(dev); CS4231_LOCK(sc); pch = &sc->sc_pch; rch = &sc->sc_pch; if (pch->locked || rch->locked) { CS4231_UNLOCK(sc); return (EBUSY); } /* * Since EBDMA requires valid DMA buffer to drain its FIFO, we need * real DMA buffer for draining. 
*/ if ((sc->sc_flags & CS4231_EBUS) != 0) cs4231_ebdma_reset(sc); CS4231_UNLOCK(sc); error = pcm_unregister(dev); if (error) return (error); cs4231_free_resource(sc); return (0); } static int cs4231_bus_suspend(device_t dev) { return (ENXIO); } static int cs4231_bus_resume(device_t dev) { return (ENXIO); } static void cs4231_getversion(struct cs4231_softc *sc) { u_int8_t v; v = cs4231_read(sc, CS_MISC_INFO); sc->sc_codecv = v & CS_CODEC_ID_MASK; v = cs4231_read(sc, CS_VERSION_ID); v &= (CS_VERSION_NUMBER | CS_VERSION_CHIPID); sc->sc_chipvid = v; switch(v) { case 0x80: device_printf(sc->sc_dev, "\n", sc->sc_codecv); break; case 0xa0: device_printf(sc->sc_dev, "\n", sc->sc_codecv); break; case 0x82: device_printf(sc->sc_dev, "\n", sc->sc_codecv); break; default: device_printf(sc->sc_dev, "sc_codecv); break; } } static void cs4231_ebdma_reset(struct cs4231_softc *sc) { int i; /* playback */ EBDMA_P_WRITE(sc, EBDMA_DCSR, EBDMA_P_READ(sc, EBDMA_DCSR) & ~(EBDCSR_INTEN | EBDCSR_NEXTEN)); EBDMA_P_WRITE(sc, EBDMA_DCSR, EBDCSR_RESET); for (i = CS_TIMEOUT; i && EBDMA_P_READ(sc, EBDMA_DCSR) & EBDCSR_DRAIN; i--) DELAY(1); if (i == 0) device_printf(sc->sc_dev, "timeout waiting for playback DMA reset\n"); EBDMA_P_WRITE(sc, EBDMA_DCSR, sc->sc_burst); /* capture */ EBDMA_C_WRITE(sc, EBDMA_DCSR, EBDMA_C_READ(sc, EBDMA_DCSR) & ~(EBDCSR_INTEN | EBDCSR_NEXTEN)); EBDMA_C_WRITE(sc, EBDMA_DCSR, EBDCSR_RESET); for (i = CS_TIMEOUT; i && EBDMA_C_READ(sc, EBDMA_DCSR) & EBDCSR_DRAIN; i--) DELAY(1); if (i == 0) device_printf(sc->sc_dev, "timeout waiting for capture DMA reset\n"); EBDMA_C_WRITE(sc, EBDMA_DCSR, sc->sc_burst); } static void cs4231_power_reset(struct cs4231_softc *sc, int how) { u_int32_t v; int i; if ((sc->sc_flags & CS4231_SBUS) != 0) { APC_WRITE(sc, APC_CSR, APC_CSR_RESET); DELAY(10); APC_WRITE(sc, APC_CSR, 0); DELAY(10); APC_WRITE(sc, APC_CSR, APC_READ(sc, APC_CSR) | APC_CSR_CODEC_RESET); DELAY(20); APC_WRITE(sc, APC_CSR, APC_READ(sc, APC_CSR) & (~APC_CSR_CODEC_RESET)); } else { v = AUXIO_READ(sc, AUXIO_CODEC); if (how == CODEC_WARM_RESET && v != 0) { AUXIO_WRITE(sc, AUXIO_CODEC, 0); DELAY(20); } else if (how == CODEC_COLD_RESET){ AUXIO_WRITE(sc, AUXIO_CODEC, 1); DELAY(20); AUXIO_WRITE(sc, AUXIO_CODEC, 0); DELAY(20); } cs4231_ebdma_reset(sc); } for (i = CS_TIMEOUT; i && CS_READ(sc, CS4231_IADDR) == CS_IN_INIT; i--) DELAY(10); if (i == 0) device_printf(sc->sc_dev, "timeout waiting for reset\n"); /* turn on cs4231 mode */ cs4231_write(sc, CS_MISC_INFO, cs4231_read(sc, CS_MISC_INFO) | CS_MODE2); /* enable interrupts & clear CSR */ cs4231_write(sc, CS_PIN_CONTROL, cs4231_read(sc, CS_PIN_CONTROL) | INTERRUPT_ENABLE); CS_WRITE(sc, CS4231_STATUS, 0); /* enable DAC output */ cs4231_write(sc, CS_LEFT_OUTPUT_CONTROL, cs4231_read(sc, CS_LEFT_OUTPUT_CONTROL) & ~OUTPUT_MUTE); cs4231_write(sc, CS_RIGHT_OUTPUT_CONTROL, cs4231_read(sc, CS_RIGHT_OUTPUT_CONTROL) & ~OUTPUT_MUTE); /* mute AUX1 since it generates noises */ cs4231_write(sc, CS_LEFT_AUX1_CONTROL, cs4231_read(sc, CS_LEFT_AUX1_CONTROL) | AUX_INPUT_MUTE); cs4231_write(sc, CS_RIGHT_AUX1_CONTROL, cs4231_read(sc, CS_RIGHT_AUX1_CONTROL) | AUX_INPUT_MUTE); /* protect buffer underrun and set output level to 0dB */ cs4231_write(sc, CS_ALT_FEATURE1, cs4231_read(sc, CS_ALT_FEATURE1) | CS_DAC_ZERO | CS_OUTPUT_LVL); /* enable high pass filter, dual xtal was disabled due to noises */ cs4231_write(sc, CS_ALT_FEATURE2, cs4231_read(sc, CS_ALT_FEATURE2) | CS_HPF_ENABLE); } static int cs4231_enable(struct cs4231_softc *sc, int how) { cs4231_power_reset(sc, how); sc->sc_enabled 
= 1; return (0); } static void cs4231_disable(struct cs4231_softc *sc) { u_int8_t v; CS4231_LOCK_ASSERT(sc); if (sc->sc_enabled == 0) return; sc->sc_enabled = 0; CS4231_UNLOCK(sc); cs4231_halt(&sc->sc_pch); cs4231_halt(&sc->sc_rch); CS4231_LOCK(sc); v = cs4231_read(sc, CS_PIN_CONTROL) & ~INTERRUPT_ENABLE; cs4231_write(sc, CS_PIN_CONTROL, v); if ((sc->sc_flags & CS4231_SBUS) != 0) { APC_WRITE(sc, APC_CSR, APC_CSR_RESET); DELAY(10); APC_WRITE(sc, APC_CSR, 0); DELAY(10); } else cs4231_ebdma_reset(sc); } static void cs4231_free_resource(struct cs4231_softc *sc) { int i; CS4231_LOCK(sc); cs4231_disable(sc); CS4231_UNLOCK(sc); for (i = 0; i < sc->sc_nires; i++) { if (sc->sc_irqres[i]) { if (sc->sc_ih[i]) { bus_teardown_intr(sc->sc_dev, sc->sc_irqres[i], sc->sc_ih[i]); sc->sc_ih[i] = NULL; } bus_release_resource(sc->sc_dev, SYS_RES_IRQ, sc->sc_irqrid[i], sc->sc_irqres[i]); sc->sc_irqres[i] = NULL; } } for (i = 0; i < sc->sc_nires; i++) { if (sc->sc_dmat[i]) bus_dma_tag_destroy(sc->sc_dmat[i]); } for (i = 0; i < sc->sc_nmres; i++) { if (sc->sc_res[i]) bus_release_resource(sc->sc_dev, SYS_RES_MEMORY, sc->sc_rid[i], sc->sc_res[i]); } snd_mtxfree(sc->sc_lock); free(sc, M_DEVBUF); } static void cs4231_write(struct cs4231_softc *sc, u_int8_t r, u_int8_t v) { CS_WRITE(sc, CS4231_IADDR, r); CS_WRITE(sc, CS4231_IDATA, v); } static u_int8_t cs4231_read(struct cs4231_softc *sc, u_int8_t r) { CS_WRITE(sc, CS4231_IADDR, r); return (CS_READ(sc, CS4231_IDATA)); } static void cs4231_sbus_intr(void *arg) { struct cs4231_softc *sc; struct cs4231_channel *pch, *rch; u_int32_t csr; u_int8_t status; sc = arg; CS4231_LOCK(sc); csr = APC_READ(sc, APC_CSR); if ((csr & APC_CSR_GI) == 0) { CS4231_UNLOCK(sc); return; } APC_WRITE(sc, APC_CSR, csr); if ((csr & APC_CSR_EIE) && (csr & APC_CSR_EI)) { status = cs4231_read(sc, CS_TEST_AND_INIT); device_printf(sc->sc_dev, "apc error interrupt : stat = 0x%x\n", status); } pch = rch = NULL; if ((csr & APC_CSR_PMIE) && (csr & APC_CSR_PMI)) { u_long nextaddr, saddr; u_int32_t togo; pch = &sc->sc_pch; togo = pch->togo; saddr = sndbuf_getbufaddr(pch->buffer); nextaddr = pch->nextaddr + togo; if (nextaddr >= saddr + sndbuf_getsize(pch->buffer)) nextaddr = saddr; APC_WRITE(sc, APC_PNVA, nextaddr); APC_WRITE(sc, APC_PNC, togo); pch->nextaddr = nextaddr; } if ((csr & APC_CSR_CIE) && (csr & APC_CSR_CI) && (csr & APC_CSR_CD)) { u_long nextaddr, saddr; u_int32_t togo; rch = &sc->sc_rch; togo = rch->togo; saddr = sndbuf_getbufaddr(rch->buffer); nextaddr = rch->nextaddr + togo; if (nextaddr >= saddr + sndbuf_getsize(rch->buffer)) nextaddr = saddr; APC_WRITE(sc, APC_CNVA, nextaddr); APC_WRITE(sc, APC_CNC, togo); rch->nextaddr = nextaddr; } CS4231_UNLOCK(sc); if (pch) chn_intr(pch->channel); if (rch) chn_intr(rch->channel); } /* playback interrupt handler */ static void cs4231_ebus_pintr(void *arg) { struct cs4231_softc *sc; struct cs4231_channel *ch; u_int32_t csr; u_int8_t status; sc = arg; CS4231_LOCK(sc); csr = EBDMA_P_READ(sc, EBDMA_DCSR); if ((csr & EBDCSR_INT) == 0) { CS4231_UNLOCK(sc); return; } if ((csr & EBDCSR_ERR)) { status = cs4231_read(sc, CS_TEST_AND_INIT); device_printf(sc->sc_dev, "ebdma error interrupt : stat = 0x%x\n", status); } EBDMA_P_WRITE(sc, EBDMA_DCSR, csr | EBDCSR_TC); ch = NULL; if (csr & EBDCSR_TC) { u_long nextaddr, saddr; u_int32_t togo; ch = &sc->sc_pch; togo = ch->togo; saddr = sndbuf_getbufaddr(ch->buffer); nextaddr = ch->nextaddr + togo; if (nextaddr >= saddr + sndbuf_getsize(ch->buffer)) nextaddr = saddr; /* * EBDMA_DCNT is loaded automatically * 
EBDMA_P_WRITE(sc, EBDMA_DCNT, togo); */ EBDMA_P_WRITE(sc, EBDMA_DADDR, nextaddr); ch->nextaddr = nextaddr; } CS4231_UNLOCK(sc); if (ch) chn_intr(ch->channel); } /* capture interrupt handler */ static void cs4231_ebus_cintr(void *arg) { struct cs4231_softc *sc; struct cs4231_channel *ch; u_int32_t csr; u_int8_t status; sc = arg; CS4231_LOCK(sc); csr = EBDMA_C_READ(sc, EBDMA_DCSR); if ((csr & EBDCSR_INT) == 0) { CS4231_UNLOCK(sc); return; } if ((csr & EBDCSR_ERR)) { status = cs4231_read(sc, CS_TEST_AND_INIT); device_printf(sc->sc_dev, "dma error interrupt : stat = 0x%x\n", status); } EBDMA_C_WRITE(sc, EBDMA_DCSR, csr | EBDCSR_TC); ch = NULL; if (csr & EBDCSR_TC) { u_long nextaddr, saddr; u_int32_t togo; ch = &sc->sc_rch; togo = ch->togo; saddr = sndbuf_getbufaddr(ch->buffer); nextaddr = ch->nextaddr + togo; if (nextaddr >= saddr + sndbuf_getblksz(ch->buffer)) nextaddr = saddr; /* * EBDMA_DCNT is loaded automatically * EBDMA_C_WRITE(sc, EBDMA_DCNT, togo); */ EBDMA_C_WRITE(sc, EBDMA_DADDR, nextaddr); ch->nextaddr = nextaddr; } CS4231_UNLOCK(sc); if (ch) chn_intr(ch->channel); } static const struct mix_table cs4231_mix_table[SOUND_MIXER_NRDEVICES][2] = { [SOUND_MIXER_PCM] = { { CS_LEFT_OUTPUT_CONTROL, 6, OUTPUT_MUTE, 0, 1, 1, 0 }, { CS_RIGHT_OUTPUT_CONTROL, 6, OUTPUT_MUTE, 0, 1, 1, 0 } }, [SOUND_MIXER_SPEAKER] = { { CS_MONO_IO_CONTROL, 4, MONO_OUTPUT_MUTE, 0, 1, 1, 0 }, { CS_REG_NONE, 0, 0, 0, 0, 1, 0 } }, [SOUND_MIXER_LINE] = { { CS_LEFT_LINE_CONTROL, 5, LINE_INPUT_MUTE, 0, 1, 1, 1 }, { CS_RIGHT_LINE_CONTROL, 5, LINE_INPUT_MUTE, 0, 1, 1, 1 } }, /* * AUX1 : removed intentionally since it generates noises * AUX2 : Ultra1/Ultra2 has no internal CD-ROM audio in */ [SOUND_MIXER_CD] = { { CS_LEFT_AUX2_CONTROL, 5, LINE_INPUT_MUTE, 0, 1, 1, 1 }, { CS_RIGHT_AUX2_CONTROL, 5, LINE_INPUT_MUTE, 0, 1, 1, 1 } }, [SOUND_MIXER_MIC] = { { CS_LEFT_INPUT_CONTROL, 4, 0, 0, 0, 1, 1 }, { CS_RIGHT_INPUT_CONTROL, 4, 0, 0, 0, 1, 1 } }, [SOUND_MIXER_IGAIN] = { { CS_LEFT_INPUT_CONTROL, 4, 0, 0, 1, 0 }, { CS_RIGHT_INPUT_CONTROL, 4, 0, 0, 1, 0 } } }; static int cs4231_mixer_init(struct snd_mixer *m) { u_int32_t v; int i; v = 0; for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) if (cs4231_mix_table[i][0].avail != 0) v |= (1 << i); mix_setdevs(m, v); v = 0; for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) if (cs4231_mix_table[i][0].recdev != 0) v |= (1 << i); mix_setrecdevs(m, v); return (0); } static void cs4231_mixer_set_value(struct cs4231_softc *sc, const struct mix_table *mt, u_int8_t v) { u_int8_t mask, reg; u_int8_t old, shift, val; if (mt->avail == 0 || mt->reg == CS_REG_NONE) return; reg = mt->reg; if (mt->neg != 0) val = 100 - v; else val = v; mask = (1 << mt->bits) - 1; val = ((val * mask) + 50) / 100; shift = mt->shift; val <<= shift; if (v == 0) val |= mt->mute; old = cs4231_read(sc, reg); old &= ~(mt->mute | (mask << shift)); val |= old; if (reg == CS_LEFT_INPUT_CONTROL || reg == CS_RIGHT_INPUT_CONTROL) { if ((val & (mask << shift)) != 0) val |= ADC_INPUT_GAIN_ENABLE; else val &= ~ADC_INPUT_GAIN_ENABLE; } cs4231_write(sc, reg, val); } static int cs4231_mixer_set(struct snd_mixer *m, u_int32_t dev, u_int32_t left, u_int32_t right) { struct cs4231_softc *sc; sc = mix_getdevinfo(m); CS4231_LOCK(sc); cs4231_mixer_set_value(sc, &cs4231_mix_table[dev][0], left); cs4231_mixer_set_value(sc, &cs4231_mix_table[dev][1], right); CS4231_UNLOCK(sc); return (left | (right << 8)); } static u_int32_t cs4231_mixer_setrecsrc(struct snd_mixer *m, u_int32_t src) { struct cs4231_softc *sc; u_int8_t v; sc = mix_getdevinfo(m); switch (src) { 
case SOUND_MASK_LINE: v = CS_IN_LINE; break; case SOUND_MASK_CD: v = CS_IN_DAC; break; case SOUND_MASK_MIC: default: v = CS_IN_MIC; src = SOUND_MASK_MIC; break; } CS4231_LOCK(sc); cs4231_write(sc, CS_LEFT_INPUT_CONTROL, (cs4231_read(sc, CS_LEFT_INPUT_CONTROL) & CS_IN_MASK) | v); cs4231_write(sc, CS_RIGHT_INPUT_CONTROL, (cs4231_read(sc, CS_RIGHT_INPUT_CONTROL) & CS_IN_MASK) | v); CS4231_UNLOCK(sc); return (src); } static void * cs4231_chan_init(kobj_t obj, void *dev, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct cs4231_softc *sc; struct cs4231_channel *ch; bus_dma_tag_t dmat; sc = dev; ch = (dir == PCMDIR_PLAY) ? &sc->sc_pch : &sc->sc_rch; ch->parent = sc; ch->channel = c; ch->dir = dir; ch->buffer = b; if ((sc->sc_flags & CS4231_SBUS) != 0) dmat = sc->sc_dmat[0]; else { if (dir == PCMDIR_PLAY) dmat = sc->sc_dmat[1]; else dmat = sc->sc_dmat[0]; } if (sndbuf_alloc(ch->buffer, dmat, 0, sc->sc_bufsz) != 0) return (NULL); DPRINTF(("%s channel addr: 0x%lx\n", dir == PCMDIR_PLAY ? "playback" : "capture", sndbuf_getbufaddr(ch->buffer))); return (ch); } static int cs4231_chan_setformat(kobj_t obj, void *data, u_int32_t format) { struct cs4231_softc *sc; struct cs4231_channel *ch; u_int32_t encoding; u_int8_t fs, v; ch = data; sc = ch->parent; CS4231_LOCK(sc); if (ch->format == format) { CS4231_UNLOCK(sc); return (0); } encoding = AFMT_ENCODING(format); fs = 0; switch (encoding) { case AFMT_U8: fs = CS_AFMT_U8; break; case AFMT_MU_LAW: fs = CS_AFMT_MU_LAW; break; case AFMT_S16_LE: fs = CS_AFMT_S16_LE; break; case AFMT_A_LAW: fs = CS_AFMT_A_LAW; break; case AFMT_IMA_ADPCM: fs = CS_AFMT_IMA_ADPCM; break; case AFMT_S16_BE: fs = CS_AFMT_S16_BE; break; default: fs = CS_AFMT_U8; format = AFMT_U8; break; } if (AFMT_CHANNEL(format) > 1) fs |= CS_AFMT_STEREO; DPRINTF(("FORMAT: %s : 0x%x\n", ch->dir == PCMDIR_PLAY ? "playback" : "capture", format)); v = cs4231_read(sc, CS_CLOCK_DATA_FORMAT); v &= CS_CLOCK_DATA_FORMAT_MASK; fs |= v; cs4231_chan_fs(sc, ch->dir, fs); ch->format = format; CS4231_UNLOCK(sc); return (0); } static u_int32_t cs4231_chan_setspeed(kobj_t obj, void *data, u_int32_t speed) { typedef struct { u_int32_t speed; u_int8_t bits; } speed_struct; const static speed_struct speed_table[] = { {5510, (0 << 1) | CLOCK_XTAL2}, {5510, (0 << 1) | CLOCK_XTAL2}, {6620, (7 << 1) | CLOCK_XTAL2}, {8000, (0 << 1) | CLOCK_XTAL1}, {9600, (7 << 1) | CLOCK_XTAL1}, {11025, (1 << 1) | CLOCK_XTAL2}, {16000, (1 << 1) | CLOCK_XTAL1}, {18900, (2 << 1) | CLOCK_XTAL2}, {22050, (3 << 1) | CLOCK_XTAL2}, {27420, (2 << 1) | CLOCK_XTAL1}, {32000, (3 << 1) | CLOCK_XTAL1}, {33075, (6 << 1) | CLOCK_XTAL2}, {33075, (4 << 1) | CLOCK_XTAL2}, {44100, (5 << 1) | CLOCK_XTAL2}, {48000, (6 << 1) | CLOCK_XTAL1}, }; struct cs4231_softc *sc; struct cs4231_channel *ch; int i, n, sel; u_int8_t fs; ch = data; sc = ch->parent; CS4231_LOCK(sc); if (ch->speed == speed) { CS4231_UNLOCK(sc); return (speed); } n = sizeof(speed_table) / sizeof(speed_struct); for (i = 1, sel =0; i < n - 1; i++) if (abs(speed - speed_table[i].speed) < abs(speed - speed_table[sel].speed)) sel = i; DPRINTF(("SPEED: %s : %dHz -> %dHz\n", ch->dir == PCMDIR_PLAY ? 
"playback" : "capture", speed, speed_table[sel].speed)); speed = speed_table[sel].speed; fs = cs4231_read(sc, CS_CLOCK_DATA_FORMAT); fs &= ~CS_CLOCK_DATA_FORMAT_MASK; fs |= speed_table[sel].bits; cs4231_chan_fs(sc, ch->dir, fs); ch->speed = speed; CS4231_UNLOCK(sc); return (speed); } static void cs4231_chan_fs(struct cs4231_softc *sc, int dir, u_int8_t fs) { int i, doreset; #ifdef CS4231_AUTO_CALIBRATION u_int8_t v; #endif CS4231_LOCK_ASSERT(sc); /* set autocalibration */ doreset = 0; #ifdef CS4231_AUTO_CALIBRATION v = cs4231_read(sc, CS_INTERFACE_CONFIG) | AUTO_CAL_ENABLE; CS_WRITE(sc, CS4231_IADDR, MODE_CHANGE_ENABLE); CS_WRITE(sc, CS4231_IADDR, MODE_CHANGE_ENABLE | CS_INTERFACE_CONFIG); CS_WRITE(sc, CS4231_IDATA, v); #endif /* * We always need to write CS_CLOCK_DATA_FORMAT register since * the clock frequency is shared with playback/capture. */ CS_WRITE(sc, CS4231_IADDR, MODE_CHANGE_ENABLE | CS_CLOCK_DATA_FORMAT); CS_WRITE(sc, CS4231_IDATA, fs); CS_READ(sc, CS4231_IDATA); CS_READ(sc, CS4231_IDATA); for (i = CS_TIMEOUT; i && CS_READ(sc, CS4231_IADDR) == CS_IN_INIT; i--) DELAY(10); if (i == 0) { device_printf(sc->sc_dev, "timeout setting playback speed\n"); doreset++; } /* * capture channel * cs4231 doesn't allow separate fs setup for playback/capture. * I believe this will break full-duplex operation. */ if (dir == PCMDIR_REC) { CS_WRITE(sc, CS4231_IADDR, MODE_CHANGE_ENABLE | CS_REC_FORMAT); CS_WRITE(sc, CS4231_IDATA, fs); CS_READ(sc, CS4231_IDATA); CS_READ(sc, CS4231_IDATA); for (i = CS_TIMEOUT; i && CS_READ(sc, CS4231_IADDR) == CS_IN_INIT; i--) DELAY(10); if (i == 0) { device_printf(sc->sc_dev, "timeout setting capture format\n"); doreset++; } } CS_WRITE(sc, CS4231_IADDR, 0); for (i = CS_TIMEOUT; i && CS_READ(sc, CS4231_IADDR) == CS_IN_INIT; i--) DELAY(10); if (i == 0) { device_printf(sc->sc_dev, "timeout waiting for !MCE\n"); doreset++; } #ifdef CS4231_AUTO_CALIBRATION CS_WRITE(sc, CS4231_IADDR, CS_TEST_AND_INIT); for (i = CS_TIMEOUT; i && CS_READ(sc, CS4231_IDATA) & AUTO_CAL_IN_PROG; i--) DELAY(10); if (i == 0) { device_printf(sc->sc_dev, "timeout waiting for autocalibration\n"); doreset++; } #endif if (doreset) { /* * Maybe the last resort to avoid a dreadful message like * "pcm0:play:0: play interrupt timeout, channel dead" would * be hardware reset. 
*/ device_printf(sc->sc_dev, "trying to hardware reset\n"); cs4231_disable(sc); cs4231_enable(sc, CODEC_COLD_RESET); CS4231_UNLOCK(sc); /* XXX */ if (mixer_reinit(sc->sc_dev) != 0) device_printf(sc->sc_dev, "unable to reinitialize the mixer\n"); CS4231_LOCK(sc); } } static u_int32_t cs4231_chan_setblocksize(kobj_t obj, void *data, u_int32_t blocksize) { struct cs4231_softc *sc; struct cs4231_channel *ch; int nblks, error; ch = data; sc = ch->parent; if (blocksize > CS4231_MAX_BLK_SZ) blocksize = CS4231_MAX_BLK_SZ; nblks = sc->sc_bufsz / blocksize; error = sndbuf_resize(ch->buffer, nblks, blocksize); if (error != 0) device_printf(sc->sc_dev, "unable to block size, blksz = %d, error = %d\n", blocksize, error); return (blocksize); } static int cs4231_chan_trigger(kobj_t obj, void *data, int go) { struct cs4231_channel *ch; ch = data; switch (go) { case PCMTRIG_EMLDMAWR: case PCMTRIG_EMLDMARD: break; case PCMTRIG_START: cs4231_trigger(ch); break; case PCMTRIG_ABORT: case PCMTRIG_STOP: cs4231_halt(ch); break; default: break; } return (0); } static u_int32_t cs4231_chan_getptr(kobj_t obj, void *data) { struct cs4231_softc *sc; struct cs4231_channel *ch; u_int32_t cur, ptr, sz; ch = data; sc = ch->parent; CS4231_LOCK(sc); if ((sc->sc_flags & CS4231_SBUS) != 0) cur = (ch->dir == PCMDIR_PLAY) ? APC_READ(sc, APC_PVA) : APC_READ(sc, APC_CVA); else cur = (ch->dir == PCMDIR_PLAY) ? EBDMA_P_READ(sc, EBDMA_DADDR) : EBDMA_C_READ(sc, EBDMA_DADDR); sz = sndbuf_getsize(ch->buffer); ptr = cur - sndbuf_getbufaddr(ch->buffer) + sz; CS4231_UNLOCK(sc); ptr %= sz; return (ptr); } static struct pcmchan_caps * cs4231_chan_getcaps(kobj_t obj, void *data) { return (&cs4231_caps); } static void cs4231_trigger(struct cs4231_channel *ch) { struct cs4231_softc *sc; sc = ch->parent; if ((sc->sc_flags & CS4231_SBUS) != 0) cs4231_apcdma_trigger(sc, ch); else cs4231_ebdma_trigger(sc, ch); } static void cs4231_apcdma_trigger(struct cs4231_softc *sc, struct cs4231_channel *ch) { u_int32_t csr, togo; u_int32_t nextaddr; CS4231_LOCK(sc); if (ch->locked) { device_printf(sc->sc_dev, "%s channel already triggered\n", ch->dir == PCMDIR_PLAY ? 
"playback" : "capture"); CS4231_UNLOCK(sc); return; } nextaddr = sndbuf_getbufaddr(ch->buffer); togo = sndbuf_getsize(ch->buffer) / 2; if (togo > CS4231_MAX_APC_DMA_SZ) togo = CS4231_MAX_APC_DMA_SZ; ch->togo = togo; if (ch->dir == PCMDIR_PLAY) { DPRINTF(("TRG: PNVA = 0x%x, togo = 0x%x\n", nextaddr, togo)); cs4231_read(sc, CS_TEST_AND_INIT); /* clear pending error */ csr = APC_READ(sc, APC_CSR); APC_WRITE(sc, APC_PNVA, nextaddr); APC_WRITE(sc, APC_PNC, togo); if ((csr & APC_CSR_PDMA_GO) == 0 || (csr & APC_CSR_PPAUSE) != 0) { APC_WRITE(sc, APC_CSR, APC_READ(sc, APC_CSR) & ~(APC_CSR_PIE | APC_CSR_PPAUSE)); APC_WRITE(sc, APC_CSR, APC_READ(sc, APC_CSR) | APC_CSR_GIE | APC_CSR_PIE | APC_CSR_EIE | APC_CSR_EI | APC_CSR_PMIE | APC_CSR_PDMA_GO); cs4231_write(sc, CS_INTERFACE_CONFIG, cs4231_read(sc, CS_INTERFACE_CONFIG) | PLAYBACK_ENABLE); } /* load next address */ if (APC_READ(sc, APC_CSR) & APC_CSR_PD) { nextaddr += togo; APC_WRITE(sc, APC_PNVA, nextaddr); APC_WRITE(sc, APC_PNC, togo); } } else { DPRINTF(("TRG: CNVA = 0x%x, togo = 0x%x\n", nextaddr, togo)); cs4231_read(sc, CS_TEST_AND_INIT); /* clear pending error */ APC_WRITE(sc, APC_CNVA, nextaddr); APC_WRITE(sc, APC_CNC, togo); csr = APC_READ(sc, APC_CSR); if ((csr & APC_CSR_CDMA_GO) == 0 || (csr & APC_CSR_CPAUSE) != 0) { csr &= APC_CSR_CPAUSE; csr |= APC_CSR_GIE | APC_CSR_CMIE | APC_CSR_CIE | APC_CSR_EI | APC_CSR_CDMA_GO; APC_WRITE(sc, APC_CSR, csr); cs4231_write(sc, CS_INTERFACE_CONFIG, cs4231_read(sc, CS_INTERFACE_CONFIG) | CAPTURE_ENABLE); } /* load next address */ if (APC_READ(sc, APC_CSR) & APC_CSR_CD) { nextaddr += togo; APC_WRITE(sc, APC_CNVA, nextaddr); APC_WRITE(sc, APC_CNC, togo); } } ch->nextaddr = nextaddr; ch->locked = 1; CS4231_UNLOCK(sc); } static void cs4231_ebdma_trigger(struct cs4231_softc *sc, struct cs4231_channel *ch) { u_int32_t csr, togo; u_int32_t nextaddr; CS4231_LOCK(sc); if (ch->locked) { device_printf(sc->sc_dev, "%s channel already triggered\n", ch->dir == PCMDIR_PLAY ? 
"playback" : "capture"); CS4231_UNLOCK(sc); return; } nextaddr = sndbuf_getbufaddr(ch->buffer); togo = sndbuf_getsize(ch->buffer) / 2; if (togo % 64 == 0) sc->sc_burst = EBDCSR_BURST_16; else if (togo % 32 == 0) sc->sc_burst = EBDCSR_BURST_8; else if (togo % 16 == 0) sc->sc_burst = EBDCSR_BURST_4; else sc->sc_burst = EBDCSR_BURST_1; ch->togo = togo; DPRINTF(("TRG: DNAR = 0x%x, togo = 0x%x\n", nextaddr, togo)); if (ch->dir == PCMDIR_PLAY) { cs4231_read(sc, CS_TEST_AND_INIT); /* clear pending error */ csr = EBDMA_P_READ(sc, EBDMA_DCSR); if (csr & EBDCSR_DMAEN) { EBDMA_P_WRITE(sc, EBDMA_DCNT, togo); EBDMA_P_WRITE(sc, EBDMA_DADDR, nextaddr); } else { EBDMA_P_WRITE(sc, EBDMA_DCSR, EBDCSR_RESET); EBDMA_P_WRITE(sc, EBDMA_DCSR, sc->sc_burst); EBDMA_P_WRITE(sc, EBDMA_DCNT, togo); EBDMA_P_WRITE(sc, EBDMA_DADDR, nextaddr); EBDMA_P_WRITE(sc, EBDMA_DCSR, sc->sc_burst | EBDCSR_DMAEN | EBDCSR_INTEN | EBDCSR_CNTEN | EBDCSR_NEXTEN); cs4231_write(sc, CS_INTERFACE_CONFIG, cs4231_read(sc, CS_INTERFACE_CONFIG) | PLAYBACK_ENABLE); } /* load next address */ if (EBDMA_P_READ(sc, EBDMA_DCSR) & EBDCSR_A_LOADED) { nextaddr += togo; EBDMA_P_WRITE(sc, EBDMA_DCNT, togo); EBDMA_P_WRITE(sc, EBDMA_DADDR, nextaddr); } } else { cs4231_read(sc, CS_TEST_AND_INIT); /* clear pending error */ csr = EBDMA_C_READ(sc, EBDMA_DCSR); if (csr & EBDCSR_DMAEN) { EBDMA_C_WRITE(sc, EBDMA_DCNT, togo); EBDMA_C_WRITE(sc, EBDMA_DADDR, nextaddr); } else { EBDMA_C_WRITE(sc, EBDMA_DCSR, EBDCSR_RESET); EBDMA_C_WRITE(sc, EBDMA_DCSR, sc->sc_burst); EBDMA_C_WRITE(sc, EBDMA_DCNT, togo); EBDMA_C_WRITE(sc, EBDMA_DADDR, nextaddr); EBDMA_C_WRITE(sc, EBDMA_DCSR, sc->sc_burst | EBDCSR_WRITE | EBDCSR_DMAEN | EBDCSR_INTEN | EBDCSR_CNTEN | EBDCSR_NEXTEN); cs4231_write(sc, CS_INTERFACE_CONFIG, cs4231_read(sc, CS_INTERFACE_CONFIG) | CAPTURE_ENABLE); } /* load next address */ if (EBDMA_C_READ(sc, EBDMA_DCSR) & EBDCSR_A_LOADED) { nextaddr += togo; EBDMA_C_WRITE(sc, EBDMA_DCNT, togo); EBDMA_C_WRITE(sc, EBDMA_DADDR, nextaddr); } } ch->nextaddr = nextaddr; ch->locked = 1; CS4231_UNLOCK(sc); } static void cs4231_halt(struct cs4231_channel *ch) { struct cs4231_softc *sc; u_int8_t status; int i; sc = ch->parent; CS4231_LOCK(sc); if (ch->locked == 0) { CS4231_UNLOCK(sc); return; } if (ch->dir == PCMDIR_PLAY ) { if ((sc->sc_flags & CS4231_SBUS) != 0) { /* XXX Kills some capture bits */ APC_WRITE(sc, APC_CSR, APC_READ(sc, APC_CSR) & ~(APC_CSR_EI | APC_CSR_GIE | APC_CSR_PIE | APC_CSR_EIE | APC_CSR_PDMA_GO | APC_CSR_PMIE)); } else { EBDMA_P_WRITE(sc, EBDMA_DCSR, EBDMA_P_READ(sc, EBDMA_DCSR) & ~EBDCSR_DMAEN); } /* Waiting for playback FIFO to empty */ status = cs4231_read(sc, CS_TEST_AND_INIT); for (i = CS_TIMEOUT; i && (status & PLAYBACK_UNDERRUN) == 0; i--) { DELAY(5); status = cs4231_read(sc, CS_TEST_AND_INIT); } if (i == 0) device_printf(sc->sc_dev, "timeout waiting for " "playback FIFO drain\n"); cs4231_write(sc, CS_INTERFACE_CONFIG, cs4231_read(sc, CS_INTERFACE_CONFIG) & (~PLAYBACK_ENABLE)); } else { if ((sc->sc_flags & CS4231_SBUS) != 0) { /* XXX Kills some playback bits */ APC_WRITE(sc, APC_CSR, APC_CSR_CAPTURE_PAUSE); } else { EBDMA_C_WRITE(sc, EBDMA_DCSR, EBDMA_C_READ(sc, EBDMA_DCSR) & ~EBDCSR_DMAEN); } /* Waiting for capture FIFO to empty */ status = cs4231_read(sc, CS_TEST_AND_INIT); for (i = CS_TIMEOUT; i && (status & CAPTURE_OVERRUN) == 0; i--) { DELAY(5); status = cs4231_read(sc, CS_TEST_AND_INIT); } if (i == 0) device_printf(sc->sc_dev, "timeout waiting for " "capture FIFO drain\n"); cs4231_write(sc, CS_INTERFACE_CONFIG, cs4231_read(sc, CS_INTERFACE_CONFIG) & 
(~CAPTURE_ENABLE)); } ch->locked = 0; CS4231_UNLOCK(sc); } Index: head/sys/dev/tpm/tpm.c =================================================================== --- head/sys/dev/tpm/tpm.c (revision 303889) +++ head/sys/dev/tpm/tpm.c (revision 303890) @@ -1,1494 +1,1494 @@ /* * Copyright (c) 2008, 2009 Michael Shalayeff * Copyright (c) 2009, 2010 Hans-Joerg Hoexer * All rights reserved. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF MIND, USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* #define TPM_DEBUG */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #ifdef __FreeBSD__ #include #include #include #include #include #include #include #include #include #include #else #include #include #include #include #include #include #include #endif #include #ifndef __FreeBSD__ /* XXX horrible hack for tcsd (-lpthread) workaround on OpenBSD */ #undef PCATCH #define PCATCH 0 #endif #define TPM_BUFSIZ 1024 #define TPM_HDRSIZE 10 #define TPM_PARAM_SIZE 0x0001 #ifdef __FreeBSD__ #define IRQUNK -1 #endif #define TPM_ACCESS 0x0000 /* access register */ #define TPM_ACCESS_ESTABLISHMENT 0x01 /* establishment */ #define TPM_ACCESS_REQUEST_USE 0x02 /* request using locality */ #define TPM_ACCESS_REQUEST_PENDING 0x04 /* pending request */ #define TPM_ACCESS_SEIZE 0x08 /* request locality seize */ #define TPM_ACCESS_SEIZED 0x10 /* locality has been seized */ #define TPM_ACCESS_ACTIVE_LOCALITY 0x20 /* locality is active */ #define TPM_ACCESS_VALID 0x80 /* bits are valid */ #define TPM_ACCESS_BITS \ "\020\01EST\02REQ\03PEND\04SEIZE\05SEIZED\06ACT\010VALID" #define TPM_INTERRUPT_ENABLE 0x0008 #define TPM_GLOBAL_INT_ENABLE 0x80000000 /* enable ints */ #define TPM_CMD_READY_INT 0x00000080 /* cmd ready enable */ #define TPM_INT_EDGE_FALLING 0x00000018 #define TPM_INT_EDGE_RISING 0x00000010 #define TPM_INT_LEVEL_LOW 0x00000008 #define TPM_INT_LEVEL_HIGH 0x00000000 #define TPM_LOCALITY_CHANGE_INT 0x00000004 /* locality change enable */ #define TPM_STS_VALID_INT 0x00000002 /* int on TPM_STS_VALID is set */ #define TPM_DATA_AVAIL_INT 0x00000001 /* int on TPM_STS_DATA_AVAIL is set */ #define TPM_INTERRUPT_ENABLE_BITS \ "\020\040ENA\010RDY\03LOCH\02STSV\01DRDY" #define TPM_INT_VECTOR 0x000c /* 8 bit reg for 4 bit irq vector */ #define TPM_INT_STATUS 0x0010 /* bits are & 0x87 from TPM_INTERRUPT_ENABLE */ #define TPM_INTF_CAPABILITIES 0x0014 /* capability register */ #define TPM_INTF_BURST_COUNT_STATIC 0x0100 /* TPM_STS_BMASK static */ #define TPM_INTF_CMD_READY_INT 0x0080 /* int on ready supported */ #define TPM_INTF_INT_EDGE_FALLING 0x0040 /* falling edge ints supported */ #define TPM_INTF_INT_EDGE_RISING 0x0020 /* rising edge ints supported */ #define TPM_INTF_INT_LEVEL_LOW 0x0010 /* level-low ints supported */ #define TPM_INTF_INT_LEVEL_HIGH 0x0008 /* level-high ints supported */ #define TPM_INTF_LOCALITY_CHANGE_INT 0x0004 /* locality-change int (mb 1) */ #define TPM_INTF_STS_VALID_INT 
0x0002 /* TPM_STS_VALID int supported */ #define TPM_INTF_DATA_AVAIL_INT 0x0001 /* TPM_STS_DATA_AVAIL int supported (mb 1) */ #define TPM_CAPSREQ \ (TPM_INTF_DATA_AVAIL_INT|TPM_INTF_LOCALITY_CHANGE_INT|TPM_INTF_INT_LEVEL_LOW) #define TPM_CAPBITS \ "\020\01IDRDY\02ISTSV\03ILOCH\04IHIGH\05ILOW\06IEDGE\07IFALL\010IRDY\011BCST" #define TPM_STS 0x0018 /* status register */ #define TPM_STS_MASK 0x000000ff /* status bits */ #define TPM_STS_BMASK 0x00ffff00 /* ro io burst size */ #define TPM_STS_VALID 0x00000080 /* ro other bits are valid */ #define TPM_STS_CMD_READY 0x00000040 /* rw chip/signal ready */ #define TPM_STS_GO 0x00000020 /* wo start the command */ #define TPM_STS_DATA_AVAIL 0x00000010 /* ro data available */ #define TPM_STS_DATA_EXPECT 0x00000008 /* ro more data to be written */ #define TPM_STS_RESP_RETRY 0x00000002 /* wo resend the response */ #define TPM_STS_BITS "\020\010VALID\07RDY\06GO\05DRDY\04EXPECT\02RETRY" #define TPM_DATA 0x0024 #define TPM_ID 0x0f00 #define TPM_REV 0x0f04 #define TPM_SIZE 0x5000 /* five pages of the above */ #define TPM_ACCESS_TMO 2000 /* 2sec */ #define TPM_READY_TMO 2000 /* 2sec */ #define TPM_READ_TMO 120000 /* 2 minutes */ #define TPM_BURST_TMO 2000 /* 2sec */ #define TPM_LEGACY_BUSY 0x01 #define TPM_LEGACY_ABRT 0x01 #define TPM_LEGACY_DA 0x02 #define TPM_LEGACY_RE 0x04 #define TPM_LEGACY_LAST 0x04 #define TPM_LEGACY_BITS "\020\01BUSY\2DA\3RE\4LAST" #define TPM_LEGACY_TMO (2*60) /* sec */ #define TPM_LEGACY_SLEEP 5 /* ticks */ #define TPM_LEGACY_DELAY 100 /* Set when enabling legacy interface in host bridge. */ int tpm_enabled; #ifdef __FreeBSD__ #define TPMSOFTC(dev) \ ((struct tpm_softc *)dev->si_drv1) d_open_t tpmopen; d_close_t tpmclose; d_read_t tpmread; d_write_t tpmwrite; d_ioctl_t tpmioctl; static struct cdevsw tpm_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = tpmopen, .d_close = tpmclose, .d_read = tpmread, .d_write = tpmwrite, .d_ioctl = tpmioctl, .d_name = "tpm", }; #else #define TPMSOFTC(dev) \ (struct tpm_softc *)device_lookup(&tpm_cd, minor(dev)) struct cfdriver tpm_cd = { NULL, "tpm", DV_DULL }; -int tpm_match(struct device *, void *, void *); -void tpm_attach(struct device *, struct device *, void *); +int tpm_match(device_t , void *, void *); +void tpm_attach(device_t , device_t , void *); struct cfattach tpm_ca = { sizeof(struct tpm_softc), tpm_match, tpm_attach }; #endif const struct { u_int32_t devid; char name[32]; int flags; #define TPM_DEV_NOINTS 0x0001 } tpm_devs[] = { { 0x000615d1, "IFX SLD 9630 TT 1.1", 0 }, { 0x000b15d1, "IFX SLB 9635 TT 1.2", 0 }, { 0x100214e4, "Broadcom BCM0102", TPM_DEV_NOINTS }, { 0x00fe1050, "WEC WPCT200", 0 }, { 0x687119fa, "SNS SSX35", 0 }, { 0x2e4d5453, "STM ST19WP18", 0 }, { 0x32021114, "ATML 97SC3203", TPM_DEV_NOINTS }, { 0x10408086, "INTEL INTC0102", 0 }, { 0, "", TPM_DEV_NOINTS }, }; int tpm_tis12_irqinit(struct tpm_softc *, int, int); int tpm_tis12_init(struct tpm_softc *, int, const char *); int tpm_tis12_start(struct tpm_softc *, int); int tpm_tis12_read(struct tpm_softc *, void *, int, size_t *, int); int tpm_tis12_write(struct tpm_softc *, void *, int); int tpm_tis12_end(struct tpm_softc *, int, int); #ifdef __FreeBSD__ void tpm_intr(void *); #else int tpm_intr(void *); void tpm_powerhook(int, void *); int tpm_suspend(struct tpm_softc *, int); int tpm_resume(struct tpm_softc *, int); #endif int tpm_waitfor_poll(struct tpm_softc *, u_int8_t, int, void *); int tpm_waitfor_int(struct tpm_softc *, u_int8_t, int, void *, int); int tpm_waitfor(struct tpm_softc *, u_int8_t, 
int, void *); int tpm_request_locality(struct tpm_softc *, int); int tpm_getburst(struct tpm_softc *); u_int8_t tpm_status(struct tpm_softc *); int tpm_tmotohz(int); int tpm_legacy_probe(bus_space_tag_t, bus_addr_t); int tpm_legacy_init(struct tpm_softc *, int, const char *); int tpm_legacy_start(struct tpm_softc *, int); int tpm_legacy_read(struct tpm_softc *, void *, int, size_t *, int); int tpm_legacy_write(struct tpm_softc *, void *, int); int tpm_legacy_end(struct tpm_softc *, int, int); #ifdef __FreeBSD__ /* * FreeBSD specific code for probing and attaching TPM to device tree. */ #if 0 static void tpm_identify(driver_t *driver, device_t parent) { BUS_ADD_CHILD(parent, ISA_ORDER_SPECULATIVE, "tpm", 0); } #endif int tpm_attach(device_t dev) { struct tpm_softc *sc = device_get_softc(dev); int irq; sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem_res == NULL) return ENXIO; sc->sc_bt = rman_get_bustag(sc->mem_res); sc->sc_bh = rman_get_bushandle(sc->mem_res); sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE | RF_SHAREABLE); if (sc->irq_res != NULL) irq = rman_get_start(sc->irq_res); else irq = IRQUNK; /* In case PnP probe this may contain some initialization. */ tpm_tis12_probe(sc->sc_bt, sc->sc_bh); if (tpm_legacy_probe(sc->sc_bt, sc->sc_bh)) { sc->sc_init = tpm_legacy_init; sc->sc_start = tpm_legacy_start; sc->sc_read = tpm_legacy_read; sc->sc_write = tpm_legacy_write; sc->sc_end = tpm_legacy_end; } else { sc->sc_init = tpm_tis12_init; sc->sc_start = tpm_tis12_start; sc->sc_read = tpm_tis12_read; sc->sc_write = tpm_tis12_write; sc->sc_end = tpm_tis12_end; } printf("%s", device_get_name(dev)); if ((sc->sc_init)(sc, irq, "tpm")) { tpm_detach(dev); return ENXIO; } if (sc->sc_init == tpm_tis12_init && sc->irq_res != NULL && bus_setup_intr(dev, sc->irq_res, INTR_TYPE_TTY, NULL, tpm_intr, sc, &sc->intr_cookie) != 0) { tpm_detach(dev); printf(": cannot establish interrupt\n"); return 1; } sc->sc_cdev = make_dev(&tpm_cdevsw, device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "tpm"); sc->sc_cdev->si_drv1 = sc; return 0; } int tpm_detach(device_t dev) { struct tpm_softc * sc = device_get_softc(dev); if(sc->intr_cookie){ bus_teardown_intr(dev, sc->irq_res, sc->intr_cookie); } if(sc->mem_res){ bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); } if(sc->irq_res){ bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); } if(sc->sc_cdev){ destroy_dev(sc->sc_cdev); } return 0; } #else /* * OpenBSD specific code for probing and attaching TPM to device tree. */ int -tpm_match(struct device *parent, void *match, void *aux) +tpm_match(device_t parent, void *match, void *aux) { struct isa_attach_args *ia = aux; struct cfdata *cf = match; bus_space_tag_t bt = ia->ia_memt; bus_space_handle_t bh; int rv; /* There can be only one. 
*/ if (cf->cf_unit) return 0; if (tpm_legacy_probe(ia->ia_iot, ia->ia_iobase)) { ia->ia_iosize = 2; return 1; } if (ia->ia_maddr == -1) return 0; if (bus_space_map(bt, ia->ia_maddr, TPM_SIZE, 0, &bh)) return 0; if ((rv = tpm_tis12_probe(bt, bh))) { ia->ia_iosize = 0; ia->ia_msize = TPM_SIZE; } bus_space_unmap(bt, bh, TPM_SIZE); return rv; } void -tpm_attach(struct device *parent, struct device *self, void *aux) +tpm_attach(device_t parent, device_t self, void *aux) { struct tpm_softc *sc = (struct tpm_softc *)self; struct isa_attach_args *ia = aux; bus_addr_t iobase; bus_size_t size; int rv; if (tpm_legacy_probe(ia->ia_iot, ia->ia_iobase)) { sc->sc_bt = ia->ia_iot; iobase = ia->ia_iobase; size = ia->ia_iosize; sc->sc_batm = ia->ia_iot; sc->sc_init = tpm_legacy_init; sc->sc_start = tpm_legacy_start; sc->sc_read = tpm_legacy_read; sc->sc_write = tpm_legacy_write; sc->sc_end = tpm_legacy_end; } else { sc->sc_bt = ia->ia_memt; iobase = ia->ia_maddr; size = TPM_SIZE; sc->sc_init = tpm_tis12_init; sc->sc_start = tpm_tis12_start; sc->sc_read = tpm_tis12_read; sc->sc_write = tpm_tis12_write; sc->sc_end = tpm_tis12_end; } if (bus_space_map(sc->sc_bt, iobase, size, 0, &sc->sc_bh)) { printf(": cannot map registers\n"); return; } if ((rv = (sc->sc_init)(sc, ia->ia_irq, sc->sc_dev.dv_xname))) { bus_space_unmap(sc->sc_bt, sc->sc_bh, size); return; } /* * Only setup interrupt handler when we have a vector and the * chip is TIS 1.2 compliant. */ if (sc->sc_init == tpm_tis12_init && ia->ia_irq != IRQUNK && (sc->sc_ih = isa_intr_establish(ia->ia_ic, ia->ia_irq, IST_EDGE, IPL_TTY, tpm_intr, sc, sc->sc_dev.dv_xname)) == NULL) { bus_space_unmap(sc->sc_bt, sc->sc_bh, TPM_SIZE); printf("%s: cannot establish interrupt\n", sc->sc_dev.dv_xname); return; } #ifdef __FreeBSD__ sc->sc_suspend = 0; #else sc->sc_suspend = PWR_RESUME; sc->sc_powerhook = powerhook_establish(tpm_powerhook, sc); #endif } #endif /* Probe TPM using TIS 1.2 interface. */ int tpm_tis12_probe(bus_space_tag_t bt, bus_space_handle_t bh) { u_int32_t r; u_int8_t save, reg; r = bus_space_read_4(bt, bh, TPM_INTF_CAPABILITIES); if (r == 0xffffffff) return 0; #ifdef TPM_DEBUG printf("tpm: caps=%b\n", r, TPM_CAPBITS); #endif if ((r & TPM_CAPSREQ) != TPM_CAPSREQ || !(r & (TPM_INTF_INT_EDGE_RISING | TPM_INTF_INT_LEVEL_LOW))) { #ifdef TPM_DEBUG printf("tpm: caps too low (caps=%b)\n", r, TPM_CAPBITS); #endif return 0; } save = bus_space_read_1(bt, bh, TPM_ACCESS); bus_space_write_1(bt, bh, TPM_ACCESS, TPM_ACCESS_REQUEST_USE); reg = bus_space_read_1(bt, bh, TPM_ACCESS); if ((reg & TPM_ACCESS_VALID) && (reg & TPM_ACCESS_ACTIVE_LOCALITY) && bus_space_read_4(bt, bh, TPM_ID) != 0xffffffff) return 1; bus_space_write_1(bt, bh, TPM_ACCESS, save); return 0; } /* * Setup interrupt vector if one is provided and interrupts are know to * work on that particular chip. */ int tpm_tis12_irqinit(struct tpm_softc *sc, int irq, int idx) { u_int32_t r; if ((irq == IRQUNK) || (tpm_devs[idx].flags & TPM_DEV_NOINTS)) { sc->sc_vector = IRQUNK; return 0; } /* Ack and disable all interrupts. */ bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE, bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) & ~TPM_GLOBAL_INT_ENABLE); bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INT_STATUS, bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INT_STATUS)); /* Program interrupt vector. */ bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_INT_VECTOR, irq); sc->sc_vector = irq; /* Program interrupt type. 
*/ if (sc->sc_capabilities & TPM_INTF_INT_EDGE_RISING) r = TPM_INT_EDGE_RISING; else if (sc->sc_capabilities & TPM_INTF_INT_LEVEL_HIGH) r = TPM_INT_LEVEL_HIGH; else r = TPM_INT_LEVEL_LOW; bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE, r); return 0; } /* Setup TPM using TIS 1.2 interface. */ int tpm_tis12_init(struct tpm_softc *sc, int irq, const char *name) { u_int32_t r; int i; r = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTF_CAPABILITIES); #ifdef TPM_DEBUG printf(" caps=%b ", r, TPM_CAPBITS); #endif if ((r & TPM_CAPSREQ) != TPM_CAPSREQ || !(r & (TPM_INTF_INT_EDGE_RISING | TPM_INTF_INT_LEVEL_LOW))) { printf(": capabilities too low (caps=%b)\n", r, TPM_CAPBITS); return 1; } sc->sc_capabilities = r; sc->sc_devid = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_ID); sc->sc_rev = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_REV); for (i = 0; tpm_devs[i].devid; i++) if (tpm_devs[i].devid == sc->sc_devid) break; if (tpm_devs[i].devid) printf(": %s rev 0x%x\n", tpm_devs[i].name, sc->sc_rev); else printf(": device 0x%08x rev 0x%x\n", sc->sc_devid, sc->sc_rev); if (tpm_tis12_irqinit(sc, irq, i)) return 1; if (tpm_request_locality(sc, 0)) return 1; /* Abort whatever it thought it was doing. */ bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY); return 0; } int tpm_request_locality(struct tpm_softc *sc, int l) { u_int32_t r; int to, rv; if (l != 0) return EINVAL; if ((bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) == (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) return 0; bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS, TPM_ACCESS_REQUEST_USE); to = tpm_tmotohz(TPM_ACCESS_TMO); while ((r = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_ACCESS) & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) != (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY) && to--) { rv = tsleep(sc->sc_init, PRIBIO | PCATCH, "tpm_locality", 1); if (rv && rv != EWOULDBLOCK) { #ifdef TPM_DEBUG printf("tpm_request_locality: interrupted %d\n", rv); #endif return rv; } } if ((r & (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) != (TPM_ACCESS_VALID | TPM_ACCESS_ACTIVE_LOCALITY)) { #ifdef TPM_DEBUG printf("tpm_request_locality: access %b\n", r, TPM_ACCESS_BITS); #endif return EBUSY; } return 0; } int tpm_getburst(struct tpm_softc *sc) { int burst, to, rv; to = tpm_tmotohz(TPM_BURST_TMO); burst = 0; while (burst == 0 && to--) { /* * Burst count has to be read from bits 8 to 23 without * touching any other bits, eg. the actual status bits 0 * to 7. */ burst = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 1); burst |= bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS + 2) << 8; #ifdef TPM_DEBUG printf("tpm_getburst: read %d\n", burst); #endif if (burst) return burst; rv = tsleep(sc, PRIBIO | PCATCH, "tpm_getburst", 1); if (rv && rv != EWOULDBLOCK) { return 0; } } return 0; } u_int8_t tpm_status(struct tpm_softc *sc) { u_int8_t status; status = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_STS) & TPM_STS_MASK; return status; } int tpm_tmotohz(int tmo) { struct timeval tv; tv.tv_sec = tmo / 1000; tv.tv_usec = 1000 * (tmo % 1000); return tvtohz(&tv); } /* Save TPM state on suspend. 
*/ int #ifdef __FreeBSD__ tpm_suspend(device_t dev) #else tpm_suspend(struct tpm_softc *sc, int why) #endif { #ifdef __FreeBSD__ struct tpm_softc *sc = device_get_softc(dev); int why = 1; #endif u_int8_t command[] = { 0, 193, /* TPM_TAG_RQU_COMMAND */ 0, 0, 0, 10, /* Length in bytes */ 0, 0, 0, 156 /* TPM_ORD_SaveStates */ }; /* * Power down: We have to issue the SaveStates command. */ sc->sc_write(sc, &command, sizeof(command)); sc->sc_read(sc, &command, sizeof(command), NULL, TPM_HDRSIZE); #ifdef TPM_DEBUG printf("tpm_suspend: power down: %d -> %d\n", sc->sc_suspend, why); #endif sc->sc_suspend = why; return 0; } /* * Handle resume event. Actually nothing to do as the BIOS is supposed * to restore the previously saved state. */ int #ifdef __FreeBSD__ tpm_resume(device_t dev) #else tpm_resume(struct tpm_softc *sc, int why) #endif { #ifdef __FreeBSD__ struct tpm_softc *sc = device_get_softc(dev); int why = 0; #endif #ifdef TPM_DEBUG printf("tpm_resume: resume: %d -> %d\n", sc->sc_suspend, why); #endif sc->sc_suspend = why; return 0; } /* Dispatch suspend and resume events. */ #ifndef __FreeBSD__ void tpm_powerhook(int why, void *self) { struct tpm_softc *sc = (struct tpm_softc *)self; if (why != PWR_RESUME) tpm_suspend(sc, why); else tpm_resume(sc, why); } #endif /* !__FreeBSD__ */ /* Wait for given status bits using polling. */ int tpm_waitfor_poll(struct tpm_softc *sc, u_int8_t mask, int tmo, void *c) { int rv; /* * Poll until either the requested condition or a time out is * met. */ while (((sc->sc_stat = tpm_status(sc)) & mask) != mask && tmo--) { rv = tsleep(c, PRIBIO | PCATCH, "tpm_poll", 1); if (rv && rv != EWOULDBLOCK) { #ifdef TPM_DEBUG printf("tpm_waitfor_poll: interrupted %d\n", rv); #endif return rv; } } return 0; } /* Wait for given status bits using interrupts. */ int tpm_waitfor_int(struct tpm_softc *sc, u_int8_t mask, int tmo, void *c, int inttype) { int rv, to; /* Poll and return when condition is already met. */ sc->sc_stat = tpm_status(sc); if ((sc->sc_stat & mask) == mask) return 0; /* * Enable interrupt on tpm chip. Note that interrupts on our * level (SPL_TTY) are disabled (see tpm{read,write} et al) and * will not be delivered to the cpu until we call tsleep(9) below. */ bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE, bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) | inttype); bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE, bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) | TPM_GLOBAL_INT_ENABLE); /* * Poll once more to remedy the race between previous polling * and enabling interrupts on the tpm chip. */ sc->sc_stat = tpm_status(sc); if ((sc->sc_stat & mask) == mask) { rv = 0; goto out; } to = tpm_tmotohz(tmo); #ifdef TPM_DEBUG printf("tpm_waitfor_int: sleeping for %d ticks on %p\n", to, c); #endif /* * tsleep(9) enables interrupts on the cpu and returns after * wake up with interrupts disabled again. Note that interrupts * generated by the tpm chip while being at SPL_TTY are not lost * but held and delivered as soon as the cpu goes below SPL_TTY. */ rv = tsleep(c, PRIBIO | PCATCH, "tpm_intr", to); sc->sc_stat = tpm_status(sc); #ifdef TPM_DEBUG printf("tpm_waitfor_int: woke up with rv %d stat %b\n", rv, sc->sc_stat, TPM_STS_BITS); #endif if ((sc->sc_stat & mask) == mask) rv = 0; /* Disable interrupts on tpm chip again. 
*/ out: bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE, bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) & ~TPM_GLOBAL_INT_ENABLE); bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE, bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INTERRUPT_ENABLE) & ~inttype); return rv; } /* * Wait on given status bits, uses interrupts where possible, otherwise polls. */ int tpm_waitfor(struct tpm_softc *sc, u_int8_t b0, int tmo, void *c) { u_int8_t b; int re, to, rv; #ifdef TPM_DEBUG printf("tpm_waitfor: b0 %b\n", b0, TPM_STS_BITS); #endif /* * If possible, use interrupts, otherwise poll. * * We use interrupts for TPM_STS_VALID and TPM_STS_DATA_AVAIL (if * the tpm chips supports them) as waiting for those can take * really long. The other TPM_STS* are not needed very often * so we do not support them. */ if (sc->sc_vector != IRQUNK) { b = b0; /* * Wait for data ready. This interrupt only occurs * when both TPM_STS_VALID and TPM_STS_DATA_AVAIL are asserted. * Thus we don't have to bother with TPM_STS_VALID * separately and can just return. * * This only holds for interrupts! When using polling * both flags have to be waited for, see below. */ if ((b & TPM_STS_DATA_AVAIL) && (sc->sc_capabilities & TPM_INTF_DATA_AVAIL_INT)) return tpm_waitfor_int(sc, b, tmo, c, TPM_DATA_AVAIL_INT); /* Wait for status valid bit. */ if ((b & TPM_STS_VALID) && (sc->sc_capabilities & TPM_INTF_STS_VALID_INT)) { rv = tpm_waitfor_int(sc, b, tmo, c, TPM_STS_VALID_INT); if (rv != 0) return rv; else b = b0 & ~TPM_STS_VALID; } /* * When all flags are taken care of, return. Otherwise * use polling for eg. TPM_STS_CMD_READY. */ if (b == 0) return 0; } re = 3; restart: /* * If requested wait for TPM_STS_VALID before dealing with * any other flag. Eg. when both TPM_STS_DATA_AVAIL and TPM_STS_VALID * are requested, wait for the latter first. */ b = b0; if (b0 & TPM_STS_VALID) b = TPM_STS_VALID; to = tpm_tmotohz(tmo); again: if ((rv = tpm_waitfor_poll(sc, b, to, c)) != 0) return rv; if ((b & sc->sc_stat) == TPM_STS_VALID) { /* Now wait for other flags. */ b = b0 & ~TPM_STS_VALID; to++; goto again; } if ((sc->sc_stat & b) != b) { #ifdef TPM_DEBUG printf("tpm_waitfor: timeout: stat=%b b=%b\n", sc->sc_stat, TPM_STS_BITS, b, TPM_STS_BITS); #endif if (re-- && (b0 & TPM_STS_VALID)) { bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_RESP_RETRY); goto restart; } return EIO; } return 0; } /* Start transaction. */ int tpm_tis12_start(struct tpm_softc *sc, int flag) { int rv; if (flag == UIO_READ) { rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID, TPM_READ_TMO, sc->sc_read); return rv; } /* Own our (0th) locality. */ if ((rv = tpm_request_locality(sc, 0)) != 0) return rv; sc->sc_stat = tpm_status(sc); if (sc->sc_stat & TPM_STS_CMD_READY) { #ifdef TPM_DEBUG printf("tpm_tis12_start: UIO_WRITE status %b\n", sc->sc_stat, TPM_STS_BITS); #endif return 0; } #ifdef TPM_DEBUG printf("tpm_tis12_start: UIO_WRITE readying chip\n"); #endif /* Abort previous and restart. 
*/ bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY); if ((rv = tpm_waitfor(sc, TPM_STS_CMD_READY, TPM_READY_TMO, sc->sc_write))) { #ifdef TPM_DEBUG printf("tpm_tis12_start: UIO_WRITE readying failed %d\n", rv); #endif return rv; } #ifdef TPM_DEBUG printf("tpm_tis12_start: UIO_WRITE readying done\n"); #endif return 0; } int tpm_tis12_read(struct tpm_softc *sc, void *buf, int len, size_t *count, int flags) { u_int8_t *p = buf; size_t cnt; int rv, n, bcnt; #ifdef TPM_DEBUG printf("tpm_tis12_read: len %d\n", len); #endif cnt = 0; while (len > 0) { if ((rv = tpm_waitfor(sc, TPM_STS_DATA_AVAIL | TPM_STS_VALID, TPM_READ_TMO, sc->sc_read))) return rv; bcnt = tpm_getburst(sc); n = MIN(len, bcnt); #ifdef TPM_DEBUG printf("tpm_tis12_read: fetching %d, burst is %d\n", n, bcnt); #endif for (; n--; len--) { *p++ = bus_space_read_1(sc->sc_bt, sc->sc_bh, TPM_DATA); cnt++; } if ((flags & TPM_PARAM_SIZE) == 0 && cnt >= 6) break; } #ifdef TPM_DEBUG printf("tpm_tis12_read: read %zd bytes, len %d\n", cnt, len); #endif if (count) *count = cnt; return 0; } int tpm_tis12_write(struct tpm_softc *sc, void *buf, int len) { u_int8_t *p = buf; size_t cnt; int rv, r; #ifdef TPM_DEBUG printf("tpm_tis12_write: sc %p buf %p len %d\n", sc, buf, len); #endif if ((rv = tpm_request_locality(sc, 0)) != 0) return rv; cnt = 0; while (cnt < len - 1) { for (r = tpm_getburst(sc); r > 0 && cnt < len - 1; r--) { bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++); cnt++; } if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) { #ifdef TPM_DEBUG printf("tpm_tis12_write: failed burst rv %d\n", rv); #endif return rv; } sc->sc_stat = tpm_status(sc); if (!(sc->sc_stat & TPM_STS_DATA_EXPECT)) { #ifdef TPM_DEBUG printf("tpm_tis12_write: failed rv %d stat=%b\n", rv, sc->sc_stat, TPM_STS_BITS); #endif return EIO; } } bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_DATA, *p++); cnt++; if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc))) { #ifdef TPM_DEBUG printf("tpm_tis12_write: failed last byte rv %d\n", rv); #endif return rv; } if ((sc->sc_stat & TPM_STS_DATA_EXPECT) != 0) { #ifdef TPM_DEBUG printf("tpm_tis12_write: failed rv %d stat=%b\n", rv, sc->sc_stat, TPM_STS_BITS); #endif return EIO; } #ifdef TPM_DEBUG printf("tpm_tis12_write: wrote %d byte\n", cnt); #endif return 0; } /* Finish transaction. */ int tpm_tis12_end(struct tpm_softc *sc, int flag, int err) { int rv = 0; if (flag == UIO_READ) { if ((rv = tpm_waitfor(sc, TPM_STS_VALID, TPM_READ_TMO, sc->sc_read))) return rv; /* Still more data? */ sc->sc_stat = tpm_status(sc); if (!err && ((sc->sc_stat & TPM_STS_DATA_AVAIL) == TPM_STS_DATA_AVAIL)) { #ifdef TPM_DEBUG printf("tpm_tis12_end: read failed stat=%b\n", sc->sc_stat, TPM_STS_BITS); #endif rv = EIO; } bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, TPM_STS_CMD_READY); /* Release our (0th) locality. */ bus_space_write_1(sc->sc_bt, sc->sc_bh,TPM_ACCESS, TPM_ACCESS_ACTIVE_LOCALITY); } else { /* Hungry for more? */ sc->sc_stat = tpm_status(sc); if (!err && (sc->sc_stat & TPM_STS_DATA_EXPECT)) { #ifdef TPM_DEBUG printf("tpm_tis12_end: write failed stat=%b\n", sc->sc_stat, TPM_STS_BITS); #endif rv = EIO; } bus_space_write_1(sc->sc_bt, sc->sc_bh, TPM_STS, err ? 
TPM_STS_CMD_READY : TPM_STS_GO); } return rv; } #ifdef __FreeBSD__ void #else int #endif tpm_intr(void *v) { struct tpm_softc *sc = v; u_int32_t r; #ifdef TPM_DEBUG static int cnt = 0; #endif r = bus_space_read_4(sc->sc_bt, sc->sc_bh, TPM_INT_STATUS); #ifdef TPM_DEBUG if (r != 0) printf("tpm_intr: int=%b (%d)\n", r, TPM_INTERRUPT_ENABLE_BITS, cnt); else cnt++; #endif if (!(r & (TPM_CMD_READY_INT | TPM_LOCALITY_CHANGE_INT | TPM_STS_VALID_INT | TPM_DATA_AVAIL_INT))) #ifdef __FreeBSD__ return; #else return 0; #endif if (r & TPM_STS_VALID_INT) wakeup(sc); if (r & TPM_CMD_READY_INT) wakeup(sc->sc_write); if (r & TPM_DATA_AVAIL_INT) wakeup(sc->sc_read); if (r & TPM_LOCALITY_CHANGE_INT) wakeup(sc->sc_init); bus_space_write_4(sc->sc_bt, sc->sc_bh, TPM_INT_STATUS, r); #ifdef __FreeBSD__ return; #else return 1; #endif } /* Read single byte using legacy interface. */ static inline u_int8_t tpm_legacy_in(bus_space_tag_t iot, bus_space_handle_t ioh, int reg) { bus_space_write_1(iot, ioh, 0, reg); return bus_space_read_1(iot, ioh, 1); } #if 0 /* Write single byte using legacy interface. */ static inline void tpm_legacy_out(bus_space_tag_t iot, bus_space_handle_t ioh, int reg, u_int8_t v) { bus_space_write_1(iot, ioh, 0, reg); bus_space_write_1(iot, ioh, 1, v); } #endif /* Probe for TPM using legacy interface. */ int tpm_legacy_probe(bus_space_tag_t iot, bus_addr_t iobase) { bus_space_handle_t ioh; u_int8_t r, v; int i, rv = 0; char id[8]; if (!tpm_enabled || iobase == -1) return 0; if (bus_space_map(iot, iobase, 2, 0, &ioh)) return 0; v = bus_space_read_1(iot, ioh, 0); if (v == 0xff) { bus_space_unmap(iot, ioh, 2); return 0; } r = bus_space_read_1(iot, ioh, 1); for (i = sizeof(id); i--; ) id[i] = tpm_legacy_in(iot, ioh, TPM_ID + i); #ifdef TPM_DEBUG printf("tpm_legacy_probe %.4s %d.%d.%d.%d\n", &id[4], id[0], id[1], id[2], id[3]); #endif /* * The only chips using the legacy interface we are aware of are * by Atmel. For other chips, more signatures would have to be added. */ if (!bcmp(&id[4], "ATML", 4)) rv = 1; if (!rv) { /* Not ours; restore the probed register contents (offset, value). */ bus_space_write_1(iot, ioh, 1, r); bus_space_write_1(iot, ioh, 0, v); } bus_space_unmap(iot, ioh, 2); return rv; } /* Setup TPM using legacy interface. */ int tpm_legacy_init(struct tpm_softc *sc, int irq, const char *name) { char id[8]; u_int8_t ioh, iol; int i; if ((i = bus_space_map(sc->sc_batm, tpm_enabled, 2, 0, &sc->sc_bahm))) { printf(": cannot map tpm registers (%d)\n", i); tpm_enabled = 0; return 1; } for (i = sizeof(id); i--; ) id[i] = tpm_legacy_in(sc->sc_bt, sc->sc_bh, TPM_ID + i); printf(": %.4s %d.%d @0x%x\n", &id[4], id[0], id[1], tpm_enabled); iol = tpm_enabled & 0xff; ioh = tpm_enabled >> 16; tpm_enabled = 0; return 0; } /* Start transaction. */ int tpm_legacy_start(struct tpm_softc *sc, int flag) { struct timeval tv; u_int8_t bits, r; int to, rv; bits = flag == UIO_READ ?
TPM_LEGACY_DA : 0; tv.tv_sec = TPM_LEGACY_TMO; tv.tv_usec = 0; to = tvtohz(&tv) / TPM_LEGACY_SLEEP; while (((r = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1)) & (TPM_LEGACY_BUSY|bits)) != bits && to--) { rv = tsleep(sc, PRIBIO | PCATCH, "legacy_tpm_start", TPM_LEGACY_SLEEP); if (rv && rv != EWOULDBLOCK) return rv; } #if defined(TPM_DEBUG) && !defined(__FreeBSD__) printf("%s: bits %b\n", sc->sc_dev.dv_xname, r, TPM_LEGACY_BITS); #endif if ((r & (TPM_LEGACY_BUSY|bits)) != bits) return EIO; return 0; } int tpm_legacy_read(struct tpm_softc *sc, void *buf, int len, size_t *count, int flags) { u_int8_t *p; size_t cnt; int to, rv; cnt = rv = 0; for (p = buf; !rv && len > 0; len--) { for (to = 1000; !(bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1) & TPM_LEGACY_DA); DELAY(1)) if (!to--) return EIO; DELAY(TPM_LEGACY_DELAY); *p++ = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 0); cnt++; } *count = cnt; return 0; } int tpm_legacy_write(struct tpm_softc *sc, void *buf, int len) { u_int8_t *p; int n; for (p = buf, n = len; n--; DELAY(TPM_LEGACY_DELAY)) { if (!n && len != TPM_BUFSIZ) { bus_space_write_1(sc->sc_batm, sc->sc_bahm, 1, TPM_LEGACY_LAST); DELAY(TPM_LEGACY_DELAY); } bus_space_write_1(sc->sc_batm, sc->sc_bahm, 0, *p++); } return 0; } /* Finish transaction. */ int tpm_legacy_end(struct tpm_softc *sc, int flag, int rv) { struct timeval tv; u_int8_t r; int to; if (rv || flag == UIO_READ) bus_space_write_1(sc->sc_batm, sc->sc_bahm, 1, TPM_LEGACY_ABRT); else { tv.tv_sec = TPM_LEGACY_TMO; tv.tv_usec = 0; to = tvtohz(&tv) / TPM_LEGACY_SLEEP; while(((r = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1)) & TPM_LEGACY_BUSY) && to--) { rv = tsleep(sc, PRIBIO | PCATCH, "legacy_tpm_end", TPM_LEGACY_SLEEP); if (rv && rv != EWOULDBLOCK) return rv; } #if defined(TPM_DEBUG) && !defined(__FreeBSD__) printf("%s: bits %b\n", sc->sc_dev.dv_xname, r, TPM_LEGACY_BITS); #endif if (r & TPM_LEGACY_BUSY) return EIO; if (r & TPM_LEGACY_RE) return EIO; /* XXX Retry the loop? */ } return rv; } int #ifdef __FreeBSD__ tpmopen(struct cdev *dev, int flag, int mode, struct thread *td) #else tpmopen(dev_t dev, int flag, int mode, struct proc *p) #endif { struct tpm_softc *sc = TPMSOFTC(dev); if (!sc) return ENXIO; if (sc->sc_flags & TPM_OPEN) return EBUSY; sc->sc_flags |= TPM_OPEN; return 0; } int #ifdef __FreeBSD__ tpmclose(struct cdev *dev, int flag, int mode, struct thread *td) #else tpmclose(dev_t dev, int flag, int mode, struct proc *p) #endif { struct tpm_softc *sc = TPMSOFTC(dev); if (!sc) return ENXIO; if (!(sc->sc_flags & TPM_OPEN)) return EINVAL; sc->sc_flags &= ~TPM_OPEN; return 0; } int #ifdef __FreeBSD__ tpmread(struct cdev *dev, struct uio *uio, int flags) #else tpmread(dev_t dev, struct uio *uio, int flags) #endif { struct tpm_softc *sc = TPMSOFTC(dev); u_int8_t buf[TPM_BUFSIZ], *p; size_t cnt; int n, len, rv, s; if (!sc) return ENXIO; s = spltty(); if ((rv = (sc->sc_start)(sc, UIO_READ))) { splx(s); return rv; } #ifdef TPM_DEBUG printf("tpmread: getting header\n"); #endif if ((rv = (sc->sc_read)(sc, buf, TPM_HDRSIZE, &cnt, 0))) { (sc->sc_end)(sc, UIO_READ, rv); splx(s); return rv; } len = (buf[2] << 24) | (buf[3] << 16) | (buf[4] << 8) | buf[5]; #ifdef TPM_DEBUG printf("tpmread: len %d, io count %d\n", len, uio->uio_resid); #endif if (len > uio->uio_resid) { rv = EIO; (sc->sc_end)(sc, UIO_READ, rv); #ifdef TPM_DEBUG printf("tpmread: bad residual io count 0x%x\n", uio->uio_resid); #endif splx(s); return rv; } /* Copy out header. 
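 * The response starts with a 2-byte tag followed by a 4-byte * big-endian total length, which is why len was assembled from * buf[2]..buf[5] above.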
*/ if ((rv = uiomove((caddr_t)buf, cnt, uio))) { (sc->sc_end)(sc, UIO_READ, rv); splx(s); return rv; } /* Get remaining part of the answer (if anything is left). */ for (len -= cnt, p = buf, n = sizeof(buf); len > 0; p = buf, len -= n, n = sizeof(buf)) { n = MIN(n, len); #ifdef TPM_DEBUG printf("tpmread: n %d len %d\n", n, len); #endif if ((rv = (sc->sc_read)(sc, p, n, NULL, TPM_PARAM_SIZE))) { (sc->sc_end)(sc, UIO_READ, rv); splx(s); return rv; } p += n; if ((rv = uiomove((caddr_t)buf, p - buf, uio))) { (sc->sc_end)(sc, UIO_READ, rv); splx(s); return rv; } } rv = (sc->sc_end)(sc, UIO_READ, rv); splx(s); return rv; } int #ifdef __FreeBSD__ tpmwrite(struct cdev *dev, struct uio *uio, int flags) #else tpmwrite(dev_t dev, struct uio *uio, int flags) #endif { struct tpm_softc *sc = TPMSOFTC(dev); u_int8_t buf[TPM_BUFSIZ]; int n, rv, s; if (!sc) return ENXIO; s = spltty(); #ifdef TPM_DEBUG printf("tpmwrite: io count %d\n", uio->uio_resid); #endif n = MIN(sizeof(buf), uio->uio_resid); if ((rv = uiomove((caddr_t)buf, n, uio))) { splx(s); return rv; } if ((rv = (sc->sc_start)(sc, UIO_WRITE))) { splx(s); return rv; } if ((rv = (sc->sc_write(sc, buf, n)))) { splx(s); return rv; } rv = (sc->sc_end)(sc, UIO_WRITE, rv); splx(s); return rv; } int #ifdef __FreeBSD__ tpmioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags, struct thread *td) #else tpmioctl(dev_t dev, u_long cmd, caddr_t data, int flags, struct proc *p) #endif { return ENOTTY; } Index: head/sys/kern/subr_bus.c =================================================================== --- head/sys/kern/subr_bus.c (revision 303889) +++ head/sys/kern/subr_bus.c (revision 303890) @@ -1,5637 +1,5637 @@ /*- * Copyright (c) 1997,1998,2003 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_bus.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include SYSCTL_NODE(_hw, OID_AUTO, bus, CTLFLAG_RW, NULL, NULL); SYSCTL_ROOT_NODE(OID_AUTO, dev, CTLFLAG_RW, NULL, NULL); /* * Used to attach drivers to devclasses. 
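 * Each driverlink ties one driver to a devclass and records the boot * pass at which that driver is willing to probe (see the passes list * below).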
*/ typedef struct driverlink *driverlink_t; struct driverlink { kobj_class_t driver; TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */ int pass; TAILQ_ENTRY(driverlink) passlink; }; /* * Forward declarations */ typedef TAILQ_HEAD(devclass_list, devclass) devclass_list_t; typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t; typedef TAILQ_HEAD(device_list, device) device_list_t; struct devclass { TAILQ_ENTRY(devclass) link; devclass_t parent; /* parent in devclass hierarchy */ driver_list_t drivers; /* bus devclasses store drivers for bus */ char *name; device_t *devices; /* array of devices indexed by unit */ int maxunit; /* size of devices array */ int flags; #define DC_HAS_CHILDREN 1 struct sysctl_ctx_list sysctl_ctx; struct sysctl_oid *sysctl_tree; }; /** * @brief Implementation of device. */ struct device { /* * A device is a kernel object. The first field must be the * current ops table for the object. */ KOBJ_FIELDS; /* * Device hierarchy. */ TAILQ_ENTRY(device) link; /**< list of devices in parent */ TAILQ_ENTRY(device) devlink; /**< global device list membership */ device_t parent; /**< parent of this device */ device_list_t children; /**< list of child devices */ /* * Details of this device. */ driver_t *driver; /**< current driver */ devclass_t devclass; /**< current device class */ int unit; /**< current unit number */ char* nameunit; /**< name+unit e.g. foodev0 */ char* desc; /**< driver specific description */ int busy; /**< count of calls to device_busy() */ device_state_t state; /**< current device state */ uint32_t devflags; /**< api level flags for device_get_flags() */ u_int flags; /**< internal device flags */ u_int order; /**< order from device_add_child_ordered() */ void *ivars; /**< instance variables */ void *softc; /**< current driver's variables */ struct sysctl_ctx_list sysctl_ctx; /**< state for sysctl variables */ struct sysctl_oid *sysctl_tree; /**< state for sysctl variables */ }; static MALLOC_DEFINE(M_BUS, "bus", "Bus data structures"); static MALLOC_DEFINE(M_BUS_SC, "bus-sc", "Bus data structures, softc"); static void devctl2_init(void); #ifdef BUS_DEBUG static int bus_debug = 1; SYSCTL_INT(_debug, OID_AUTO, bus_debug, CTLFLAG_RWTUN, &bus_debug, 0, "Bus debug level"); #define PDEBUG(a) if (bus_debug) {printf("%s:%d: ", __func__, __LINE__), printf a; printf("\n");} #define DEVICENAME(d) ((d)? device_get_name(d): "no device") #define DRIVERNAME(d) ((d)? d->name : "no driver") #define DEVCLANAME(d) ((d)? d->name : "no devclass") /** * Produce the indenting, indent*2 spaces plus a '.' ahead of that to * prevent syslog from deleting initial spaces */ #define indentprintf(p) do { int iJ; printf("."); for (iJ=0; iJparent ? 
dc->parent->name : ""; break; default: return (EINVAL); } return (SYSCTL_OUT_STR(req, value)); } static void devclass_sysctl_init(devclass_t dc) { if (dc->sysctl_tree != NULL) return; sysctl_ctx_init(&dc->sysctl_ctx); dc->sysctl_tree = SYSCTL_ADD_NODE(&dc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_dev), OID_AUTO, dc->name, CTLFLAG_RD, NULL, ""); SYSCTL_ADD_PROC(&dc->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD, dc, DEVCLASS_SYSCTL_PARENT, devclass_sysctl_handler, "A", "parent class"); } enum { DEVICE_SYSCTL_DESC, DEVICE_SYSCTL_DRIVER, DEVICE_SYSCTL_LOCATION, DEVICE_SYSCTL_PNPINFO, DEVICE_SYSCTL_PARENT, }; static int device_sysctl_handler(SYSCTL_HANDLER_ARGS) { device_t dev = (device_t)arg1; const char *value; char *buf; int error; buf = NULL; switch (arg2) { case DEVICE_SYSCTL_DESC: value = dev->desc ? dev->desc : ""; break; case DEVICE_SYSCTL_DRIVER: value = dev->driver ? dev->driver->name : ""; break; case DEVICE_SYSCTL_LOCATION: value = buf = malloc(1024, M_BUS, M_WAITOK | M_ZERO); bus_child_location_str(dev, buf, 1024); break; case DEVICE_SYSCTL_PNPINFO: value = buf = malloc(1024, M_BUS, M_WAITOK | M_ZERO); bus_child_pnpinfo_str(dev, buf, 1024); break; case DEVICE_SYSCTL_PARENT: value = dev->parent ? dev->parent->nameunit : ""; break; default: return (EINVAL); } error = SYSCTL_OUT_STR(req, value); if (buf != NULL) free(buf, M_BUS); return (error); } static void device_sysctl_init(device_t dev) { devclass_t dc = dev->devclass; int domain; if (dev->sysctl_tree != NULL) return; devclass_sysctl_init(dc); sysctl_ctx_init(&dev->sysctl_ctx); dev->sysctl_tree = SYSCTL_ADD_NODE(&dev->sysctl_ctx, SYSCTL_CHILDREN(dc->sysctl_tree), OID_AUTO, dev->nameunit + strlen(dc->name), CTLFLAG_RD, NULL, ""); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%desc", CTLTYPE_STRING | CTLFLAG_RD, dev, DEVICE_SYSCTL_DESC, device_sysctl_handler, "A", "device description"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%driver", CTLTYPE_STRING | CTLFLAG_RD, dev, DEVICE_SYSCTL_DRIVER, device_sysctl_handler, "A", "device driver name"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%location", CTLTYPE_STRING | CTLFLAG_RD, dev, DEVICE_SYSCTL_LOCATION, device_sysctl_handler, "A", "device location relative to parent"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%pnpinfo", CTLTYPE_STRING | CTLFLAG_RD, dev, DEVICE_SYSCTL_PNPINFO, device_sysctl_handler, "A", "device identification"); SYSCTL_ADD_PROC(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD, dev, DEVICE_SYSCTL_PARENT, device_sysctl_handler, "A", "parent device"); if (bus_get_domain(dev, &domain) == 0) SYSCTL_ADD_INT(&dev->sysctl_ctx, SYSCTL_CHILDREN(dev->sysctl_tree), OID_AUTO, "%domain", CTLFLAG_RD, NULL, domain, "NUMA domain"); } static void device_sysctl_update(device_t dev) { devclass_t dc = dev->devclass; if (dev->sysctl_tree == NULL) return; sysctl_rename_oid(dev->sysctl_tree, dev->nameunit + strlen(dc->name)); } static void device_sysctl_fini(device_t dev) { if (dev->sysctl_tree == NULL) return; sysctl_ctx_free(&dev->sysctl_ctx); dev->sysctl_tree = NULL; } /* * /dev/devctl implementation */ /* * This design allows only one reader for /dev/devctl. This is not desirable * in the long run, but will get a lot of hair out of this implementation. * Maybe we should make this device a clonable device. 
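 *
 * For illustration only (not part of this file): a userland consumer
 * such as devd(8) essentially just does
 *
 *	fd = open("/dev/devctl", O_RDONLY);
 *	n = read(fd, buf, sizeof(buf));
 *
 * and gets exactly one event per read(2) call.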
 * * Also note: we specifically do not attach a device to the device_t tree * to avoid potential chicken and egg problems. One could argue that all * of this belongs to the root node. One could also further argue that the * sysctl interface that we have now might more properly be an ioctl * interface, but at this stage of the game, I'm not inclined to rock that * boat. * * I'm also not sure whether the SIGIO support is done correctly, as * I copied it from a driver that had SIGIO support that likely hasn't been * tested since 3.4 or 2.2.8! */ /* Deprecated way to adjust queue length */ static int sysctl_devctl_disable(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_hw_bus, OID_AUTO, devctl_disable, CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 0, sysctl_devctl_disable, "I", "devctl disable -- deprecated"); #define DEVCTL_DEFAULT_QUEUE_LEN 1000 static int sysctl_devctl_queue(SYSCTL_HANDLER_ARGS); static int devctl_queue_length = DEVCTL_DEFAULT_QUEUE_LEN; SYSCTL_PROC(_hw_bus, OID_AUTO, devctl_queue, CTLTYPE_INT | CTLFLAG_RWTUN | CTLFLAG_MPSAFE, NULL, 0, sysctl_devctl_queue, "I", "devctl queue length"); static d_open_t devopen; static d_close_t devclose; static d_read_t devread; static d_ioctl_t devioctl; static d_poll_t devpoll; static d_kqfilter_t devkqfilter; static struct cdevsw dev_cdevsw = { .d_version = D_VERSION, .d_open = devopen, .d_close = devclose, .d_read = devread, .d_ioctl = devioctl, .d_poll = devpoll, .d_kqfilter = devkqfilter, .d_name = "devctl", }; struct dev_event_info { char *dei_data; TAILQ_ENTRY(dev_event_info) dei_link; }; TAILQ_HEAD(devq, dev_event_info); static struct dev_softc { int inuse; int nonblock; int queued; int async; struct mtx mtx; struct cv cv; struct selinfo sel; struct devq devq; struct sigio *sigio; } devsoftc; static void filt_devctl_detach(struct knote *kn); static int filt_devctl_read(struct knote *kn, long hint); struct filterops devctl_rfiltops = { .f_isfd = 1, .f_detach = filt_devctl_detach, .f_event = filt_devctl_read, }; static struct cdev *devctl_dev; static void devinit(void) { devctl_dev = make_dev_credf(MAKEDEV_ETERNAL, &dev_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600, "devctl"); mtx_init(&devsoftc.mtx, "dev mtx", "devd", MTX_DEF); cv_init(&devsoftc.cv, "dev cv"); TAILQ_INIT(&devsoftc.devq); knlist_init_mtx(&devsoftc.sel.si_note, &devsoftc.mtx); devctl2_init(); } static int devopen(struct cdev *dev, int oflags, int devtype, struct thread *td) { mtx_lock(&devsoftc.mtx); if (devsoftc.inuse) { mtx_unlock(&devsoftc.mtx); return (EBUSY); } /* move to init */ devsoftc.inuse = 1; mtx_unlock(&devsoftc.mtx); return (0); } static int devclose(struct cdev *dev, int fflag, int devtype, struct thread *td) { mtx_lock(&devsoftc.mtx); devsoftc.inuse = 0; devsoftc.nonblock = 0; devsoftc.async = 0; cv_broadcast(&devsoftc.cv); funsetown(&devsoftc.sigio); mtx_unlock(&devsoftc.mtx); return (0); } /* * The read channel for this device is used to report changes to * userland in realtime. We are required to free the data as well as * the n1 object because we allocate them separately. Also note that * we return one record at a time. If you try to read this device a * character at a time, you will lose the rest of the data. Listening * programs are expected to cope.
*/ static int devread(struct cdev *dev, struct uio *uio, int ioflag) { struct dev_event_info *n1; int rv; mtx_lock(&devsoftc.mtx); while (TAILQ_EMPTY(&devsoftc.devq)) { if (devsoftc.nonblock) { mtx_unlock(&devsoftc.mtx); return (EAGAIN); } rv = cv_wait_sig(&devsoftc.cv, &devsoftc.mtx); if (rv) { /* * Need to translate ERESTART to EINTR here? -- jake */ mtx_unlock(&devsoftc.mtx); return (rv); } } n1 = TAILQ_FIRST(&devsoftc.devq); TAILQ_REMOVE(&devsoftc.devq, n1, dei_link); devsoftc.queued--; mtx_unlock(&devsoftc.mtx); rv = uiomove(n1->dei_data, strlen(n1->dei_data), uio); free(n1->dei_data, M_BUS); free(n1, M_BUS); return (rv); } static int devioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { switch (cmd) { case FIONBIO: if (*(int*)data) devsoftc.nonblock = 1; else devsoftc.nonblock = 0; return (0); case FIOASYNC: if (*(int*)data) devsoftc.async = 1; else devsoftc.async = 0; return (0); case FIOSETOWN: return fsetown(*(int *)data, &devsoftc.sigio); case FIOGETOWN: *(int *)data = fgetown(&devsoftc.sigio); return (0); /* (un)Support for other fcntl() calls. */ case FIOCLEX: case FIONCLEX: case FIONREAD: default: break; } return (ENOTTY); } static int devpoll(struct cdev *dev, int events, struct thread *td) { int revents = 0; mtx_lock(&devsoftc.mtx); if (events & (POLLIN | POLLRDNORM)) { if (!TAILQ_EMPTY(&devsoftc.devq)) revents = events & (POLLIN | POLLRDNORM); else selrecord(td, &devsoftc.sel); } mtx_unlock(&devsoftc.mtx); return (revents); } static int devkqfilter(struct cdev *dev, struct knote *kn) { int error; if (kn->kn_filter == EVFILT_READ) { kn->kn_fop = &devctl_rfiltops; knlist_add(&devsoftc.sel.si_note, kn, 0); error = 0; } else error = EINVAL; return (error); } static void filt_devctl_detach(struct knote *kn) { knlist_remove(&devsoftc.sel.si_note, kn, 0); } static int filt_devctl_read(struct knote *kn, long hint) { kn->kn_data = devsoftc.queued; return (kn->kn_data != 0); } /** * @brief Return whether the userland process is running */ boolean_t devctl_process_running(void) { return (devsoftc.inuse == 1); } /** * @brief Queue data to be read from the devctl device * * Generic interface to queue data to the devctl device. It is * assumed that @p data is properly formatted. It is further assumed * that @p data is allocated using the M_BUS malloc type. */ void devctl_queue_data_f(char *data, int flags) { struct dev_event_info *n1 = NULL, *n2 = NULL; if (strlen(data) == 0) goto out; if (devctl_queue_length == 0) goto out; n1 = malloc(sizeof(*n1), M_BUS, flags); if (n1 == NULL) goto out; n1->dei_data = data; mtx_lock(&devsoftc.mtx); if (devctl_queue_length == 0) { mtx_unlock(&devsoftc.mtx); free(n1->dei_data, M_BUS); free(n1, M_BUS); return; } /* Leave at least one spot in the queue... */ while (devsoftc.queued > devctl_queue_length - 1) { n2 = TAILQ_FIRST(&devsoftc.devq); TAILQ_REMOVE(&devsoftc.devq, n2, dei_link); free(n2->dei_data, M_BUS); free(n2, M_BUS); devsoftc.queued--; } TAILQ_INSERT_TAIL(&devsoftc.devq, n1, dei_link); devsoftc.queued++; cv_broadcast(&devsoftc.cv); KNOTE_LOCKED(&devsoftc.sel.si_note, 0); mtx_unlock(&devsoftc.mtx); selwakeup(&devsoftc.sel); if (devsoftc.async && devsoftc.sigio != NULL) pgsigio(&devsoftc.sigio, SIGIO, 0); return; out: /* * We have to free data on all error paths since the caller * assumes it will be free'd when this item is dequeued. 
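 *
 * Minimal sketch of the expected calling pattern (illustrative only;
 * len and the message text are made up):
 *
 *	msg = malloc(len, M_BUS, M_NOWAIT);
 *	if (msg != NULL) {
 *		snprintf(msg, len, "!system=FOO subsystem=BAR type=BAZ\n");
 *		devctl_queue_data(msg);
 *	}
 *
 * On success or failure, ownership of msg passes to devctl.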
*/ free(data, M_BUS); return; } void devctl_queue_data(char *data) { devctl_queue_data_f(data, M_NOWAIT); } /** * @brief Send a 'notification' to userland, using standard ways */ void devctl_notify_f(const char *system, const char *subsystem, const char *type, const char *data, int flags) { int len = 0; char *msg; if (system == NULL) return; /* BOGUS! Must specify system. */ if (subsystem == NULL) return; /* BOGUS! Must specify subsystem. */ if (type == NULL) return; /* BOGUS! Must specify type. */ len += strlen(" system=") + strlen(system); len += strlen(" subsystem=") + strlen(subsystem); len += strlen(" type=") + strlen(type); /* add in the data message plus newline. */ if (data != NULL) len += strlen(data); len += 3; /* '!', '\n', and NUL */ msg = malloc(len, M_BUS, flags); if (msg == NULL) return; /* Drop it on the floor */ if (data != NULL) snprintf(msg, len, "!system=%s subsystem=%s type=%s %s\n", system, subsystem, type, data); else snprintf(msg, len, "!system=%s subsystem=%s type=%s\n", system, subsystem, type); devctl_queue_data_f(msg, flags); } void devctl_notify(const char *system, const char *subsystem, const char *type, const char *data) { devctl_notify_f(system, subsystem, type, data, M_NOWAIT); } /* * Common routine that tries to make sending messages as easy as possible. * We allocate memory for the data, copy strings into that, but do not * free it unless there's an error. The dequeue part of the driver should * free the data. We don't send data when the device is disabled. We do * send data, even when we have no listeners, because we wish to avoid * races relating to startup and restart of listening applications. * * devaddq is designed to string together the type of event, with the * object of that event, plus the plug and play info and location info * for that event. This is likely most useful for devices, but less * useful for other consumers of this interface. Those should use * the devctl_queue_data() interface instead. */ static void devaddq(const char *type, const char *what, device_t dev) { char *data = NULL; char *loc = NULL; char *pnp = NULL; const char *parstr; if (!devctl_queue_length)/* Rare race, but lost races safely discard */ return; data = malloc(1024, M_BUS, M_NOWAIT); if (data == NULL) goto bad; /* get the bus specific location of this device */ loc = malloc(1024, M_BUS, M_NOWAIT); if (loc == NULL) goto bad; *loc = '\0'; bus_child_location_str(dev, loc, 1024); /* Get the bus specific pnp info of this device */ pnp = malloc(1024, M_BUS, M_NOWAIT); if (pnp == NULL) goto bad; *pnp = '\0'; bus_child_pnpinfo_str(dev, pnp, 1024); /* Get the parent of this device, or / if high enough in the tree. */ if (device_get_parent(dev) == NULL) parstr = "."; /* Or '/' ? */ else parstr = device_get_nameunit(device_get_parent(dev)); /* String it all together. */ snprintf(data, 1024, "%s%s at %s %s on %s\n", type, what, loc, pnp, parstr); free(loc, M_BUS); free(pnp, M_BUS); devctl_queue_data(data); return; bad: free(pnp, M_BUS); free(loc, M_BUS); free(data, M_BUS); return; } /* * A device was added to the tree. We are called just after it successfully * attaches (that is, probe and attach success for this device). No call * is made if a device is merely parented into the tree. See devnomatch * if probe fails. If attach fails, no notification is sent (but maybe * we should have a different message for this). */ static void devadded(device_t dev) { devaddq("+", device_get_nameunit(dev), dev); } /* * A device was removed from the tree. 
We are called just before this * happens. */ static void devremoved(device_t dev) { devaddq("-", device_get_nameunit(dev), dev); } /* * Called when there's no match for this device. This is only called * the first time that no match happens, so we don't keep getting this * message. Should that prove to be undesirable, we can change it. * This is called when all drivers that can attach to a given bus * decline to accept this device. Other errors may not be detected. */ static void devnomatch(device_t dev) { devaddq("?", "", dev); } static int sysctl_devctl_disable(SYSCTL_HANDLER_ARGS) { struct dev_event_info *n1; int dis, error; dis = (devctl_queue_length == 0); error = sysctl_handle_int(oidp, &dis, 0, req); if (error || !req->newptr) return (error); if (mtx_initialized(&devsoftc.mtx)) mtx_lock(&devsoftc.mtx); if (dis) { while (!TAILQ_EMPTY(&devsoftc.devq)) { n1 = TAILQ_FIRST(&devsoftc.devq); TAILQ_REMOVE(&devsoftc.devq, n1, dei_link); free(n1->dei_data, M_BUS); free(n1, M_BUS); } devsoftc.queued = 0; devctl_queue_length = 0; } else { devctl_queue_length = DEVCTL_DEFAULT_QUEUE_LEN; } if (mtx_initialized(&devsoftc.mtx)) mtx_unlock(&devsoftc.mtx); return (0); } static int sysctl_devctl_queue(SYSCTL_HANDLER_ARGS) { struct dev_event_info *n1; int q, error; q = devctl_queue_length; error = sysctl_handle_int(oidp, &q, 0, req); if (error || !req->newptr) return (error); if (q < 0) return (EINVAL); if (mtx_initialized(&devsoftc.mtx)) mtx_lock(&devsoftc.mtx); devctl_queue_length = q; while (devsoftc.queued > devctl_queue_length) { n1 = TAILQ_FIRST(&devsoftc.devq); TAILQ_REMOVE(&devsoftc.devq, n1, dei_link); free(n1->dei_data, M_BUS); free(n1, M_BUS); devsoftc.queued--; } if (mtx_initialized(&devsoftc.mtx)) mtx_unlock(&devsoftc.mtx); return (0); } /** * @brief safely quotes strings that might have double quotes in them. * * The devctl protocol relies on quoted strings having matching quotes. * This routine quotes any internal quotes so the resulting string * is safe to pass to snprintf to construct, for example pnp info strings. * Strings are always terminated with a NUL, but may be truncated if longer * than @p len bytes after quotes. * * @param dst Buffer to hold the string. Must be at least @p len bytes long * @param src Original buffer. * @param len Length of buffer pointed to by @dst, including trailing NUL */ void devctl_safe_quote(char *dst, const char *src, size_t len) { char *walker = dst, *ep = dst + len - 1; if (len == 0) return; while (src != NULL && walker < ep) { if (*src == '"' || *src == '\\') { if (ep - walker < 2) break; *walker++ = '\\'; } *walker++ = *src++; } *walker = '\0'; } /* End of /dev/devctl code */ static TAILQ_HEAD(,device) bus_data_devices; static int bus_data_generation = 1; static kobj_method_t null_methods[] = { KOBJMETHOD_END }; DEFINE_CLASS(null, null_methods, 0); /* * Bus pass implementation */ static driver_list_t passes = TAILQ_HEAD_INITIALIZER(passes); int bus_current_pass = BUS_PASS_ROOT; /** * @internal * @brief Register the pass level of a new driver attachment * * Register a new driver attachment's pass level. If no driver * attachment with the same pass level has been added, then @p new * will be added to the global passes list. * * @param new the new driver attachment */ static void driver_register_pass(struct driverlink *new) { struct driverlink *dl; /* We only consider pass numbers during boot. */ if (bus_current_pass == BUS_PASS_DEFAULT) return; /* * Walk the passes list. If we already know about this pass * then there is nothing to do. 
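 * (The passes list is kept sorted by pass number; the loop below * relies on that ordering.)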
If we don't, then insert this * driver link into the list. */ TAILQ_FOREACH(dl, &passes, passlink) { if (dl->pass < new->pass) continue; if (dl->pass == new->pass) return; TAILQ_INSERT_BEFORE(dl, new, passlink); return; } TAILQ_INSERT_TAIL(&passes, new, passlink); } /** * @brief Raise the current bus pass * * Raise the current bus pass level to @p pass. Call the BUS_NEW_PASS() * method on the root bus to kick off a new device tree scan for each * new pass level that has at least one driver. */ void bus_set_pass(int pass) { struct driverlink *dl; if (bus_current_pass > pass) panic("Attempt to lower bus pass level"); TAILQ_FOREACH(dl, &passes, passlink) { /* Skip pass values below the current pass level. */ if (dl->pass <= bus_current_pass) continue; /* * Bail once we hit a driver with a pass level that is * too high. */ if (dl->pass > pass) break; /* * Raise the pass level to the next level and rescan * the tree. */ bus_current_pass = dl->pass; BUS_NEW_PASS(root_bus); } /* * If there isn't a driver registered for the requested pass, * then bus_current_pass might still be less than 'pass'. Set * it to 'pass' in that case. */ if (bus_current_pass < pass) bus_current_pass = pass; KASSERT(bus_current_pass == pass, ("Failed to update bus pass level")); } /* * Devclass implementation */ static devclass_list_t devclasses = TAILQ_HEAD_INITIALIZER(devclasses); /** * @internal * @brief Find or create a device class * * If a device class with the name @p classname exists, return it, * otherwise if @p create is non-zero create and return a new device * class. * * If @p parentname is non-NULL, the parent of the devclass is set to * the devclass of that name. * * @param classname the devclass name to find or create * @param parentname the parent devclass name or @c NULL * @param create non-zero to create a devclass */ static devclass_t devclass_find_internal(const char *classname, const char *parentname, int create) { devclass_t dc; PDEBUG(("looking for %s", classname)); if (!classname) return (NULL); TAILQ_FOREACH(dc, &devclasses, link) { if (!strcmp(dc->name, classname)) break; } if (create && !dc) { PDEBUG(("creating %s", classname)); dc = malloc(sizeof(struct devclass) + strlen(classname) + 1, M_BUS, M_NOWAIT | M_ZERO); if (!dc) return (NULL); dc->parent = NULL; dc->name = (char*) (dc + 1); strcpy(dc->name, classname); TAILQ_INIT(&dc->drivers); TAILQ_INSERT_TAIL(&devclasses, dc, link); bus_data_generation_update(); } /* * If a parent class is specified, then set that as our parent so * that this devclass will support drivers for the parent class as * well. If the parent class has the same name don't do this though * as it creates a cycle that can trigger an infinite loop in * device_probe_child() if a device exists for which there is no * suitable driver. */ if (parentname && dc && !dc->parent && strcmp(classname, parentname) != 0) { dc->parent = devclass_find_internal(parentname, NULL, TRUE); dc->parent->flags |= DC_HAS_CHILDREN; } return (dc); } /** * @brief Create a device class * * If a device class with the name @p classname exists, return it, * otherwise create and return a new device class. * * @param classname the devclass name to find or create */ devclass_t devclass_create(const char *classname) { return (devclass_find_internal(classname, NULL, TRUE)); } /** * @brief Find a device class * * If a device class with the name @p classname exists, return it, * otherwise return @c NULL. 
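 *
 * Illustrative use (assuming the "pci" devclass exists):
 *
 *	devclass_t dc = devclass_find("pci");
 *	if (dc != NULL)
 *		printf("pci: %d units\n", devclass_get_maxunit(dc));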
* * @param classname the devclass name to find */ devclass_t devclass_find(const char *classname) { return (devclass_find_internal(classname, NULL, FALSE)); } /** * @brief Register that a device driver has been added to a devclass * * Register that a device driver has been added to a devclass. This * is called by devclass_add_driver to accomplish the recursive * notification of all the children classes of dc, as well as dc. * Each layer will have BUS_DRIVER_ADDED() called for all instances of * the devclass. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param dc the devclass to edit * @param driver the driver that was just added */ static void devclass_driver_added(devclass_t dc, driver_t *driver) { devclass_t parent; int i; /* * Call BUS_DRIVER_ADDED for any existing busses in this class. */ for (i = 0; i < dc->maxunit; i++) if (dc->devices[i] && device_is_attached(dc->devices[i])) BUS_DRIVER_ADDED(dc->devices[i], driver); /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(dc->flags & DC_HAS_CHILDREN)) return; parent = dc; TAILQ_FOREACH(dc, &devclasses, link) { if (dc->parent == parent) devclass_driver_added(dc, driver); } } /** * @brief Add a device driver to a device class * * Add a device driver to a devclass. This is normally called * automatically by DRIVER_MODULE(). The BUS_DRIVER_ADDED() method of * all devices in the devclass will be called to allow them to attempt * to re-probe any unmatched children. * * @param dc the devclass to edit * @param driver the driver to register */ int devclass_add_driver(devclass_t dc, driver_t *driver, int pass, devclass_t *dcp) { driverlink_t dl; const char *parentname; PDEBUG(("%s", DRIVERNAME(driver))); /* Don't allow invalid pass values. */ if (pass <= BUS_PASS_ROOT) return (EINVAL); dl = malloc(sizeof *dl, M_BUS, M_NOWAIT|M_ZERO); if (!dl) return (ENOMEM); /* * Compile the driver's methods. Also increase the reference count * so that the class doesn't get freed when the last instance * goes. This means we can safely use static methods and avoids a * double-free in devclass_delete_driver. */ kobj_class_compile((kobj_class_t) driver); /* * If the driver has any base classes, make the * devclass inherit from the devclass of the driver's * first base class. This will allow the system to * search for drivers in both devclasses for children * of a device using this driver. */ if (driver->baseclasses) parentname = driver->baseclasses[0]->name; else parentname = NULL; *dcp = devclass_find_internal(driver->name, parentname, TRUE); dl->driver = driver; TAILQ_INSERT_TAIL(&dc->drivers, dl, link); driver->refs++; /* XXX: kobj_mtx */ dl->pass = pass; driver_register_pass(dl); devclass_driver_added(dc, driver); bus_data_generation_update(); return (0); } /** * @brief Register that a device driver has been deleted from a devclass * * Register that a device driver has been removed from a devclass. * This is called by devclass_delete_driver to accomplish the * recursive notification of all the children classes of busclass, as * well as busclass. 
Each layer will attempt to detach the driver * from any devices that are children of the bus's devclass. The function * will return an error if a device fails to detach. * * We do a full search here of the devclass list at each iteration * level to save storing children-lists in the devclass structure. If * we ever move beyond a few dozen devices doing this, we may need to * reevaluate... * * @param busclass the devclass of the parent bus * @param dc the devclass of the driver being deleted * @param driver the driver being deleted */ static int devclass_driver_deleted(devclass_t busclass, devclass_t dc, driver_t *driver) { devclass_t parent; device_t dev; int error, i; /* * Disassociate from any devices. We iterate through all the * devices in the devclass of the driver and detach any which are * using the driver and which have a parent in the devclass which * we are deleting from. * * Note that since a driver can be in multiple devclasses, we * should not detach devices which are not children of devices in * the affected devclass. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_detach(dev)) != 0) return (error); BUS_PROBE_NOMATCH(dev->parent, dev); devnomatch(dev); dev->flags |= DF_DONENOMATCH; } } } /* * Walk through the children classes. Since we only keep a * single parent pointer around, we walk the entire list of * devclasses looking for children. We set the * DC_HAS_CHILDREN flag when a child devclass is created on * the parent, so we only walk the list for those devclasses * that have children. */ if (!(busclass->flags & DC_HAS_CHILDREN)) return (0); parent = busclass; TAILQ_FOREACH(busclass, &devclasses, link) { if (busclass->parent == parent) { error = devclass_driver_deleted(busclass, dc, driver); if (error) return (error); } } return (0); } /** * @brief Delete a device driver from a device class * * Delete a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_delete_driver() will first attempt to detach from each * device. If one of the detach calls fails, the driver will not be * deleted. * * @param dc the devclass to edit * @param driver the driver to unregister */ int devclass_delete_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. */ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } error = devclass_driver_deleted(busclass, dc, driver); if (error != 0) return (error); TAILQ_REMOVE(&busclass->drivers, dl, link); free(dl, M_BUS); /* XXX: kobj_mtx */ driver->refs--; if (driver->refs == 0) kobj_class_free((kobj_class_t) driver); bus_data_generation_update(); return (0); } /** * @brief Quiesces a set of device drivers from a device class * * Quiesce a device driver from a devclass. This is normally called * automatically by DRIVER_MODULE(). * * If the driver is currently attached to any devices, * devclass_quiesece_driver() will first attempt to quiesce each * device. 
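 * If any device fails to quiesce, the first error is returned and * the remaining devices are left untouched.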
* * @param dc the devclass to edit * @param driver the driver to unregister */ static int devclass_quiesce_driver(devclass_t busclass, driver_t *driver) { devclass_t dc = devclass_find(driver->name); driverlink_t dl; device_t dev; int i; int error; PDEBUG(("%s from devclass %s", driver->name, DEVCLANAME(busclass))); if (!dc) return (0); /* * Find the link structure in the bus' list of drivers. */ TAILQ_FOREACH(dl, &busclass->drivers, link) { if (dl->driver == driver) break; } if (!dl) { PDEBUG(("%s not found in %s list", driver->name, busclass->name)); return (ENOENT); } /* * Quiesce all devices. We iterate through all the devices in * the devclass of the driver and quiesce any which are using * the driver and which have a parent in the devclass which we * are quiescing. * * Note that since a driver can be in multiple devclasses, we * should not quiesce devices which are not children of * devices in the affected devclass. */ for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { dev = dc->devices[i]; if (dev->driver == driver && dev->parent && dev->parent->devclass == busclass) { if ((error = device_quiesce(dev)) != 0) return (error); } } } return (0); } /** * @internal */ static driverlink_t devclass_find_driver_internal(devclass_t dc, const char *classname) { driverlink_t dl; PDEBUG(("%s in devclass %s", classname, DEVCLANAME(dc))); TAILQ_FOREACH(dl, &dc->drivers, link) { if (!strcmp(dl->driver->name, classname)) return (dl); } PDEBUG(("not found")); return (NULL); } /** * @brief Return the name of the devclass */ const char * devclass_get_name(devclass_t dc) { return (dc->name); } /** * @brief Find a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t devclass_get_device(devclass_t dc, int unit) { if (dc == NULL || unit < 0 || unit >= dc->maxunit) return (NULL); return (dc->devices[unit]); } /** * @brief Find the softc field of a device given a unit number * * @param dc the devclass to search * @param unit the unit number to search for * * @returns the softc field of the device with the given * unit number or @c NULL if there is no such * device */ void * devclass_get_softc(devclass_t dc, int unit) { device_t dev; dev = devclass_get_device(dc, unit); if (!dev) return (NULL); return (device_get_softc(dev)); } /** * @brief Get a list of devices in the devclass * * An array containing a list of all the devices in the given devclass * is allocated and returned in @p *devlistp. The number of devices * in the array is returned in @p *devcountp. The caller should free * the array using @c free(p, M_TEMP), even if @p *devcountp is 0. 
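 *
 * The usual iterate-and-free pattern, as a sketch (locals assumed):
 *
 *	if (devclass_get_devices(dc, &list, &count) == 0) {
 *		for (i = 0; i < count; i++)
 *			device_printf(list[i], "present\n");
 *		free(list, M_TEMP);
 *	}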
* * @param dc the devclass to examine * @param devlistp points at location for array pointer return * value * @param devcountp points at location for array size return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_devices(devclass_t dc, device_t **devlistp, int *devcountp) { int count, i; device_t *list; count = devclass_get_count(dc); list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO); if (!list) return (ENOMEM); count = 0; for (i = 0; i < dc->maxunit; i++) { if (dc->devices[i]) { list[count] = dc->devices[i]; count++; } } *devlistp = list; *devcountp = count; return (0); } /** * @brief Get a list of drivers in the devclass * * An array containing a list of pointers to all the drivers in the * given devclass is allocated and returned in @p *listp. The number * of drivers in the array is returned in @p *countp. The caller should * free the array using @c free(p, M_TEMP). * * @param dc the devclass to examine * @param listp gives location for array pointer return value * @param countp gives location for number of array elements * return value * * @retval 0 success * @retval ENOMEM the array allocation failed */ int devclass_get_drivers(devclass_t dc, driver_t ***listp, int *countp) { driverlink_t dl; driver_t **list; int count; count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) count++; list = malloc(count * sizeof(driver_t *), M_TEMP, M_NOWAIT); if (list == NULL) return (ENOMEM); count = 0; TAILQ_FOREACH(dl, &dc->drivers, link) { list[count] = dl->driver; count++; } *listp = list; *countp = count; return (0); } /** * @brief Get the number of devices in a devclass * * @param dc the devclass to examine */ int devclass_get_count(devclass_t dc) { int count, i; count = 0; for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) count++; return (count); } /** * @brief Get the maximum unit number used in a devclass * * Note that this is one greater than the highest currently-allocated * unit. If a null devclass_t is passed in, -1 is returned to indicate * that not even the devclass has been allocated yet. * * @param dc the devclass to examine */ int devclass_get_maxunit(devclass_t dc) { if (dc == NULL) return (-1); return (dc->maxunit); } /** * @brief Find a free unit number in a devclass * * This function searches for the first unused unit number greater * that or equal to @p unit. * * @param dc the devclass to examine * @param unit the first unit number to check */ int devclass_find_free_unit(devclass_t dc, int unit) { if (dc == NULL) return (unit); while (unit < dc->maxunit && dc->devices[unit] != NULL) unit++; return (unit); } /** * @brief Set the parent of a devclass * * The parent class is normally initialised automatically by * DRIVER_MODULE(). * * @param dc the devclass to edit * @param pdc the new parent devclass */ void devclass_set_parent(devclass_t dc, devclass_t pdc) { dc->parent = pdc; } /** * @brief Get the parent of a devclass * * @param dc the devclass to examine */ devclass_t devclass_get_parent(devclass_t dc) { return (dc->parent); } struct sysctl_ctx_list * devclass_get_sysctl_ctx(devclass_t dc) { return (&dc->sysctl_ctx); } struct sysctl_oid * devclass_get_sysctl_tree(devclass_t dc) { return (dc->sysctl_tree); } /** * @internal * @brief Allocate a unit number * * On entry, @p *unitp is the desired unit number (or @c -1 if any * will do). The allocated unit number is returned in @p *unitp. 
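 * When @p *unitp is -1 the parent bus gets first pick via * BUS_HINT_DEVICE_UNIT(); only then does the search below look for a * free, un-hinted slot.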
* @param dc the devclass to allocate from * @param unitp points at the location for the allocated unit * number * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure */ static int devclass_alloc_unit(devclass_t dc, device_t dev, int *unitp) { const char *s; int unit = *unitp; PDEBUG(("unit %d in devclass %s", unit, DEVCLANAME(dc))); /* Ask the parent bus if it wants to wire this device. */ if (unit == -1) BUS_HINT_DEVICE_UNIT(device_get_parent(dev), dev, dc->name, &unit); /* If we were given a wired unit number, check for existing device */ /* XXX imp XXX */ if (unit != -1) { if (unit >= 0 && unit < dc->maxunit && dc->devices[unit] != NULL) { if (bootverbose) printf("%s: %s%d already exists; skipping it\n", dc->name, dc->name, *unitp); return (EEXIST); } } else { /* Unwired device, find the next available slot for it */ unit = 0; for (unit = 0;; unit++) { /* If there is an "at" hint for a unit then skip it. */ if (resource_string_value(dc->name, unit, "at", &s) == 0) continue; /* If this device slot is already in use, skip it. */ if (unit < dc->maxunit && dc->devices[unit] != NULL) continue; break; } } /* * We've selected a unit beyond the length of the table, so let's * extend the table to make room for all units up to and including * this one. */ if (unit >= dc->maxunit) { device_t *newlist, *oldlist; int newsize; oldlist = dc->devices; newsize = roundup((unit + 1), MINALLOCSIZE / sizeof(device_t)); newlist = malloc(sizeof(device_t) * newsize, M_BUS, M_NOWAIT); if (!newlist) return (ENOMEM); if (oldlist != NULL) bcopy(oldlist, newlist, sizeof(device_t) * dc->maxunit); bzero(newlist + dc->maxunit, sizeof(device_t) * (newsize - dc->maxunit)); dc->devices = newlist; dc->maxunit = newsize; if (oldlist != NULL) free(oldlist, M_BUS); } PDEBUG(("now: unit %d in devclass %s", unit, DEVCLANAME(dc))); *unitp = unit; return (0); } /** * @internal * @brief Add a device to a devclass * * A unit number is allocated for the device (using the device's * preferred unit number if any) and the device is registered in the * devclass. This allows the device to be looked up by its unit * number, e.g. by decoding a dev_t minor number. * * @param dc the devclass to add to * @param dev the device to add * * @retval 0 success * @retval EEXIST the requested unit number is already allocated * @retval ENOMEM memory allocation failure */ static int devclass_add_device(devclass_t dc, device_t dev) { int buflen, error; PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); buflen = snprintf(NULL, 0, "%s%d$", dc->name, INT_MAX); if (buflen < 0) return (ENOMEM); dev->nameunit = malloc(buflen, M_BUS, M_NOWAIT|M_ZERO); if (!dev->nameunit) return (ENOMEM); if ((error = devclass_alloc_unit(dc, dev, &dev->unit)) != 0) { free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (error); } dc->devices[dev->unit] = dev; dev->devclass = dc; snprintf(dev->nameunit, buflen, "%s%d", dc->name, dev->unit); return (0); } /** * @internal * @brief Delete a device from a devclass * * The device is removed from the devclass's device list and its unit * number is freed. 
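 * Wildcard-allocated devices (DF_WILDCARD) get their unit reset to * -1 so that a later re-add can renumber them.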
* @param dc the devclass to delete from * @param dev the device to delete * * @retval 0 success */ static int devclass_delete_device(devclass_t dc, device_t dev) { if (!dc || !dev) return (0); PDEBUG(("%s in devclass %s", DEVICENAME(dev), DEVCLANAME(dc))); if (dev->devclass != dc || dc->devices[dev->unit] != dev) panic("devclass_delete_device: inconsistent device class"); dc->devices[dev->unit] = NULL; if (dev->flags & DF_WILDCARD) dev->unit = -1; dev->devclass = NULL; free(dev->nameunit, M_BUS); dev->nameunit = NULL; return (0); } /** * @internal * @brief Make a new device and add it as a child of @p parent * * @param parent the parent of the new device * @param name the devclass name of the new device or @c NULL * to leave the devclass unspecified * @parem unit the unit number of the new device of @c -1 to * leave the unit number unspecified * * @returns the new device */ static device_t make_device(device_t parent, const char *name, int unit) { device_t dev; devclass_t dc; PDEBUG(("%s at %s as unit %d", name, DEVICENAME(parent), unit)); if (name) { dc = devclass_find_internal(name, NULL, TRUE); if (!dc) { printf("make_device: can't find device class %s\n", name); return (NULL); } } else { dc = NULL; } - dev = malloc(sizeof(struct device), M_BUS, M_NOWAIT|M_ZERO); + dev = malloc(sizeof(*dev), M_BUS, M_NOWAIT|M_ZERO); if (!dev) return (NULL); dev->parent = parent; TAILQ_INIT(&dev->children); kobj_init((kobj_t) dev, &null_class); dev->driver = NULL; dev->devclass = NULL; dev->unit = unit; dev->nameunit = NULL; dev->desc = NULL; dev->busy = 0; dev->devflags = 0; dev->flags = DF_ENABLED; dev->order = 0; if (unit == -1) dev->flags |= DF_WILDCARD; if (name) { dev->flags |= DF_FIXEDCLASS; if (devclass_add_device(dc, dev)) { kobj_delete((kobj_t) dev, M_BUS); return (NULL); } } dev->ivars = NULL; dev->softc = NULL; dev->state = DS_NOTPRESENT; TAILQ_INSERT_TAIL(&bus_data_devices, dev, devlink); bus_data_generation_update(); return (dev); } /** * @internal * @brief Print a description of a device. */ static int device_print_child(device_t dev, device_t child) { int retval = 0; if (device_is_alive(child)) retval += BUS_PRINT_CHILD(dev, child); else retval += device_printf(child, " not found\n"); return (retval); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with order zero. * * @param dev the device which will be the parent of the * new child device * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c -1 if not * specified * * @returns the new device */ device_t device_add_child(device_t dev, const char *name, int unit) { return (device_add_child_ordered(dev, 0, name, unit)); } /** * @brief Create a new device * * This creates a new device and adds it as a child of an existing * parent device. The new device will be added after the last existing * child with the same order. 
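 *
 * Typical call from a bus attach routine (illustrative; "foo" is a
 * made-up devclass):
 *
 *	child = device_add_child_ordered(dev, 0, "foo", -1);
 *	if (child == NULL)
 *		return (ENXIO);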
* * @param dev the device which will be the parent of the * new child device * @param order a value which is used to partially sort the * children of @p dev - devices created using * lower values of @p order appear first in @p * dev's list of children * @param name devclass name for new device or @c NULL if not * specified * @param unit unit number for new device or @c -1 if not * specified * * @returns the new device */ device_t device_add_child_ordered(device_t dev, u_int order, const char *name, int unit) { device_t child; device_t place; PDEBUG(("%s at %s with order %u as unit %d", name, DEVICENAME(dev), order, unit)); KASSERT(name != NULL || unit == -1, ("child device with wildcard name and specific unit number")); child = make_device(dev, name, unit); if (child == NULL) return (child); child->order = order; TAILQ_FOREACH(place, &dev->children, link) { if (place->order > order) break; } if (place) { /* * The device 'place' is the first device whose order is * greater than the new child. */ TAILQ_INSERT_BEFORE(place, child, link); } else { /* * The new child's order is greater or equal to the order of * any existing device. Add the child to the tail of the list. */ TAILQ_INSERT_TAIL(&dev->children, child, link); } bus_data_generation_update(); return (child); } /** * @brief Delete a device * * This function deletes a device along with all of its children. If * the device currently has a driver attached to it, the device is * detached first using device_detach(). * * @param dev the parent device * @param child the device to delete * * @retval 0 success * @retval non-zero a unit error code describing the error */ int device_delete_child(device_t dev, device_t child) { int error; device_t grandchild; PDEBUG(("%s from %s", DEVICENAME(child), DEVICENAME(dev))); /* remove children first */ while ((grandchild = TAILQ_FIRST(&child->children)) != NULL) { error = device_delete_child(child, grandchild); if (error) return (error); } if ((error = device_detach(child)) != 0) return (error); if (child->devclass) devclass_delete_device(child->devclass, child); if (child->parent) BUS_CHILD_DELETED(dev, child); TAILQ_REMOVE(&dev->children, child, link); TAILQ_REMOVE(&bus_data_devices, child, devlink); kobj_delete((kobj_t) child, M_BUS); bus_data_generation_update(); return (0); } /** * @brief Delete all children devices of the given device, if any. * * This function deletes all children devices of the given device, if * any, using the device_delete_child() function for each device it * finds. If a child device cannot be deleted, this function will * return an error code. * * @param dev the parent device * * @retval 0 success * @retval non-zero a device would not detach */ int device_delete_children(device_t dev) { device_t child; int error; PDEBUG(("Deleting all children of %s", DEVICENAME(dev))); error = 0; while ((child = TAILQ_FIRST(&dev->children)) != NULL) { error = device_delete_child(dev, child); if (error) { PDEBUG(("Failed deleting %s", DEVICENAME(child))); break; } } return (error); } /** * @brief Find a device given a unit number * * This is similar to devclass_get_devices() but only searches for * devices which have @p dev as a parent. * * @param dev the parent device to search * @param unit the unit number to search for. If the unit is -1, * return the first child of @p dev which has name * @p classname (that is, the one with the lowest unit.) 
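 * E.g. (assuming a "foo" devclass): device_find_child(dev, "foo", -1) * returns dev's lowest-numbered "foo" child, or NULL if it has none.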
* * @returns the device with the given unit number or @c * NULL if there is no such device */ device_t device_find_child(device_t dev, const char *classname, int unit) { devclass_t dc; device_t child; dc = devclass_find(classname); if (!dc) return (NULL); if (unit != -1) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } else { for (unit = 0; unit < devclass_get_maxunit(dc); unit++) { child = devclass_get_device(dc, unit); if (child && child->parent == dev) return (child); } } return (NULL); } /** * @internal */ static driverlink_t first_matching_driver(devclass_t dc, device_t dev) { if (dev->devclass) return (devclass_find_driver_internal(dc, dev->devclass->name)); return (TAILQ_FIRST(&dc->drivers)); } /** * @internal */ static driverlink_t next_matching_driver(devclass_t dc, device_t dev, driverlink_t last) { if (dev->devclass) { driverlink_t dl; for (dl = TAILQ_NEXT(last, link); dl; dl = TAILQ_NEXT(dl, link)) if (!strcmp(dev->devclass->name, dl->driver->name)) return (dl); return (NULL); } return (TAILQ_NEXT(last, link)); } /** * @internal */ int device_probe_child(device_t dev, device_t child) { devclass_t dc; driverlink_t best = NULL; driverlink_t dl; int result, pri = 0; int hasclass = (child->devclass != NULL); GIANT_REQUIRED; dc = dev->devclass; if (!dc) panic("device_probe_child: parent device has no devclass"); /* * If the state is already probed, then return. However, don't * return if we can rebid this object. */ if (child->state == DS_ALIVE && (child->flags & DF_REBID) == 0) return (0); for (; dc; dc = dc->parent) { for (dl = first_matching_driver(dc, child); dl; dl = next_matching_driver(dc, child, dl)) { /* If this driver's pass is too high, then ignore it. */ if (dl->pass > bus_current_pass) continue; PDEBUG(("Trying %s", DRIVERNAME(dl->driver))); result = device_set_driver(child, dl->driver); if (result == ENOMEM) return (result); else if (result != 0) continue; if (!hasclass) { if (device_set_devclass(child, dl->driver->name) != 0) { char const * devname = device_get_name(child); if (devname == NULL) devname = "(unknown)"; printf("driver bug: Unable to set " "devclass (class: %s " "devname: %s)\n", dl->driver->name, devname); (void)device_set_driver(child, NULL); continue; } } /* Fetch any flags for the device before probing. */ resource_int_value(dl->driver->name, child->unit, "flags", &child->devflags); result = DEVICE_PROBE(child); /* Reset flags and devclass before the next probe. */ child->devflags = 0; if (!hasclass) (void)device_set_devclass(child, NULL); /* * If the driver returns SUCCESS, there can be * no higher match for this device. */ if (result == 0) { best = dl; pri = 0; break; } /* * Probes that return BUS_PROBE_NOWILDCARD or lower * only match on devices whose driver was explicitly * specified. */ if (result <= BUS_PROBE_NOWILDCARD && !(child->flags & DF_FIXEDCLASS)) { result = ENXIO; } /* * The driver returned an error so it * certainly doesn't match. */ if (result > 0) { (void)device_set_driver(child, NULL); continue; } /* * A priority lower than SUCCESS, remember the * best matching driver. Initialise the value * of pri for the first match. */ if (best == NULL || result > pri) { best = dl; pri = result; continue; } } /* * If we have an unambiguous match in this devclass, * don't look in the parent. */ if (best && pri == 0) break; } /* * If we found a driver, change state and initialise the devclass. */ /* XXX What happens if we rebid and got no best? 
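 * (Answer: we fall through and return ENXIO at the bottom.)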
 */
	if (best) {
		/*
		 * If this device was attached, and we were asked to
		 * rescan, and it is a different driver, then we have
		 * to detach the old driver and reattach this new one.
		 * Note, we don't have to check for DF_REBID here
		 * because if the state is > DS_ALIVE, we know it must
		 * be.
		 *
		 * This assumes that all DF_REBID drivers can have
		 * their probe routine called at any time and that
		 * they are idempotent as well as completely benign in
		 * normal operations.
		 *
		 * We also have to make sure that the detach
		 * succeeded, otherwise we fail the operation (or
		 * maybe it should just fail silently?  I'm torn).
		 */
		if (child->state > DS_ALIVE && best->driver != child->driver)
			if ((result = device_detach(child)) != 0)
				return (result);

		/* Set the winning driver, devclass, and flags. */
		if (!child->devclass) {
			result = device_set_devclass(child,
			    best->driver->name);
			if (result != 0)
				return (result);
		}
		result = device_set_driver(child, best->driver);
		if (result != 0)
			return (result);
		resource_int_value(best->driver->name, child->unit,
		    "flags", &child->devflags);

		if (pri < 0) {
			/*
			 * A bit bogus. Call the probe method again to make
			 * sure that we have the right description.
			 */
			DEVICE_PROBE(child);
#if 0
			child->flags |= DF_REBID;
#endif
		} else
			child->flags &= ~DF_REBID;
		child->state = DS_ALIVE;

		bus_data_generation_update();
		return (0);
	}

	return (ENXIO);
}

/**
 * @brief Return the parent of a device
 */
device_t
device_get_parent(device_t dev)
{
	return (dev->parent);
}

/**
 * @brief Get a list of children of a device
 *
 * An array containing a list of all the children of the given device
 * is allocated and returned in @p *devlistp. The number of devices
 * in the array is returned in @p *devcountp. The caller should free
 * the array using @c free(p, M_TEMP).
 *
 * @param dev		the device to examine
 * @param devlistp	points at location for array pointer return
 *			value
 * @param devcountp	points at location for array size return value
 *
 * @retval 0		success
 * @retval ENOMEM	the array allocation failed
 */
int
device_get_children(device_t dev, device_t **devlistp, int *devcountp)
{
	int count;
	device_t child;
	device_t *list;

	count = 0;
	TAILQ_FOREACH(child, &dev->children, link) {
		count++;
	}
	if (count == 0) {
		*devlistp = NULL;
		*devcountp = 0;
		return (0);
	}

	list = malloc(count * sizeof(device_t), M_TEMP, M_NOWAIT|M_ZERO);
	if (!list)
		return (ENOMEM);

	count = 0;
	TAILQ_FOREACH(child, &dev->children, link) {
		list[count] = child;
		count++;
	}

	*devlistp = list;
	*devcountp = count;

	return (0);
}

/**
 * @brief Return the current driver for the device or @c NULL if there
 * is no driver currently attached
 */
driver_t *
device_get_driver(device_t dev)
{
	return (dev->driver);
}

/**
 * @brief Return the current devclass for the device or @c NULL if
 * there is none.
 */
devclass_t
device_get_devclass(device_t dev)
{
	return (dev->devclass);
}

/**
 * @brief Return the name of the device's devclass or @c NULL if there
 * is none.
 */
const char *
device_get_name(device_t dev)
{
	if (dev != NULL && dev->devclass)
		return (devclass_get_name(dev->devclass));
	return (NULL);
}

/**
 * @brief Return a string containing the device's devclass name
 * followed by an ascii representation of the device's unit number
 * (e.g. @c "foo2").
 */
const char *
device_get_nameunit(device_t dev)
{
	return (dev->nameunit);
}

/**
 * @brief Return the device's unit number.
*/ int device_get_unit(device_t dev) { return (dev->unit); } /** * @brief Return the device's description string */ const char * device_get_desc(device_t dev) { return (dev->desc); } /** * @brief Return the device's flags */ uint32_t device_get_flags(device_t dev) { return (dev->devflags); } struct sysctl_ctx_list * device_get_sysctl_ctx(device_t dev) { return (&dev->sysctl_ctx); } struct sysctl_oid * device_get_sysctl_tree(device_t dev) { return (dev->sysctl_tree); } /** * @brief Print the name of the device followed by a colon and a space * * @returns the number of characters printed */ int device_print_prettyname(device_t dev) { const char *name = device_get_name(dev); if (name == NULL) return (printf("unknown: ")); return (printf("%s%d: ", name, device_get_unit(dev))); } /** * @brief Print the name of the device followed by a colon, a space * and the result of calling vprintf() with the value of @p fmt and * the following arguments. * * @returns the number of characters printed */ int device_printf(device_t dev, const char * fmt, ...) { va_list ap; int retval; retval = device_print_prettyname(dev); va_start(ap, fmt); retval += vprintf(fmt, ap); va_end(ap); return (retval); } /** * @internal */ static void device_set_desc_internal(device_t dev, const char* desc, int copy) { if (dev->desc && (dev->flags & DF_DESCMALLOCED)) { free(dev->desc, M_BUS); dev->flags &= ~DF_DESCMALLOCED; dev->desc = NULL; } if (copy && desc) { dev->desc = malloc(strlen(desc) + 1, M_BUS, M_NOWAIT); if (dev->desc) { strcpy(dev->desc, desc); dev->flags |= DF_DESCMALLOCED; } } else { /* Avoid a -Wcast-qual warning */ dev->desc = (char *)(uintptr_t) desc; } bus_data_generation_update(); } /** * @brief Set the device's description * * The value of @c desc should be a string constant that will not * change (at least until the description is changed in a subsequent * call to device_set_desc() or device_set_desc_copy()). */ void device_set_desc(device_t dev, const char* desc) { device_set_desc_internal(dev, desc, FALSE); } /** * @brief Set the device's description * * The string pointed to by @c desc is copied. Use this function if * the device description is generated, (e.g. with sprintf()). */ void device_set_desc_copy(device_t dev, const char* desc) { device_set_desc_internal(dev, desc, TRUE); } /** * @brief Set the device's flags */ void device_set_flags(device_t dev, uint32_t flags) { dev->devflags = flags; } /** * @brief Return the device's softc field * * The softc is allocated and zeroed when a driver is attached, based * on the size field of the driver. */ void * device_get_softc(device_t dev) { return (dev->softc); } /** * @brief Set the device's softc field * * Most drivers do not need to use this since the softc is allocated * automatically when the driver is attached. */ void device_set_softc(device_t dev, void *softc) { if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) free(dev->softc, M_BUS_SC); dev->softc = softc; if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Free claimed softc * * Most drivers do not need to use this since the softc is freed * automatically when the driver is detached. */ void device_free_softc(void *softc) { free(softc, M_BUS_SC); } /** * @brief Claim softc * * This function can be used to let the driver free the automatically * allocated softc using "device_free_softc()". This function is * useful when the driver is refcounting the softc and the softc * cannot be freed when the "device_detach" method is called. 
*/ void device_claim_softc(device_t dev) { if (dev->softc) dev->flags |= DF_EXTERNALSOFTC; else dev->flags &= ~DF_EXTERNALSOFTC; } /** * @brief Get the device's ivars field * * The ivars field is used by the parent device to store per-device * state (e.g. the physical location of the device or a list of * resources). */ void * device_get_ivars(device_t dev) { KASSERT(dev != NULL, ("device_get_ivars(NULL, ...)")); return (dev->ivars); } /** * @brief Set the device's ivars field */ void device_set_ivars(device_t dev, void * ivars) { KASSERT(dev != NULL, ("device_set_ivars(NULL, ...)")); dev->ivars = ivars; } /** * @brief Return the device's state */ device_state_t device_get_state(device_t dev) { return (dev->state); } /** * @brief Set the DF_ENABLED flag for the device */ void device_enable(device_t dev) { dev->flags |= DF_ENABLED; } /** * @brief Clear the DF_ENABLED flag for the device */ void device_disable(device_t dev) { dev->flags &= ~DF_ENABLED; } /** * @brief Increment the busy counter for the device */ void device_busy(device_t dev) { if (dev->state < DS_ATTACHING) panic("device_busy: called for unattached device"); if (dev->busy == 0 && dev->parent) device_busy(dev->parent); dev->busy++; if (dev->state == DS_ATTACHED) dev->state = DS_BUSY; } /** * @brief Decrement the busy counter for the device */ void device_unbusy(device_t dev) { if (dev->busy != 0 && dev->state != DS_BUSY && dev->state != DS_ATTACHING) panic("device_unbusy: called for non-busy device %s", device_get_nameunit(dev)); dev->busy--; if (dev->busy == 0) { if (dev->parent) device_unbusy(dev->parent); if (dev->state == DS_BUSY) dev->state = DS_ATTACHED; } } /** * @brief Set the DF_QUIET flag for the device */ void device_quiet(device_t dev) { dev->flags |= DF_QUIET; } /** * @brief Clear the DF_QUIET flag for the device */ void device_verbose(device_t dev) { dev->flags &= ~DF_QUIET; } /** * @brief Return non-zero if the DF_QUIET flag is set on the device */ int device_is_quiet(device_t dev) { return ((dev->flags & DF_QUIET) != 0); } /** * @brief Return non-zero if the DF_ENABLED flag is set on the device */ int device_is_enabled(device_t dev) { return ((dev->flags & DF_ENABLED) != 0); } /** * @brief Return non-zero if the device was successfully probed */ int device_is_alive(device_t dev) { return (dev->state >= DS_ALIVE); } /** * @brief Return non-zero if the device currently has a driver * attached to it */ int device_is_attached(device_t dev) { return (dev->state >= DS_ATTACHED); } /** * @brief Return non-zero if the device is currently suspended. */ int device_is_suspended(device_t dev) { return ((dev->flags & DF_SUSPENDED) != 0); } /** * @brief Set the devclass of a device * @see devclass_add_device(). */ int device_set_devclass(device_t dev, const char *classname) { devclass_t dc; int error; if (!classname) { if (dev->devclass) devclass_delete_device(dev->devclass, dev); return (0); } if (dev->devclass) { printf("device_set_devclass: device class already set\n"); return (EINVAL); } dc = devclass_find_internal(classname, NULL, TRUE); if (!dc) return (ENOMEM); error = devclass_add_device(dc, dev); bus_data_generation_update(); return (error); } /** * @brief Set the devclass of a device and mark the devclass fixed. 
 * @see device_set_devclass()
 */
int
device_set_devclass_fixed(device_t dev, const char *classname)
{
	int error;

	if (classname == NULL)
		return (EINVAL);

	error = device_set_devclass(dev, classname);
	if (error)
		return (error);
	dev->flags |= DF_FIXEDCLASS;
	return (0);
}

/**
 * @brief Set the driver of a device
 *
 * @retval 0		success
 * @retval EBUSY	the device already has a driver attached
 * @retval ENOMEM	a memory allocation failure occurred
 */
int
device_set_driver(device_t dev, driver_t *driver)
{
	if (dev->state >= DS_ATTACHED)
		return (EBUSY);

	if (dev->driver == driver)
		return (0);

	if (dev->softc && !(dev->flags & DF_EXTERNALSOFTC)) {
		free(dev->softc, M_BUS_SC);
		dev->softc = NULL;
	}
	device_set_desc(dev, NULL);
	kobj_delete((kobj_t) dev, NULL);
	dev->driver = driver;
	if (driver) {
		kobj_init((kobj_t) dev, (kobj_class_t) driver);
		if (!(dev->flags & DF_EXTERNALSOFTC) && driver->size > 0) {
			dev->softc = malloc(driver->size, M_BUS_SC,
			    M_NOWAIT | M_ZERO);
			if (!dev->softc) {
				kobj_delete((kobj_t) dev, NULL);
				kobj_init((kobj_t) dev, &null_class);
				dev->driver = NULL;
				return (ENOMEM);
			}
		}
	} else {
		kobj_init((kobj_t) dev, &null_class);
	}

	bus_data_generation_update();
	return (0);
}

/**
 * @brief Probe a device, and return its status.
 *
 * This function is the core of the device autoconfiguration
 * system. Its purpose is to select a suitable driver for a device and
 * then call that driver to initialise the hardware appropriately. The
 * driver is selected by calling the DEVICE_PROBE() method of a set of
 * candidate drivers and then choosing the driver which returned the
 * best value. This driver is then attached to the device using
 * device_attach().
 *
 * The set of suitable drivers is taken from the list of drivers in
 * the parent device's devclass. If the device was originally created
 * with a specific class name (see device_add_child()), only drivers
 * with that name are probed, otherwise all drivers in the devclass
 * are probed. If no drivers return successful probe values in the
 * parent devclass, the search continues in the parent of that
 * devclass (see devclass_get_parent()) if any.
 *
 * @param dev		the device to initialise
 *
 * @retval 0		success
 * @retval ENXIO	no driver was found
 * @retval ENOMEM	memory allocation failure
 * @retval non-zero	some other unix error code
 * @retval -1		Device already attached
 */
int
device_probe(device_t dev)
{
	int error;

	GIANT_REQUIRED;

	if (dev->state >= DS_ALIVE && (dev->flags & DF_REBID) == 0)
		return (-1);

	if (!(dev->flags & DF_ENABLED)) {
		if (bootverbose && device_get_name(dev) != NULL) {
			device_print_prettyname(dev);
			printf("not probed (disabled)\n");
		}
		return (-1);
	}
	if ((error = device_probe_child(dev->parent, dev)) != 0) {
		if (bus_current_pass == BUS_PASS_DEFAULT &&
		    !(dev->flags & DF_DONENOMATCH)) {
			BUS_PROBE_NOMATCH(dev->parent, dev);
			devnomatch(dev);
			dev->flags |= DF_DONENOMATCH;
		}
		return (error);
	}
	return (0);
}

/**
 * @brief Probe a device and attach a driver if possible
 *
 * Calls device_probe() and attaches if that was successful.
 */
int
device_probe_and_attach(device_t dev)
{
	int error;

	GIANT_REQUIRED;

	error = device_probe(dev);
	if (error == -1)
		return (0);
	else if (error != 0)
		return (error);

	CURVNET_SET_QUIET(vnet0);
	error = device_attach(dev);
	CURVNET_RESTORE();
	return (error);
}

/**
 * @brief Attach a device driver to a device
 *
 * This function is a wrapper around the DEVICE_ATTACH() driver
 * method.
 * In addition to calling DEVICE_ATTACH(), it initialises the
 * device's sysctl tree, optionally prints a description of the device
 * and queues a notification event for user-based device management
 * services.
 *
 * Normally this function is only called internally from
 * device_probe_and_attach().
 *
 * @param dev		the device to initialise
 *
 * @retval 0		success
 * @retval ENXIO	no driver was found
 * @retval ENOMEM	memory allocation failure
 * @retval non-zero	some other unix error code
 */
int
device_attach(device_t dev)
{
	uint64_t attachtime;
	int error;

	if (resource_disabled(dev->driver->name, dev->unit)) {
		device_disable(dev);
		if (bootverbose)
			device_printf(dev, "disabled via hints entry\n");
		return (ENXIO);
	}

	device_sysctl_init(dev);
	if (!device_is_quiet(dev))
		device_print_child(dev->parent, dev);
	attachtime = get_cyclecount();
	dev->state = DS_ATTACHING;
	if ((error = DEVICE_ATTACH(dev)) != 0) {
		printf("device_attach: %s%d attach returned %d\n",
		    dev->driver->name, dev->unit, error);
		if (!(dev->flags & DF_FIXEDCLASS))
			devclass_delete_device(dev->devclass, dev);
		(void)device_set_driver(dev, NULL);
		device_sysctl_fini(dev);
		KASSERT(dev->busy == 0, ("attach failed but busy"));
		dev->state = DS_NOTPRESENT;
		return (error);
	}
	attachtime = get_cyclecount() - attachtime;
	/*
	 * 4 bits per device is a reasonable value for desktop and server
	 * hardware with good get_cyclecount() implementations, but WILL
	 * need to be adjusted on other platforms.
	 */
#define	RANDOM_PROBE_BIT_GUESS	4
	if (bootverbose)
		printf("random: harvesting attach, %zu bytes (%d bits) from %s%d\n",
		    sizeof(attachtime), RANDOM_PROBE_BIT_GUESS,
		    dev->driver->name, dev->unit);
	random_harvest_direct(&attachtime, sizeof(attachtime),
	    RANDOM_PROBE_BIT_GUESS, RANDOM_ATTACH);
	device_sysctl_update(dev);
	if (dev->busy)
		dev->state = DS_BUSY;
	else
		dev->state = DS_ATTACHED;
	dev->flags &= ~DF_DONENOMATCH;
	devadded(dev);
	return (0);
}

/**
 * @brief Detach a driver from a device
 *
 * This function is a wrapper around the DEVICE_DETACH() driver
 * method. If the call to DEVICE_DETACH() succeeds, it calls
 * BUS_CHILD_DETACHED() for the parent of @p dev, queues a
 * notification event for user-based device management services and
 * cleans up the device's sysctl tree.
 *
 * @param dev		the device to un-initialise
 *
 * @retval 0		success
 * @retval ENXIO	no driver was found
 * @retval ENOMEM	memory allocation failure
 * @retval non-zero	some other unix error code
 */
int
device_detach(device_t dev)
{
	int error;

	GIANT_REQUIRED;

	PDEBUG(("%s", DEVICENAME(dev)));
	if (dev->state == DS_BUSY)
		return (EBUSY);
	if (dev->state != DS_ATTACHED)
		return (0);

	if ((error = DEVICE_DETACH(dev)) != 0)
		return (error);
	devremoved(dev);
	if (!device_is_quiet(dev))
		device_printf(dev, "detached\n");
	if (dev->parent)
		BUS_CHILD_DETACHED(dev->parent, dev);

	if (!(dev->flags & DF_FIXEDCLASS))
		devclass_delete_device(dev->devclass, dev);

	dev->state = DS_NOTPRESENT;
	(void)device_set_driver(dev, NULL);
	device_sysctl_fini(dev);

	return (0);
}

/**
 * @brief Tells a driver to quiesce itself.
 *
 * This function is a wrapper around the DEVICE_QUIESCE() driver
 * method. If the call to DEVICE_QUIESCE() succeeds, the driver has
 * ceased any activity that would prevent it from being safely
 * detached later.
* * @param dev the device to quiesce * * @retval 0 success * @retval ENXIO no driver was found * @retval ENOMEM memory allocation failure * @retval non-zero some other unix error code */ int device_quiesce(device_t dev) { PDEBUG(("%s", DEVICENAME(dev))); if (dev->state == DS_BUSY) return (EBUSY); if (dev->state != DS_ATTACHED) return (0); return (DEVICE_QUIESCE(dev)); } /** * @brief Notify a device of system shutdown * * This function calls the DEVICE_SHUTDOWN() driver method if the * device currently has an attached driver. * * @returns the value returned by DEVICE_SHUTDOWN() */ int device_shutdown(device_t dev) { if (dev->state < DS_ATTACHED) return (0); return (DEVICE_SHUTDOWN(dev)); } /** * @brief Set the unit number of a device * * This function can be used to override the unit number used for a * device (e.g. to wire a device to a pre-configured unit number). */ int device_set_unit(device_t dev, int unit) { devclass_t dc; int err; dc = device_get_devclass(dev); if (unit < dc->maxunit && dc->devices[unit]) return (EBUSY); err = devclass_delete_device(dc, dev); if (err) return (err); dev->unit = unit; err = devclass_add_device(dc, dev); if (err) return (err); bus_data_generation_update(); return (0); } /*======================================*/ /* * Some useful method implementations to make life easier for bus drivers. */ void resource_init_map_request_impl(struct resource_map_request *args, size_t sz) { bzero(args, sz); args->size = sz; args->memattr = VM_MEMATTR_UNCACHEABLE; } /** * @brief Initialise a resource list. * * @param rl the resource list to initialise */ void resource_list_init(struct resource_list *rl) { STAILQ_INIT(rl); } /** * @brief Reclaim memory used by a resource list. * * This function frees the memory for all resource entries on the list * (if any). * * @param rl the resource list to free */ void resource_list_free(struct resource_list *rl) { struct resource_list_entry *rle; while ((rle = STAILQ_FIRST(rl)) != NULL) { if (rle->res) panic("resource_list_free: resource entry is busy"); STAILQ_REMOVE_HEAD(rl, link); free(rle, M_BUS); } } /** * @brief Add a resource entry. * * This function adds a resource entry using the given @p type, @p * start, @p end and @p count values. A rid value is chosen by * searching sequentially for the first unused rid starting at zero. * * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param start the start address of the resource * @param end the end address of the resource * @param count XXX end-start+1 */ int resource_list_add_next(struct resource_list *rl, int type, rman_res_t start, rman_res_t end, rman_res_t count) { int rid; rid = 0; while (resource_list_find(rl, type, rid) != NULL) rid++; resource_list_add(rl, type, rid, start, end, count); return (rid); } /** * @brief Add or modify a resource entry. * * If an existing entry exists with the same type and rid, it will be * modified using the given values of @p start, @p end and @p * count. If no entry exists, a new one will be created using the * given values. The resource list entry that matches is then returned. * * @param rl the resource list to edit * @param type the resource entry type (e.g. 
SYS_RES_MEMORY) * @param rid the resource identifier * @param start the start address of the resource * @param end the end address of the resource * @param count XXX end-start+1 */ struct resource_list_entry * resource_list_add(struct resource_list *rl, int type, int rid, rman_res_t start, rman_res_t end, rman_res_t count) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) { rle = malloc(sizeof(struct resource_list_entry), M_BUS, M_NOWAIT); if (!rle) panic("resource_list_add: can't record entry"); STAILQ_INSERT_TAIL(rl, rle, link); rle->type = type; rle->rid = rid; rle->res = NULL; rle->flags = 0; } if (rle->res) panic("resource_list_add: resource entry is busy"); rle->start = start; rle->end = end; rle->count = count; return (rle); } /** * @brief Determine if a resource entry is busy. * * Returns true if a resource entry is busy meaning that it has an * associated resource that is not an unallocated "reserved" resource. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is busy, zero otherwise. */ int resource_list_busy(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle == NULL || rle->res == NULL) return (0); if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) { KASSERT(!(rman_get_flags(rle->res) & RF_ACTIVE), ("reserved resource is active")); return (0); } return (1); } /** * @brief Determine if a resource entry is reserved. * * Returns true if a resource entry is reserved meaning that it has an * associated "reserved" resource. The resource can either be * allocated or unallocated. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns Non-zero if the entry is reserved, zero otherwise. */ int resource_list_reserved(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (rle != NULL && rle->flags & RLE_RESERVED) return (1); return (0); } /** * @brief Find a resource entry by type and rid. * * @param rl the resource list to search * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier * * @returns the resource entry pointer or NULL if there is no such * entry. */ struct resource_list_entry * resource_list_find(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle; STAILQ_FOREACH(rle, rl, link) { if (rle->type == type && rle->rid == rid) return (rle); } return (NULL); } /** * @brief Delete a resource entry. * * @param rl the resource list to edit * @param type the resource entry type (e.g. SYS_RES_MEMORY) * @param rid the resource identifier */ void resource_list_delete(struct resource_list *rl, int type, int rid) { struct resource_list_entry *rle = resource_list_find(rl, type, rid); if (rle) { if (rle->res != NULL) panic("resource_list_delete: resource has not been released"); STAILQ_REMOVE(rl, rle, resource_list_entry, link); free(rle, M_BUS); } } /** * @brief Allocate a reserved resource * * This can be used by busses to force the allocation of resources * that are always active in the system even if they are not allocated * by a driver (e.g. PCI BARs). This function is usually called when * adding a new child to the bus. The resource is allocated from the * parent bus when it is reserved. 
 * The resource list entry is marked
 * with RLE_RESERVED to note that it is a reserved resource.
 *
 * Subsequent attempts to allocate the resource with
 * resource_list_alloc() will succeed the first time and will set
 * RLE_ALLOCATED to note that it has been allocated. When a reserved
 * resource that has been allocated is released with
 * resource_list_release() the RLE_ALLOCATED flag is cleared, but the
 * actual resource remains allocated. The resource can be released to
 * the parent bus by calling resource_list_unreserve().
 *
 * @param rl		the resource list to allocate from
 * @param bus		the parent device of @p child
 * @param child	the device for which the resource is being reserved
 * @param type		the type of resource to allocate
 * @param rid		a pointer to the resource identifier
 * @param start	hint at the start of the resource range - pass
 *			@c 0 for any start address
 * @param end		hint at the end of the resource range - pass
 *			@c ~0 for any end address
 * @param count	hint at the size of range required - pass @c 1
 *			for any size
 * @param flags	any extra flags to control the resource
 *			allocation - see @c RF_XXX flags in
 *			<sys/rman.h> for details
 *
 * @returns		the resource which was allocated or @c NULL if no
 *			resource could be allocated
 */
struct resource *
resource_list_reserve(struct resource_list *rl, device_t bus, device_t child,
    int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count,
    u_int flags)
{
	struct resource_list_entry *rle = NULL;
	int passthrough = (device_get_parent(child) != bus);
	struct resource *r;

	if (passthrough)
		panic(
    "resource_list_reserve() should only be called for direct children");
	if (flags & RF_ACTIVE)
		panic(
    "resource_list_reserve() should only reserve inactive resources");

	r = resource_list_alloc(rl, bus, child, type, rid, start, end, count,
	    flags);
	if (r != NULL) {
		rle = resource_list_find(rl, type, *rid);
		rle->flags |= RLE_RESERVED;
	}
	return (r);
}

/**
 * @brief Helper function for implementing BUS_ALLOC_RESOURCE()
 *
 * Implement BUS_ALLOC_RESOURCE() by looking up a resource from the list
 * and passing the allocation up to the parent of @p bus. This assumes
 * that the first entry of @c device_get_ivars(child) is a struct
 * resource_list. This also handles 'passthrough' allocations where a
 * child is a remote descendant of bus by passing the allocation up to
 * the parent of bus.
 *
 * Typically, a bus driver would store a list of child resources
 * somewhere in the child device's ivars (see device_get_ivars()) and
 * its implementation of BUS_ALLOC_RESOURCE() would find that list and
 * then call resource_list_alloc() to perform the allocation.
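 *
 * As a sketch of that pattern (assuming, as described above, that the
 * child's ivars point directly at its struct resource_list; the "foo"
 * prefix is hypothetical):
 *
 *	static struct resource *
 *	foo_alloc_resource(device_t bus, device_t child, int type, int *rid,
 *	    rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
 *	{
 *		struct resource_list *rl = device_get_ivars(child);
 *
 *		return (resource_list_alloc(rl, bus, child, type, rid,
 *		    start, end, count, flags));
 *	}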
 *
 * @param rl		the resource list to allocate from
 * @param bus		the parent device of @p child
 * @param child	the device which is requesting an allocation
 * @param type		the type of resource to allocate
 * @param rid		a pointer to the resource identifier
 * @param start	hint at the start of the resource range - pass
 *			@c 0 for any start address
 * @param end		hint at the end of the resource range - pass
 *			@c ~0 for any end address
 * @param count	hint at the size of range required - pass @c 1
 *			for any size
 * @param flags	any extra flags to control the resource
 *			allocation - see @c RF_XXX flags in
 *			<sys/rman.h> for details
 *
 * @returns		the resource which was allocated or @c NULL if no
 *			resource could be allocated
 */
struct resource *
resource_list_alloc(struct resource_list *rl, device_t bus, device_t child,
    int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count,
    u_int flags)
{
	struct resource_list_entry *rle = NULL;
	int passthrough = (device_get_parent(child) != bus);
	int isdefault = RMAN_IS_DEFAULT_RANGE(start, end);

	if (passthrough) {
		return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child,
		    type, rid, start, end, count, flags));
	}

	rle = resource_list_find(rl, type, *rid);

	if (!rle)
		return (NULL);		/* no resource of that type/rid */

	if (rle->res) {
		if (rle->flags & RLE_RESERVED) {
			if (rle->flags & RLE_ALLOCATED)
				return (NULL);
			if ((flags & RF_ACTIVE) &&
			    bus_activate_resource(child, type, *rid,
			    rle->res) != 0)
				return (NULL);
			rle->flags |= RLE_ALLOCATED;
			return (rle->res);
		}
		device_printf(bus,
		    "resource entry %#x type %d for child %s is busy\n",
		    *rid, type, device_get_nameunit(child));
		return (NULL);
	}

	if (isdefault) {
		start = rle->start;
		count = ulmax(count, rle->count);
		end = ulmax(rle->end, start + count - 1);
	}

	rle->res = BUS_ALLOC_RESOURCE(device_get_parent(bus), child,
	    type, rid, start, end, count, flags);

	/*
	 * Record the new range.
	 */
	if (rle->res) {
		rle->start = rman_get_start(rle->res);
		rle->end = rman_get_end(rle->res);
		rle->count = count;
	}

	return (rle->res);
}

/**
 * @brief Helper function for implementing BUS_RELEASE_RESOURCE()
 *
 * Implement BUS_RELEASE_RESOURCE() using a resource list. Normally
 * used with resource_list_alloc().
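 *
 * A matching BUS_RELEASE_RESOURCE() implementation might look like
 * the following sketch (same hypothetical ivar layout as the
 * allocation example above):
 *
 *	static int
 *	foo_release_resource(device_t bus, device_t child, int type, int rid,
 *	    struct resource *r)
 *	{
 *		struct resource_list *rl = device_get_ivars(child);
 *
 *		return (resource_list_release(rl, bus, child, type, rid, r));
 *	}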
* * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device which is requesting a release * @param type the type of resource to release * @param rid the resource identifier * @param res the resource to release * * @retval 0 success * @retval non-zero a standard unix error code indicating what * error condition prevented the operation */ int resource_list_release(struct resource_list *rl, device_t bus, device_t child, int type, int rid, struct resource *res) { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); int error; if (passthrough) { return (BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, res)); } rle = resource_list_find(rl, type, rid); if (!rle) panic("resource_list_release: can't find resource"); if (!rle->res) panic("resource_list_release: resource entry is not busy"); if (rle->flags & RLE_RESERVED) { if (rle->flags & RLE_ALLOCATED) { if (rman_get_flags(res) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, res); if (error) return (error); } rle->flags &= ~RLE_ALLOCATED; return (0); } return (EINVAL); } error = BUS_RELEASE_RESOURCE(device_get_parent(bus), child, type, rid, res); if (error) return (error); rle->res = NULL; return (0); } /** * @brief Release all active resources of a given type * * Release all active resources of a specified type. This is intended * to be used to cleanup resources leaked by a driver after detach or * a failed attach. * * @param rl the resource list which was allocated from * @param bus the parent device of @p child * @param child the device whose active resources are being released * @param type the type of resources to release * * @retval 0 success * @retval EBUSY at least one resource was active */ int resource_list_release_active(struct resource_list *rl, device_t bus, device_t child, int type) { struct resource_list_entry *rle; int error, retval; retval = 0; STAILQ_FOREACH(rle, rl, link) { if (rle->type != type) continue; if (rle->res == NULL) continue; if ((rle->flags & (RLE_RESERVED | RLE_ALLOCATED)) == RLE_RESERVED) continue; retval = EBUSY; error = resource_list_release(rl, bus, child, type, rman_get_rid(rle->res), rle->res); if (error != 0) device_printf(bus, "Failed to release active resource: %d\n", error); } return (retval); } /** * @brief Fully release a reserved resource * * Fully releases a resource reserved via resource_list_reserve(). 
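 *
 * Unreserving is the mirror image of resource_list_reserve(); a
 * minimal sketch for a direct child (rid 0 and SYS_RES_MEMORY are
 * arbitrary choices here, and rl, bus and child are as described in
 * the parameters below):
 *
 *	int rid = 0;
 *	struct resource *r;
 *
 *	r = resource_list_reserve(rl, bus, child, SYS_RES_MEMORY, &rid,
 *	    0ul, ~0ul, 1, 0);
 *	if (r != NULL) {
 *		(... use or hand out the reserved range ...)
 *		resource_list_unreserve(rl, bus, child, SYS_RES_MEMORY, rid);
 *	}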
 *
 * @param rl		the resource list which was allocated from
 * @param bus		the parent device of @p child
 * @param child	the device whose reserved resource is being released
 * @param type		the type of resource to release
 * @param rid		the resource identifier
 *
 * @retval 0		success
 * @retval non-zero	a standard unix error code indicating what
 *			error condition prevented the operation
 */
int
resource_list_unreserve(struct resource_list *rl, device_t bus, device_t child,
    int type, int rid)
{
	struct resource_list_entry *rle = NULL;
	int passthrough = (device_get_parent(child) != bus);

	if (passthrough)
		panic(
    "resource_list_unreserve() should only be called for direct children");

	rle = resource_list_find(rl, type, rid);

	if (!rle)
		panic("resource_list_unreserve: can't find resource");
	if (!(rle->flags & RLE_RESERVED))
		return (EINVAL);
	if (rle->flags & RLE_ALLOCATED)
		return (EBUSY);
	rle->flags &= ~RLE_RESERVED;
	return (resource_list_release(rl, bus, child, type, rid, rle->res));
}

/**
 * @brief Print a description of resources in a resource list
 *
 * Print all resources of a specified type, for use in BUS_PRINT_CHILD().
 * The name is printed if at least one resource of the given type is available.
 * The format is used to print resource start and end.
 *
 * @param rl		the resource list to print
 * @param name		the name of @p type, e.g. @c "memory"
 * @param type		the type of resource entry to print
 * @param format	printf(9) format string to print resource
 *			start and end values
 *
 * @returns		the number of characters printed
 */
int
resource_list_print_type(struct resource_list *rl, const char *name, int type,
    const char *format)
{
	struct resource_list_entry *rle;
	int printed, retval;

	printed = 0;
	retval = 0;
	/* Yes, this is kinda cheating */
	STAILQ_FOREACH(rle, rl, link) {
		if (rle->type == type) {
			if (printed == 0)
				retval += printf(" %s ", name);
			else
				retval += printf(",");
			printed++;
			retval += printf(format, rle->start);
			if (rle->count > 1) {
				retval += printf("-");
				retval += printf(format, rle->start +
				    rle->count - 1);
			}
		}
	}
	return (retval);
}

/**
 * @brief Releases all the resources in a list.
 *
 * @param rl		The resource list to purge.
 *
 * @returns		nothing
 */
void
resource_list_purge(struct resource_list *rl)
{
	struct resource_list_entry *rle;

	while ((rle = STAILQ_FIRST(rl)) != NULL) {
		if (rle->res)
			bus_release_resource(rman_get_device(rle->res),
			    rle->type, rle->rid, rle->res);
		STAILQ_REMOVE_HEAD(rl, link);
		free(rle, M_BUS);
	}
}

device_t
bus_generic_add_child(device_t dev, u_int order, const char *name, int unit)
{
	return (device_add_child_ordered(dev, order, name, unit));
}

/**
 * @brief Helper function for implementing DEVICE_PROBE()
 *
 * This function can be used to help implement the DEVICE_PROBE() for
 * a bus (i.e. a device which has other devices attached to it). It
 * calls the DEVICE_IDENTIFY() method of each driver in the device's
 * devclass.
 */
int
bus_generic_probe(device_t dev)
{
	devclass_t dc = dev->devclass;
	driverlink_t dl;

	TAILQ_FOREACH(dl, &dc->drivers, link) {
		/*
		 * If this driver's pass is too high, then ignore it.
		 * For most drivers in the default pass, this will
		 * never be true. For early-pass busses, only the
		 * identify routines of eligible drivers are called
		 * when this routine runs. Drivers for later passes
		 * should have their identify routines called on
		 * early-pass busses during BUS_NEW_PASS().
*/ if (dl->pass > bus_current_pass) continue; DEVICE_IDENTIFY(dl->driver, dev); } return (0); } /** * @brief Helper function for implementing DEVICE_ATTACH() * * This function can be used to help implement the DEVICE_ATTACH() for * a bus. It calls device_probe_and_attach() for each of the device's * children. */ int bus_generic_attach(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { device_probe_and_attach(child); } return (0); } /** * @brief Helper function for implementing DEVICE_DETACH() * * This function can be used to help implement the DEVICE_DETACH() for * a bus. It calls device_detach() for each of the device's * children. */ int bus_generic_detach(device_t dev) { device_t child; int error; if (dev->state != DS_ATTACHED) return (EBUSY); TAILQ_FOREACH(child, &dev->children, link) { if ((error = device_detach(child)) != 0) return (error); } return (0); } /** * @brief Helper function for implementing DEVICE_SHUTDOWN() * * This function can be used to help implement the DEVICE_SHUTDOWN() * for a bus. It calls device_shutdown() for each of the device's * children. */ int bus_generic_shutdown(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { device_shutdown(child); } return (0); } /** * @brief Default function for suspending a child device. * * This function is to be used by a bus's DEVICE_SUSPEND_CHILD(). */ int bus_generic_suspend_child(device_t dev, device_t child) { int error; error = DEVICE_SUSPEND(child); if (error == 0) child->flags |= DF_SUSPENDED; return (error); } /** * @brief Default function for resuming a child device. * * This function is to be used by a bus's DEVICE_RESUME_CHILD(). */ int bus_generic_resume_child(device_t dev, device_t child) { DEVICE_RESUME(child); child->flags &= ~DF_SUSPENDED; return (0); } /** * @brief Helper function for implementing DEVICE_SUSPEND() * * This function can be used to help implement the DEVICE_SUSPEND() * for a bus. It calls DEVICE_SUSPEND() for each of the device's * children. If any call to DEVICE_SUSPEND() fails, the suspend * operation is aborted and any devices which were suspended are * resumed immediately by calling their DEVICE_RESUME() methods. */ int bus_generic_suspend(device_t dev) { int error; device_t child, child2; TAILQ_FOREACH(child, &dev->children, link) { error = BUS_SUSPEND_CHILD(dev, child); if (error) { for (child2 = TAILQ_FIRST(&dev->children); child2 && child2 != child; child2 = TAILQ_NEXT(child2, link)) BUS_RESUME_CHILD(dev, child2); return (error); } } return (0); } /** * @brief Helper function for implementing DEVICE_RESUME() * * This function can be used to help implement the DEVICE_RESUME() for * a bus. It calls DEVICE_RESUME() on each of the device's children. */ int bus_generic_resume(device_t dev) { device_t child; TAILQ_FOREACH(child, &dev->children, link) { BUS_RESUME_CHILD(dev, child); /* if resume fails, there's nothing we can usefully do... */ } return (0); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). * * This function prints the first part of the ascii representation of * @p child, including its name, unit and description (if any - see * device_set_desc()). * * @returns the number of characters printed */ int bus_print_child_header(device_t dev, device_t child) { int retval = 0; if (device_get_desc(child)) { retval += device_printf(child, "<%s>", device_get_desc(child)); } else { retval += printf("%s", device_get_nameunit(child)); } return (retval); } /** * @brief Helper function for implementing BUS_PRINT_CHILD(). 
 *
 * This function prints the last part of the ascii representation of
 * @p child, which consists of the string @c " on " followed by the
 * name and unit of the @p dev.
 *
 * @returns		the number of characters printed
 */
int
bus_print_child_footer(device_t dev, device_t child)
{
	return (printf(" on %s\n", device_get_nameunit(dev)));
}

/**
 * @brief Helper function for implementing BUS_PRINT_CHILD().
 *
 * This function prints out the VM domain for the given device.
 *
 * @returns		the number of characters printed
 */
int
bus_print_child_domain(device_t dev, device_t child)
{
	int domain;

	/* No domain? Don't print anything */
	if (BUS_GET_DOMAIN(dev, child, &domain) != 0)
		return (0);

	return (printf(" numa-domain %d", domain));
}

/**
 * @brief Helper function for implementing BUS_PRINT_CHILD().
 *
 * This function simply calls bus_print_child_header() followed by
 * bus_print_child_footer().
 *
 * @returns		the number of characters printed
 */
int
bus_generic_print_child(device_t dev, device_t child)
{
	int retval = 0;

	retval += bus_print_child_header(dev, child);
	retval += bus_print_child_domain(dev, child);
	retval += bus_print_child_footer(dev, child);

	return (retval);
}

/**
 * @brief Stub function for implementing BUS_READ_IVAR().
 *
 * @returns ENOENT
 */
int
bus_generic_read_ivar(device_t dev, device_t child, int index,
    uintptr_t * result)
{
	return (ENOENT);
}

/**
 * @brief Stub function for implementing BUS_WRITE_IVAR().
 *
 * @returns ENOENT
 */
int
bus_generic_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{
	return (ENOENT);
}

/**
 * @brief Stub function for implementing BUS_GET_RESOURCE_LIST().
 *
 * @returns NULL
 */
struct resource_list *
bus_generic_get_resource_list(device_t dev, device_t child)
{
	return (NULL);
}

/**
 * @brief Helper function for implementing BUS_DRIVER_ADDED().
 *
 * This implementation of BUS_DRIVER_ADDED() simply calls the driver's
 * DEVICE_IDENTIFY() method to allow it to add new children to the bus
 * and then calls device_probe_and_attach() for each unattached child.
 */
void
bus_generic_driver_added(device_t dev, driver_t *driver)
{
	device_t child;

	DEVICE_IDENTIFY(driver, dev);
	TAILQ_FOREACH(child, &dev->children, link) {
		if (child->state == DS_NOTPRESENT ||
		    (child->flags & DF_REBID))
			device_probe_and_attach(child);
	}
}

/**
 * @brief Helper function for implementing BUS_NEW_PASS().
 *
 * This implementation of BUS_NEW_PASS() first calls the identify
 * routines for any drivers that probe at the current pass. Then it
 * walks the list of devices for this bus. If a device is already
 * attached, then it calls BUS_NEW_PASS() on that device. If the
 * device is not already attached, it attempts to attach a driver to
 * it.
 */
void
bus_generic_new_pass(device_t dev)
{
	driverlink_t dl;
	devclass_t dc;
	device_t child;

	dc = dev->devclass;
	TAILQ_FOREACH(dl, &dc->drivers, link) {
		if (dl->pass == bus_current_pass)
			DEVICE_IDENTIFY(dl->driver, dev);
	}
	TAILQ_FOREACH(child, &dev->children, link) {
		if (child->state >= DS_ATTACHED)
			BUS_NEW_PASS(child);
		else if (child->state == DS_NOTPRESENT)
			device_probe_and_attach(child);
	}
}

/**
 * @brief Helper function for implementing BUS_MAP_INTR().
 *
 * This simple implementation of BUS_MAP_INTR() simply calls the
 * BUS_MAP_INTR() method of the parent of @p dev.
 */
int
bus_generic_map_intr(device_t dev, device_t child, int *rid,
    rman_res_t *start, rman_res_t *end, rman_res_t *count,
    struct intr_map_data **imd)
{
	/* Propagate up the bus hierarchy until someone handles it.
*/ if (dev->parent) return (BUS_MAP_INTR(dev->parent, child, rid, start, end, count, imd)); return (EINVAL); } /** * @brief Helper function for implementing BUS_SETUP_INTR(). * * This simple implementation of BUS_SETUP_INTR() simply calls the * BUS_SETUP_INTR() method of the parent of @p dev. */ int bus_generic_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_SETUP_INTR(dev->parent, child, irq, flags, filter, intr, arg, cookiep)); return (EINVAL); } /** * @brief Helper function for implementing BUS_TEARDOWN_INTR(). * * This simple implementation of BUS_TEARDOWN_INTR() simply calls the * BUS_TEARDOWN_INTR() method of the parent of @p dev. */ int bus_generic_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_TEARDOWN_INTR(dev->parent, child, irq, cookie)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ADJUST_RESOURCE(). * * This simple implementation of BUS_ADJUST_RESOURCE() simply calls the * BUS_ADJUST_RESOURCE() method of the parent of @p dev. */ int bus_generic_adjust_resource(device_t dev, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ADJUST_RESOURCE(dev->parent, child, type, r, start, end)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This simple implementation of BUS_ALLOC_RESOURCE() simply calls the * BUS_ALLOC_RESOURCE() method of the parent of @p dev. */ struct resource * bus_generic_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ALLOC_RESOURCE(dev->parent, child, type, rid, start, end, count, flags)); return (NULL); } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This simple implementation of BUS_RELEASE_RESOURCE() simply calls the * BUS_RELEASE_RESOURCE() method of the parent of @p dev. */ int bus_generic_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_RELEASE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_ACTIVATE_RESOURCE(). * * This simple implementation of BUS_ACTIVATE_RESOURCE() simply calls the * BUS_ACTIVATE_RESOURCE() method of the parent of @p dev. */ int bus_generic_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_ACTIVATE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DEACTIVATE_RESOURCE(). * * This simple implementation of BUS_DEACTIVATE_RESOURCE() simply calls the * BUS_DEACTIVATE_RESOURCE() method of the parent of @p dev. */ int bus_generic_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { /* Propagate up the bus hierarchy until someone handles it. 
*/ if (dev->parent) return (BUS_DEACTIVATE_RESOURCE(dev->parent, child, type, rid, r)); return (EINVAL); } /** * @brief Helper function for implementing BUS_MAP_RESOURCE(). * * This simple implementation of BUS_MAP_RESOURCE() simply calls the * BUS_MAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_map_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_MAP_RESOURCE(dev->parent, child, type, r, args, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_UNMAP_RESOURCE(). * * This simple implementation of BUS_UNMAP_RESOURCE() simply calls the * BUS_UNMAP_RESOURCE() method of the parent of @p dev. */ int bus_generic_unmap_resource(device_t dev, device_t child, int type, struct resource *r, struct resource_map *map) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_UNMAP_RESOURCE(dev->parent, child, type, r, map)); return (EINVAL); } /** * @brief Helper function for implementing BUS_BIND_INTR(). * * This simple implementation of BUS_BIND_INTR() simply calls the * BUS_BIND_INTR() method of the parent of @p dev. */ int bus_generic_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_BIND_INTR(dev->parent, child, irq, cpu)); return (EINVAL); } /** * @brief Helper function for implementing BUS_CONFIG_INTR(). * * This simple implementation of BUS_CONFIG_INTR() simply calls the * BUS_CONFIG_INTR() method of the parent of @p dev. */ int bus_generic_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_CONFIG_INTR(dev->parent, irq, trig, pol)); return (EINVAL); } /** * @brief Helper function for implementing BUS_DESCRIBE_INTR(). * * This simple implementation of BUS_DESCRIBE_INTR() simply calls the * BUS_DESCRIBE_INTR() method of the parent of @p dev. */ int bus_generic_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent) return (BUS_DESCRIBE_INTR(dev->parent, child, irq, cookie, descr)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_CPUS(). * * This simple implementation of BUS_GET_CPUS() simply calls the * BUS_GET_CPUS() method of the parent of @p dev. */ int bus_generic_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_CPUS(dev->parent, child, op, setsize, cpuset)); return (EINVAL); } /** * @brief Helper function for implementing BUS_GET_DMA_TAG(). * * This simple implementation of BUS_GET_DMA_TAG() simply calls the * BUS_GET_DMA_TAG() method of the parent of @p dev. */ bus_dma_tag_t bus_generic_get_dma_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_DMA_TAG(dev->parent, child)); return (NULL); } /** * @brief Helper function for implementing BUS_GET_BUS_TAG(). * * This simple implementation of BUS_GET_BUS_TAG() simply calls the * BUS_GET_BUS_TAG() method of the parent of @p dev. 
*/ bus_space_tag_t bus_generic_get_bus_tag(device_t dev, device_t child) { /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_BUS_TAG(dev->parent, child)); return ((bus_space_tag_t)0); } /** * @brief Helper function for implementing BUS_GET_RESOURCE(). * * This implementation of BUS_GET_RESOURCE() uses the * resource_list_find() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * search. */ int bus_generic_rl_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct resource_list * rl = NULL; struct resource_list_entry * rle = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); rle = resource_list_find(rl, type, rid); if (!rle) return (ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return (0); } /** * @brief Helper function for implementing BUS_SET_RESOURCE(). * * This implementation of BUS_SET_RESOURCE() uses the * resource_list_add() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ int bus_generic_rl_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); resource_list_add(rl, type, rid, start, (start + count - 1), count); return (0); } /** * @brief Helper function for implementing BUS_DELETE_RESOURCE(). * * This implementation of BUS_DELETE_RESOURCE() uses the * resource_list_delete() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list to * edit. */ void bus_generic_rl_delete_resource(device_t dev, device_t child, int type, int rid) { struct resource_list * rl = NULL; rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return; resource_list_delete(rl, type, rid); return; } /** * @brief Helper function for implementing BUS_RELEASE_RESOURCE(). * * This implementation of BUS_RELEASE_RESOURCE() uses the * resource_list_release() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. */ int bus_generic_rl_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, type, rid, r)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (EINVAL); return (resource_list_release(rl, dev, child, type, rid, r)); } /** * @brief Helper function for implementing BUS_ALLOC_RESOURCE(). * * This implementation of BUS_ALLOC_RESOURCE() uses the * resource_list_alloc() function to do most of the work. It calls * BUS_GET_RESOURCE_LIST() to find a suitable resource list. */ struct resource * bus_generic_rl_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource_list * rl = NULL; if (device_get_parent(child) != dev) return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid, start, end, count, flags)); rl = BUS_GET_RESOURCE_LIST(dev, child); if (!rl) return (NULL); return (resource_list_alloc(rl, dev, child, type, rid, start, end, count, flags)); } /** * @brief Helper function for implementing BUS_CHILD_PRESENT(). * * This simple implementation of BUS_CHILD_PRESENT() simply calls the * BUS_CHILD_PRESENT() method of the parent of @p dev. 
*/ int bus_generic_child_present(device_t dev, device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(dev), dev)); } int bus_generic_get_domain(device_t dev, device_t child, int *domain) { if (dev->parent) return (BUS_GET_DOMAIN(dev->parent, dev, domain)); return (ENOENT); } /** * @brief Helper function for implementing BUS_RESCAN(). * * This null implementation of BUS_RESCAN() always fails to indicate * the bus does not support rescanning. */ int bus_null_rescan(device_t dev) { return (ENXIO); } /* * Some convenience functions to make it easier for drivers to use the * resource-management functions. All these really do is hide the * indirection through the parent's method table, making for slightly * less-wordy code. In the future, it might make sense for this code * to maintain some sort of a list of resources allocated by each device. */ int bus_alloc_resources(device_t dev, struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) res[i] = NULL; for (i = 0; rs[i].type != -1; i++) { res[i] = bus_alloc_resource_any(dev, rs[i].type, &rs[i].rid, rs[i].flags); if (res[i] == NULL && !(rs[i].flags & RF_OPTIONAL)) { bus_release_resources(dev, rs, res); return (ENXIO); } } return (0); } void bus_release_resources(device_t dev, const struct resource_spec *rs, struct resource **res) { int i; for (i = 0; rs[i].type != -1; i++) if (res[i] != NULL) { bus_release_resource( dev, rs[i].type, rs[i].rid, res[i]); res[i] = NULL; } } #ifdef INTRNG /** * @internal * * This can be converted to bus method later. (XXX) */ static struct intr_map_data * bus_extend_resource(device_t dev, int type, int *rid, rman_res_t *start, rman_res_t *end, rman_res_t *count) { struct intr_map_data *imd; struct resource_list *rl; int rv; if (dev->parent == NULL) return (NULL); if (type != SYS_RES_IRQ) return (NULL); if (!RMAN_IS_DEFAULT_RANGE(*start, *end)) return (NULL); rl = BUS_GET_RESOURCE_LIST(dev->parent, dev); if (rl != NULL) { if (resource_list_find(rl, type, *rid) != NULL) return (NULL); } rv = BUS_MAP_INTR(dev->parent, dev, rid, start, end, count, &imd); if (rv != 0) return (NULL); if (rl != NULL) resource_list_add(rl, type, *rid, *start, *end, *count); return (imd); } #endif /** * @brief Wrapper function for BUS_ALLOC_RESOURCE(). * * This function simply calls the BUS_ALLOC_RESOURCE() method of the * parent of @p dev. */ struct resource * bus_alloc_resource(device_t dev, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; #ifdef INTRNG struct intr_map_data *imd; #endif if (dev->parent == NULL) return (NULL); #ifdef INTRNG imd = bus_extend_resource(dev, type, rid, &start, &end, &count); #endif res = BUS_ALLOC_RESOURCE(dev->parent, dev, type, rid, start, end, count, flags); #ifdef INTRNG if (imd != NULL) { if (res != NULL && rman_get_virtual(res) == NULL) rman_set_virtual(res, imd); else imd->destruct(imd); } #endif return (res); } /** * @brief Wrapper function for BUS_ADJUST_RESOURCE(). * * This function simply calls the BUS_ADJUST_RESOURCE() method of the * parent of @p dev. */ int bus_adjust_resource(device_t dev, int type, struct resource *r, rman_res_t start, rman_res_t end) { if (dev->parent == NULL) return (EINVAL); return (BUS_ADJUST_RESOURCE(dev->parent, dev, type, r, start, end)); } /** * @brief Wrapper function for BUS_ACTIVATE_RESOURCE(). * * This function simply calls the BUS_ACTIVATE_RESOURCE() method of the * parent of @p dev. 
*/ int bus_activate_resource(device_t dev, int type, int rid, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_ACTIVATE_RESOURCE(dev->parent, dev, type, rid, r)); } /** * @brief Wrapper function for BUS_DEACTIVATE_RESOURCE(). * * This function simply calls the BUS_DEACTIVATE_RESOURCE() method of the * parent of @p dev. */ int bus_deactivate_resource(device_t dev, int type, int rid, struct resource *r) { if (dev->parent == NULL) return (EINVAL); return (BUS_DEACTIVATE_RESOURCE(dev->parent, dev, type, rid, r)); } /** * @brief Wrapper function for BUS_MAP_RESOURCE(). * * This function simply calls the BUS_MAP_RESOURCE() method of the * parent of @p dev. */ int bus_map_resource(device_t dev, int type, struct resource *r, struct resource_map_request *args, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_MAP_RESOURCE(dev->parent, dev, type, r, args, map)); } /** * @brief Wrapper function for BUS_UNMAP_RESOURCE(). * * This function simply calls the BUS_UNMAP_RESOURCE() method of the * parent of @p dev. */ int bus_unmap_resource(device_t dev, int type, struct resource *r, struct resource_map *map) { if (dev->parent == NULL) return (EINVAL); return (BUS_UNMAP_RESOURCE(dev->parent, dev, type, r, map)); } /** * @brief Wrapper function for BUS_RELEASE_RESOURCE(). * * This function simply calls the BUS_RELEASE_RESOURCE() method of the * parent of @p dev. */ int bus_release_resource(device_t dev, int type, int rid, struct resource *r) { int rv; #ifdef INTRNG struct intr_map_data *imd; #endif if (dev->parent == NULL) return (EINVAL); #ifdef INTRNG imd = (type == SYS_RES_IRQ) ? rman_get_virtual(r) : NULL; #endif rv = BUS_RELEASE_RESOURCE(dev->parent, dev, type, rid, r); #ifdef INTRNG if (imd != NULL) imd->destruct(imd); #endif return (rv); } /** * @brief Wrapper function for BUS_SETUP_INTR(). * * This function simply calls the BUS_SETUP_INTR() method of the * parent of @p dev. */ int bus_setup_intr(device_t dev, struct resource *r, int flags, driver_filter_t filter, driver_intr_t handler, void *arg, void **cookiep) { int error; if (dev->parent == NULL) return (EINVAL); error = BUS_SETUP_INTR(dev->parent, dev, r, flags, filter, handler, arg, cookiep); if (error != 0) return (error); if (handler != NULL && !(flags & INTR_MPSAFE)) device_printf(dev, "[GIANT-LOCKED]\n"); return (0); } /** * @brief Wrapper function for BUS_TEARDOWN_INTR(). * * This function simply calls the BUS_TEARDOWN_INTR() method of the * parent of @p dev. */ int bus_teardown_intr(device_t dev, struct resource *r, void *cookie) { if (dev->parent == NULL) return (EINVAL); return (BUS_TEARDOWN_INTR(dev->parent, dev, r, cookie)); } /** * @brief Wrapper function for BUS_BIND_INTR(). * * This function simply calls the BUS_BIND_INTR() method of the * parent of @p dev. */ int bus_bind_intr(device_t dev, struct resource *r, int cpu) { if (dev->parent == NULL) return (EINVAL); return (BUS_BIND_INTR(dev->parent, dev, r, cpu)); } /** * @brief Wrapper function for BUS_DESCRIBE_INTR(). * * This function first formats the requested description into a * temporary buffer and then calls the BUS_DESCRIBE_INTR() method of * the parent of @p dev. */ int bus_describe_intr(device_t dev, struct resource *irq, void *cookie, const char *fmt, ...) 
{ va_list ap; char descr[MAXCOMLEN + 1]; if (dev->parent == NULL) return (EINVAL); va_start(ap, fmt); vsnprintf(descr, sizeof(descr), fmt, ap); va_end(ap); return (BUS_DESCRIBE_INTR(dev->parent, dev, irq, cookie, descr)); } /** * @brief Wrapper function for BUS_SET_RESOURCE(). * * This function simply calls the BUS_SET_RESOURCE() method of the * parent of @p dev. */ int bus_set_resource(device_t dev, int type, int rid, rman_res_t start, rman_res_t count) { return (BUS_SET_RESOURCE(device_get_parent(dev), dev, type, rid, start, count)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev. */ int bus_get_resource(device_t dev, int type, int rid, rman_res_t *startp, rman_res_t *countp) { return (BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, startp, countp)); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the start value. */ rman_res_t bus_get_resource_start(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (start); } /** * @brief Wrapper function for BUS_GET_RESOURCE(). * * This function simply calls the BUS_GET_RESOURCE() method of the * parent of @p dev and returns the count value. */ rman_res_t bus_get_resource_count(device_t dev, int type, int rid) { rman_res_t start; rman_res_t count; int error; error = BUS_GET_RESOURCE(device_get_parent(dev), dev, type, rid, &start, &count); if (error) return (0); return (count); } /** * @brief Wrapper function for BUS_DELETE_RESOURCE(). * * This function simply calls the BUS_DELETE_RESOURCE() method of the * parent of @p dev. */ void bus_delete_resource(device_t dev, int type, int rid) { BUS_DELETE_RESOURCE(device_get_parent(dev), dev, type, rid); } /** * @brief Wrapper function for BUS_CHILD_PRESENT(). * * This function simply calls the BUS_CHILD_PRESENT() method of the * parent of @p dev. */ int bus_child_present(device_t child) { return (BUS_CHILD_PRESENT(device_get_parent(child), child)); } /** * @brief Wrapper function for BUS_CHILD_PNPINFO_STR(). * * This function simply calls the BUS_CHILD_PNPINFO_STR() method of the * parent of @p dev. */ int bus_child_pnpinfo_str(device_t child, char *buf, size_t buflen) { device_t parent; parent = device_get_parent(child); if (parent == NULL) { *buf = '\0'; return (0); } return (BUS_CHILD_PNPINFO_STR(parent, child, buf, buflen)); } /** * @brief Wrapper function for BUS_CHILD_LOCATION_STR(). * * This function simply calls the BUS_CHILD_LOCATION_STR() method of the * parent of @p dev. */ int bus_child_location_str(device_t child, char *buf, size_t buflen) { device_t parent; parent = device_get_parent(child); if (parent == NULL) { *buf = '\0'; return (0); } return (BUS_CHILD_LOCATION_STR(parent, child, buf, buflen)); } /** * @brief Wrapper function for BUS_GET_CPUS(). * * This function simply calls the BUS_GET_CPUS() method of the * parent of @p dev. */ int bus_get_cpus(device_t dev, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (EINVAL); return (BUS_GET_CPUS(parent, dev, op, setsize, cpuset)); } /** * @brief Wrapper function for BUS_GET_DMA_TAG(). * * This function simply calls the BUS_GET_DMA_TAG() method of the * parent of @p dev. 
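 *
 * The tag returned here is normally used as the parent tag when the
 * driver creates its own, more constrained DMA tag; a minimal sketch,
 * with every constraint value chosen arbitrarily for illustration and
 * the softc field hypothetical:
 *
 *	error = bus_dma_tag_create(bus_get_dma_tag(dev),
 *	    1, 0,			/* alignment, boundary */
 *	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
 *	    BUS_SPACE_MAXADDR,		/* highaddr */
 *	    NULL, NULL,			/* filter, filterarg */
 *	    MCLBYTES, 1, MCLBYTES,	/* maxsize, nsegments, maxsegsz */
 *	    0, NULL, NULL,		/* flags, lockfunc, lockarg */
 *	    &sc->dmat);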
*/ bus_dma_tag_t bus_get_dma_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return (NULL); return (BUS_GET_DMA_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_BUS_TAG(). * * This function simply calls the BUS_GET_BUS_TAG() method of the * parent of @p dev. */ bus_space_tag_t bus_get_bus_tag(device_t dev) { device_t parent; parent = device_get_parent(dev); if (parent == NULL) return ((bus_space_tag_t)0); return (BUS_GET_BUS_TAG(parent, dev)); } /** * @brief Wrapper function for BUS_GET_DOMAIN(). * * This function simply calls the BUS_GET_DOMAIN() method of the * parent of @p dev. */ int bus_get_domain(device_t dev, int *domain) { return (BUS_GET_DOMAIN(device_get_parent(dev), dev, domain)); } /* Resume all devices and then notify userland that we're up again. */ static int root_resume(device_t dev) { int error; error = bus_generic_resume(dev); if (error == 0) devctl_notify("kern", "power", "resume", NULL); return (error); } static int root_print_child(device_t dev, device_t child) { int retval = 0; retval += bus_print_child_header(dev, child); retval += printf("\n"); return (retval); } static int root_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep) { /* * If an interrupt mapping gets to here something bad has happened. */ panic("root_setup_intr"); } /* * If we get here, assume that the device is permanent and really is * present in the system. Removable bus drivers are expected to intercept * this call long before it gets here. We return -1 so that drivers that * really care can check vs -1 or some ERRNO returned higher in the food * chain. */ static int root_child_present(device_t dev, device_t child) { return (-1); } static int root_get_cpus(device_t dev, device_t child, enum cpu_sets op, size_t setsize, cpuset_t *cpuset) { switch (op) { case INTR_CPUS: /* Default to returning the set of all CPUs. 
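 * A hedged caller-side sketch of bus_get_cpus(), which ends up here
 * when no intermediate bus overrides the method (error handling
 * hypothetical):
 *
 *	cpuset_t cpus;
 *
 *	if (bus_get_cpus(dev, INTR_CPUS, sizeof(cpus), &cpus) != 0)
 *		CPU_COPY(&all_cpus, &cpus);
 *
 * so callers fall back to the set of all CPUs on failure.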
*/ if (setsize != sizeof(cpuset_t)) return (EINVAL); *cpuset = all_cpus; return (0); default: return (EINVAL); } } static kobj_method_t root_methods[] = { /* Device interface */ KOBJMETHOD(device_shutdown, bus_generic_shutdown), KOBJMETHOD(device_suspend, bus_generic_suspend), KOBJMETHOD(device_resume, root_resume), /* Bus interface */ KOBJMETHOD(bus_print_child, root_print_child), KOBJMETHOD(bus_read_ivar, bus_generic_read_ivar), KOBJMETHOD(bus_write_ivar, bus_generic_write_ivar), KOBJMETHOD(bus_setup_intr, root_setup_intr), KOBJMETHOD(bus_child_present, root_child_present), KOBJMETHOD(bus_get_cpus, root_get_cpus), KOBJMETHOD_END }; static driver_t root_driver = { "root", root_methods, 1, /* no softc */ }; device_t root_bus; devclass_t root_devclass; static int root_bus_module_handler(module_t mod, int what, void* arg) { switch (what) { case MOD_LOAD: TAILQ_INIT(&bus_data_devices); kobj_class_compile((kobj_class_t) &root_driver); root_bus = make_device(NULL, "root", 0); root_bus->desc = "System root bus"; kobj_init((kobj_t) root_bus, (kobj_class_t) &root_driver); root_bus->driver = &root_driver; root_bus->state = DS_ATTACHED; root_devclass = devclass_find_internal("root", NULL, FALSE); devinit(); return (0); case MOD_SHUTDOWN: device_shutdown(root_bus); return (0); default: return (EOPNOTSUPP); } return (0); } static moduledata_t root_bus_mod = { "rootbus", root_bus_module_handler, NULL }; DECLARE_MODULE(rootbus, root_bus_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST); /** * @brief Automatically configure devices * * This function begins the autoconfiguration process by calling * device_probe_and_attach() for each child of the @c root0 device. */ void root_bus_configure(void) { PDEBUG((".")); /* Eventually this will be split up, but this is sufficient for now. */ bus_set_pass(BUS_PASS_DEFAULT); } /** * @brief Module handler for registering device drivers * * This module handler is used to automatically register device * drivers when modules are loaded. If @p what is MOD_LOAD, it calls * devclass_add_driver() for the driver described by the * driver_module_data structure pointed to by @p arg. */ int driver_module_handler(module_t mod, int what, void *arg) { struct driver_module_data *dmd; devclass_t bus_devclass; kobj_class_t driver; int error, pass; dmd = (struct driver_module_data *)arg; bus_devclass = devclass_find_internal(dmd->dmd_busname, NULL, TRUE); error = 0; switch (what) { case MOD_LOAD: if (dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); pass = dmd->dmd_pass; driver = dmd->dmd_driver; PDEBUG(("Loading module: driver %s on bus %s (pass %d)", DRIVERNAME(driver), dmd->dmd_busname, pass)); error = devclass_add_driver(bus_devclass, driver, pass, dmd->dmd_devclass); break; case MOD_UNLOAD: PDEBUG(("Unloading module: driver %s from bus %s", DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname)); error = devclass_delete_driver(bus_devclass, dmd->dmd_driver); if (!error && dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); break; case MOD_QUIESCE: PDEBUG(("Quiesce module: driver %s from bus %s", DRIVERNAME(dmd->dmd_driver), dmd->dmd_busname)); error = devclass_quiesce_driver(bus_devclass, dmd->dmd_driver); if (!error && dmd->dmd_chainevh) error = dmd->dmd_chainevh(mod,what,dmd->dmd_chainarg); break; default: error = EOPNOTSUPP; break; } return (error); } /** * @brief Enumerate all hinted devices for this bus. * * Walks through the hints for this bus and calls the bus_hinted_child * routine for each one it finds.
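 * (Hints normally come from /boot/device.hints or loader.conf; for
 * example, a hypothetical line such as
 *
 *	hint.uart.2.at="isa0"
 *
 * would cause uart2 to be offered to isa0 as a hinted child here.)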
It searches first for the specific * bus that's being probed for hinted children (eg isa0), and then for * generic children (eg isa). * * @param dev bus device to enumerate */ void bus_enumerate_hinted_children(device_t bus) { int i; const char *dname, *busname; int dunit; /* * enumerate all devices on the specific bus */ busname = device_get_nameunit(bus); i = 0; while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0) BUS_HINTED_CHILD(bus, dname, dunit); /* * and all the generic ones. */ busname = device_get_name(bus); i = 0; while (resource_find_match(&i, &dname, &dunit, "at", busname) == 0) BUS_HINTED_CHILD(bus, dname, dunit); } #ifdef BUS_DEBUG /* the _short versions avoid iteration by not calling anything that prints * more than oneliners. I love oneliners. */ static void print_device_short(device_t dev, int indent) { if (!dev) return; indentprintf(("device %d: <%s> %sparent,%schildren,%s%s%s%s%s,%sivars,%ssoftc,busy=%d\n", dev->unit, dev->desc, (dev->parent? "":"no "), (TAILQ_EMPTY(&dev->children)? "no ":""), (dev->flags&DF_ENABLED? "enabled,":"disabled,"), (dev->flags&DF_FIXEDCLASS? "fixed,":""), (dev->flags&DF_WILDCARD? "wildcard,":""), (dev->flags&DF_DESCMALLOCED? "descmalloced,":""), (dev->flags&DF_REBID? "rebiddable,":""), (dev->ivars? "":"no "), (dev->softc? "":"no "), dev->busy)); } static void print_device(device_t dev, int indent) { if (!dev) return; print_device_short(dev, indent); indentprintf(("Parent:\n")); print_device_short(dev->parent, indent+1); indentprintf(("Driver:\n")); print_driver_short(dev->driver, indent+1); indentprintf(("Devclass:\n")); print_devclass_short(dev->devclass, indent+1); } void print_device_tree_short(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device_short(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree_short(child, indent+1); } } void print_device_tree(device_t dev, int indent) /* print the device and all its children (indented) */ { device_t child; if (!dev) return; print_device(dev, indent); TAILQ_FOREACH(child, &dev->children, link) { print_device_tree(child, indent+1); } } static void print_driver_short(driver_t *driver, int indent) { if (!driver) return; indentprintf(("driver %s: softc size = %zd\n", driver->name, driver->size)); } static void print_driver(driver_t *driver, int indent) { if (!driver) return; print_driver_short(driver, indent); } static void print_driver_list(driver_list_t drivers, int indent) { driverlink_t driver; TAILQ_FOREACH(driver, &drivers, link) { print_driver(driver->driver, indent); } } static void print_devclass_short(devclass_t dc, int indent) { if ( !dc ) return; indentprintf(("devclass %s: max units = %d\n", dc->name, dc->maxunit)); } static void print_devclass(devclass_t dc, int indent) { int i; if ( !dc ) return; print_devclass_short(dc, indent); indentprintf(("Drivers:\n")); print_driver_list(dc->drivers, indent+1); indentprintf(("Devices:\n")); for (i = 0; i < dc->maxunit; i++) if (dc->devices[i]) print_device(dc->devices[i], indent+1); } void print_devclass_list_short(void) { devclass_t dc; printf("Short listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass_short(dc, 0); } } void print_devclass_list(void) { devclass_t dc; printf("Full listing of devclasses, drivers & devices:\n"); TAILQ_FOREACH(dc, &devclasses, link) { print_devclass(dc, 0); } } #endif /* * User-space access to the device tree. 
* * We implement a small set of nodes: * * hw.bus Single integer read method to obtain the * current generation count. * hw.bus.devices Reads the entire device tree in flat space. * hw.bus.rman Resource manager interface * * We might like to add the ability to scan devclasses and/or drivers to * determine what else is currently loaded/available. */ static int sysctl_bus(SYSCTL_HANDLER_ARGS) { struct u_businfo ubus; ubus.ub_version = BUS_USER_VERSION; ubus.ub_generation = bus_data_generation; return (SYSCTL_OUT(req, &ubus, sizeof(ubus))); } SYSCTL_NODE(_hw_bus, OID_AUTO, info, CTLFLAG_RW, sysctl_bus, "bus-related data"); static int sysctl_devices(SYSCTL_HANDLER_ARGS) { int *name = (int *)arg1; u_int namelen = arg2; int index; - struct device *dev; + device_t dev; struct u_device udev; /* XXX this is a bit big */ int error; if (namelen != 2) return (EINVAL); if (bus_data_generation_check(name[0])) return (EINVAL); index = name[1]; /* * Scan the list of devices, looking for the requested index. */ TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (index-- == 0) break; } if (dev == NULL) return (ENOENT); /* * Populate the return array. */ bzero(&udev, sizeof(udev)); udev.dv_handle = (uintptr_t)dev; udev.dv_parent = (uintptr_t)dev->parent; if (dev->nameunit != NULL) strlcpy(udev.dv_name, dev->nameunit, sizeof(udev.dv_name)); if (dev->desc != NULL) strlcpy(udev.dv_desc, dev->desc, sizeof(udev.dv_desc)); if (dev->driver != NULL && dev->driver->name != NULL) strlcpy(udev.dv_drivername, dev->driver->name, sizeof(udev.dv_drivername)); bus_child_pnpinfo_str(dev, udev.dv_pnpinfo, sizeof(udev.dv_pnpinfo)); bus_child_location_str(dev, udev.dv_location, sizeof(udev.dv_location)); udev.dv_devflags = dev->devflags; udev.dv_flags = dev->flags; udev.dv_state = dev->state; error = SYSCTL_OUT(req, &udev, sizeof(udev)); return (error); } SYSCTL_NODE(_hw_bus, OID_AUTO, devices, CTLFLAG_RD, sysctl_devices, "system device tree"); int bus_data_generation_check(int generation) { if (generation != bus_data_generation) return (1); /* XXX generate optimised lists here? */ return (0); } void bus_data_generation_update(void) { bus_data_generation++; } int bus_free_resource(device_t dev, int type, struct resource *r) { if (r == NULL) return (0); return (bus_release_resource(dev, type, rman_get_rid(r), r)); } device_t device_lookup_by_name(const char *name) { device_t dev; TAILQ_FOREACH(dev, &bus_data_devices, devlink) { if (dev->nameunit != NULL && strcmp(dev->nameunit, name) == 0) return (dev); } return (NULL); } /* * /dev/devctl2 implementation. The existing /dev/devctl device has * implicit semantics on open, so it could not be reused for this. * Another option would be to call this /dev/bus? */ static int find_device(struct devreq *req, device_t *devp) { device_t dev; /* * First, ensure that the name is nul terminated. */ if (memchr(req->dr_name, '\0', sizeof(req->dr_name)) == NULL) return (EINVAL); /* * Second, try to find an attached device whose name matches * 'name'. */ dev = device_lookup_by_name(req->dr_name); if (dev != NULL) { *devp = dev; return (0); } /* Finally, give device enumerators a chance. 
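 * Such enumerators hook the dev_lookup eventhandler; a minimal sketch,
 * with the handler name hypothetical:
 *
 *	static void
 *	my_dev_lookup(void *arg, const char *name, device_t *result);
 *
 *	EVENTHANDLER_REGISTER(dev_lookup, my_dev_lookup, NULL,
 *	    EVENTHANDLER_PRI_ANY);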
*/ dev = NULL; EVENTHANDLER_INVOKE(dev_lookup, req->dr_name, &dev); if (dev == NULL) return (ENOENT); *devp = dev; return (0); } static bool driver_exists(device_t bus, const char *driver) { devclass_t dc; for (dc = bus->devclass; dc != NULL; dc = dc->parent) { if (devclass_find_driver_internal(dc, driver) != NULL) return (true); } return (false); } static int devctl2_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct devreq *req; device_t dev; int error, old; /* Locate the device to control. */ mtx_lock(&Giant); req = (struct devreq *)data; switch (cmd) { case DEV_ATTACH: case DEV_DETACH: case DEV_ENABLE: case DEV_DISABLE: case DEV_SUSPEND: case DEV_RESUME: case DEV_SET_DRIVER: case DEV_RESCAN: case DEV_DELETE: error = priv_check(td, PRIV_DRIVER); if (error == 0) error = find_device(req, &dev); break; default: error = ENOTTY; break; } if (error) { mtx_unlock(&Giant); return (error); } /* Perform the requested operation. */ switch (cmd) { case DEV_ATTACH: if (device_is_attached(dev) && (dev->flags & DF_REBID) == 0) error = EBUSY; else if (!device_is_enabled(dev)) error = ENXIO; else error = device_probe_and_attach(dev); break; case DEV_DETACH: if (!device_is_attached(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } error = device_detach(dev); break; case DEV_ENABLE: if (device_is_enabled(dev)) { error = EBUSY; break; } /* * If the device has been probed but not attached (e.g. * when it has been disabled by a loader hint), just * attach the device rather than doing a full probe. */ device_enable(dev); if (device_is_alive(dev)) { /* * If the device was disabled via a hint, clear * the hint. */ if (resource_disabled(dev->driver->name, dev->unit)) resource_unset_value(dev->driver->name, dev->unit, "disabled"); error = device_attach(dev); } else error = device_probe_and_attach(dev); break; case DEV_DISABLE: if (!device_is_enabled(dev)) { error = ENXIO; break; } if (!(req->dr_flags & DEVF_FORCE_DETACH)) { error = device_quiesce(dev); if (error) break; } /* * Force DF_FIXEDCLASS on around detach to preserve * the existing name. */ old = dev->flags; dev->flags |= DF_FIXEDCLASS; error = device_detach(dev); if (!(old & DF_FIXEDCLASS)) dev->flags &= ~DF_FIXEDCLASS; if (error == 0) device_disable(dev); break; case DEV_SUSPEND: if (device_is_suspended(dev)) { error = EBUSY; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_SUSPEND_CHILD(device_get_parent(dev), dev); break; case DEV_RESUME: if (!device_is_suspended(dev)) { error = EINVAL; break; } if (device_get_parent(dev) == NULL) { error = EINVAL; break; } error = BUS_RESUME_CHILD(device_get_parent(dev), dev); break; case DEV_SET_DRIVER: { devclass_t dc; char driver[128]; error = copyinstr(req->dr_data, driver, sizeof(driver), NULL); if (error) break; if (driver[0] == '\0') { error = EINVAL; break; } if (dev->devclass != NULL && strcmp(driver, dev->devclass->name) == 0) /* XXX: Could possibly force DF_FIXEDCLASS on? */ break; /* * Scan drivers for this device's bus looking for at * least one matching driver. */ if (dev->parent == NULL) { error = EINVAL; break; } if (!driver_exists(dev->parent, driver)) { error = ENOENT; break; } dc = devclass_create(driver); if (dc == NULL) { error = ENOMEM; break; } /* Detach device if necessary. 
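 * (Userland reaches this path through devctl(8); a command along the
 * lines of "devctl set driver -f foo0 bar", with hypothetical device
 * and driver names, sets DEVF_SET_DRIVER_DETACH in dr_flags.)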
*/ if (device_is_attached(dev)) { if (req->dr_flags & DEVF_SET_DRIVER_DETACH) error = device_detach(dev); else error = EBUSY; if (error) break; } /* Clear any previously-fixed device class and unit. */ if (dev->flags & DF_FIXEDCLASS) devclass_delete_device(dev->devclass, dev); dev->flags |= DF_WILDCARD; dev->unit = -1; /* Force the new device class. */ error = devclass_add_device(dc, dev); if (error) break; dev->flags |= DF_FIXEDCLASS; error = device_probe_and_attach(dev); break; } case DEV_RESCAN: if (!device_is_attached(dev)) { error = ENXIO; break; } error = BUS_RESCAN(dev); break; case DEV_DELETE: { device_t parent; parent = device_get_parent(dev); if (parent == NULL) { error = EINVAL; break; } if (!(req->dr_flags & DEVF_FORCE_DELETE)) { if (bus_child_present(dev) != 0) { error = EBUSY; break; } } error = device_delete_child(parent, dev); break; } } mtx_unlock(&Giant); return (error); } static struct cdevsw devctl2_cdevsw = { .d_version = D_VERSION, .d_ioctl = devctl2_ioctl, .d_name = "devctl2", }; static void devctl2_init(void) { make_dev_credf(MAKEDEV_ETERNAL, &devctl2_cdevsw, 0, NULL, UID_ROOT, GID_WHEEL, 0600, "devctl2"); } Index: head/sys/mips/nlm/dev/net/xlpge.c =================================================================== --- head/sys/mips/nlm/dev/net/xlpge.c (revision 303889) +++ head/sys/mips/nlm/dev/net/xlpge.c (revision 303890) @@ -1,1541 +1,1541 @@ /*- * Copyright (c) 2003-2012 Broadcom Corporation * All Rights Reserved * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define __RMAN_RESOURCE_VISIBLE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for DELAY */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miidevs.h" #include #include "miibus_if.h" #include #include /*#define XLP_DRIVER_LOOPBACK*/ static struct nae_port_config nae_port_config[64]; int poe_cl_tbl[MAX_POE_CLASSES] = { 0x0, 0x249249, 0x492492, 0x6db6db, 0x924924, 0xb6db6d, 0xdb6db6, 0xffffff }; /* #define DUMP_PACKET */ static uint64_t nlm_paddr_ld(uint64_t paddr) { uint64_t xkaddr = 0x9800000000000000 | paddr; return (nlm_load_dword_daddr(xkaddr)); } struct nlm_xlp_portdata ifp_ports[64]; static uma_zone_t nl_tx_desc_zone; /* This implementation will register the following tree of device * registration: * pcibus * | * xlpnae (1 instance - virtual entity) * | * xlpge * (18 sgmii / 4 xaui / 2 interlaken instances) * | * miibus */ static int nlm_xlpnae_probe(device_t); static int nlm_xlpnae_attach(device_t); static int nlm_xlpnae_detach(device_t); static int nlm_xlpnae_suspend(device_t); static int nlm_xlpnae_resume(device_t); static int nlm_xlpnae_shutdown(device_t); static device_method_t nlm_xlpnae_methods[] = { /* Methods from the device interface */ DEVMETHOD(device_probe, nlm_xlpnae_probe), DEVMETHOD(device_attach, nlm_xlpnae_attach), DEVMETHOD(device_detach, nlm_xlpnae_detach), DEVMETHOD(device_suspend, nlm_xlpnae_suspend), DEVMETHOD(device_resume, nlm_xlpnae_resume), DEVMETHOD(device_shutdown, nlm_xlpnae_shutdown), DEVMETHOD(bus_driver_added, bus_generic_driver_added), DEVMETHOD_END }; static driver_t nlm_xlpnae_driver = { "xlpnae", nlm_xlpnae_methods, sizeof(struct nlm_xlpnae_softc) }; static devclass_t nlm_xlpnae_devclass; static int nlm_xlpge_probe(device_t); static int nlm_xlpge_attach(device_t); static int nlm_xlpge_detach(device_t); static int nlm_xlpge_suspend(device_t); static int nlm_xlpge_resume(device_t); static int nlm_xlpge_shutdown(device_t); /* mii override functions */ -static int nlm_xlpge_mii_read(struct device *, int, int); -static int nlm_xlpge_mii_write(struct device *, int, int, int); +static int nlm_xlpge_mii_read(device_t, int, int); +static int nlm_xlpge_mii_write(device_t, int, int, int); static void nlm_xlpge_mii_statchg(device_t); static device_method_t nlm_xlpge_methods[] = { /* Methods from the device interface */ DEVMETHOD(device_probe, nlm_xlpge_probe), DEVMETHOD(device_attach, nlm_xlpge_attach), DEVMETHOD(device_detach, nlm_xlpge_detach), DEVMETHOD(device_suspend, nlm_xlpge_suspend), DEVMETHOD(device_resume, nlm_xlpge_resume), DEVMETHOD(device_shutdown, nlm_xlpge_shutdown), /* Methods from the nexus bus needed for explicitly * probing children when driver is loaded as a kernel module */ DEVMETHOD(miibus_readreg, nlm_xlpge_mii_read), DEVMETHOD(miibus_writereg, nlm_xlpge_mii_write), DEVMETHOD(miibus_statchg, nlm_xlpge_mii_statchg), /* Terminate method list */ DEVMETHOD_END }; static driver_t nlm_xlpge_driver = { "xlpge", nlm_xlpge_methods, sizeof(struct nlm_xlpge_softc) }; static devclass_t nlm_xlpge_devclass; DRIVER_MODULE(xlpnae, pci, nlm_xlpnae_driver, nlm_xlpnae_devclass, 0, 
0); DRIVER_MODULE(xlpge, xlpnae, nlm_xlpge_driver, nlm_xlpge_devclass, 0, 0); DRIVER_MODULE(miibus, xlpge, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(pci, xlpnae, 1, 1, 1); MODULE_DEPEND(xlpnae, xlpge, 1, 1, 1); MODULE_DEPEND(xlpge, ether, 1, 1, 1); MODULE_DEPEND(xlpge, miibus, 1, 1, 1); #define SGMII_RCV_CONTEXT_WIDTH 8 /* prototypes */ static void nlm_xlpge_msgring_handler(int vc, int size, int code, int srcid, struct nlm_fmn_msg *msg, void *data); static void nlm_xlpge_submit_rx_free_desc(struct nlm_xlpge_softc *sc, int num); static void nlm_xlpge_init(void *addr); static void nlm_xlpge_port_disable(struct nlm_xlpge_softc *sc); static void nlm_xlpge_port_enable(struct nlm_xlpge_softc *sc); /* globals */ int dbg_on = 1; int cntx2port[524]; static __inline void atomic_incr_long(unsigned long *addr) { atomic_add_long(addr, 1); } /* * xlpnae driver implementation */ static int nlm_xlpnae_probe(device_t dev) { if (pci_get_vendor(dev) != PCI_VENDOR_NETLOGIC || pci_get_device(dev) != PCI_DEVICE_ID_NLM_NAE) return (ENXIO); return (BUS_PROBE_DEFAULT); } static void nlm_xlpnae_print_frin_desc_carving(struct nlm_xlpnae_softc *sc) { int intf; uint32_t value; int start, size; /* XXXJC: use max_ports instead of 20 ? */ for (intf = 0; intf < 20; intf++) { nlm_write_nae_reg(sc->base, NAE_FREE_IN_FIFO_CFG, (0x80000000 | intf)); value = nlm_read_nae_reg(sc->base, NAE_FREE_IN_FIFO_CFG); size = 2 * ((value >> 20) & 0x3ff); start = 2 * ((value >> 8) & 0x1ff); } } static void nlm_config_egress(struct nlm_xlpnae_softc *sc, int nblock, int context_base, int hwport, int max_channels) { int offset, num_channels; uint32_t data; num_channels = sc->portcfg[hwport].num_channels; data = (2048 << 12) | (hwport << 4) | 1; nlm_write_nae_reg(sc->base, NAE_TX_IF_BURSTMAX_CMD, data); data = ((context_base + num_channels - 1) << 22) | (context_base << 12) | (hwport << 4) | 1; nlm_write_nae_reg(sc->base, NAE_TX_DDR_ACTVLIST_CMD, data); config_egress_fifo_carvings(sc->base, hwport, context_base, num_channels, max_channels, sc->portcfg); config_egress_fifo_credits(sc->base, hwport, context_base, num_channels, max_channels, sc->portcfg); data = nlm_read_nae_reg(sc->base, NAE_DMA_TX_CREDIT_TH); data |= (1 << 25) | (1 << 24); nlm_write_nae_reg(sc->base, NAE_DMA_TX_CREDIT_TH, data); for (offset = 0; offset < num_channels; offset++) { nlm_write_nae_reg(sc->base, NAE_TX_SCHED_MAP_CMD1, NAE_DRR_QUANTA); data = (hwport << 15) | ((context_base + offset) << 5); if (sc->cmplx_type[nblock] == ILC) data |= (offset << 20); nlm_write_nae_reg(sc->base, NAE_TX_SCHED_MAP_CMD0, data | 1); nlm_write_nae_reg(sc->base, NAE_TX_SCHED_MAP_CMD0, data); } } static int xlpnae_get_maxchannels(struct nlm_xlpnae_softc *sc) { int maxchans = 0; int i; for (i = 0; i < sc->max_ports; i++) { if (sc->portcfg[i].type == UNKNOWN) continue; maxchans += sc->portcfg[i].num_channels; } return (maxchans); } static void nlm_setup_interface(struct nlm_xlpnae_softc *sc, int nblock, int port, uint32_t cur_flow_base, uint32_t flow_mask, int max_channels, int context) { uint64_t nae_base = sc->base; int mtu = 1536; /* XXXJC: don't hard code */ uint32_t ucore_mask; if (sc->cmplx_type[nblock] == XAUIC) nlm_config_xaui(nae_base, nblock, mtu, mtu, sc->portcfg[port].vlan_pri_en); nlm_config_freein_fifo_uniq_cfg(nae_base, port, sc->portcfg[port].free_desc_sizes); nlm_config_ucore_iface_mask_cfg(nae_base, port, sc->portcfg[port].ucore_mask); nlm_program_flow_cfg(nae_base, port, cur_flow_base, flow_mask); if (sc->cmplx_type[nblock] == SGMIIC) 
nlm_configure_sgmii_interface(nae_base, nblock, port, mtu, 0); nlm_config_egress(sc, nblock, context, port, max_channels); nlm_nae_init_netior(nae_base, sc->nblocks); nlm_nae_open_if(nae_base, nblock, sc->cmplx_type[nblock], port, sc->portcfg[port].free_desc_sizes); /* XXXJC: check mask calculation */ ucore_mask = (1 << sc->nucores) - 1; nlm_nae_init_ucore(nae_base, port, ucore_mask); } static void nlm_setup_interfaces(struct nlm_xlpnae_softc *sc) { uint64_t nae_base; uint32_t cur_slot, cur_slot_base; uint32_t cur_flow_base, port, flow_mask; int max_channels; int i, context; cur_slot = 0; cur_slot_base = 0; cur_flow_base = 0; nae_base = sc->base; flow_mask = nlm_get_flow_mask(sc->total_num_ports); /* calculate max_channels */ max_channels = xlpnae_get_maxchannels(sc); port = 0; context = 0; for (i = 0; i < sc->max_ports; i++) { if (sc->portcfg[i].type == UNKNOWN) continue; nlm_setup_interface(sc, sc->portcfg[i].block, i, cur_flow_base, flow_mask, max_channels, context); cur_flow_base += sc->per_port_num_flows; context += sc->portcfg[i].num_channels; } } static void nlm_xlpnae_init(int node, struct nlm_xlpnae_softc *sc) { uint64_t nae_base; uint32_t ucoremask = 0; uint32_t val; int i; nae_base = sc->base; nlm_nae_flush_free_fifo(nae_base, sc->nblocks); nlm_deflate_frin_fifo_carving(nae_base, sc->max_ports); nlm_reset_nae(node); for (i = 0; i < sc->nucores; i++) /* XXXJC: code repeated below */ ucoremask |= (0x1 << i); printf("Loading 0x%x ucores with microcode\n", ucoremask); nlm_ucore_load_all(nae_base, ucoremask, 1); val = nlm_set_device_frequency(node, DFS_DEVICE_NAE, sc->freq); printf("Setup NAE frequency to %dMHz\n", val); nlm_mdio_reset_all(nae_base); printf("Initialize SGMII PCS for blocks 0x%x\n", sc->sgmiimask); nlm_sgmii_pcs_init(nae_base, sc->sgmiimask); printf("Initialize XAUI PCS for blocks 0x%x\n", sc->xauimask); nlm_xaui_pcs_init(nae_base, sc->xauimask); /* clear NETIOR soft reset */ nlm_write_nae_reg(nae_base, NAE_LANE_CFG_SOFTRESET, 0x0); /* Disable RX enable bit in RX_CONFIG */ val = nlm_read_nae_reg(nae_base, NAE_RX_CONFIG); val &= 0xfffffffe; nlm_write_nae_reg(nae_base, NAE_RX_CONFIG, val); if (nlm_is_xlp8xx_ax() == 0) { val = nlm_read_nae_reg(nae_base, NAE_TX_CONFIG); val &= ~(1 << 3); nlm_write_nae_reg(nae_base, NAE_TX_CONFIG, val); } nlm_setup_poe_class_config(nae_base, MAX_POE_CLASSES, sc->ncontexts, poe_cl_tbl); nlm_setup_vfbid_mapping(nae_base); nlm_setup_flow_crc_poly(nae_base, sc->flow_crc_poly); nlm_setup_rx_cal_cfg(nae_base, sc->max_ports, sc->portcfg); /* note: xlp8xx Ax does not have Tx Calendaring */ if (!nlm_is_xlp8xx_ax()) nlm_setup_tx_cal_cfg(nae_base, sc->max_ports, sc->portcfg); nlm_setup_interfaces(sc); nlm_config_poe(sc->poe_base, sc->poedv_base); if (sc->hw_parser_en) nlm_enable_hardware_parser(nae_base); if (sc->prepad_en) nlm_prepad_enable(nae_base, sc->prepad_size); if (sc->ieee_1588_en) nlm_setup_1588_timer(sc->base, sc->portcfg); } static void nlm_xlpnae_update_pde(void *dummy __unused) { struct nlm_xlpnae_softc *sc; uint32_t dv[NUM_WORDS_PER_DV]; device_t dev; int vec; dev = devclass_get_device(devclass_find("xlpnae"), 0); sc = device_get_softc(dev); nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 0); for (vec = 0; vec < NUM_DIST_VEC; vec++) { if (nlm_get_poe_distvec(vec, dv) != 0) continue; nlm_write_poe_distvec(sc->poedv_base, vec, dv); } nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 1); } SYSINIT(nlm_xlpnae_update_pde, SI_SUB_SMP, SI_ORDER_ANY, nlm_xlpnae_update_pde, NULL); /* configuration common for sgmii, xaui, interlaken goes here */ static void
nlm_setup_portcfg(struct nlm_xlpnae_softc *sc, struct xlp_nae_ivars *naep, int block, int port) { int i; uint32_t ucore_mask = 0; struct xlp_block_ivars *bp; struct xlp_port_ivars *p; bp = &(naep->block_ivars[block]); p = &(bp->port_ivars[port & 0x3]); sc->portcfg[port].node = p->node; sc->portcfg[port].block = p->block; sc->portcfg[port].port = p->port; sc->portcfg[port].type = p->type; sc->portcfg[port].mdio_bus = p->mdio_bus; sc->portcfg[port].phy_addr = p->phy_addr; sc->portcfg[port].loopback_mode = p->loopback_mode; sc->portcfg[port].num_channels = p->num_channels; if (p->free_desc_sizes != MCLBYTES) { printf("[%d, %d] Error: free_desc_sizes %d != %d\n", block, port, p->free_desc_sizes, MCLBYTES); return; } sc->portcfg[port].free_desc_sizes = p->free_desc_sizes; for (i = 0; i < sc->nucores; i++) /* XXXJC: configure this */ ucore_mask |= (0x1 << i); sc->portcfg[port].ucore_mask = ucore_mask; sc->portcfg[port].vlan_pri_en = p->vlan_pri_en; sc->portcfg[port].num_free_descs = p->num_free_descs; sc->portcfg[port].iface_fifo_size = p->iface_fifo_size; sc->portcfg[port].rxbuf_size = p->rxbuf_size; sc->portcfg[port].rx_slots_reqd = p->rx_slots_reqd; sc->portcfg[port].tx_slots_reqd = p->tx_slots_reqd; sc->portcfg[port].pseq_fifo_size = p->pseq_fifo_size; sc->portcfg[port].stg2_fifo_size = p->stg2_fifo_size; sc->portcfg[port].eh_fifo_size = p->eh_fifo_size; sc->portcfg[port].frout_fifo_size = p->frout_fifo_size; sc->portcfg[port].ms_fifo_size = p->ms_fifo_size; sc->portcfg[port].pkt_fifo_size = p->pkt_fifo_size; sc->portcfg[port].pktlen_fifo_size = p->pktlen_fifo_size; sc->portcfg[port].max_stg2_offset = p->max_stg2_offset; sc->portcfg[port].max_eh_offset = p->max_eh_offset; sc->portcfg[port].max_frout_offset = p->max_frout_offset; sc->portcfg[port].max_ms_offset = p->max_ms_offset; sc->portcfg[port].max_pmem_offset = p->max_pmem_offset; sc->portcfg[port].stg1_2_credit = p->stg1_2_credit; sc->portcfg[port].stg2_eh_credit = p->stg2_eh_credit; sc->portcfg[port].stg2_frout_credit = p->stg2_frout_credit; sc->portcfg[port].stg2_ms_credit = p->stg2_ms_credit; sc->portcfg[port].ieee1588_inc_intg = p->ieee1588_inc_intg; sc->portcfg[port].ieee1588_inc_den = p->ieee1588_inc_den; sc->portcfg[port].ieee1588_inc_num = p->ieee1588_inc_num; sc->portcfg[port].ieee1588_userval = p->ieee1588_userval; sc->portcfg[port].ieee1588_ptpoff = p->ieee1588_ptpoff; sc->portcfg[port].ieee1588_tmr1 = p->ieee1588_tmr1; sc->portcfg[port].ieee1588_tmr2 = p->ieee1588_tmr2; sc->portcfg[port].ieee1588_tmr3 = p->ieee1588_tmr3; sc->total_free_desc += sc->portcfg[port].free_desc_sizes; sc->total_num_ports++; } static int nlm_xlpnae_attach(device_t dev) { struct xlp_nae_ivars *nae_ivars; struct nlm_xlpnae_softc *sc; device_t tmpd; uint32_t dv[NUM_WORDS_PER_DV]; int port, i, j, nchan, nblock, node, qstart, qnum; int offset, context, txq_base, rxvcbase; uint64_t poe_pcibase, nae_pcibase; node = pci_get_slot(dev) / 8; nae_ivars = &xlp_board_info.nodes[node].nae_ivars; sc = device_get_softc(dev); sc->xlpnae_dev = dev; sc->node = nae_ivars->node; sc->base = nlm_get_nae_regbase(sc->node); sc->poe_base = nlm_get_poe_regbase(sc->node); sc->poedv_base = nlm_get_poedv_regbase(sc->node); sc->portcfg = nae_port_config; sc->blockmask = nae_ivars->blockmask; sc->ilmask = nae_ivars->ilmask; sc->xauimask = nae_ivars->xauimask; sc->sgmiimask = nae_ivars->sgmiimask; sc->nblocks = nae_ivars->nblocks; sc->freq = nae_ivars->freq; /* flow table generation is done by CRC16 polynomial */ sc->flow_crc_poly = nae_ivars->flow_crc_poly; sc->hw_parser_en = 
nae_ivars->hw_parser_en; sc->prepad_en = nae_ivars->prepad_en; sc->prepad_size = nae_ivars->prepad_size; sc->ieee_1588_en = nae_ivars->ieee_1588_en; nae_pcibase = nlm_get_nae_pcibase(sc->node); sc->ncontexts = nlm_read_reg(nae_pcibase, XLP_PCI_DEVINFO_REG5); sc->nucores = nlm_num_uengines(nae_pcibase); for (nblock = 0; nblock < sc->nblocks; nblock++) { sc->cmplx_type[nblock] = nae_ivars->block_ivars[nblock].type; sc->portmask[nblock] = nae_ivars->block_ivars[nblock].portmask; } for (i = 0; i < sc->ncontexts; i++) cntx2port[i] = 18; /* 18 is an invalid port */ if (sc->nblocks == 5) sc->max_ports = 18; /* 8xx has a block 4 with 2 ports */ else sc->max_ports = sc->nblocks * PORTS_PER_CMPLX; for (i = 0; i < sc->max_ports; i++) sc->portcfg[i].type = UNKNOWN; /* Port Not Present */ /* * Now setup all internal fifo carvings based on * total number of ports in the system */ sc->total_free_desc = 0; sc->total_num_ports = 0; port = 0; context = 0; txq_base = nlm_qidstart(nae_pcibase); rxvcbase = txq_base + sc->ncontexts; for (i = 0; i < sc->nblocks; i++) { uint32_t portmask; if ((nae_ivars->blockmask & (1 << i)) == 0) { port += 4; continue; } portmask = nae_ivars->block_ivars[i].portmask; for (j = 0; j < PORTS_PER_CMPLX; j++, port++) { if ((portmask & (1 << j)) == 0) continue; nlm_setup_portcfg(sc, nae_ivars, i, port); nchan = sc->portcfg[port].num_channels; for (offset = 0; offset < nchan; offset++) cntx2port[context + offset] = port; sc->portcfg[port].txq = txq_base + context; sc->portcfg[port].rxfreeq = rxvcbase + port; context += nchan; } } poe_pcibase = nlm_get_poe_pcibase(sc->node); sc->per_port_num_flows = nlm_poe_max_flows(poe_pcibase) / sc->total_num_ports; /* zone for P2P descriptors */ nl_tx_desc_zone = uma_zcreate("NL Tx Desc", sizeof(struct xlpge_tx_desc), NULL, NULL, NULL, NULL, NAE_CACHELINE_SIZE, 0); /* NAE FMN messages have CMS src station id's in the * range of qstart to qnum. */ qstart = nlm_qidstart(nae_pcibase); qnum = nlm_qnum(nae_pcibase); if (register_msgring_handler(qstart, qstart + qnum - 1, nlm_xlpge_msgring_handler, sc)) { panic("Couldn't register NAE msgring handler\n"); } /* POE FMN messages have CMS src station id's in the * range of qstart to qnum. 
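 * (Same pattern as the NAE handler registration just above: the
 * qstart/qnum pair is read from the block's PCI config space via
 * nlm_qidstart()/nlm_qnum().)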
*/ qstart = nlm_qidstart(poe_pcibase); qnum = nlm_qnum(poe_pcibase); if (register_msgring_handler(qstart, qstart + qnum - 1, nlm_xlpge_msgring_handler, sc)) { panic("Couldn't register POE msgring handler\n"); } nlm_xlpnae_init(node, sc); for (i = 0; i < sc->max_ports; i++) { char desc[32]; int block, port; if (sc->portcfg[i].type == UNKNOWN) continue; block = sc->portcfg[i].block; port = sc->portcfg[i].port; tmpd = device_add_child(dev, "xlpge", i); device_set_ivars(tmpd, &(nae_ivars->block_ivars[block].port_ivars[port])); sprintf(desc, "XLP NAE Port %d,%d", block, port); device_set_desc_copy(tmpd, desc); } nlm_setup_iface_fifo_cfg(sc->base, sc->max_ports, sc->portcfg); nlm_setup_rx_base_config(sc->base, sc->max_ports, sc->portcfg); nlm_setup_rx_buf_config(sc->base, sc->max_ports, sc->portcfg); nlm_setup_freein_fifo_cfg(sc->base, sc->portcfg); nlm_program_nae_parser_seq_fifo(sc->base, sc->max_ports, sc->portcfg); nlm_xlpnae_print_frin_desc_carving(sc); bus_generic_probe(dev); bus_generic_attach(dev); /* * Enable only boot cpu at this point, full distribution comes * only after SMP is started */ nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 0); nlm_calc_poe_distvec(0x1, 0, 0, 0, 0x1 << XLPGE_RX_VC, dv); nlm_write_poe_distvec(sc->poedv_base, 0, dv); nlm_write_poe_reg(sc->poe_base, POE_DISTR_EN, 1); return (0); } static int nlm_xlpnae_detach(device_t dev) { /* TODO - free zone here */ return (0); } static int nlm_xlpnae_suspend(device_t dev) { return (0); } static int nlm_xlpnae_resume(device_t dev) { return (0); } static int nlm_xlpnae_shutdown(device_t dev) { return (0); } /* * xlpge driver implementation */ static void nlm_xlpge_mac_set_rx_mode(struct nlm_xlpge_softc *sc) { if (sc->if_flags & IFF_PROMISC) { if (sc->type == SGMIIC) nlm_nae_setup_rx_mode_sgmii(sc->base_addr, sc->block, sc->port, sc->type, 1 /* broadcast */, 1/* multicast */, 0 /* pause */, 1 /* promisc */); else nlm_nae_setup_rx_mode_xaui(sc->base_addr, sc->block, sc->port, sc->type, 1 /* broadcast */, 1/* multicast */, 0 /* pause */, 1 /* promisc */); } else { if (sc->type == SGMIIC) nlm_nae_setup_rx_mode_sgmii(sc->base_addr, sc->block, sc->port, sc->type, 1 /* broadcast */, 1/* multicast */, 0 /* pause */, 0 /* promisc */); else nlm_nae_setup_rx_mode_xaui(sc->base_addr, sc->block, sc->port, sc->type, 1 /* broadcast */, 1/* multicast */, 0 /* pause */, 0 /* promisc */); } } static int nlm_xlpge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct mii_data *mii; struct nlm_xlpge_softc *sc; struct ifreq *ifr; int error; sc = ifp->if_softc; error = 0; ifr = (struct ifreq *)data; switch (command) { case SIOCSIFFLAGS: XLPGE_LOCK(sc); sc->if_flags = ifp->if_flags; if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) nlm_xlpge_init(sc); else nlm_xlpge_port_enable(sc); nlm_xlpge_mac_set_rx_mode(sc); sc->link = NLM_LINK_UP; } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) nlm_xlpge_port_disable(sc); sc->link = NLM_LINK_DOWN; } XLPGE_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->mii_bus != NULL) { mii = device_get_softc(sc->mii_bus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int xlpge_tx(struct ifnet *ifp, struct mbuf *mbuf_chain) { struct nlm_fmn_msg msg; struct xlpge_tx_desc *p2p; struct nlm_xlpge_softc *sc; struct mbuf *m; vm_paddr_t paddr; int fbid, dst, pos, err; int ret = 0, tx_msgstatus, retries; err = 0; if (mbuf_chain == NULL) return (0); sc = 
ifp->if_softc; p2p = NULL; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || ifp->if_drv_flags & IFF_DRV_OACTIVE) { err = ENXIO; goto fail; } /* free a few incoming messages on the fb vc */ xlp_handle_msg_vc(1 << XLPGE_FB_VC, 2); /* vfb id table is set up to map cpu to vc 3 of the cpu */ fbid = nlm_cpuid(); dst = sc->txq; pos = 0; p2p = uma_zalloc(nl_tx_desc_zone, M_NOWAIT); if (p2p == NULL) { printf("alloc fail\n"); err = ENOBUFS; goto fail; } for (m = mbuf_chain; m != NULL; m = m->m_next) { vm_offset_t buf = (vm_offset_t) m->m_data; int len = m->m_len; int frag_sz; uint64_t desc; /*printf("m_data = %p len %d\n", m->m_data, len); */ while (len) { if (pos == XLP_NTXFRAGS - 3) { device_printf(sc->xlpge_dev, "packet defrag %d\n", m_length(mbuf_chain, NULL)); err = ENOBUFS; /* TODO fix error */ goto fail; } paddr = vtophys(buf); frag_sz = PAGE_SIZE - (buf & PAGE_MASK); if (len < frag_sz) frag_sz = len; desc = nae_tx_desc(P2D_NEOP, 0, 127, frag_sz, paddr); p2p->frag[pos] = htobe64(desc); pos++; len -= frag_sz; buf += frag_sz; } } KASSERT(pos != 0, ("Zero-length mbuf chain?\n")); /* Make the last one P2D EOP */ p2p->frag[pos-1] |= htobe64((uint64_t)P2D_EOP << 62); /* stash useful pointers in the desc */ p2p->frag[XLP_NTXFRAGS-3] = 0xf00bad; p2p->frag[XLP_NTXFRAGS-2] = (uintptr_t)p2p; p2p->frag[XLP_NTXFRAGS-1] = (uintptr_t)mbuf_chain; paddr = vtophys(p2p); msg.msg[0] = nae_tx_desc(P2P, 0, fbid, pos, paddr); for (retries = 16; retries > 0; retries--) { ret = nlm_fmn_msgsend(dst, 1, FMN_SWCODE_NAE, &msg); if (ret == 0) return (0); } fail: if (ret != 0) { tx_msgstatus = nlm_read_c2_txmsgstatus(); if ((tx_msgstatus >> 24) & 0x1) device_printf(sc->xlpge_dev, "Transmit queue full - "); if ((tx_msgstatus >> 3) & 0x1) device_printf(sc->xlpge_dev, "ECC error - "); if ((tx_msgstatus >> 2) & 0x1) device_printf(sc->xlpge_dev, "Pending Sync - "); if ((tx_msgstatus >> 1) & 0x1) device_printf(sc->xlpge_dev, "Insufficient input queue credits - "); if (tx_msgstatus & 0x1) device_printf(sc->xlpge_dev, "Insufficient output queue credits - "); } device_printf(sc->xlpge_dev, "Send failed!
err = %d\n", err); if (p2p) uma_zfree(nl_tx_desc_zone, p2p); m_freem(mbuf_chain); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); return (err); } static int nlm_xlpge_gmac_config_speed(struct nlm_xlpge_softc *sc) { struct mii_data *mii; if (sc->type == XAUIC || sc->type == ILC) return (0); if (sc->mii_bus) { mii = device_get_softc(sc->mii_bus); mii_pollstat(mii); } return (0); } static void nlm_xlpge_port_disable(struct nlm_xlpge_softc *sc) { struct ifnet *ifp; ifp = sc->xlpge_if; ifp->if_drv_flags &= ~IFF_DRV_RUNNING; callout_stop(&sc->xlpge_callout); nlm_mac_disable(sc->base_addr, sc->block, sc->type, sc->port); } static void nlm_mii_pollstat(void *arg) { struct nlm_xlpge_softc *sc = (struct nlm_xlpge_softc *)arg; struct mii_data *mii = NULL; if (sc->mii_bus) { mii = device_get_softc(sc->mii_bus); KASSERT(mii != NULL, ("mii ptr is NULL")); mii_pollstat(mii); callout_reset(&sc->xlpge_callout, hz, nlm_mii_pollstat, sc); } } static void nlm_xlpge_port_enable(struct nlm_xlpge_softc *sc) { if ((sc->type != SGMIIC) && (sc->type != XAUIC)) return; nlm_mac_enable(sc->base_addr, sc->block, sc->type, sc->port); nlm_mii_pollstat((void *)sc); } static void nlm_xlpge_init(void *addr) { struct nlm_xlpge_softc *sc; struct ifnet *ifp; struct mii_data *mii = NULL; sc = (struct nlm_xlpge_softc *)addr; ifp = sc->xlpge_if; if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; if (sc->mii_bus) { mii = device_get_softc(sc->mii_bus); mii_mediachg(mii); } nlm_xlpge_gmac_config_speed(sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; nlm_xlpge_port_enable(sc); /* start the callout */ callout_reset(&sc->xlpge_callout, hz, nlm_mii_pollstat, sc); } /* * Read the MAC address from FDT or board eeprom. */ static void xlpge_read_mac_addr(struct nlm_xlpge_softc *sc) { xlpge_get_macaddr(sc->dev_addr); /* last octet is port specific */ sc->dev_addr[5] += (sc->block * 4) + sc->port; if (sc->type == SGMIIC) nlm_nae_setup_mac_addr_sgmii(sc->base_addr, sc->block, sc->port, sc->type, sc->dev_addr); else if (sc->type == XAUIC) nlm_nae_setup_mac_addr_xaui(sc->base_addr, sc->block, sc->port, sc->type, sc->dev_addr); } static int xlpge_mediachange(struct ifnet *ifp) { return (0); } static void xlpge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct nlm_xlpge_softc *sc; struct mii_data *md; md = NULL; sc = ifp->if_softc; if (sc->mii_bus) md = device_get_softc(sc->mii_bus); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (sc->link == NLM_LINK_DOWN) return; if (md != NULL) ifmr->ifm_active = md->mii_media.ifm_cur->ifm_media; ifmr->ifm_status |= IFM_ACTIVE; } static int nlm_xlpge_ifinit(struct nlm_xlpge_softc *sc) { struct ifnet *ifp; device_t dev; int port = sc->block * 4 + sc->port; dev = sc->xlpge_dev; ifp = sc->xlpge_if = if_alloc(IFT_ETHER); /*(sc->network_sc)->ifp_ports[port].xlpge_if = ifp;*/ ifp_ports[port].xlpge_if = ifp; if (ifp == NULL) { device_printf(dev, "cannot if_alloc()\n"); return (ENOSPC); } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; sc->if_flags = ifp->if_flags; /*ifp->if_capabilities = IFCAP_TXCSUM | IFCAP_VLAN_HWTAGGING;*/ ifp->if_capabilities = 0; ifp->if_capenable = ifp->if_capabilities; ifp->if_ioctl = nlm_xlpge_ioctl; ifp->if_init = nlm_xlpge_init; ifp->if_hwassist = 0; ifp->if_snd.ifq_drv_maxlen = NLM_XLPGE_TXQ_SIZE; /* TODO: make this a sysint */ IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd);
ifmedia_init(&sc->xlpge_mii.mii_media, 0, xlpge_mediachange, xlpge_mediastatus); ifmedia_add(&sc->xlpge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->xlpge_mii.mii_media, IFM_ETHER | IFM_AUTO); sc->xlpge_mii.mii_media.ifm_media = sc->xlpge_mii.mii_media.ifm_cur->ifm_media; xlpge_read_mac_addr(sc); ether_ifattach(ifp, sc->dev_addr); /* override if_transmit : per ifnet(9), do it after if_attach */ ifp->if_transmit = xlpge_tx; return (0); } static int nlm_xlpge_probe(device_t dev) { return (BUS_PROBE_DEFAULT); } static void * get_buf(void) { struct mbuf *m_new; uint64_t *md; #ifdef INVARIANTS vm_paddr_t temp1, temp2; #endif if ((m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL) return (NULL); m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; KASSERT(((uintptr_t)m_new->m_data & (NAE_CACHELINE_SIZE - 1)) == 0, ("m_new->m_data is not cacheline aligned")); md = (uint64_t *)m_new->m_data; md[0] = (intptr_t)m_new; /* Back Ptr */ md[1] = 0xf00bad; m_adj(m_new, NAE_CACHELINE_SIZE); #ifdef INVARIANTS temp1 = vtophys((vm_offset_t) m_new->m_data); temp2 = vtophys((vm_offset_t) m_new->m_data + 1536); KASSERT((temp1 + 1536) == temp2, ("Alloced buffer is not contiguous")); #endif return ((void *)m_new->m_data); } static void nlm_xlpge_mii_init(device_t dev, struct nlm_xlpge_softc *sc) { int error; error = mii_attach(dev, &sc->mii_bus, sc->xlpge_if, xlpge_mediachange, xlpge_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr, MII_OFFSET_ANY, 0); if (error) { device_printf(dev, "attaching PHYs failed\n"); sc->mii_bus = NULL; } if (sc->mii_bus != NULL) { /* enable MDIO interrupts in the PHY */ /* XXXJC: TODO */ } } static int xlpge_stats_sysctl(SYSCTL_HANDLER_ARGS) { struct nlm_xlpge_softc *sc; uint32_t val; int reg, field; sc = arg1; field = arg2; reg = SGMII_STATS_MLR(sc->block, sc->port) + field; val = nlm_read_nae_reg(sc->base_addr, reg); return (sysctl_handle_int(oidp, &val, 0, req)); } static void nlm_xlpge_setup_stats_sysctl(device_t dev, struct nlm_xlpge_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *child; struct sysctl_oid *tree; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); #define XLPGE_STAT(name, offset, desc) \ SYSCTL_ADD_PROC(ctx, child, OID_AUTO, name, \ CTLTYPE_UINT | CTLFLAG_RD, sc, offset, \ xlpge_stats_sysctl, "IU", desc) XLPGE_STAT("tr127", nlm_sgmii_stats_tr127, "TxRx 64 - 127 Bytes"); XLPGE_STAT("tr255", nlm_sgmii_stats_tr255, "TxRx 128 - 255 Bytes"); XLPGE_STAT("tr511", nlm_sgmii_stats_tr511, "TxRx 256 - 511 Bytes"); XLPGE_STAT("tr1k", nlm_sgmii_stats_tr1k, "TxRx 512 - 1023 Bytes"); XLPGE_STAT("trmax", nlm_sgmii_stats_trmax, "TxRx 1024 - 1518 Bytes"); XLPGE_STAT("trmgv", nlm_sgmii_stats_trmgv, "TxRx 1519 - 1522 Bytes"); XLPGE_STAT("rbyt", nlm_sgmii_stats_rbyt, "Rx Bytes"); XLPGE_STAT("rpkt", nlm_sgmii_stats_rpkt, "Rx Packets"); XLPGE_STAT("rfcs", nlm_sgmii_stats_rfcs, "Rx FCS Error"); XLPGE_STAT("rmca", nlm_sgmii_stats_rmca, "Rx Multicast Packets"); XLPGE_STAT("rbca", nlm_sgmii_stats_rbca, "Rx Broadcast Packets"); XLPGE_STAT("rxcf", nlm_sgmii_stats_rxcf, "Rx Control Frames"); XLPGE_STAT("rxpf", nlm_sgmii_stats_rxpf, "Rx Pause Frames"); XLPGE_STAT("rxuo", nlm_sgmii_stats_rxuo, "Rx Unknown Opcode"); XLPGE_STAT("raln", nlm_sgmii_stats_raln, "Rx Alignment Errors"); XLPGE_STAT("rflr", nlm_sgmii_stats_rflr, "Rx Framelength Errors"); XLPGE_STAT("rcde", nlm_sgmii_stats_rcde, "Rx Code Errors"); XLPGE_STAT("rcse", nlm_sgmii_stats_rcse, "Rx Carrier Sense Errors"); XLPGE_STAT("rund", nlm_sgmii_stats_rund, "Rx 
Undersize Packet Errors"); XLPGE_STAT("rovr", nlm_sgmii_stats_rovr, "Rx Oversize Packet Errors"); XLPGE_STAT("rfrg", nlm_sgmii_stats_rfrg, "Rx Fragments"); XLPGE_STAT("rjbr", nlm_sgmii_stats_rjbr, "Rx Jabber"); XLPGE_STAT("tbyt", nlm_sgmii_stats_tbyt, "Tx Bytes"); XLPGE_STAT("tpkt", nlm_sgmii_stats_tpkt, "Tx Packets"); XLPGE_STAT("tmca", nlm_sgmii_stats_tmca, "Tx Multicast Packets"); XLPGE_STAT("tbca", nlm_sgmii_stats_tbca, "Tx Broadcast Packets"); XLPGE_STAT("txpf", nlm_sgmii_stats_txpf, "Tx Pause Frame"); XLPGE_STAT("tdfr", nlm_sgmii_stats_tdfr, "Tx Deferral Packets"); XLPGE_STAT("tedf", nlm_sgmii_stats_tedf, "Tx Excessive Deferral Pkts"); XLPGE_STAT("tscl", nlm_sgmii_stats_tscl, "Tx Single Collisions"); XLPGE_STAT("tmcl", nlm_sgmii_stats_tmcl, "Tx Multiple Collisions"); XLPGE_STAT("tlcl", nlm_sgmii_stats_tlcl, "Tx Late Collision Pkts"); XLPGE_STAT("txcl", nlm_sgmii_stats_txcl, "Tx Excessive Collisions"); XLPGE_STAT("tncl", nlm_sgmii_stats_tncl, "Tx Total Collisions"); XLPGE_STAT("tjbr", nlm_sgmii_stats_tjbr, "Tx Jabber Frames"); XLPGE_STAT("tfcs", nlm_sgmii_stats_tfcs, "Tx FCS Errors"); XLPGE_STAT("txcf", nlm_sgmii_stats_txcf, "Tx Control Frames"); XLPGE_STAT("tovr", nlm_sgmii_stats_tovr, "Tx Oversize Frames"); XLPGE_STAT("tund", nlm_sgmii_stats_tund, "Tx Undersize Frames"); XLPGE_STAT("tfrg", nlm_sgmii_stats_tfrg, "Tx Fragments"); #undef XLPGE_STAT } static int nlm_xlpge_attach(device_t dev) { struct xlp_port_ivars *pv; struct nlm_xlpge_softc *sc; int port; pv = device_get_ivars(dev); sc = device_get_softc(dev); sc->xlpge_dev = dev; sc->mii_bus = NULL; sc->block = pv->block; sc->node = pv->node; sc->port = pv->port; sc->type = pv->type; sc->xlpge_if = NULL; sc->phy_addr = pv->phy_addr; sc->mdio_bus = pv->mdio_bus; sc->portcfg = nae_port_config; sc->hw_parser_en = pv->hw_parser_en; /* default settings */ sc->speed = NLM_SGMII_SPEED_10; sc->duplexity = NLM_SGMII_DUPLEX_FULL; sc->link = NLM_LINK_DOWN; sc->flowctrl = NLM_FLOWCTRL_DISABLED; sc->network_sc = device_get_softc(device_get_parent(dev)); sc->base_addr = sc->network_sc->base; sc->prepad_en = sc->network_sc->prepad_en; sc->prepad_size = sc->network_sc->prepad_size; callout_init(&sc->xlpge_callout, 1); XLPGE_LOCK_INIT(sc, device_get_nameunit(dev)); port = (sc->block*4)+sc->port; sc->nfree_desc = nae_port_config[port].num_free_descs; sc->txq = nae_port_config[port].txq; sc->rxfreeq = nae_port_config[port].rxfreeq; nlm_xlpge_submit_rx_free_desc(sc, sc->nfree_desc); if (sc->hw_parser_en) nlm_enable_hardware_parser_per_port(sc->base_addr, sc->block, sc->port); nlm_xlpge_ifinit(sc); ifp_ports[port].xlpge_sc = sc; nlm_xlpge_mii_init(dev, sc); nlm_xlpge_setup_stats_sysctl(dev, sc); return (0); } static int nlm_xlpge_detach(device_t dev) { return (0); } static int nlm_xlpge_suspend(device_t dev) { return (0); } static int nlm_xlpge_resume(device_t dev) { return (0); } static int nlm_xlpge_shutdown(device_t dev) { return (0); } /* * miibus function with custom implementation */ static int -nlm_xlpge_mii_read(struct device *dev, int phyaddr, int regidx) +nlm_xlpge_mii_read(device_t dev, int phyaddr, int regidx) { struct nlm_xlpge_softc *sc; int val; sc = device_get_softc(dev); if (sc->type == SGMIIC) val = nlm_gmac_mdio_read(sc->base_addr, sc->mdio_bus, BLOCK_7, LANE_CFG, phyaddr, regidx); else val = 0xffff; return (val); } static int -nlm_xlpge_mii_write(struct device *dev, int phyaddr, int regidx, int val) +nlm_xlpge_mii_write(device_t dev, int phyaddr, int regidx, int val) { struct nlm_xlpge_softc *sc; sc = device_get_softc(dev); if 
(sc->type == SGMIIC) nlm_gmac_mdio_write(sc->base_addr, sc->mdio_bus, BLOCK_7, LANE_CFG, phyaddr, regidx, val); return (0); } static void nlm_xlpge_mii_statchg(device_t dev) { struct nlm_xlpge_softc *sc; struct mii_data *mii; char *speed, *duplexity; sc = device_get_softc(dev); if (sc->mii_bus == NULL) return; mii = device_get_softc(sc->mii_bus); if (mii->mii_media_status & IFM_ACTIVE) { if (IFM_SUBTYPE(mii->mii_media_active) == IFM_10_T) { sc->speed = NLM_SGMII_SPEED_10; speed = "10Mbps"; } else if (IFM_SUBTYPE(mii->mii_media_active) == IFM_100_TX) { sc->speed = NLM_SGMII_SPEED_100; speed = "100Mbps"; } else { /* default to 1G */ sc->speed = NLM_SGMII_SPEED_1000; speed = "1Gbps"; } if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { sc->duplexity = NLM_SGMII_DUPLEX_FULL; duplexity = "full"; } else { sc->duplexity = NLM_SGMII_DUPLEX_HALF; duplexity = "half"; } printf("Port [%d, %d] setup with speed=%s duplex=%s\n", sc->block, sc->port, speed, duplexity); nlm_nae_setup_mac(sc->base_addr, sc->block, sc->port, 0, 1, 1, sc->speed, sc->duplexity); } } /* * xlpge support function implementations */ static void nlm_xlpge_release_mbuf(uint64_t paddr) { uint64_t mag, desc, mbuf; paddr += (XLP_NTXFRAGS - 3) * sizeof(uint64_t); mag = nlm_paddr_ld(paddr); desc = nlm_paddr_ld(paddr + sizeof(uint64_t)); mbuf = nlm_paddr_ld(paddr + 2 * sizeof(uint64_t)); if (mag != 0xf00bad) { /* somebody else's packet; error - FIXME in initialization */ printf("cpu %d: ERR Tx packet paddr %jx, mag %jx, desc %jx mbuf %jx\n", nlm_cpuid(), (uintmax_t)paddr, (uintmax_t)mag, (intmax_t)desc, (uintmax_t)mbuf); return; } m_freem((struct mbuf *)(uintptr_t)mbuf); uma_zfree(nl_tx_desc_zone, (void *)(uintptr_t)desc); } static void nlm_xlpge_rx(struct nlm_xlpge_softc *sc, int port, vm_paddr_t paddr, int len) { struct ifnet *ifp; struct mbuf *m; vm_offset_t temp; unsigned long mag; int prepad_size; ifp = sc->xlpge_if; temp = nlm_paddr_ld(paddr - NAE_CACHELINE_SIZE); mag = nlm_paddr_ld(paddr - NAE_CACHELINE_SIZE + sizeof(uint64_t)); m = (struct mbuf *)(intptr_t)temp; if (mag != 0xf00bad) { /* somebody else's packet; error - FIXME in initialization */ printf("cpu %d: ERR Rx packet paddr %jx, temp %p, mag %lx\n", nlm_cpuid(), (uintmax_t)paddr, (void *)temp, mag); return; } m->m_pkthdr.rcvif = ifp; #ifdef DUMP_PACKET { int i = 0, j = 64; unsigned char *buf = (char *)m->m_data; printf("(cpu_%d: nlge_rx, !RX_COPY) Rx Packet: length=%d\n", nlm_cpuid(), len); if (len < j) j = len; if (sc->prepad_en) j += ((sc->prepad_size + 1) * 16); for (i = 0; i < j; i++) { if (i && (i % 16) == 0) printf("\n"); printf("%02x ", buf[i]); } printf("\n"); } #endif if (sc->prepad_en) { prepad_size = ((sc->prepad_size + 1) * 16); m->m_data += prepad_size; m->m_pkthdr.len = m->m_len = (len - prepad_size); } else m->m_pkthdr.len = m->m_len = len; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); #ifdef XLP_DRIVER_LOOPBACK if (port == 16 || port == 17) (*ifp->if_input)(ifp, m); else xlpge_tx(ifp, m); #else (*ifp->if_input)(ifp, m); #endif } void nlm_xlpge_submit_rx_free_desc(struct nlm_xlpge_softc *sc, int num) { int i, size, ret, n; struct nlm_fmn_msg msg; void *ptr; for(i = 0; i < num; i++) { memset(&msg, 0, sizeof(msg)); ptr = get_buf(); if (!ptr) { device_printf(sc->xlpge_dev, "Cannot allocate mbuf\n"); break; } msg.msg[0] = vtophys(ptr); if (msg.msg[0] == 0) { printf("Bad ptr for %p\n", ptr); break; } size = 1; n = 0; while (1) { /* on success returns 1, else 0 */ ret = nlm_fmn_msgsend(sc->rxfreeq, size, 0, &msg); if (ret == 0) break; if (n++ > 10000) { printf("Too many
credit fails for send free desc\n"); break; } } } } void nlm_xlpge_msgring_handler(int vc, int size, int code, int src_id, struct nlm_fmn_msg *msg, void *data) { uint64_t phys_addr; struct nlm_xlpnae_softc *sc; struct nlm_xlpge_softc *xlpge_sc; struct ifnet *ifp; uint32_t context; uint32_t port = 0; uint32_t length; sc = (struct nlm_xlpnae_softc *)data; KASSERT(sc != NULL, ("Null sc in msgring handler")); if (size == 1) { /* process transmit complete */ phys_addr = msg->msg[0] & 0xffffffffffULL; /* context is SGMII_RCV_CONTEXT_NUM + three bit vlan type * or vlan priority */ context = (msg->msg[0] >> 40) & 0x3fff; port = cntx2port[context]; if (port >= XLP_MAX_PORTS) { printf("%s:%d Bad port %d (context=%d)\n", __func__, __LINE__, port, context); return; } ifp = ifp_ports[port].xlpge_if; xlpge_sc = ifp_ports[port].xlpge_sc; nlm_xlpge_release_mbuf(phys_addr); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } else if (size > 1) { /* Receive packet */ phys_addr = msg->msg[1] & 0xffffffffc0ULL; length = (msg->msg[1] >> 40) & 0x3fff; length -= MAC_CRC_LEN; /* context is SGMII_RCV_CONTEXT_NUM + three bit vlan type * or vlan priority */ context = (msg->msg[1] >> 54) & 0x3ff; port = cntx2port[context]; if (port >= XLP_MAX_PORTS) { printf("%s:%d Bad port %d (context=%d)\n", __func__, __LINE__, port, context); return; } ifp = ifp_ports[port].xlpge_if; xlpge_sc = ifp_ports[port].xlpge_sc; nlm_xlpge_rx(xlpge_sc, port, phys_addr, length); /* return back a free descriptor to NA */ nlm_xlpge_submit_rx_free_desc(xlpge_sc, 1); } } Index: head/sys/mips/rmi/dev/nlge/if_nlge.c =================================================================== --- head/sys/mips/rmi/dev/nlge/if_nlge.c (revision 303889) +++ head/sys/mips/rmi/dev/nlge/if_nlge.c (revision 303890) @@ -1,2563 +1,2563 @@ /*- * Copyright (c) 2003-2009 RMI Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of RMI Corporation, nor the names of its contributors, * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * RMI_BSD */ /* * The XLR device supports up to four 10/100/1000 Ethernet MACs and up to * two 10G Ethernet MACs (of XGMII). Alternatively, each 10G port can be used * as a SPI-4 interface, with 8 ports per such interface.
The MACs are * encapsulated in another hardware block referred to as network accelerator, * such that there are three instances of these in an XLR. One of them controls * the four 1G RGMII ports while one each of the others controls an XGMII port. * Enabling MACs requires configuring the corresponding network accelerator * and the individual port. * The XLS device supports up to 8 10/100/1000 Ethernet MACs or at most 2 10G * Ethernet MACs. The 1G MACs use the SGMII interface and the 10G MACs use * XAUI. These ports are part of two network accelerators. * The nlge driver configures and initializes non-SPI4 Ethernet ports in the * XLR/XLS devices and enables data transfer on them. */ #include __FBSDID("$FreeBSD$"); #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define __RMAN_RESOURCE_VISIBLE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for DELAY */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miidevs.h" #include #include "miibus_if.h" #include MODULE_DEPEND(nlna, nlge, 1, 1, 1); MODULE_DEPEND(nlge, ether, 1, 1, 1); MODULE_DEPEND(nlge, miibus, 1, 1, 1); /* Network accelerator entry points */ static int nlna_probe(device_t); static int nlna_attach(device_t); static int nlna_detach(device_t); static int nlna_suspend(device_t); static int nlna_resume(device_t); static int nlna_shutdown(device_t); /* GMAC port entry points */ static int nlge_probe(device_t); static int nlge_attach(device_t); static int nlge_detach(device_t); static int nlge_suspend(device_t); static int nlge_resume(device_t); static void nlge_init(void *); static int nlge_ioctl(struct ifnet *, u_long, caddr_t); static int nlge_tx(struct ifnet *ifp, struct mbuf *m); static void nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len); -static int nlge_mii_write(struct device *, int, int, int); -static int nlge_mii_read(struct device *, int, int); +static int nlge_mii_write(device_t, int, int, int); +static int nlge_mii_read(device_t, int, int); static void nlge_mac_mii_statchg(device_t); static int nlge_mediachange(struct ifnet *ifp); static void nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr); /* Other internal/helper functions */ static void *get_buf(void); static void nlna_add_to_port_set(struct nlge_port_set *pset, struct nlge_softc *sc); static void nlna_config_pde(struct nlna_softc *); static void nlna_config_parser(struct nlna_softc *); static void nlna_config_classifier(struct nlna_softc *); static void nlna_config_fifo_spill_area(struct nlna_softc *sc); static void nlna_config_translate_table(struct nlna_softc *sc); static void nlna_config_common(struct nlna_softc *); static void nlna_disable_ports(struct nlna_softc *sc); static void nlna_enable_intr(struct nlna_softc *sc); static void nlna_disable_intr(struct nlna_softc *sc); static void nlna_enable_ports(struct nlna_softc *sc); static void nlna_get_all_softc(device_t iodi_dev, struct nlna_softc **sc_vec, uint32_t vec_sz); static void nlna_hw_init(struct nlna_softc *sc); static int nlna_is_last_active_na(struct nlna_softc *sc); static void nlna_media_specific_config(struct nlna_softc *sc); static void nlna_reset_ports(struct nlna_softc *sc, 
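/*
 * For orientation, the newbus hierarchy these declarations build up
 * (a rough sketch; unit numbers depend on the board configuration):
 *
 *	iodi0
 *	  nlna0                 network accelerator block
 *	    nlge0 .. nlgeN      one child per GMAC/XGMAC port
 *	      miibus0           PHY bus, for ports with an MDIO PHY
 *
 * wired together by the DRIVER_MODULE() statements further down.
 */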
struct xlr_gmac_block_t *blk); static struct nlna_softc *nlna_sc_init(device_t dev, struct xlr_gmac_block_t *blk); static void nlna_setup_intr(struct nlna_softc *sc); static void nlna_smp_update_pde(void *dummy __unused); static void nlna_submit_rx_free_desc(struct nlna_softc *sc, uint32_t n_desc); static int nlge_gmac_config_speed(struct nlge_softc *, int quick); static void nlge_hw_init(struct nlge_softc *sc); static int nlge_if_init(struct nlge_softc *sc); static void nlge_intr(void *arg); static int nlge_irq_init(struct nlge_softc *sc); static void nlge_irq_fini(struct nlge_softc *sc); static void nlge_media_specific_init(struct nlge_softc *sc); static void nlge_mii_init(device_t dev, struct nlge_softc *sc); static int nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr, int regidx); static void nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr, int regidx, int regval); void nlge_msgring_handler(int bucket, int size, int code, int stid, struct msgrng_msg *msg, void *data); static void nlge_port_disable(struct nlge_softc *sc); static void nlge_port_enable(struct nlge_softc *sc); static void nlge_read_mac_addr(struct nlge_softc *sc); static void nlge_sc_init(struct nlge_softc *sc, device_t dev, struct xlr_gmac_port *port_info); static void nlge_set_mac_addr(struct nlge_softc *sc); static void nlge_set_port_attribs(struct nlge_softc *, struct xlr_gmac_port *); static void nlge_mac_set_rx_mode(struct nlge_softc *sc); static void nlge_sgmii_init(struct nlge_softc *sc); static int nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc, struct mbuf *m); static int prepare_fmn_message(struct nlge_softc *sc, struct msgrng_msg *msg, uint32_t *n_entries, struct mbuf *m_head, uint64_t fr_stid, struct nlge_tx_desc **tx_desc); static void release_tx_desc(vm_paddr_t phy_addr); static int send_fmn_msg_tx(struct nlge_softc *, struct msgrng_msg *, uint32_t n_entries); //#define DEBUG #ifdef DEBUG static int mac_debug = 1; #undef PDEBUG #define PDEBUG(fmt, args...) \ do {\ if (mac_debug) {\ printf("[%s@%d|%s]: cpu_%d: " fmt, \ __FILE__, __LINE__, __FUNCTION__, PCPU_GET(cpuid), ##args);\ }\ } while(0); /* Debug/dump functions */ static void dump_reg(xlr_reg_t *addr, uint32_t offset, char *name); static void dump_gmac_registers(struct nlge_softc *); static void dump_na_registers(xlr_reg_t *base, int port_id); static void dump_mac_stats(struct nlge_softc *sc); static void dump_mii_regs(struct nlge_softc *sc) __attribute__((used)); static void dump_mii_data(struct mii_data *mii) __attribute__((used)); static void dump_board_info(struct xlr_board_info *); static void dump_pcs_regs(struct nlge_softc *sc, int phy); #else #undef PDEBUG #define PDEBUG(fmt, args...) #define dump_reg(a, o, n) /* nop */ #define dump_gmac_registers(a) /* nop */ #define dump_na_registers(a, p) /* nop */ #define dump_board_info(b) /* nop */ #define dump_mac_stats(sc) /* nop */ #define dump_mii_regs(sc) /* nop */ #define dump_mii_data(mii) /* nop */ #define dump_pcs_regs(sc, phy) /* nop */ #endif /* Wrappers etc. to export the driver entry points. */ static device_method_t nlna_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nlna_probe), DEVMETHOD(device_attach, nlna_attach), DEVMETHOD(device_detach, nlna_detach), DEVMETHOD(device_shutdown, nlna_shutdown), DEVMETHOD(device_suspend, nlna_suspend), DEVMETHOD(device_resume, nlna_resume), /* bus interface : TBD : what are these for ? 
*/ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD_END }; static driver_t nlna_driver = { "nlna", nlna_methods, sizeof(struct nlna_softc) }; static devclass_t nlna_devclass; static device_method_t nlge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nlge_probe), DEVMETHOD(device_attach, nlge_attach), DEVMETHOD(device_detach, nlge_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, nlge_suspend), DEVMETHOD(device_resume, nlge_resume), /* MII interface */ DEVMETHOD(miibus_readreg, nlge_mii_read), DEVMETHOD(miibus_writereg, nlge_mii_write), DEVMETHOD(miibus_statchg, nlge_mac_mii_statchg), {0, 0} }; static driver_t nlge_driver = { "nlge", nlge_methods, sizeof(struct nlge_softc) }; static devclass_t nlge_devclass; DRIVER_MODULE(nlna, iodi, nlna_driver, nlna_devclass, 0, 0); DRIVER_MODULE(nlge, nlna, nlge_driver, nlge_devclass, 0, 0); DRIVER_MODULE(miibus, nlge, miibus_driver, miibus_devclass, 0, 0); static uma_zone_t nl_tx_desc_zone; /* Tunables. */ static int flow_classification = 0; TUNABLE_INT("hw.nlge.flow_classification", &flow_classification); #define NLGE_HW_CHKSUM 1 static __inline void atomic_incr_long(unsigned long *addr) { /* XXX: fix for 64 bit */ unsigned int *iaddr = (unsigned int *)addr; xlr_ldaddwu(1, iaddr); } static int nlna_probe(device_t dev) { return (BUS_PROBE_DEFAULT); } /* * Add all attached GMAC/XGMAC ports to the device tree. Port * configuration is spread in two regions - common configuration * for all ports in the NA and per-port configuration in MAC-specific * region. This function does the following: * - adds the ports to the device tree * - reset the ports * - do all the common initialization * - invoke bus_generic_attach for per-port configuration * - supply initial free rx descriptors to ports * - initialize s/w data structures * - finally, enable interrupts (only in the last NA). * * For reference, sample address space for common and per-port * registers is given below. * * The address map for RNA0 is: (typical value) * * XLR_IO_BASE +--------------------------------------+ 0xbef0_0000 * | | * | | * | | * | | * | | * | | * GMAC0 ---> +--------------------------------------+ 0xbef0_c000 * | | * | | * (common) -> |......................................| 0xbef0_c400 * | | * | (RGMII/SGMII: common registers) | * | | * GMAC1 ---> |--------------------------------------| 0xbef0_d000 * | | * | | * (common) -> |......................................| 0xbef0_d400 * | | * | (RGMII/SGMII: common registers) | * | | * |......................................| * and so on .... * * Ref: Figure 14-3 and Table 14-1 of XLR PRM */ static int nlna_attach(device_t dev) { struct xlr_gmac_block_t *block_info; device_t gmac_dev; struct nlna_softc *sc; int error; int i; int id; id = device_get_unit(dev); block_info = device_get_ivars(dev); if (!block_info->enabled) { return 0; } #ifdef DEBUG dump_board_info(&xlr_board_info); #endif /* Initialize nlna state in softc structure */ sc = nlna_sc_init(dev, block_info); /* Add device's for the ports controlled by this NA. 
*/ if (block_info->type == XLR_GMAC) { KASSERT(id < 2, ("No GMACs supported with this network " "accelerator: %d", id)); for (i = 0; i < sc->num_ports; i++) { gmac_dev = device_add_child(dev, "nlge", -1); device_set_ivars(gmac_dev, &block_info->gmac_port[i]); } } else if (block_info->type == XLR_XGMAC) { KASSERT(id > 0 && id <= 2, ("No XGMACs supported with this " "network accelerator: %d", id)); gmac_dev = device_add_child(dev, "nlge", -1); device_set_ivars(gmac_dev, &block_info->gmac_port[0]); } else if (block_info->type == XLR_SPI4) { /* SPI4 is not supported here */ device_printf(dev, "Unsupported: NA with SPI4 type\n"); return (ENOTSUP); } nlna_reset_ports(sc, block_info); /* Initialize Network Accelerator registers. */ nlna_hw_init(sc); error = bus_generic_attach(dev); if (error) { device_printf(dev, "failed to attach port(s)\n"); goto fail; } /* Send out the initial pool of free-descriptors for the rx path */ nlna_submit_rx_free_desc(sc, MAX_FRIN_SPILL); /* S/w data structure initializations shared by all NAs. */ if (nl_tx_desc_zone == NULL) { /* Create a zone for allocating tx descriptors */ nl_tx_desc_zone = uma_zcreate("NL Tx Desc", sizeof(struct nlge_tx_desc), NULL, NULL, NULL, NULL, XLR_CACHELINE_SIZE, 0); } /* Enable NA interrupts */ nlna_setup_intr(sc); return (0); fail: return (error); } static int nlna_detach(device_t dev) { struct nlna_softc *sc; sc = device_get_softc(dev); if (device_is_alive(dev)) { nlna_disable_intr(sc); /* This will make sure that per-port detach is complete * and all traffic on the ports has been stopped. */ bus_generic_detach(dev); uma_zdestroy(nl_tx_desc_zone); } return (0); } static int nlna_suspend(device_t dev) { return (0); } static int nlna_resume(device_t dev) { return (0); } static int nlna_shutdown(device_t dev) { return (0); } /* GMAC port entry points */ static int nlge_probe(device_t dev) { struct nlge_softc *sc; struct xlr_gmac_port *port_info; int index; char *desc[] = { "RGMII", "SGMII", "RGMII/SGMII", "XGMAC", "XAUI", "Unknown"}; port_info = device_get_ivars(dev); index = (port_info->type < XLR_RGMII || port_info->type > XLR_XAUI) ? 
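/*
 * Any type outside XLR_RGMII..XLR_XAUI selects index 5, the "Unknown"
 * slot, instead of indexing past desc[]; this assumes the port type
 * constants are the contiguous values 0..4 matching the table above.
 */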
5 : port_info->type; device_set_desc_copy(dev, desc[index]); sc = device_get_softc(dev); nlge_sc_init(sc, dev, port_info); nlge_port_disable(sc); return (0); } static int nlge_attach(device_t dev) { struct nlge_softc *sc; struct nlna_softc *nsc; int error; sc = device_get_softc(dev); nlge_if_init(sc); nlge_mii_init(dev, sc); error = nlge_irq_init(sc); if (error) return (error); nlge_hw_init(sc); nsc = (struct nlna_softc *)device_get_softc(device_get_parent(dev)); nsc->child_sc[sc->instance] = sc; return (0); } static int nlge_detach(device_t dev) { struct nlge_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->nlge_if; if (device_is_attached(dev)) { nlge_port_disable(sc); nlge_irq_fini(sc); ether_ifdetach(ifp); bus_generic_detach(dev); } if (ifp) if_free(ifp); return (0); } static int nlge_suspend(device_t dev) { return (0); } static int nlge_resume(device_t dev) { return (0); } static void nlge_init(void *addr) { struct nlge_softc *sc; struct ifnet *ifp; sc = (struct nlge_softc *)addr; ifp = sc->nlge_if; if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; nlge_gmac_config_speed(sc, 1); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; nlge_port_enable(sc); if (sc->port_type == XLR_SGMII) { dump_pcs_regs(sc, 27); } dump_gmac_registers(sc); dump_mac_stats(sc); } static int nlge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct mii_data *mii; struct nlge_softc *sc; struct ifreq *ifr; int error; sc = ifp->if_softc; error = 0; ifr = (struct ifreq *)data; switch (command) { case SIOCSIFFLAGS: NLGE_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { nlge_init(sc); } if (ifp->if_flags & IFF_PROMISC && !(sc->if_flags & IFF_PROMISC)) { sc->if_flags |= IFF_PROMISC; nlge_mac_set_rx_mode(sc); } else if (!(ifp->if_flags & IFF_PROMISC) && sc->if_flags & IFF_PROMISC) { sc->if_flags &= ~IFF_PROMISC; nlge_mac_set_rx_mode(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { nlge_port_disable(sc); } } sc->if_flags = ifp->if_flags; NLGE_UNLOCK(sc); error = 0; break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: if (sc->mii_bus != NULL) { mii = (struct mii_data *)device_get_softc(sc->mii_bus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } /* This function is called from an interrupt handler */ void nlge_msgring_handler(int bucket, int size, int code, int stid, struct msgrng_msg *msg, void *data) { struct nlna_softc *na_sc; struct nlge_softc *sc; struct ifnet *ifp; struct mbuf *m; vm_paddr_t phys_addr; uint32_t length; int ctrl; int tx_error; int port; int is_p2p; is_p2p = 0; tx_error = 0; length = (msg->msg0 >> 40) & 0x3fff; na_sc = (struct nlna_softc *)data; if (length == 0) { ctrl = CTRL_REG_FREE; phys_addr = msg->msg0 & 0xffffffffffULL; port = (msg->msg0 >> 54) & 0x0f; is_p2p = (msg->msg0 >> 62) & 0x1; tx_error = (msg->msg0 >> 58) & 0xf; } else { ctrl = CTRL_SNGL; phys_addr = msg->msg0 & 0xffffffffe0ULL; length = length - BYTE_OFFSET - MAC_CRC_LEN; port = msg->msg0 & 0x0f; } sc = na_sc->child_sc[port]; if (sc == NULL) { printf("Message (of %d len) with softc=NULL on %d port (type=%s)\n", length, port, (ctrl == CTRL_SNGL ? 
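/*
 * For reference, the msg0 layout decoded above (derived from the code
 * itself, not quoted from the PRM):
 *
 *	freeback (length == 0):	bits 39:0  buffer physical address,
 *				bits 57:54 port, 61:58 tx error, 62 p2p
 *	rx	 (length != 0):	bits 39:5  buffer physical address,
 *				bits 53:40 length, 3:0 port
 */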
"Pkt rx" : "Freeback for tx packet")); return; } if (ctrl == CTRL_REG_FREE || ctrl == CTRL_JUMBO_FREE) { ifp = sc->nlge_if; if (!tx_error) { if (is_p2p) { release_tx_desc(phys_addr); } else { #ifdef __mips_n64 m = (struct mbuf *)(uintptr_t)xlr_paddr_ld(phys_addr); m->m_nextpkt = NULL; #else m = (struct mbuf *)(uintptr_t)phys_addr; #endif m_freem(m); } NLGE_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_OACTIVE){ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } NLGE_UNLOCK(sc); } else { printf("ERROR: Tx fb error (%d) on port %d\n", tx_error, port); } tx_error ? if_inc_counter(ifp, IFCOUNTER_OERRORS, 1) : if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } else if (ctrl == CTRL_SNGL || ctrl == CTRL_START) { /* Rx Packet */ nlge_rx(sc, phys_addr, length); nlna_submit_rx_free_desc(na_sc, 1); /* return free descr to NA */ } else { printf("[%s]: unrecognized ctrl=%d!\n", __func__, ctrl); } } static int nlge_tx(struct ifnet *ifp, struct mbuf *m) { return (nlge_start_locked(ifp, ifp->if_softc, m)); } static int nlge_start_locked(struct ifnet *ifp, struct nlge_softc *sc, struct mbuf *m) { struct msgrng_msg msg; struct nlge_tx_desc *tx_desc; uint64_t fr_stid; uint32_t cpu; uint32_t n_entries; uint32_t tid; int error, ret; if (m == NULL) return (0); tx_desc = NULL; error = 0; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING) || ifp->if_drv_flags & IFF_DRV_OACTIVE) { error = ENXIO; goto fail; // note: mbuf will get free'd } cpu = xlr_core_id(); tid = xlr_thr_id(); /* H/w threads [0, 2] --> bucket 6 and [1, 3] --> bucket 7 */ fr_stid = cpu * 8 + 6 + (tid % 2); /* * First, remove some freeback messages before transmitting * any new packets. However, cap the number of messages * drained to permit this thread to continue with its * transmission. * * Mask for buckets {6, 7} is 0xc0 */ xlr_msgring_handler(0xc0, 4); ret = prepare_fmn_message(sc, &msg, &n_entries, m, fr_stid, &tx_desc); if (ret) { error = (ret == 2) ? ENOBUFS : ENOTSUP; goto fail; } ret = send_fmn_msg_tx(sc, &msg, n_entries); if (ret != 0) { error = EBUSY; goto fail; } return (0); fail: if (tx_desc != NULL) { uma_zfree(nl_tx_desc_zone, tx_desc); } if (m != NULL) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { NLGE_LOCK(sc); ifp->if_drv_flags |= IFF_DRV_OACTIVE; NLGE_UNLOCK(sc); } m_freem(m); if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); } return (error); } static void nlge_rx(struct nlge_softc *sc, vm_paddr_t paddr, int len) { struct ifnet *ifp; struct mbuf *m; uint64_t tm, mag; uint32_t sr; sr = xlr_enable_kx(); tm = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE); mag = xlr_paddr_ld(paddr - XLR_CACHELINE_SIZE + sizeof(uint64_t)); xlr_restore_kx(sr); m = (struct mbuf *)(intptr_t)tm; if (mag != 0xf00bad) { /* somebody else's packet. 
Error - FIXME in initialization */ printf("cpu %d: *ERROR* Not my packet paddr %jx\n", xlr_core_id(), (uintmax_t)paddr); return; } ifp = sc->nlge_if; #ifdef NLGE_HW_CHKSUM m->m_pkthdr.csum_flags = CSUM_IP_CHECKED; if (m->m_data[10] & 0x2) { m->m_pkthdr.csum_flags |= CSUM_IP_VALID; if (m->m_data[10] & 0x1) { m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m->m_pkthdr.csum_data = htons(0xffff); } } m->m_data += NLGE_PREPAD_LEN; len -= NLGE_PREPAD_LEN; #else m->m_pkthdr.csum_flags = 0; #endif /* align the data */ m->m_data += BYTE_OFFSET; m->m_pkthdr.len = m->m_len = len; m->m_pkthdr.rcvif = ifp; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); (*ifp->if_input)(ifp, m); } static int -nlge_mii_write(struct device *dev, int phyaddr, int regidx, int regval) +nlge_mii_write(device_t dev, int phyaddr, int regidx, int regval) { struct nlge_softc *sc; sc = device_get_softc(dev); if (sc->port_type != XLR_XGMII) nlge_mii_write_internal(sc->mii_base, phyaddr, regidx, regval); return (0); } static int -nlge_mii_read(struct device *dev, int phyaddr, int regidx) +nlge_mii_read(device_t dev, int phyaddr, int regidx) { struct nlge_softc *sc; int val; sc = device_get_softc(dev); val = (sc->port_type == XLR_XGMII) ? (0xffff) : nlge_mii_read_internal(sc->mii_base, phyaddr, regidx); return (val); } static void nlge_mac_mii_statchg(device_t dev) { } static int nlge_mediachange(struct ifnet *ifp) { return (0); } static void nlge_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct nlge_softc *sc; struct mii_data *md; md = NULL; sc = ifp->if_softc; if (sc->mii_bus) md = device_get_softc(sc->mii_bus); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (sc->link == xlr_mac_link_down) return; if (md != NULL) ifmr->ifm_active = md->mii_media.ifm_cur->ifm_media; ifmr->ifm_status |= IFM_ACTIVE; } static struct nlna_softc * nlna_sc_init(device_t dev, struct xlr_gmac_block_t *blk) { struct nlna_softc *sc; sc = device_get_softc(dev); memset(sc, 0, sizeof(*sc)); sc->nlna_dev = dev; sc->base = xlr_io_mmio(blk->baseaddr); sc->rfrbucket = blk->station_rfr; sc->station_id = blk->station_id; sc->na_type = blk->type; sc->mac_type = blk->mode; sc->num_ports = blk->num_ports; sc->mdio_set.port_vec = sc->mdio_sc; sc->mdio_set.vec_sz = XLR_MAX_MACS; return (sc); } /* * Do: * - Initialize common GMAC registers (index range 0x100-0x3ff). */ static void nlna_hw_init(struct nlna_softc *sc) { /* * Register message ring handler for the NA block; messages from * the GMAC will have their source station id set to the first bucket * of the NA FMN station, so register just that station id. */ if (register_msgring_handler(sc->station_id, sc->station_id + 1, nlge_msgring_handler, sc)) { panic("Couldn't register msgring handler\n"); } nlna_config_fifo_spill_area(sc); nlna_config_pde(sc); nlna_config_common(sc); nlna_config_parser(sc); nlna_config_classifier(sc); } /* * Enable interrupts on all the ports controlled by this NA. For now, we * only care about the MII interrupt and this has to be enabled only * on port id 0. * * This function is not in-sync with the regular way of doing things - it * executes only in the context of the last active network accelerator (and * thereby has some ugly accesses in the device tree). Though inelegant, it * is necessary to do it this way as the per-port interrupts can be * set up/enabled only after all the network accelerators have been * initialized. 
*/ static void nlna_setup_intr(struct nlna_softc *sc) { struct nlna_softc *na_sc[XLR_MAX_NLNA]; struct nlge_port_set *pset; struct xlr_gmac_port *port_info; device_t iodi_dev; int i, j; if (!nlna_is_last_active_na(sc)) return ; /* Collect all nlna softc pointers */ memset(na_sc, 0, sizeof(*na_sc) * XLR_MAX_NLNA); iodi_dev = device_get_parent(sc->nlna_dev); nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA); /* Setup the MDIO interrupt lists. */ /* * MDIO interrupts are coarse - a single interrupt line provides * information about one of many possible ports. To figure out the * exact port on which action is to be taken, all of the ports * linked to an MDIO interrupt should be read. To enable this, * ports need to add themselves to port sets. */ for (i = 0; i < XLR_MAX_NLNA; i++) { if (na_sc[i] == NULL) continue; for (j = 0; j < na_sc[i]->num_ports; j++) { /* processing j-th port on i-th NA */ port_info = device_get_ivars( na_sc[i]->child_sc[j]->nlge_dev); pset = &na_sc[port_info->mdint_id]->mdio_set; nlna_add_to_port_set(pset, na_sc[i]->child_sc[j]); } } /* Enable interrupts */ for (i = 0; i < XLR_MAX_NLNA; i++) { if (na_sc[i] != NULL && na_sc[i]->na_type != XLR_XGMAC) { nlna_enable_intr(na_sc[i]); } } } static void nlna_add_to_port_set(struct nlge_port_set *pset, struct nlge_softc *sc) { int i; /* step past the non-NULL elements */ for (i = 0; i < pset->vec_sz && pset->port_vec[i] != NULL; i++) ; if (i < pset->vec_sz) pset->port_vec[i] = sc; else printf("warning: internal error: out-of-bounds for MDIO array"); } static void nlna_enable_intr(struct nlna_softc *sc) { int i; for (i = 0; i < sc->num_ports; i++) { if (sc->child_sc[i]->instance == 0) NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK, (1 << O_INTMASK__MDInt)); } } static void nlna_disable_intr(struct nlna_softc *sc) { int i; for (i = 0; i < sc->num_ports; i++) { if (sc->child_sc[i]->instance == 0) NLGE_WRITE(sc->child_sc[i]->base, R_INTMASK, 0); } } static int nlna_is_last_active_na(struct nlna_softc *sc) { int id; id = device_get_unit(sc->nlna_dev); return (id == 2 || xlr_board_info.gmac_block[id + 1].enabled == 0); } static void nlna_submit_rx_free_desc(struct nlna_softc *sc, uint32_t n_desc) { struct msgrng_msg msg; void *ptr; uint32_t msgrng_flags; int i, n, stid, ret, code; if (n_desc > 1) { PDEBUG("Sending %d free-in descriptors to station=%d\n", n_desc, sc->rfrbucket); } stid = sc->rfrbucket; code = (sc->na_type == XLR_XGMAC) ? 
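/*
 * Each free descriptor is just the physical address of a buffer from
 * get_buf() below, which hides a back pointer and an ownership magic
 * in the cache line preceding the data it hands out:
 *
 *	md[0] = (intptr_t)m_new;	back pointer to the mbuf
 *	md[1] = 0xf00bad;		magic later checked in nlge_rx()
 */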
MSGRNG_CODE_XGMAC : MSGRNG_CODE_MAC; memset(&msg, 0, sizeof(msg)); for (i = 0; i < n_desc; i++) { ptr = get_buf(); if (!ptr) { ret = -ENOMEM; device_printf(sc->nlna_dev, "Cannot allocate mbuf\n"); break; } /* Send the free Rx desc to the MAC */ msg.msg0 = vtophys(ptr) & 0xffffffffe0ULL; n = 0; do { msgrng_flags = msgrng_access_enable(); ret = message_send(1, code, stid, &msg); msgrng_restore(msgrng_flags); KASSERT(n++ < 100000, ("Too many credit fails in rx path\n")); } while (ret != 0); } } static __inline__ void * nlna_config_spill(xlr_reg_t *base, int reg_start_0, int reg_start_1, int reg_size, int size) { void *spill; uint64_t phys_addr; uint32_t spill_size; spill_size = size; spill = contigmalloc((spill_size + XLR_CACHELINE_SIZE), M_DEVBUF, M_NOWAIT | M_ZERO, 0, 0xffffffff, XLR_CACHELINE_SIZE, 0); if (spill == NULL || ((vm_offset_t) spill & (XLR_CACHELINE_SIZE - 1))) { panic("Unable to allocate memory for spill area!\n"); } phys_addr = vtophys(spill); PDEBUG("Allocated spill %d bytes at %llx\n", size, phys_addr); NLGE_WRITE(base, reg_start_0, (phys_addr >> 5) & 0xffffffff); NLGE_WRITE(base, reg_start_1, (phys_addr >> 37) & 0x07); NLGE_WRITE(base, reg_size, spill_size); return (spill); } /* * Configure the six FIFOs that the network accelerator uses to * communicate with the rest of the XLx device. Four of the FIFOs carry * packets from the NA --> cpu (the class FIFOs) and two are for feeding * the NA with free descriptors. */ static void nlna_config_fifo_spill_area(struct nlna_softc *sc) { sc->frin_spill = nlna_config_spill(sc->base, R_REG_FRIN_SPILL_MEM_START_0, R_REG_FRIN_SPILL_MEM_START_1, R_REG_FRIN_SPILL_MEM_SIZE, MAX_FRIN_SPILL * sizeof(struct fr_desc)); sc->frout_spill = nlna_config_spill(sc->base, R_FROUT_SPILL_MEM_START_0, R_FROUT_SPILL_MEM_START_1, R_FROUT_SPILL_MEM_SIZE, MAX_FROUT_SPILL * sizeof(struct fr_desc)); sc->class_0_spill = nlna_config_spill(sc->base, R_CLASS0_SPILL_MEM_START_0, R_CLASS0_SPILL_MEM_START_1, R_CLASS0_SPILL_MEM_SIZE, MAX_CLASS_0_SPILL * sizeof(union rx_tx_desc)); sc->class_1_spill = nlna_config_spill(sc->base, R_CLASS1_SPILL_MEM_START_0, R_CLASS1_SPILL_MEM_START_1, R_CLASS1_SPILL_MEM_SIZE, MAX_CLASS_1_SPILL * sizeof(union rx_tx_desc)); sc->class_2_spill = nlna_config_spill(sc->base, R_CLASS2_SPILL_MEM_START_0, R_CLASS2_SPILL_MEM_START_1, R_CLASS2_SPILL_MEM_SIZE, MAX_CLASS_2_SPILL * sizeof(union rx_tx_desc)); sc->class_3_spill = nlna_config_spill(sc->base, R_CLASS3_SPILL_MEM_START_0, R_CLASS3_SPILL_MEM_START_1, R_CLASS3_SPILL_MEM_SIZE, MAX_CLASS_3_SPILL * sizeof(union rx_tx_desc)); } /* Set the CPU buckets that receive packets from the NA class FIFOs. */ static void nlna_config_pde(struct nlna_softc *sc) { uint64_t bucket_map; uint32_t cpumask; int i, cpu, bucket; cpumask = 0x1; #ifdef SMP /* * nlna may be called before SMP start in a BOOTP/NFSROOT * setup. We will distribute packets to other cpus only when * SMP is started. 
*/ if (smp_started) cpumask = xlr_hw_thread_mask; #endif bucket_map = 0; for (i = 0; i < 32; i++) { if (cpumask & (1 << i)) { cpu = i; /* use bucket 0 and 1 on every core for NA msgs */ bucket = cpu/4 * 8; bucket_map |= (3ULL << bucket); } } NLGE_WRITE(sc->base, R_PDE_CLASS_0, (bucket_map & 0xffffffff)); NLGE_WRITE(sc->base, R_PDE_CLASS_0 + 1, ((bucket_map >> 32) & 0xffffffff)); NLGE_WRITE(sc->base, R_PDE_CLASS_1, (bucket_map & 0xffffffff)); NLGE_WRITE(sc->base, R_PDE_CLASS_1 + 1, ((bucket_map >> 32) & 0xffffffff)); NLGE_WRITE(sc->base, R_PDE_CLASS_2, (bucket_map & 0xffffffff)); NLGE_WRITE(sc->base, R_PDE_CLASS_2 + 1, ((bucket_map >> 32) & 0xffffffff)); NLGE_WRITE(sc->base, R_PDE_CLASS_3, (bucket_map & 0xffffffff)); NLGE_WRITE(sc->base, R_PDE_CLASS_3 + 1, ((bucket_map >> 32) & 0xffffffff)); } /* * Update the network accelerator packet distribution engine for SMP. * On bootup, we have just the boot hw thread handling all packets, on SMP * start, we can start distributing packets across all the cores which are up. */ static void nlna_smp_update_pde(void *dummy __unused) { device_t iodi_dev; struct nlna_softc *na_sc[XLR_MAX_NLNA]; int i; printf("Updating packet distribution for SMP\n"); iodi_dev = devclass_get_device(devclass_find("iodi"), 0); nlna_get_all_softc(iodi_dev, na_sc, XLR_MAX_NLNA); for (i = 0; i < XLR_MAX_NLNA; i++) { if (na_sc[i] == NULL) continue; nlna_disable_ports(na_sc[i]); nlna_config_pde(na_sc[i]); nlna_config_translate_table(na_sc[i]); nlna_enable_ports(na_sc[i]); } } SYSINIT(nlna_smp_update_pde, SI_SUB_SMP, SI_ORDER_ANY, nlna_smp_update_pde, NULL); static void nlna_config_translate_table(struct nlna_softc *sc) { uint32_t cpu_mask; uint32_t val; int bkts[32]; /* one bucket is assumed for each cpu */ int b1, b2, c1, c2, i, j, k; int use_bkt; if (!flow_classification) return; use_bkt = 1; if (smp_started) cpu_mask = xlr_hw_thread_mask; else return; printf("Using %s-based distribution\n", (use_bkt) ? "bucket" : "class"); j = 0; for(i = 0; i < 32; i++) { if ((1 << i) & cpu_mask){ /* for each cpu, mark the 4+threadid bucket */ bkts[j] = ((i / 4) * 8) + (i % 4); j++; } } /*configure the 128 * 9 Translation table to send to available buckets*/ k = 0; c1 = 3; c2 = 0; for(i = 0; i < 64; i++) { /* Get the next 2 pairs of (class, bucket): (c1, b1), (c2, b2). c1, c2 limited to {0, 1, 2, 3} i.e, the 4 classes defined by h/w b1, b2 limited to { bkts[i], where 0 <= i < j} i.e, the set of buckets computed in the above loop. */ c1 = (c1 + 1) & 3; c2 = (c1 + 1) & 3; b1 = bkts[k]; k = (k + 1) % j; b2 = bkts[k]; k = (k + 1) % j; PDEBUG("Translation table[%d] b1=%d b2=%d c1=%d c2=%d\n", i, b1, b2, c1, c2); val = ((c1 << 23) | (b1 << 17) | (use_bkt << 16) | (c2 << 7) | (b2 << 1) | (use_bkt << 0)); NLGE_WRITE(sc->base, R_TRANSLATETABLE + i, val); c1 = c2; } } static void nlna_config_parser(struct nlna_softc *sc) { uint32_t val; /* * Mark it as ETHERNET type. 
*/ NLGE_WRITE(sc->base, R_L2TYPE_0, 0x01); #ifndef NLGE_HW_CHKSUM if (!flow_classification) return; #endif /* Use 7bit CRChash for flow classification with 127 as CRC polynomial*/ NLGE_WRITE(sc->base, R_PARSERCONFIGREG, ((0x7f << 8) | (1 << 1))); /* configure the parser : L2 Type is configured in the bootloader */ /* extract IP: src, dest protocol */ NLGE_WRITE(sc->base, R_L3CTABLE, (9 << 20) | (1 << 19) | (1 << 18) | (0x01 << 16) | (0x0800 << 0)); NLGE_WRITE(sc->base, R_L3CTABLE + 1, (9 << 25) | (1 << 21) | (12 << 14) | (4 << 10) | (16 << 4) | 4); #ifdef NLGE_HW_CHKSUM device_printf(sc->nlna_dev, "Enabled h/w support to compute TCP/IP" " checksum\n"); #endif /* Configure to extract SRC port and Dest port for TCP and UDP pkts */ NLGE_WRITE(sc->base, R_L4CTABLE, 6); NLGE_WRITE(sc->base, R_L4CTABLE + 2, 17); val = ((0 << 21) | (2 << 17) | (2 << 11) | (2 << 7)); NLGE_WRITE(sc->base, R_L4CTABLE + 1, val); NLGE_WRITE(sc->base, R_L4CTABLE + 3, val); } static void nlna_config_classifier(struct nlna_softc *sc) { int i; if (sc->mac_type == XLR_XGMII) { /* TBD: XGMII init sequence */ /* xgmac translation table doesn't have sane values on reset */ for (i = 0; i < 64; i++) NLGE_WRITE(sc->base, R_TRANSLATETABLE + i, 0x0); /* * use upper 7 bits of the parser extract to index the * translate table */ NLGE_WRITE(sc->base, R_PARSERCONFIGREG, 0x0); } } /* * Complete a bunch of h/w register initializations that are common for all the * ports controlled by a NA. */ static void nlna_config_common(struct nlna_softc *sc) { struct xlr_gmac_block_t *block_info; struct stn_cc *gmac_cc_config; int i; block_info = device_get_ivars(sc->nlna_dev); gmac_cc_config = block_info->credit_config; for (i = 0; i < MAX_NUM_MSGRNG_STN_CC; i++) { NLGE_WRITE(sc->base, R_CC_CPU0_0 + i, gmac_cc_config->counters[i >> 3][i & 0x07]); } NLGE_WRITE(sc->base, R_MSG_TX_THRESHOLD, 3); NLGE_WRITE(sc->base, R_DMACR0, 0xffffffff); NLGE_WRITE(sc->base, R_DMACR1, 0xffffffff); NLGE_WRITE(sc->base, R_DMACR2, 0xffffffff); NLGE_WRITE(sc->base, R_DMACR3, 0xffffffff); NLGE_WRITE(sc->base, R_FREEQCARVE, 0); nlna_media_specific_config(sc); } static void nlna_media_specific_config(struct nlna_softc *sc) { struct bucket_size *bucket_sizes; bucket_sizes = xlr_board_info.bucket_sizes; switch (sc->mac_type) { case XLR_RGMII: case XLR_SGMII: case XLR_XAUI: NLGE_WRITE(sc->base, R_GMAC_JFR0_BUCKET_SIZE, bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_0]); NLGE_WRITE(sc->base, R_GMAC_RFR0_BUCKET_SIZE, bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_0]); NLGE_WRITE(sc->base, R_GMAC_JFR1_BUCKET_SIZE, bucket_sizes->bucket[MSGRNG_STNID_GMACJFR_1]); NLGE_WRITE(sc->base, R_GMAC_RFR1_BUCKET_SIZE, bucket_sizes->bucket[MSGRNG_STNID_GMACRFR_1]); if (sc->mac_type == XLR_XAUI) { NLGE_WRITE(sc->base, R_TXDATAFIFO0, (224 << 16)); } break; case XLR_XGMII: NLGE_WRITE(sc->base, R_XGS_RFR_BUCKET_SIZE, bucket_sizes->bucket[sc->rfrbucket]); default: break; } } static void nlna_reset_ports(struct nlna_softc *sc, struct xlr_gmac_block_t *blk) { xlr_reg_t *addr; int i; uint32_t rx_ctrl; /* Refer Section 13.9.3 in the PRM for the reset sequence */ for (i = 0; i < sc->num_ports; i++) { addr = xlr_io_mmio(blk->gmac_port[i].base_addr); /* 1. 
Reset RxEnable in MAC_CONFIG */ switch (sc->mac_type) { case XLR_RGMII: case XLR_SGMII: NLGE_UPDATE(addr, R_MAC_CONFIG_1, 0, (1 << O_MAC_CONFIG_1__rxen)); break; case XLR_XAUI: case XLR_XGMII: NLGE_UPDATE(addr, R_RX_CONTROL, 0, (1 << O_RX_CONTROL__RxEnable)); break; default: printf("Error: Unsupported port_type=%d\n", sc->mac_type); } /* 1.1 Wait for RxControl.RxHalt to be set */ do { rx_ctrl = NLGE_READ(addr, R_RX_CONTROL); } while (!(rx_ctrl & 0x2)); /* 2. Set the soft reset bit in RxControl */ NLGE_UPDATE(addr, R_RX_CONTROL, (1 << O_RX_CONTROL__SoftReset), (1 << O_RX_CONTROL__SoftReset)); /* 2.1 Wait for RxControl.SoftResetDone to be set */ do { rx_ctrl = NLGE_READ(addr, R_RX_CONTROL); } while (!(rx_ctrl & 0x8)); /* 3. Clear the soft reset bit in RxControl */ NLGE_UPDATE(addr, R_RX_CONTROL, 0, (1 << O_RX_CONTROL__SoftReset)); /* Turn off tx/rx on the port. */ NLGE_UPDATE(addr, R_RX_CONTROL, 0, (1 << O_RX_CONTROL__RxEnable)); NLGE_UPDATE(addr, R_TX_CONTROL, 0, (1 << O_TX_CONTROL__TxEnable)); } } static void nlna_disable_ports(struct nlna_softc *sc) { int i; for (i = 0; i < sc->num_ports; i++) { if (sc->child_sc[i] != NULL) nlge_port_disable(sc->child_sc[i]); } } static void nlna_enable_ports(struct nlna_softc *sc) { device_t nlge_dev, *devlist; struct nlge_softc *port_sc; int i, numdevs; device_get_children(sc->nlna_dev, &devlist, &numdevs); for (i = 0; i < numdevs; i++) { nlge_dev = devlist[i]; if (nlge_dev == NULL) continue; port_sc = device_get_softc(nlge_dev); if (port_sc->nlge_if->if_drv_flags & IFF_DRV_RUNNING) nlge_port_enable(port_sc); } free(devlist, M_TEMP); } static void nlna_get_all_softc(device_t iodi_dev, struct nlna_softc **sc_vec, uint32_t vec_sz) { device_t na_dev; int i; for (i = 0; i < vec_sz; i++) { sc_vec[i] = NULL; na_dev = device_find_child(iodi_dev, "nlna", i); if (na_dev != NULL) sc_vec[i] = device_get_softc(na_dev); } } static void nlge_port_disable(struct nlge_softc *sc) { struct ifnet *ifp; xlr_reg_t *base; uint32_t rd; int id, port_type; id = sc->id; port_type = sc->port_type; base = sc->base; ifp = sc->nlge_if; NLGE_UPDATE(base, R_RX_CONTROL, 0x0, 1 << O_RX_CONTROL__RxEnable); do { rd = NLGE_READ(base, R_RX_CONTROL); } while (!(rd & (1 << O_RX_CONTROL__RxHalt))); NLGE_UPDATE(base, R_TX_CONTROL, 0, 1 << O_TX_CONTROL__TxEnable); do { rd = NLGE_READ(base, R_TX_CONTROL); } while (!(rd & (1 << O_TX_CONTROL__TxIdle))); switch (port_type) { case XLR_RGMII: case XLR_SGMII: NLGE_UPDATE(base, R_MAC_CONFIG_1, 0, ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen))); break; case XLR_XGMII: case XLR_XAUI: NLGE_UPDATE(base, R_XGMAC_CONFIG_1, 0, ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen))); break; default: panic("Unknown MAC type on port %d\n", id); } if (ifp) { ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); } } static void nlge_port_enable(struct nlge_softc *sc) { struct xlr_gmac_port *self; xlr_reg_t *base; base = sc->base; self = device_get_ivars(sc->nlge_dev); if (xlr_board_info.is_xls && sc->port_type == XLR_RGMII) NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RGMII), (1 << O_RX_CONTROL__RGMII)); NLGE_UPDATE(base, R_RX_CONTROL, (1 << O_RX_CONTROL__RxEnable), (1 << O_RX_CONTROL__RxEnable)); NLGE_UPDATE(base, R_TX_CONTROL, (1 << O_TX_CONTROL__TxEnable | RGE_TX_THRESHOLD_BYTES), (1 << O_TX_CONTROL__TxEnable | 0x3fff)); switch (sc->port_type) { case XLR_RGMII: case XLR_SGMII: NLGE_UPDATE(base, R_MAC_CONFIG_1, ((1 << O_MAC_CONFIG_1__rxen) | (1 << O_MAC_CONFIG_1__txen)), ((1 << O_MAC_CONFIG_1__rxen) | (1 << 
O_MAC_CONFIG_1__txen))); break; case XLR_XGMII: case XLR_XAUI: NLGE_UPDATE(base, R_XGMAC_CONFIG_1, ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen)), ((1 << O_XGMAC_CONFIG_1__hsttfen) | (1 << O_XGMAC_CONFIG_1__hstrfen))); break; default: panic("Unknown MAC type on port %d\n", sc->id); } } static void nlge_mac_set_rx_mode(struct nlge_softc *sc) { uint32_t regval; regval = NLGE_READ(sc->base, R_MAC_FILTER_CONFIG); if (sc->if_flags & IFF_PROMISC) { regval |= (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) | (1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) | (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) | (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN); } else { regval &= ~((1 << O_MAC_FILTER_CONFIG__PAUSE_FRAME_EN) | (1 << O_MAC_FILTER_CONFIG__ALL_UCAST_EN)); } NLGE_WRITE(sc->base, R_MAC_FILTER_CONFIG, regval); } static void nlge_sgmii_init(struct nlge_softc *sc) { xlr_reg_t *mmio_gpio; int phy; if (sc->port_type != XLR_SGMII) return; nlge_mii_write_internal(sc->serdes_addr, 26, 0, 0x6DB0); nlge_mii_write_internal(sc->serdes_addr, 26, 1, 0xFFFF); nlge_mii_write_internal(sc->serdes_addr, 26, 2, 0xB6D0); nlge_mii_write_internal(sc->serdes_addr, 26, 3, 0x00FF); nlge_mii_write_internal(sc->serdes_addr, 26, 4, 0x0000); nlge_mii_write_internal(sc->serdes_addr, 26, 5, 0x0000); nlge_mii_write_internal(sc->serdes_addr, 26, 6, 0x0005); nlge_mii_write_internal(sc->serdes_addr, 26, 7, 0x0001); nlge_mii_write_internal(sc->serdes_addr, 26, 8, 0x0000); nlge_mii_write_internal(sc->serdes_addr, 26, 9, 0x0000); nlge_mii_write_internal(sc->serdes_addr, 26,10, 0x0000); /* program GPIO values for serdes init parameters */ DELAY(100); mmio_gpio = xlr_io_mmio(XLR_IO_GPIO_OFFSET); xlr_write_reg(mmio_gpio, 0x20, 0x7e6802); xlr_write_reg(mmio_gpio, 0x10, 0x7104); DELAY(100); /* * This kludge is needed to setup serdes (?) 
clock correctly on some * XLS boards */ if ((xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XI || xlr_boot1_info.board_major_version == RMI_XLR_BOARD_ARIZONA_XII) && xlr_boot1_info.board_minor_version == 4) { /* use 125 Mhz instead of 156.25Mhz ref clock */ DELAY(100); xlr_write_reg(mmio_gpio, 0x10, 0x7103); xlr_write_reg(mmio_gpio, 0x21, 0x7103); DELAY(100); } /* enable autoneg - more magic */ phy = sc->phy_addr % 4 + 27; nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x1000); DELAY(100000); nlge_mii_write_internal(sc->pcs_addr, phy, 0, 0x0200); DELAY(100000); } static void nlge_intr(void *arg) { struct nlge_port_set *pset; struct nlge_softc *sc; struct nlge_softc *port_sc; xlr_reg_t *base; uint32_t intreg; uint32_t intr_status; int i; sc = arg; if (sc == NULL) { printf("warning: No port registered for interrupt\n"); return; } base = sc->base; intreg = NLGE_READ(base, R_INTREG); if (intreg & (1 << O_INTREG__MDInt)) { pset = sc->mdio_pset; if (pset == NULL) { printf("warning: No ports for MDIO interrupt\n"); return; } for (i = 0; i < pset->vec_sz; i++) { port_sc = pset->port_vec[i]; if (port_sc == NULL) continue; /* Ack phy interrupt - clear on read*/ intr_status = nlge_mii_read_internal(port_sc->mii_base, port_sc->phy_addr, 26); PDEBUG("Phy_%d: int_status=0x%08x\n", port_sc->phy_addr, intr_status); if (!(intr_status & 0x8000)) { /* no interrupt for this port */ continue; } if (intr_status & 0x2410) { /* update link status for port */ nlge_gmac_config_speed(port_sc, 1); } else { printf("%s: Unsupported phy interrupt" " (0x%08x)\n", device_get_nameunit(port_sc->nlge_dev), intr_status); } } } /* Clear the NA interrupt */ xlr_write_reg(base, R_INTREG, 0xffffffff); return; } static int nlge_irq_init(struct nlge_softc *sc) { struct resource irq_res; struct nlna_softc *na_sc; struct xlr_gmac_block_t *block_info; device_t na_dev; int ret; int irq_num; na_dev = device_get_parent(sc->nlge_dev); block_info = device_get_ivars(na_dev); irq_num = block_info->baseirq + sc->instance; irq_res.__r_i = (struct resource_i *)(intptr_t) (irq_num); ret = bus_setup_intr(sc->nlge_dev, &irq_res, INTR_TYPE_NET | INTR_MPSAFE, NULL, nlge_intr, sc, NULL); if (ret) { nlge_detach(sc->nlge_dev); device_printf(sc->nlge_dev, "couldn't set up irq: error=%d\n", ret); return (ENXIO); } PDEBUG("Setup intr for dev=%s, irq=%d\n", device_get_nameunit(sc->nlge_dev), irq_num); if (sc->instance == 0) { na_sc = device_get_softc(na_dev); sc->mdio_pset = &na_sc->mdio_set; } return (0); } static void nlge_irq_fini(struct nlge_softc *sc) { } static void nlge_hw_init(struct nlge_softc *sc) { struct xlr_gmac_port *port_info; xlr_reg_t *base; base = sc->base; port_info = device_get_ivars(sc->nlge_dev); sc->tx_bucket_id = port_info->tx_bucket_id; /* each packet buffer is 1536 bytes */ NLGE_WRITE(base, R_DESC_PACK_CTRL, (1 << O_DESC_PACK_CTRL__MaxEntry) | #ifdef NLGE_HW_CHKSUM (1 << O_DESC_PACK_CTRL__PrePadEnable) | #endif (MAX_FRAME_SIZE << O_DESC_PACK_CTRL__RegularSize)); NLGE_WRITE(base, R_STATCTRL, ((1 << O_STATCTRL__Sten) | (1 << O_STATCTRL__ClrCnt))); NLGE_WRITE(base, R_L2ALLOCCTRL, 0xffffffff); NLGE_WRITE(base, R_INTMASK, 0); nlge_set_mac_addr(sc); nlge_media_specific_init(sc); } static void nlge_sc_init(struct nlge_softc *sc, device_t dev, struct xlr_gmac_port *port_info) { memset(sc, 0, sizeof(*sc)); sc->nlge_dev = dev; sc->id = device_get_unit(dev); nlge_set_port_attribs(sc, port_info); } static void nlge_media_specific_init(struct nlge_softc *sc) { struct mii_data *media; struct bucket_size *bucket_sizes; bucket_sizes = 
xlr_board_info.bucket_sizes; switch (sc->port_type) { case XLR_RGMII: case XLR_SGMII: case XLR_XAUI: NLGE_UPDATE(sc->base, R_DESC_PACK_CTRL, (BYTE_OFFSET << O_DESC_PACK_CTRL__ByteOffset), (W_DESC_PACK_CTRL__ByteOffset << O_DESC_PACK_CTRL__ByteOffset)); NLGE_WRITE(sc->base, R_GMAC_TX0_BUCKET_SIZE + sc->instance, bucket_sizes->bucket[sc->tx_bucket_id]); if (sc->port_type != XLR_XAUI) { nlge_gmac_config_speed(sc, 1); if (sc->mii_bus) { media = (struct mii_data *)device_get_softc( sc->mii_bus); } } break; case XLR_XGMII: NLGE_WRITE(sc->base, R_BYTEOFFSET0, 0x2); NLGE_WRITE(sc->base, R_XGMACPADCALIBRATION, 0x30); NLGE_WRITE(sc->base, R_XGS_TX0_BUCKET_SIZE, bucket_sizes->bucket[sc->tx_bucket_id]); break; default: break; } } /* * Read the MAC address from the XLR boot registers. All port addresses * are identical except for the lowest octet. */ static void nlge_read_mac_addr(struct nlge_softc *sc) { int i, j; for (i = 0, j = 40; i < ETHER_ADDR_LEN && j >= 0; i++, j-= 8) sc->dev_addr[i] = (xlr_boot1_info.mac_addr >> j) & 0xff; sc->dev_addr[i - 1] += sc->id; /* last octet is port-specific */ } /* * Write the MAC address to the XLR MAC port. Also, set the address * masks and MAC filter configuration. */ static void nlge_set_mac_addr(struct nlge_softc *sc) { NLGE_WRITE(sc->base, R_MAC_ADDR0, ((sc->dev_addr[5] << 24) | (sc->dev_addr[4] << 16) | (sc->dev_addr[3] << 8) | (sc->dev_addr[2]))); NLGE_WRITE(sc->base, R_MAC_ADDR0 + 1, ((sc->dev_addr[1] << 24) | (sc-> dev_addr[0] << 16))); NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2, 0xffffffff); NLGE_WRITE(sc->base, R_MAC_ADDR_MASK2 + 1, 0xffffffff); NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3, 0xffffffff); NLGE_WRITE(sc->base, R_MAC_ADDR_MASK3 + 1, 0xffffffff); NLGE_WRITE(sc->base, R_MAC_FILTER_CONFIG, (1 << O_MAC_FILTER_CONFIG__BROADCAST_EN) | (1 << O_MAC_FILTER_CONFIG__ALL_MCAST_EN) | (1 << O_MAC_FILTER_CONFIG__MAC_ADDR0_VALID)); if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) { NLGE_UPDATE(sc->base, R_IPG_IFG, MAC_B2B_IPG, 0x7f); } } static int nlge_if_init(struct nlge_softc *sc) { struct ifnet *ifp; device_t dev; int error; error = 0; dev = sc->nlge_dev; NLGE_LOCK_INIT(sc, device_get_nameunit(dev)); ifp = sc->nlge_if = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_capabilities = 0; ifp->if_capenable = ifp->if_capabilities; ifp->if_ioctl = nlge_ioctl; ifp->if_init = nlge_init; ifp->if_hwassist = 0; ifp->if_snd.ifq_drv_maxlen = RGE_TX_Q_SIZE; IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&ifp->if_snd); ifmedia_init(&sc->nlge_mii.mii_media, 0, nlge_mediachange, nlge_mediastatus); ifmedia_add(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->nlge_mii.mii_media, IFM_ETHER | IFM_AUTO); sc->nlge_mii.mii_media.ifm_media = sc->nlge_mii.mii_media.ifm_cur->ifm_media; nlge_read_mac_addr(sc); ether_ifattach(ifp, sc->dev_addr); /* override if_transmit : per ifnet(9), do it after if_attach */ ifp->if_transmit = nlge_tx; fail: return (error); } static void nlge_mii_init(device_t dev, struct nlge_softc *sc) { int error; if (sc->port_type != XLR_XAUI && sc->port_type != XLR_XGMII) { NLGE_WRITE(sc->mii_base, R_MII_MGMT_CONFIG, 0x07); } error = mii_attach(dev, &sc->mii_bus, sc->nlge_if, nlge_mediachange, nlge_mediastatus, BMSR_DEFCAPMASK, sc->phy_addr, MII_OFFSET_ANY, 0); if (error) { device_printf(dev, 
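/*
 * If mii_attach() fails, sc->mii_bus stays NULL: nlge_ioctl() then
 * treats SIOCSIFMEDIA/SIOCGIFMEDIA as no-ops and the port keeps the
 * static IFM_AUTO media installed by nlge_if_init().
 */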
"attaching PHYs failed\n"); sc->mii_bus = NULL; } if (sc->mii_bus != NULL) { /* * Enable all MDIO interrupts in the phy. RX_ER bit seems to get * set about every 1 sec in GigE mode, ignore it for now... */ nlge_mii_write_internal(sc->mii_base, sc->phy_addr, 25, 0xfffffffe); } } /* * Read a PHY register. * * Input parameters: * mii_base - Base address of MII * phyaddr - PHY's address * regidx = index of register to read * * Return value: * value read, or 0 if an error occurred. */ static int nlge_mii_read_internal(xlr_reg_t *mii_base, int phyaddr, int regidx) { int i, val; /* setup the phy reg to be used */ NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS, (phyaddr << 8) | (regidx << 0)); /* Issue the read command */ NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND, (1 << O_MII_MGMT_COMMAND__rstat)); /* poll for the read cycle to complete */ for (i = 0; i < PHY_STATUS_RETRIES; i++) { if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0) break; } /* clear the read cycle */ NLGE_WRITE(mii_base, R_MII_MGMT_COMMAND, 0); if (i == PHY_STATUS_RETRIES) { return (0xffffffff); } val = NLGE_READ(mii_base, R_MII_MGMT_STATUS); return (val); } /* * Write a value to a PHY register. * * Input parameters: * mii_base - Base address of MII * phyaddr - PHY to use * regidx - register within the PHY * regval - data to write to register * * Return value: * nothing */ static void nlge_mii_write_internal(xlr_reg_t *mii_base, int phyaddr, int regidx, int regval) { int i; NLGE_WRITE(mii_base, R_MII_MGMT_ADDRESS, (phyaddr << 8) | (regidx << 0)); /* Write the data which starts the write cycle */ NLGE_WRITE(mii_base, R_MII_MGMT_WRITE_DATA, regval); /* poll for the write cycle to complete */ for (i = 0; i < PHY_STATUS_RETRIES; i++) { if (NLGE_READ(mii_base, R_MII_MGMT_INDICATORS) == 0) break; } } /* * Function to optimize the use of p2d descriptors for the given PDU. * As it is on the fast-path (called during packet transmission), it * described in more detail than the initialization functions. * * Input: mbuf chain (MC), pointer to fmn message * Input constraints: None * Output: FMN message to transmit the data in MC * Return values: 0 - success * 1 - MC cannot be handled (see Limitations below) * 2 - MC cannot be handled presently (maybe worth re-trying) * Other output: Number of entries filled in the FMN message * * Output structure/constraints: * 1. Max 3 p2d's + 1 zero-len (ZL) p2d with virtual address of MC. * 2. 3 p2d's + 1 p2p with max 14 p2d's (ZL p2d not required in this case). * 3. Each p2d points to physically contiguous chunk of data (subject to * entire MC requiring max 17 p2d's). * Limitations: * 1. MC's that require more than 17 p2d's are not handled. * Benefits: MC's that require <= 3 p2d's avoid the overhead of allocating * the p2p structure. Small packets (which typically give low * performance) are expected to have a small MC that takes * advantage of this. 
*/ static int prepare_fmn_message(struct nlge_softc *sc, struct msgrng_msg *fmn_msg, uint32_t *n_entries, struct mbuf *mbuf_chain, uint64_t fb_stn_id, struct nlge_tx_desc **tx_desc) { struct mbuf *m; struct nlge_tx_desc *p2p; uint64_t *cur_p2d; uint64_t fbpaddr; vm_offset_t buf; vm_paddr_t paddr; int msg_sz, p2p_sz, len, frag_sz; /* Num entries per FMN msg is 4 for XLR/XLS */ const int FMN_SZ = sizeof(*fmn_msg) / sizeof(uint64_t); msg_sz = p2p_sz = 0; p2p = NULL; cur_p2d = &fmn_msg->msg0; for (m = mbuf_chain; m != NULL; m = m->m_next) { buf = (vm_offset_t) m->m_data; len = m->m_len; while (len) { if (msg_sz == (FMN_SZ - 1)) { p2p = uma_zalloc(nl_tx_desc_zone, M_NOWAIT); if (p2p == NULL) { return (2); } /* * Save the virtual address in the descriptor, * it makes freeing easy. */ p2p->frag[XLR_MAX_TX_FRAGS] = (uint64_t)(vm_offset_t)p2p; cur_p2d = &p2p->frag[0]; } else if (msg_sz == (FMN_SZ - 2 + XLR_MAX_TX_FRAGS)) { uma_zfree(nl_tx_desc_zone, p2p); return (1); } paddr = vtophys(buf); frag_sz = PAGE_SIZE - (buf & PAGE_MASK); if (len < frag_sz) frag_sz = len; *cur_p2d++ = (127ULL << 54) | ((uint64_t)frag_sz << 40) | paddr; msg_sz++; if (p2p != NULL) p2p_sz++; len -= frag_sz; buf += frag_sz; } } if (msg_sz == 0) { printf("Zero-length mbuf chain ??\n"); *n_entries = msg_sz ; return (0); } /* set eop in most-recent p2d */ cur_p2d[-1] |= (1ULL << 63); #ifdef __mips_n64 /* * On n64, we cannot store our mbuf pointer(64 bit) in the freeback * message (40bit available), so we put the mbuf in m_nextpkt and * use the physical addr of that in freeback message. */ mbuf_chain->m_nextpkt = mbuf_chain; fbpaddr = vtophys(&mbuf_chain->m_nextpkt); #else /* Careful, don't sign extend when going to 64bit */ fbpaddr = (uint64_t)(uintptr_t)mbuf_chain; #endif *cur_p2d = (1ULL << 63) | ((uint64_t)fb_stn_id << 54) | fbpaddr; *tx_desc = p2p; if (p2p != NULL) { paddr = vtophys(p2p); p2p_sz++; fmn_msg->msg3 = (1ULL << 62) | ((uint64_t)fb_stn_id << 54) | ((uint64_t)(p2p_sz * 8) << 40) | paddr; *n_entries = FMN_SZ; } else { *n_entries = msg_sz + 1; } return (0); } static int send_fmn_msg_tx(struct nlge_softc *sc, struct msgrng_msg *msg, uint32_t n_entries) { uint32_t msgrng_flags; int ret; int i = 0; do { msgrng_flags = msgrng_access_enable(); ret = message_send(n_entries, MSGRNG_CODE_MAC, sc->tx_bucket_id, msg); msgrng_restore(msgrng_flags); if (ret == 0) return (0); i++; } while (i < 100000); device_printf(sc->nlge_dev, "Too many credit fails in tx path\n"); return (1); } static void release_tx_desc(vm_paddr_t paddr) { struct nlge_tx_desc *tx_desc; uint32_t sr; uint64_t vaddr; paddr += (XLR_MAX_TX_FRAGS * sizeof(uint64_t)); sr = xlr_enable_kx(); vaddr = xlr_paddr_ld(paddr); xlr_restore_kx(sr); tx_desc = (struct nlge_tx_desc*)(intptr_t)vaddr; uma_zfree(nl_tx_desc_zone, tx_desc); } static void * get_buf(void) { struct mbuf *m_new; uint64_t *md; #ifdef INVARIANTS vm_paddr_t temp1, temp2; #endif if ((m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR)) == NULL) return (NULL); m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_adj(m_new, XLR_CACHELINE_SIZE - ((uintptr_t)m_new->m_data & 0x1f)); md = (uint64_t *)m_new->m_data; md[0] = (intptr_t)m_new; /* Back Ptr */ md[1] = 0xf00bad; m_adj(m_new, XLR_CACHELINE_SIZE); #ifdef INVARIANTS temp1 = vtophys((vm_offset_t) m_new->m_data); temp2 = vtophys((vm_offset_t) m_new->m_data + 1536); if ((temp1 + 1536) != temp2) panic("ALLOCED BUFFER IS NOT CONTIGUOUS\n"); #endif return ((void *)m_new->m_data); } static int nlge_gmac_config_speed(struct nlge_softc *sc, int quick) { struct mii_data *md; 
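/*
 * Outline of the speed negotiation below: poll BMSR until
 * autonegotiation completes with link up, classify the speed from the
 * PHY's vendor status register (reg 28, bits 4:3), then program the
 * MAC to match:
 *
 *	R_INTERFACE_CONTROL <- sgmii_speed[sc->speed]	(non-RGMII only)
 *	R_MAC_CONFIG_2	    <- 0x7117 (10/100) or 0x7217 (1000)
 *	R_CORECONTROL	    <- core_ctl[sc->speed]
 */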
xlr_reg_t *mmio; int bmsr, n_tries, max_tries; int core_ctl[] = { 0x2, 0x1, 0x0, 0x1 }; int sgmii_speed[] = { SGMII_SPEED_10, SGMII_SPEED_100, SGMII_SPEED_1000, SGMII_SPEED_100 }; /* default to 100Mbps */ char *speed_str[] = { "10", "100", "1000", "unknown, defaulting to 100" }; int link_state = LINK_STATE_DOWN; if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII) return 0; md = NULL; mmio = sc->base; if (sc->mii_base != NULL) { max_tries = (quick == 1) ? 100 : 4000; bmsr = 0; for (n_tries = 0; n_tries < max_tries; n_tries++) { bmsr = nlge_mii_read_internal(sc->mii_base, sc->phy_addr, MII_BMSR); if ((bmsr & BMSR_ACOMP) && (bmsr & BMSR_LINK)) break; /* Auto-negotiation is complete and link is up */ DELAY(1000); } bmsr &= BMSR_LINK; sc->link = (bmsr == 0) ? xlr_mac_link_down : xlr_mac_link_up; sc->speed = nlge_mii_read_internal(sc->mii_base, sc->phy_addr, 28); sc->speed = (sc->speed >> 3) & 0x03; if (sc->link == xlr_mac_link_up) { link_state = LINK_STATE_UP; nlge_sgmii_init(sc); } if (sc->mii_bus) md = (struct mii_data *)device_get_softc(sc->mii_bus); } if (sc->port_type != XLR_RGMII) NLGE_WRITE(mmio, R_INTERFACE_CONTROL, sgmii_speed[sc->speed]); if (sc->speed == xlr_mac_speed_10 || sc->speed == xlr_mac_speed_100 || sc->speed == xlr_mac_speed_rsvd) { NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7117); } else if (sc->speed == xlr_mac_speed_1000) { NLGE_WRITE(mmio, R_MAC_CONFIG_2, 0x7217); if (md != NULL) { ifmedia_set(&md->mii_media, IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, IFM_FDX, md->mii_instance)); } } NLGE_WRITE(mmio, R_CORECONTROL, core_ctl[sc->speed]); if_link_state_change(sc->nlge_if, link_state); printf("%s: [%sMbps]\n", device_get_nameunit(sc->nlge_dev), speed_str[sc->speed]); return (0); } /* * This function is called for each port that was added to the device tree * and it initializes the following port attributes: * - type * - base (base address to access port-specific registers) * - mii_base * - phy_addr */ static void nlge_set_port_attribs(struct nlge_softc *sc, struct xlr_gmac_port *port_info) { sc->instance = port_info->instance % 4; /* TBD: will not work for SPI-4 */ sc->port_type = port_info->type; sc->base = xlr_io_mmio(port_info->base_addr); sc->mii_base = xlr_io_mmio(port_info->mii_addr); if (port_info->pcs_addr != 0) sc->pcs_addr = xlr_io_mmio(port_info->pcs_addr); if (port_info->serdes_addr != 0) sc->serdes_addr = xlr_io_mmio(port_info->serdes_addr); sc->phy_addr = port_info->phy_addr; PDEBUG("Port%d: base=%p, mii_base=%p, phy_addr=%d\n", sc->id, sc->base, sc->mii_base, sc->phy_addr); } /* ------------------------------------------------------------------------ */ /* Debug dump functions */ #ifdef DEBUG static void dump_reg(xlr_reg_t *base, uint32_t offset, char *name) { int val; val = NLGE_READ(base, offset); printf("%-30s: 0x%8x 0x%8x\n", name, offset, val); } #define STRINGIFY(x) #x static void dump_na_registers(xlr_reg_t *base_addr, int port_id) { PDEBUG("Register dump for NA (of port=%d)\n", port_id); dump_reg(base_addr, R_PARSERCONFIGREG, STRINGIFY(R_PARSERCONFIGREG)); PDEBUG("Tx bucket sizes\n"); dump_reg(base_addr, R_GMAC_JFR0_BUCKET_SIZE, STRINGIFY(R_GMAC_JFR0_BUCKET_SIZE)); dump_reg(base_addr, R_GMAC_RFR0_BUCKET_SIZE, STRINGIFY(R_GMAC_RFR0_BUCKET_SIZE)); dump_reg(base_addr, R_GMAC_TX0_BUCKET_SIZE, STRINGIFY(R_GMAC_TX0_BUCKET_SIZE)); dump_reg(base_addr, R_GMAC_TX1_BUCKET_SIZE, STRINGIFY(R_GMAC_TX1_BUCKET_SIZE)); dump_reg(base_addr, R_GMAC_TX2_BUCKET_SIZE, STRINGIFY(R_GMAC_TX2_BUCKET_SIZE)); dump_reg(base_addr, R_GMAC_TX3_BUCKET_SIZE, 
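/*
 * STRINGIFY(x) expands to #x, pairing each register offset with its
 * own name; e.g. dump_reg(base_addr, R_INTMASK, STRINGIFY(R_INTMASK))
 * prints "R_INTMASK", the offset and the current value via dump_reg().
 */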
STRINGIFY(R_GMAC_TX3_BUCKET_SIZE)); dump_reg(base_addr, R_GMAC_JFR1_BUCKET_SIZE, STRINGIFY(R_GMAC_JFR1_BUCKET_SIZE)); dump_reg(base_addr, R_GMAC_RFR1_BUCKET_SIZE, STRINGIFY(R_GMAC_RFR1_BUCKET_SIZE)); dump_reg(base_addr, R_TXDATAFIFO0, STRINGIFY(R_TXDATAFIFO0)); dump_reg(base_addr, R_TXDATAFIFO1, STRINGIFY(R_TXDATAFIFO1)); } static void dump_gmac_registers(struct nlge_softc *sc) { xlr_reg_t *base_addr = sc->base; int port_id = sc->instance; PDEBUG("Register dump for port=%d\n", port_id); if (sc->port_type == XLR_RGMII || sc->port_type == XLR_SGMII) { dump_reg(base_addr, R_MAC_CONFIG_1, STRINGIFY(R_MAC_CONFIG_1)); dump_reg(base_addr, R_MAC_CONFIG_2, STRINGIFY(R_MAC_CONFIG_2)); dump_reg(base_addr, R_IPG_IFG, STRINGIFY(R_IPG_IFG)); dump_reg(base_addr, R_HALF_DUPLEX, STRINGIFY(R_HALF_DUPLEX)); dump_reg(base_addr, R_MAXIMUM_FRAME_LENGTH, STRINGIFY(R_MAXIMUM_FRAME_LENGTH)); dump_reg(base_addr, R_TEST, STRINGIFY(R_TEST)); dump_reg(base_addr, R_MII_MGMT_CONFIG, STRINGIFY(R_MII_MGMT_CONFIG)); dump_reg(base_addr, R_MII_MGMT_COMMAND, STRINGIFY(R_MII_MGMT_COMMAND)); dump_reg(base_addr, R_MII_MGMT_ADDRESS, STRINGIFY(R_MII_MGMT_ADDRESS)); dump_reg(base_addr, R_MII_MGMT_WRITE_DATA, STRINGIFY(R_MII_MGMT_WRITE_DATA)); dump_reg(base_addr, R_MII_MGMT_STATUS, STRINGIFY(R_MII_MGMT_STATUS)); dump_reg(base_addr, R_MII_MGMT_INDICATORS, STRINGIFY(R_MII_MGMT_INDICATORS)); dump_reg(base_addr, R_INTERFACE_CONTROL, STRINGIFY(R_INTERFACE_CONTROL)); dump_reg(base_addr, R_INTERFACE_STATUS, STRINGIFY(R_INTERFACE_STATUS)); } else if (sc->port_type == XLR_XAUI || sc->port_type == XLR_XGMII) { dump_reg(base_addr, R_XGMAC_CONFIG_0, STRINGIFY(R_XGMAC_CONFIG_0)); dump_reg(base_addr, R_XGMAC_CONFIG_1, STRINGIFY(R_XGMAC_CONFIG_1)); dump_reg(base_addr, R_XGMAC_CONFIG_2, STRINGIFY(R_XGMAC_CONFIG_2)); dump_reg(base_addr, R_XGMAC_CONFIG_3, STRINGIFY(R_XGMAC_CONFIG_3)); dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_LS, STRINGIFY(R_XGMAC_STATION_ADDRESS_LS)); dump_reg(base_addr, R_XGMAC_STATION_ADDRESS_MS, STRINGIFY(R_XGMAC_STATION_ADDRESS_MS)); dump_reg(base_addr, R_XGMAC_MAX_FRAME_LEN, STRINGIFY(R_XGMAC_MAX_FRAME_LEN)); dump_reg(base_addr, R_XGMAC_REV_LEVEL, STRINGIFY(R_XGMAC_REV_LEVEL)); dump_reg(base_addr, R_XGMAC_MIIM_COMMAND, STRINGIFY(R_XGMAC_MIIM_COMMAND)); dump_reg(base_addr, R_XGMAC_MIIM_FILED, STRINGIFY(R_XGMAC_MIIM_FILED)); dump_reg(base_addr, R_XGMAC_MIIM_CONFIG, STRINGIFY(R_XGMAC_MIIM_CONFIG)); dump_reg(base_addr, R_XGMAC_MIIM_LINK_FAIL_VECTOR, STRINGIFY(R_XGMAC_MIIM_LINK_FAIL_VECTOR)); dump_reg(base_addr, R_XGMAC_MIIM_INDICATOR, STRINGIFY(R_XGMAC_MIIM_INDICATOR)); } dump_reg(base_addr, R_MAC_ADDR0, STRINGIFY(R_MAC_ADDR0)); dump_reg(base_addr, R_MAC_ADDR0 + 1, STRINGIFY(R_MAC_ADDR0+1)); dump_reg(base_addr, R_MAC_ADDR1, STRINGIFY(R_MAC_ADDR1)); dump_reg(base_addr, R_MAC_ADDR2, STRINGIFY(R_MAC_ADDR2)); dump_reg(base_addr, R_MAC_ADDR3, STRINGIFY(R_MAC_ADDR3)); dump_reg(base_addr, R_MAC_ADDR_MASK2, STRINGIFY(R_MAC_ADDR_MASK2)); dump_reg(base_addr, R_MAC_ADDR_MASK3, STRINGIFY(R_MAC_ADDR_MASK3)); dump_reg(base_addr, R_MAC_FILTER_CONFIG, STRINGIFY(R_MAC_FILTER_CONFIG)); dump_reg(base_addr, R_TX_CONTROL, STRINGIFY(R_TX_CONTROL)); dump_reg(base_addr, R_RX_CONTROL, STRINGIFY(R_RX_CONTROL)); dump_reg(base_addr, R_DESC_PACK_CTRL, STRINGIFY(R_DESC_PACK_CTRL)); dump_reg(base_addr, R_STATCTRL, STRINGIFY(R_STATCTRL)); dump_reg(base_addr, R_L2ALLOCCTRL, STRINGIFY(R_L2ALLOCCTRL)); dump_reg(base_addr, R_INTMASK, STRINGIFY(R_INTMASK)); dump_reg(base_addr, R_INTREG, STRINGIFY(R_INTREG)); dump_reg(base_addr, R_TXRETRY, STRINGIFY(R_TXRETRY)); 
dump_reg(base_addr, R_CORECONTROL, STRINGIFY(R_CORECONTROL)); dump_reg(base_addr, R_BYTEOFFSET0, STRINGIFY(R_BYTEOFFSET0)); dump_reg(base_addr, R_BYTEOFFSET1, STRINGIFY(R_BYTEOFFSET1)); dump_reg(base_addr, R_L2TYPE_0, STRINGIFY(R_L2TYPE_0)); dump_na_registers(base_addr, port_id); } static void dump_fmn_cpu_credits_for_gmac(struct xlr_board_info *board, int gmac_id) { struct stn_cc *cc; int gmac_bucket_ids[] = { 97, 98, 99, 100, 101, 103 }; int j, k, r, c; int n_gmac_buckets; n_gmac_buckets = nitems(gmac_bucket_ids); for (j = 0; j < 8; j++) { // for each cpu cc = board->credit_configs[j]; printf("Credits for Station CPU_%d ---> GMAC buckets (tx path)\n", j); for (k = 0; k < n_gmac_buckets; k++) { r = gmac_bucket_ids[k] / 8; c = gmac_bucket_ids[k] % 8; printf (" --> gmac%d_bucket_%-3d: credits=%d\n", gmac_id, gmac_bucket_ids[k], cc->counters[r][c]); } } } static void dump_fmn_gmac_credits(struct xlr_board_info *board, int gmac_id) { struct stn_cc *cc; int j, k; cc = board->gmac_block[gmac_id].credit_config; printf("Credits for Station: GMAC_%d ---> CPU buckets (rx path)\n", gmac_id); for (j = 0; j < 8; j++) { // for each cpu printf(" ---> cpu_%d\n", j); for (k = 0; k < 8; k++) { // for each bucket in cpu printf(" ---> bucket_%d: credits=%d\n", j * 8 + k, cc->counters[j][k]); } } } static void dump_board_info(struct xlr_board_info *board) { struct xlr_gmac_block_t *gm; int i, k; printf("cpu=%x ", xlr_revision()); printf("board_version: major=%llx, minor=%llx\n", xlr_boot1_info.board_major_version, xlr_boot1_info.board_minor_version); printf("is_xls=%d, nr_cpus=%d, usb=%s, cfi=%s, ata=%s\npci_irq=%d," "gmac_ports=%d\n", board->is_xls, board->nr_cpus, board->usb ? "Yes" : "No", board->cfi ? "Yes": "No", board->ata ? "Yes" : "No", board->pci_irq, board->gmacports); printf("FMN: Core-station bucket sizes\n"); for (i = 0; i < 128; i++) { if (i && ((i % 16) == 0)) printf("\n"); printf ("b[%d] = %d ", i, board->bucket_sizes->bucket[i]); } printf("\n"); for (i = 0; i < 3; i++) { gm = &board->gmac_block[i]; printf("RNA_%d: type=%d, enabled=%s, mode=%d, station_id=%d," "station_txbase=%d, station_rfr=%d ", i, gm->type, gm->enabled ? "Yes" : "No", gm->mode, gm->station_id, gm->station_txbase, gm->station_rfr); printf("n_ports=%d, baseaddr=%p, baseirq=%d, baseinst=%d\n", gm->num_ports, (xlr_reg_t *)gm->baseaddr, gm->baseirq, gm->baseinst); } for (k = 0; k < 3; k++) { // for each NA dump_fmn_cpu_credits_for_gmac(board, k); dump_fmn_gmac_credits(board, k); } } static void dump_mac_stats(struct nlge_softc *sc) { xlr_reg_t *addr; uint32_t pkts_tx, pkts_rx; addr = sc->base; pkts_rx = NLGE_READ(sc->base, R_RPKT); pkts_tx = NLGE_READ(sc->base, R_TPKT); printf("[nlge_%d mac stats]: pkts_tx=%u, pkts_rx=%u\n", sc->id, pkts_tx, pkts_rx); if (pkts_rx > 0) { uint32_t r; /* dump all rx counters. we need this because pkts_rx includes bad packets. */ for (r = R_RFCS; r <= R_ROVR; r++) printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r, NLGE_READ(sc->base, r)); } if (pkts_tx > 0) { uint32_t r; /* dump all tx counters. might be useful for debugging. 
*/ for (r = R_TMCA; r <= R_TFRG; r++) { if ((r == (R_TNCL + 1)) || (r == (R_TNCL + 2))) continue; printf("[nlge_%d mac stats]: [0x%x]=%u\n", sc->id, r, NLGE_READ(sc->base, r)); } } } static void dump_mii_regs(struct nlge_softc *sc) { uint32_t mii_regs[] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xf, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e}; int i, n_regs; if (sc->mii_base == NULL || sc->mii_bus == NULL) return; n_regs = nitems(mii_regs); for (i = 0; i < n_regs; i++) { printf("[mii_0x%x] = %x\n", mii_regs[i], nlge_mii_read_internal(sc->mii_base, sc->phy_addr, mii_regs[i])); } } static void dump_ifmedia(struct ifmedia *ifm) { printf("ifm_mask=%08x, ifm_media=%08x, cur=%p\n", ifm->ifm_mask, ifm->ifm_media, ifm->ifm_cur); if (ifm->ifm_cur != NULL) { printf("Cur attribs: ifmedia_entry.ifm_media=%08x," " ifmedia_entry.ifm_data=%08x\n", ifm->ifm_cur->ifm_media, ifm->ifm_cur->ifm_data); } } static void dump_mii_data(struct mii_data *mii) { dump_ifmedia(&mii->mii_media); printf("ifp=%p, mii_instance=%d, mii_media_status=%08x," " mii_media_active=%08x\n", mii->mii_ifp, mii->mii_instance, mii->mii_media_status, mii->mii_media_active); } static void dump_pcs_regs(struct nlge_softc *sc, int phy) { int i, val; printf("PCS regs from %p for phy=%d\n", sc->pcs_addr, phy); for (i = 0; i < 18; i++) { if (i == 2 || i == 3 || (i >= 9 && i <= 14)) continue; val = nlge_mii_read_internal(sc->pcs_addr, phy, i); printf("PHY:%d pcs[0x%x] is 0x%x\n", phy, i, val); } } #endif Index: head/sys/powerpc/include/bus_dma.h =================================================================== --- head/sys/powerpc/include/bus_dma.h (revision 303889) +++ head/sys/powerpc/include/bus_dma.h (revision 303890) @@ -1,37 +1,35 @@ /*- * Copyright (c) 2005 Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* $FreeBSD$ */ #ifndef _POWERPC_BUS_DMA_H_ #define _POWERPC_BUS_DMA_H_ #include -struct device; - -int bus_dma_tag_set_iommu(bus_dma_tag_t, struct device *iommu, void *cookie); +int bus_dma_tag_set_iommu(bus_dma_tag_t, device_t iommu, void *cookie); #endif /* _POWERPC_BUS_DMA_H_ */ Index: head/sys/powerpc/powerpc/busdma_machdep.c =================================================================== --- head/sys/powerpc/powerpc/busdma_machdep.c (revision 303889) +++ head/sys/powerpc/powerpc/busdma_machdep.c (revision 303890) @@ -1,1220 +1,1220 @@ /*- * Copyright (c) 1997, 1998 Justin T. Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ /* * From amd64/busdma_machdep.c, r204214 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "iommu_if.h" #define MAX_BPAGES MIN(8192, physmem/40) struct bounce_zone; struct bus_dma_tag { bus_dma_tag_t parent; bus_size_t alignment; bus_addr_t boundary; bus_addr_t lowaddr; bus_addr_t highaddr; bus_dma_filter_t *filter; void *filterarg; bus_size_t maxsize; u_int nsegments; bus_size_t maxsegsz; int flags; int ref_count; int map_count; bus_dma_lock_t *lockfunc; void *lockfuncarg; struct bounce_zone *bounce_zone; device_t iommu; void *iommu_cookie; }; struct bounce_page { vm_offset_t vaddr; /* kva of bounce buffer */ bus_addr_t busaddr; /* Physical address */ vm_offset_t datavaddr; /* kva of client data */ vm_page_t datapage; /* physical page of client data */ vm_offset_t dataoffs; /* page offset of client data */ bus_size_t datacount; /* client data count */ STAILQ_ENTRY(bounce_page) links; }; int busdma_swi_pending; struct bounce_zone { STAILQ_ENTRY(bounce_zone) links; STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; int total_bpages; int free_bpages; int reserved_bpages; int active_bpages; int total_bounced; int total_deferred; int map_count; bus_size_t alignment; bus_addr_t lowaddr; char zoneid[8]; char lowaddrid[20]; struct sysctl_ctx_list sysctl_tree; struct sysctl_oid *sysctl_tree_top; }; static struct mtx bounce_lock; static int total_bpages; static int busdma_zonecount; static STAILQ_HEAD(, bounce_zone) bounce_zone_list; static SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, "Total bounce pages"); struct bus_dmamap { struct bp_list bpages; int pagesneeded; int pagesreserved; bus_dma_tag_t dmat; struct memdesc mem; bus_dma_segment_t *segments; int nsegs; bus_dmamap_callback_t *callback; void *callback_arg; STAILQ_ENTRY(bus_dmamap) links; int contigalloc; }; static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; static void init_bounce_pages(void *dummy); static int alloc_bounce_zone(bus_dma_tag_t dmat); static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit); static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_addr_t addr, bus_size_t size); static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr); /* * Return true if a match is made. * * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. * * If paddr is within the bounds of the dma tag then call the filter callback * to check for a match, if there is no filter callback then assume a match. */ static __inline int run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) { int retval; retval = 0; do { if (dmat->filter == NULL && dmat->iommu == NULL && paddr > dmat->lowaddr && paddr <= dmat->highaddr) retval = 1; if (dmat->filter == NULL && (paddr & (dmat->alignment - 1)) != 0) retval = 1; if (dmat->filter != NULL && (*dmat->filter)(dmat->filterarg, paddr) != 0) retval = 1; dmat = dmat->parent; } while (retval == 0 && dmat != NULL); return (retval); } /* * Convenience function for manipulating driver locks from busdma (during * busdma_swi, for example). 
Drivers that don't provide their own locks * should specify &Giant to dmat->lockfuncarg. Drivers that use their own * non-mutex locking scheme don't have to use this at all. */ void busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) { struct mtx *dmtx; dmtx = (struct mtx *)arg; switch (op) { case BUS_DMA_LOCK: mtx_lock(dmtx); break; case BUS_DMA_UNLOCK: mtx_unlock(dmtx); break; default: panic("Unknown operation 0x%x for busdma_lock_mutex!", op); } } /* * dflt_lock should never get called. It gets put into the dma tag when * lockfunc == NULL, which is only valid if the maps that are associated * with the tag are meant to never be deferred. * XXX Should have a way to identify which driver is responsible here. */ static void dflt_lock(void *arg, bus_dma_lock_op_t op) { panic("driver error: busdma dflt_lock called"); } #define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 #define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 /* * Allocate a device specific dma_tag. */ int bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, void *lockfuncarg, bus_dma_tag_t *dmat) { bus_dma_tag_t newtag; int error = 0; /* Basic sanity checking */ if (boundary != 0 && boundary < maxsegsz) maxsegsz = boundary; if (maxsegsz == 0) { return (EINVAL); } /* Return a NULL tag on failure */ *dmat = NULL; newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_DEVBUF, M_ZERO | M_NOWAIT); if (newtag == NULL) { CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, 0, error); return (ENOMEM); } newtag->parent = parent; newtag->alignment = alignment; newtag->boundary = boundary; newtag->lowaddr = trunc_page((vm_paddr_t)lowaddr) + (PAGE_SIZE - 1); newtag->highaddr = trunc_page((vm_paddr_t)highaddr) + (PAGE_SIZE - 1); newtag->filter = filter; newtag->filterarg = filterarg; newtag->maxsize = maxsize; newtag->nsegments = nsegments; newtag->maxsegsz = maxsegsz; newtag->flags = flags; newtag->ref_count = 1; /* Count ourself */ newtag->map_count = 0; if (lockfunc != NULL) { newtag->lockfunc = lockfunc; newtag->lockfuncarg = lockfuncarg; } else { newtag->lockfunc = dflt_lock; newtag->lockfuncarg = NULL; } /* Take into account any restrictions imposed by our parent tag */ if (parent != NULL) { newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); if (newtag->boundary == 0) newtag->boundary = parent->boundary; else if (parent->boundary != 0) newtag->boundary = MIN(parent->boundary, newtag->boundary); if (newtag->filter == NULL) { /* * Short circuit looking at our parent directly * since we have encapsulated all of its information */ newtag->filter = parent->filter; newtag->filterarg = parent->filterarg; newtag->parent = parent->parent; } if (newtag->parent != NULL) atomic_add_int(&parent->ref_count, 1); newtag->iommu = parent->iommu; newtag->iommu_cookie = parent->iommu_cookie; } if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL) newtag->flags |= BUS_DMA_COULD_BOUNCE; if (newtag->alignment > 1) newtag->flags |= BUS_DMA_COULD_BOUNCE; if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && (flags & BUS_DMA_ALLOCNOW) != 0) { struct bounce_zone *bz; /* Must bounce */ if ((error = alloc_bounce_zone(newtag)) != 0) { free(newtag, M_DEVBUF); return (error); } bz = newtag->bounce_zone; if (ptoa(bz->total_bpages) < maxsize) { int pages; pages = atop(maxsize) -
bz->total_bpages; /* Add pages to our bounce pool */ if (alloc_bounce_pages(newtag, pages) < pages) error = ENOMEM; } /* Performed initial allocation */ newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; } if (error != 0) { free(newtag, M_DEVBUF); } else { *dmat = newtag; } CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); return (error); } int bus_dma_tag_destroy(bus_dma_tag_t dmat) { bus_dma_tag_t dmat_copy; int error; error = 0; dmat_copy = dmat; if (dmat != NULL) { if (dmat->map_count != 0) { error = EBUSY; goto out; } while (dmat != NULL) { bus_dma_tag_t parent; parent = dmat->parent; atomic_subtract_int(&dmat->ref_count, 1); if (dmat->ref_count == 0) { free(dmat, M_DEVBUF); /* * Last reference count, so * release our reference * count on our parent. */ dmat = parent; } else dmat = NULL; } } out: CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); return (error); } /* * Allocate a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) { int error; error = 0; *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO); if (*mapp == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (ENOMEM); } /* * Bouncing might be required if the driver asks for an active * exclusion region, a data alignment that is stricter than 1, and/or * an active address boundary. */ if (dmat->flags & BUS_DMA_COULD_BOUNCE) { /* Must bounce */ struct bounce_zone *bz; int maxpages; if (dmat->bounce_zone == NULL) { if ((error = alloc_bounce_zone(dmat)) != 0) return (error); } bz = dmat->bounce_zone; /* Initialize the new map */ STAILQ_INIT(&((*mapp)->bpages)); /* * Attempt to add pages to our pool on a per-instance * basis up to a sane limit. */ if (dmat->alignment > 1) maxpages = MAX_BPAGES; else maxpages = MIN(MAX_BPAGES, Maxmem -atop(dmat->lowaddr)); if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 || (bz->map_count > 0 && bz->total_bpages < maxpages)) { int pages; pages = MAX(atop(dmat->maxsize), 1); pages = MIN(maxpages - bz->total_bpages, pages); pages = MAX(pages, 1); if (alloc_bounce_pages(dmat, pages) < pages) error = ENOMEM; if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) { if (error == 0) dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; } else { error = 0; } } bz->map_count++; } (*mapp)->nsegs = 0; (*mapp)->segments = (bus_dma_segment_t *)malloc( sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF, M_NOWAIT); if ((*mapp)->segments == NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); return (ENOMEM); } if (error == 0) dmat->map_count++; CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, error); return (error); } /* * Destroy a handle for mapping from kva/uva/physical * address space into bus device space. */ int bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) { if (dmat->flags & BUS_DMA_COULD_BOUNCE) { if (STAILQ_FIRST(&map->bpages) != NULL) { CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, EBUSY); return (EBUSY); } if (dmat->bounce_zone) dmat->bounce_zone->map_count--; } free(map->segments, M_DEVBUF); free(map, M_DEVBUF); dmat->map_count--; CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); return (0); } /* * Allocate a piece of memory that can be efficiently mapped into * bus device space based on the constraints listed in the dma tag. * A dmamap for use with dmamap_load is also allocated.
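 *
 * Editor's sketch (not part of the original file; "dmat" is a hypothetical,
 * already-created tag): a typical caller pairs this with bus_dmamem_free(),
 * using only the flags this function itself honors:
 *
 *	void *va;
 *	bus_dmamap_t map;
 *
 *	if (bus_dmamem_alloc(dmat, &va, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
 *	    &map) != 0)
 *		return (ENOMEM);
 *	... use va for DMA, loading it with the bus_dmamap_load*() calls ...
 *	bus_dmamem_free(dmat, va, map);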
*/ int bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags, bus_dmamap_t *mapp) { vm_memattr_t attr; int mflags; if (flags & BUS_DMA_NOWAIT) mflags = M_NOWAIT; else mflags = M_WAITOK; bus_dmamap_create(dmat, flags, mapp); if (flags & BUS_DMA_ZERO) mflags |= M_ZERO; #ifdef NOTYET if (flags & BUS_DMA_NOCACHE) attr = VM_MEMATTR_UNCACHEABLE; else #endif attr = VM_MEMATTR_DEFAULT; /* * XXX: * (dmat->alignment <= dmat->maxsize) is just a quick hack; the exact * alignment guarantees of malloc need to be nailed down, and the * code below should be rewritten to take that into account. * * In the meantime, we'll warn the user if malloc gets it wrong. */ if ((dmat->maxsize <= PAGE_SIZE) && (dmat->alignment <= dmat->maxsize) && dmat->lowaddr >= ptoa((vm_paddr_t)Maxmem) && attr == VM_MEMATTR_DEFAULT) { *vaddr = malloc(dmat->maxsize, M_DEVBUF, mflags); } else { /* * XXX Use Contigmalloc until it is merged into this facility * and handles multi-seg allocations. Nobody is doing * multi-seg allocations yet though. * XXX Certain AGP hardware does. */ *vaddr = (void *)kmem_alloc_contig(kmem_arena, dmat->maxsize, mflags, 0ul, dmat->lowaddr, dmat->alignment ? dmat->alignment : 1ul, dmat->boundary, attr); (*mapp)->contigalloc = 1; } if (*vaddr == NULL) { CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, ENOMEM); return (ENOMEM); } else if (vtophys(*vaddr) & (dmat->alignment - 1)) { printf("bus_dmamem_alloc failed to align memory properly.\n"); } CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", __func__, dmat, dmat->flags, 0); return (0); } /* * Free a piece of memory and its associated dmamap, both of which were * allocated via bus_dmamem_alloc. Make the same choice for free/contigfree. */ void bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) { if (!map->contigalloc) free(vaddr, M_DEVBUF); else kmem_free(kmem_arena, (vm_offset_t)vaddr, dmat->maxsize); bus_dmamap_destroy(dmat, map); CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); } static void _bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags) { bus_addr_t curaddr; bus_size_t sgsize; if (map->pagesneeded == 0) { CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), dmat->boundary, dmat->alignment); CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded); /* * Count the number of bounce pages * needed in order to complete this transfer */ curaddr = buf; while (buflen != 0) { sgsize = MIN(buflen, dmat->maxsegsz); if (run_filter(dmat, curaddr) != 0) { sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); map->pagesneeded++; } curaddr += sgsize; buflen -= sgsize; } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } } static void _bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, void *buf, bus_size_t buflen, int flags) { vm_offset_t vaddr; vm_offset_t vendaddr; bus_addr_t paddr; if (map->pagesneeded == 0) { CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, " "alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem), dmat->boundary, dmat->alignment); CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded); /* * Count the number of bounce pages * needed in order to complete this transfer */ vaddr = (vm_offset_t)buf; vendaddr = (vm_offset_t)buf + buflen; while (vaddr < vendaddr) { bus_size_t sg_len; sg_len = PAGE_SIZE - ((vm_offset_t)vaddr & PAGE_MASK); if (pmap == kernel_pmap) paddr = pmap_kextract(vaddr);
else paddr = pmap_extract(pmap, vaddr); if (run_filter(dmat, paddr) != 0) { sg_len = roundup2(sg_len, dmat->alignment); map->pagesneeded++; } vaddr += sg_len; } CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); } } static int _bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) { /* Reserve Necessary Bounce Pages */ mtx_lock(&bounce_lock); if (flags & BUS_DMA_NOWAIT) { if (reserve_bounce_pages(dmat, map, 0) != 0) { mtx_unlock(&bounce_lock); return (ENOMEM); } } else { if (reserve_bounce_pages(dmat, map, 1) != 0) { /* Queue us for resources */ STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); mtx_unlock(&bounce_lock); return (EINPROGRESS); } } mtx_unlock(&bounce_lock); return (0); } /* * Add a single contiguous physical range to the segment list. */ static int _bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) { bus_addr_t baddr, bmask; int seg; /* * Make sure we don't cross any boundaries. */ bmask = ~(dmat->boundary - 1); if (dmat->boundary > 0) { baddr = (curaddr + dmat->boundary) & bmask; if (sgsize > (baddr - curaddr)) sgsize = (baddr - curaddr); } /* * Insert chunk into a segment, coalescing with * previous segment if possible. */ seg = *segp; if (seg == -1) { seg = 0; segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } else { if (curaddr == segs[seg].ds_addr + segs[seg].ds_len && (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && (dmat->boundary == 0 || (segs[seg].ds_addr & bmask) == (curaddr & bmask))) segs[seg].ds_len += sgsize; else { if (++seg >= dmat->nsegments) return (0); segs[seg].ds_addr = curaddr; segs[seg].ds_len = sgsize; } } *segp = seg; return (sgsize); } /* * Utility function to load a physical buffer. segp contains * the starting segment on entrance, and the ending segment on exit. */ int _bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { bus_addr_t curaddr; bus_size_t sgsize; int error; if (segs == NULL) segs = map->segments; if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); if (map->pagesneeded != 0) { error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } while (buflen > 0) { curaddr = buf; sgsize = MIN(buflen, dmat->maxsegsz); if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) { sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); curaddr = add_bounce_page(dmat, map, 0, curaddr, sgsize); } sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, segp); if (sgsize == 0) break; buf += sgsize; buflen -= sgsize; } /* * Did we fit? */ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } int _bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs, int *segp) { return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags, segs, segp)); } /* * Utility function to load a linear buffer. segp contains * the starting segment on entrance, and the ending segment on exit.
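 *
 * Editor's note (an assumption based on the MI bus_dma wrappers, not stated
 * in this file): callers are expected to pass *segp == -1 for the first
 * chunk; _bus_dmamap_addseg() above starts segs[0] when it sees -1 and
 * otherwise tries to coalesce each new chunk into segs[*segp].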
*/ int _bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, int *segp) { bus_size_t sgsize; bus_addr_t curaddr; vm_offset_t kvaddr, vaddr; int error; if (segs == NULL) segs = map->segments; if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); if (map->pagesneeded != 0) { error = _bus_dmamap_reserve_pages(dmat, map, flags); if (error) return (error); } } vaddr = (vm_offset_t)buf; while (buflen > 0) { bus_size_t max_sgsize; /* * Get the physical address for this segment. */ if (pmap == kernel_pmap) { curaddr = pmap_kextract(vaddr); kvaddr = vaddr; } else { curaddr = pmap_extract(pmap, vaddr); kvaddr = 0; } /* * Compute the segment size, and adjust counts. */ max_sgsize = MIN(buflen, dmat->maxsegsz); sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); if (map->pagesneeded != 0 && run_filter(dmat, curaddr)) { sgsize = roundup2(sgsize, dmat->alignment); sgsize = MIN(sgsize, max_sgsize); curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, sgsize); } else { sgsize = MIN(sgsize, max_sgsize); } sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, segp); if (sgsize == 0) break; vaddr += sgsize; buflen -= sgsize; } /* * Did we fit? */ return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */ } void __bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) { if (dmat->flags & BUS_DMA_COULD_BOUNCE) { map->dmat = dmat; map->mem = *mem; map->callback = callback; map->callback_arg = callback_arg; } } bus_dma_segment_t * _bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error) { map->nsegs = nsegs; if (segs != NULL) memcpy(map->segments, segs, map->nsegs*sizeof(segs[0])); if (dmat->iommu != NULL) IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr, dmat->highaddr, dmat->alignment, dmat->boundary, dmat->iommu_cookie); if (segs != NULL) memcpy(segs, map->segments, map->nsegs*sizeof(segs[0])); else segs = map->segments; return (segs); } /* * Release the mapping held by map. */ void _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) { struct bounce_page *bpage; if (dmat->iommu) { IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie); map->nsegs = 0; } while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { STAILQ_REMOVE_HEAD(&map->bpages, links); free_bounce_page(dmat, bpage); } } void _bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) { struct bounce_page *bpage; vm_offset_t datavaddr, tempvaddr; if ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { /* * Handle data bouncing. 
We might also * want to add support for invalidating * the caches on broken hardware */ CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x op 0x%x " "performing bounce", __func__, dmat, dmat->flags, op); if (op & BUS_DMASYNC_PREWRITE) { while (bpage != NULL) { tempvaddr = 0; datavaddr = bpage->datavaddr; if (datavaddr == 0) { tempvaddr = pmap_quick_enter_page( bpage->datapage); datavaddr = tempvaddr | bpage->dataoffs; } bcopy((void *)datavaddr, (void *)bpage->vaddr, bpage->datacount); if (tempvaddr != 0) pmap_quick_remove_page(tempvaddr); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; } if (op & BUS_DMASYNC_POSTREAD) { while (bpage != NULL) { tempvaddr = 0; datavaddr = bpage->datavaddr; if (datavaddr == 0) { tempvaddr = pmap_quick_enter_page( bpage->datapage); datavaddr = tempvaddr | bpage->dataoffs; } bcopy((void *)bpage->vaddr, (void *)datavaddr, bpage->datacount); if (tempvaddr != 0) pmap_quick_remove_page(tempvaddr); bpage = STAILQ_NEXT(bpage, links); } dmat->bounce_zone->total_bounced++; } } powerpc_sync(); } static void init_bounce_pages(void *dummy __unused) { total_bpages = 0; STAILQ_INIT(&bounce_zone_list); STAILQ_INIT(&bounce_map_waitinglist); STAILQ_INIT(&bounce_map_callbacklist); mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); } SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); static struct sysctl_ctx_list * busdma_sysctl_tree(struct bounce_zone *bz) { return (&bz->sysctl_tree); } static struct sysctl_oid * busdma_sysctl_tree_top(struct bounce_zone *bz) { return (bz->sysctl_tree_top); } static int alloc_bounce_zone(bus_dma_tag_t dmat) { struct bounce_zone *bz; /* Check to see if we already have a suitable zone */ STAILQ_FOREACH(bz, &bounce_zone_list, links) { if ((dmat->alignment <= bz->alignment) && (dmat->lowaddr >= bz->lowaddr)) { dmat->bounce_zone = bz; return (0); } } if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) return (ENOMEM); STAILQ_INIT(&bz->bounce_page_list); bz->free_bpages = 0; bz->reserved_bpages = 0; bz->active_bpages = 0; bz->lowaddr = dmat->lowaddr; bz->alignment = MAX(dmat->alignment, PAGE_SIZE); bz->map_count = 0; snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); busdma_zonecount++; snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); dmat->bounce_zone = bz; sysctl_ctx_init(&bz->sysctl_tree); bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, CTLFLAG_RD, 0, ""); if (bz->sysctl_tree_top == NULL) { sysctl_ctx_free(&bz->sysctl_tree); return (0); /* XXX error code? 
*/ } SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, "Total bounce pages"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, "Free bounce pages"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, "Reserved bounce pages"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, "Active bounce pages"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, "Total bounce requests"); SYSCTL_ADD_INT(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, "Total bounce requests that were deferred"); SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); SYSCTL_ADD_UAUTO(busdma_sysctl_tree(bz), SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, "alignment", CTLFLAG_RD, &bz->alignment, ""); return (0); } static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) { struct bounce_zone *bz; int count; bz = dmat->bounce_zone; count = 0; while (numpages > 0) { struct bounce_page *bpage; bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_DEVBUF, M_NOWAIT | M_ZERO); if (bpage == NULL) break; bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_DEVBUF, M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0); if (bpage->vaddr == 0) { free(bpage, M_DEVBUF); break; } bpage->busaddr = pmap_kextract(bpage->vaddr); mtx_lock(&bounce_lock); STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); total_bpages++; bz->total_bpages++; bz->free_bpages++; mtx_unlock(&bounce_lock); count++; numpages--; } return (count); } static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) { struct bounce_zone *bz; int pages; mtx_assert(&bounce_lock, MA_OWNED); bz = dmat->bounce_zone; pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) return (map->pagesneeded - (map->pagesreserved + pages)); bz->free_bpages -= pages; bz->reserved_bpages += pages; map->pagesreserved += pages; pages = map->pagesneeded - map->pagesreserved; return (pages); } static bus_addr_t add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, bus_addr_t addr, bus_size_t size) { struct bounce_zone *bz; struct bounce_page *bpage; KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); bz = dmat->bounce_zone; if (map->pagesneeded == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesneeded--; if (map->pagesreserved == 0) panic("add_bounce_page: map doesn't need any pages"); map->pagesreserved--; mtx_lock(&bounce_lock); bpage = STAILQ_FIRST(&bz->bounce_page_list); if (bpage == NULL) panic("add_bounce_page: free page list is empty"); STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); bz->reserved_bpages--; bz->active_bpages++; mtx_unlock(&bounce_lock); if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { /* Page offset needs to be preserved. 
*/ bpage->vaddr |= addr & PAGE_MASK; bpage->busaddr |= addr & PAGE_MASK; } bpage->datavaddr = vaddr; bpage->datapage = PHYS_TO_VM_PAGE(addr); bpage->dataoffs = addr & PAGE_MASK; bpage->datacount = size; STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); return (bpage->busaddr); } static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) { struct bus_dmamap *map; struct bounce_zone *bz; bz = dmat->bounce_zone; bpage->datavaddr = 0; bpage->datacount = 0; if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { /* * Reset the bounce page to start at offset 0. Other uses * of this bounce page may need to store a full page of * data and/or assume it starts on a page boundary. */ bpage->vaddr &= ~PAGE_MASK; bpage->busaddr &= ~PAGE_MASK; } mtx_lock(&bounce_lock); STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); bz->free_bpages++; bz->active_bpages--; if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { if (reserve_bounce_pages(map->dmat, map, 1) == 0) { STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); STAILQ_INSERT_TAIL(&bounce_map_callbacklist, map, links); busdma_swi_pending = 1; bz->total_deferred++; swi_sched(vm_ih, 0); } } mtx_unlock(&bounce_lock); } void busdma_swi(void) { bus_dma_tag_t dmat; struct bus_dmamap *map; mtx_lock(&bounce_lock); while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); mtx_unlock(&bounce_lock); dmat = map->dmat; (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_LOCK); bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, map->callback_arg, BUS_DMA_WAITOK); (dmat->lockfunc)(dmat->lockfuncarg, BUS_DMA_UNLOCK); mtx_lock(&bounce_lock); } mtx_unlock(&bounce_lock); } int -bus_dma_tag_set_iommu(bus_dma_tag_t tag, struct device *iommu, void *cookie) +bus_dma_tag_set_iommu(bus_dma_tag_t tag, device_t iommu, void *cookie) { tag->iommu = iommu; tag->iommu_cookie = cookie; return (0); } Index: head/sys/sparc64/fhc/clkbrd.c =================================================================== --- head/sys/sparc64/fhc/clkbrd.c (revision 303889) +++ head/sys/sparc64/fhc/clkbrd.c (revision 303890) @@ -1,212 +1,212 @@ /*- * Copyright (c) 2004 Jason L. Wright (jason@thought.net) * Copyright (c) 2005 Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * from: OpenBSD: clkbrd.c,v 1.5 2004/10/01 18:18:49 jason Exp */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #define CLKBRD_NREG 3 #define CLKBRD_CF 0 #define CLKBRD_CLK 1 #define CLKBRD_CLKVER 2 struct clkbrd_softc { - struct device *sc_dev; + device_t sc_dev; struct resource *sc_res[CLKBRD_NREG]; int sc_rid[CLKBRD_NREG]; bus_space_tag_t sc_bt[CLKBRD_NREG]; bus_space_handle_t sc_bh[CLKBRD_NREG]; uint8_t sc_clk_ctrl; struct cdev *sc_led_dev; int sc_flags; #define CLKBRD_HAS_CLKVER (1 << 0) }; static devclass_t clkbrd_devclass; static device_probe_t clkbrd_probe; static device_attach_t clkbrd_attach; static device_detach_t clkbrd_detach; static void clkbrd_free_resources(struct clkbrd_softc *); static void clkbrd_led_func(void *, int); static device_method_t clkbrd_methods[] = { /* Device interface */ DEVMETHOD(device_probe, clkbrd_probe), DEVMETHOD(device_attach, clkbrd_attach), DEVMETHOD(device_detach, clkbrd_detach), { 0, 0 } }; static driver_t clkbrd_driver = { "clkbrd", clkbrd_methods, sizeof(struct clkbrd_softc), }; DRIVER_MODULE(clkbrd, fhc, clkbrd_driver, clkbrd_devclass, 0, 0); static int clkbrd_probe(device_t dev) { if (strcmp(ofw_bus_get_name(dev), "clock-board") == 0) { device_set_desc(dev, "Clock Board"); return (0); } return (ENXIO); } static int clkbrd_attach(device_t dev) { struct clkbrd_softc *sc; int i, slots; uint8_t r; sc = device_get_softc(dev); sc->sc_dev = dev; for (i = CLKBRD_CF; i <= CLKBRD_CLKVER; i++) { sc->sc_rid[i] = i; sc->sc_res[i] = bus_alloc_resource_any(sc->sc_dev, SYS_RES_MEMORY, &sc->sc_rid[i], RF_ACTIVE); if (sc->sc_res[i] == NULL) { if (i != CLKBRD_CLKVER) { device_printf(sc->sc_dev, "could not allocate resource %d\n", i); goto fail; } continue; } sc->sc_bt[i] = rman_get_bustag(sc->sc_res[i]); sc->sc_bh[i] = rman_get_bushandle(sc->sc_res[i]); if (i == CLKBRD_CLKVER) sc->sc_flags |= CLKBRD_HAS_CLKVER; } slots = 4; r = bus_space_read_1(sc->sc_bt[CLKBRD_CLK], sc->sc_bh[CLKBRD_CLK], CLK_STS1); switch (r & CLK_STS1_SLOTS_MASK) { case CLK_STS1_SLOTS_16: slots = 16; break; case CLK_STS1_SLOTS_8: slots = 8; break; case CLK_STS1_SLOTS_4: if (sc->sc_flags & CLKBRD_HAS_CLKVER) { r = bus_space_read_1(sc->sc_bt[CLKBRD_CLKVER], sc->sc_bh[CLKBRD_CLKVER], CLKVER_SLOTS); if (r != 0 && (r & CLKVER_SLOTS_MASK) == CLKVER_SLOTS_PLUS) slots = 5; } } device_printf(sc->sc_dev, "Sun Enterprise Exx00 machine: %d slots\n", slots); sc->sc_clk_ctrl = bus_space_read_1(sc->sc_bt[CLKBRD_CLK], sc->sc_bh[CLKBRD_CLK], CLK_CTRL); sc->sc_led_dev = led_create(clkbrd_led_func, sc, "clockboard"); return (0); fail: clkbrd_free_resources(sc); return (ENXIO); } static int clkbrd_detach(device_t dev) { struct clkbrd_softc *sc; sc = device_get_softc(dev); led_destroy(sc->sc_led_dev); bus_space_write_1(sc->sc_bt[CLKBRD_CLK], sc->sc_bh[CLKBRD_CLK], CLK_CTRL, sc->sc_clk_ctrl); bus_space_read_1(sc->sc_bt[CLKBRD_CLK], sc->sc_bh[CLKBRD_CLK], CLK_CTRL); clkbrd_free_resources(sc); return (0); } static void clkbrd_free_resources(struct clkbrd_softc *sc) { int i; for (i = CLKBRD_CF; i <= CLKBRD_CLKVER; i++) if (sc->sc_res[i] != NULL) bus_release_resource(sc->sc_dev, SYS_RES_MEMORY, sc->sc_rid[i], sc->sc_res[i]); } static void clkbrd_led_func(void *arg, int onoff) { struct clkbrd_softc *sc; uint8_t r; sc = (struct clkbrd_softc *)arg; r = bus_space_read_1(sc->sc_bt[CLKBRD_CLK], sc->sc_bh[CLKBRD_CLK], CLK_CTRL); if (onoff) r |= CLK_CTRL_RLED; else r &= ~CLK_CTRL_RLED; bus_space_write_1(sc->sc_bt[CLKBRD_CLK], 
sc->sc_bh[CLKBRD_CLK], CLK_CTRL, r); bus_space_read_1(sc->sc_bt[CLKBRD_CLK], sc->sc_bh[CLKBRD_CLK], CLK_CTRL); } Index: head/sys/sys/pcpu.h =================================================================== --- head/sys/sys/pcpu.h (revision 303889) +++ head/sys/sys/pcpu.h (revision 303890) @@ -1,239 +1,239 @@ /*- * Copyright (c) 2001 Wind River Systems, Inc. * All rights reserved. * Written by: John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_PCPU_H_ #define _SYS_PCPU_H_ #ifdef LOCORE #error "no assembler-serviceable parts inside" #endif #include #include #include #include #include #include #include #include #include #define DPCPU_SETNAME "set_pcpu" #define DPCPU_SYMPREFIX "pcpu_entry_" #ifdef _KERNEL /* * Define a set for pcpu data. */ extern uintptr_t *__start_set_pcpu; __GLOBL(__start_set_pcpu); extern uintptr_t *__stop_set_pcpu; __GLOBL(__stop_set_pcpu); /* * Array of dynamic pcpu base offsets. Indexed by id. */ extern uintptr_t dpcpu_off[]; /* * Convenience defines. */ #define DPCPU_START ((uintptr_t)&__start_set_pcpu) #define DPCPU_STOP ((uintptr_t)&__stop_set_pcpu) #define DPCPU_BYTES (DPCPU_STOP - DPCPU_START) #define DPCPU_MODMIN 2048 #define DPCPU_SIZE roundup2(DPCPU_BYTES, PAGE_SIZE) #define DPCPU_MODSIZE (DPCPU_SIZE - (DPCPU_BYTES - DPCPU_MODMIN)) /* * Declaration and definition. */ #define DPCPU_NAME(n) pcpu_entry_##n #define DPCPU_DECLARE(t, n) extern t DPCPU_NAME(n) #define DPCPU_DEFINE(t, n) t DPCPU_NAME(n) __section(DPCPU_SETNAME) __used /* * Accessors with a given base. */ #define _DPCPU_PTR(b, n) \ (__typeof(DPCPU_NAME(n))*)((b) + (uintptr_t)&DPCPU_NAME(n)) #define _DPCPU_GET(b, n) (*_DPCPU_PTR(b, n)) #define _DPCPU_SET(b, n, v) (*_DPCPU_PTR(b, n) = v) /* * Accessors for the current cpu. */ #define DPCPU_PTR(n) _DPCPU_PTR(PCPU_GET(dynamic), n) #define DPCPU_GET(n) (*DPCPU_PTR(n)) #define DPCPU_SET(n, v) (*DPCPU_PTR(n) = v) /* * Accessors for remote cpus. */ #define DPCPU_ID_PTR(i, n) _DPCPU_PTR(dpcpu_off[(i)], n) #define DPCPU_ID_GET(i, n) (*DPCPU_ID_PTR(i, n)) #define DPCPU_ID_SET(i, n, v) (*DPCPU_ID_PTR(i, n) = v) /* * Utility macros. 
*/ #define DPCPU_SUM(n) __extension__ \ ({ \ u_int _i; \ __typeof(*DPCPU_PTR(n)) sum; \ \ sum = 0; \ CPU_FOREACH(_i) { \ sum += *DPCPU_ID_PTR(_i, n); \ } \ sum; \ }) #define DPCPU_VARSUM(n, var) __extension__ \ ({ \ u_int _i; \ __typeof((DPCPU_PTR(n))->var) sum; \ \ sum = 0; \ CPU_FOREACH(_i) { \ sum += (DPCPU_ID_PTR(_i, n))->var; \ } \ sum; \ }) #define DPCPU_ZERO(n) do { \ u_int _i; \ \ CPU_FOREACH(_i) { \ bzero(DPCPU_ID_PTR(_i, n), sizeof(*DPCPU_PTR(n))); \ } \ } while(0) #endif /* _KERNEL */ /* * This structure maps out the global data that needs to be kept on a * per-cpu basis. The members are accessed via the PCPU_GET/SET/PTR * macros defined in . Machine dependent fields are * defined in the PCPU_MD_FIELDS macro defined in . */ struct pcpu { struct thread *pc_curthread; /* Current thread */ struct thread *pc_idlethread; /* Idle thread */ struct thread *pc_fpcurthread; /* Fp state owner */ struct thread *pc_deadthread; /* Zombie thread or NULL */ struct pcb *pc_curpcb; /* Current pcb */ uint64_t pc_switchtime; /* cpu_ticks() at last csw */ int pc_switchticks; /* `ticks' at last csw */ u_int pc_cpuid; /* This cpu number */ STAILQ_ENTRY(pcpu) pc_allcpu; struct lock_list_entry *pc_spinlocks; struct vmmeter pc_cnt; /* VM stats counters */ long pc_cp_time[CPUSTATES]; /* statclock ticks */ - struct device *pc_device; + device_t pc_device; void *pc_netisr; /* netisr SWI cookie */ int pc_unused1; /* unused field */ int pc_domain; /* Memory domain. */ struct rm_queue pc_rm_queue; /* rmlock list of trackers */ uintptr_t pc_dynamic; /* Dynamic per-cpu data area */ /* * Keep MD fields last, so that CPU-specific variations on a * single architecture don't result in offset variations of * the machine-independent fields of the pcpu. Even though * the pcpu structure is private to the kernel, some ports * (e.g., lsof, part of gtop) define _KERNEL and include this * header. While strictly speaking this is wrong, there's no * reason not to keep the offsets of the MI fields constant * if only to make kernel debugging easier. */ PCPU_MD_FIELDS; } __aligned(CACHE_LINE_SIZE); #ifdef CTASSERT /* * To minimize memory waste in per-cpu UMA zones, size of struct pcpu * should be denominator of PAGE_SIZE. */ CTASSERT((PAGE_SIZE / sizeof(struct pcpu)) * sizeof(struct pcpu) == PAGE_SIZE); #endif #ifdef _KERNEL STAILQ_HEAD(cpuhead, pcpu); extern struct cpuhead cpuhead; extern struct pcpu *cpuid_to_pcpu[]; #define curcpu PCPU_GET(cpuid) #define curproc (curthread->td_proc) #ifndef curthread #define curthread PCPU_GET(curthread) #endif #define curvidata PCPU_GET(vidata) /* Accessor to elements allocated via UMA_ZONE_PCPU zone. */ static inline void * zpcpu_get(void *base) { return ((char *)(base) + sizeof(struct pcpu) * curcpu); } static inline void * zpcpu_get_cpu(void *base, int cpu) { return ((char *)(base) + sizeof(struct pcpu) * cpu); } /* * Machine dependent callouts. cpu_pcpu_init() is responsible for * initializing machine dependent fields of struct pcpu, and * db_show_mdpcpu() is responsible for handling machine dependent * fields for the DDB 'show pcpu' command. 
*/ void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size); void db_show_mdpcpu(struct pcpu *pcpu); void *dpcpu_alloc(int size); void dpcpu_copy(void *s, int size); void dpcpu_free(void *s, int size); void dpcpu_init(void *dpcpu, int cpuid); void pcpu_destroy(struct pcpu *pcpu); struct pcpu *pcpu_find(u_int cpuid); void pcpu_init(struct pcpu *pcpu, int cpuid, size_t size); #endif /* _KERNEL */ #endif /* !_SYS_PCPU_H_ */ Index: head/sys/sys/rman.h =================================================================== --- head/sys/sys/rman.h (revision 303889) +++ head/sys/sys/rman.h (revision 303890) @@ -1,165 +1,165 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _SYS_RMAN_H_ #define _SYS_RMAN_H_ 1 #ifndef _KERNEL #include #else #include #include #endif #define RF_ALLOCATED 0x0001 /* resource has been reserved */ #define RF_ACTIVE 0x0002 /* resource allocation has been activated */ #define RF_SHAREABLE 0x0004 /* resource permits contemporaneous sharing */ #define RF_SPARE1 0x0008 #define RF_SPARE2 0x0010 #define RF_FIRSTSHARE 0x0020 /* first in sharing list */ #define RF_PREFETCHABLE 0x0040 /* resource is prefetchable */ #define RF_OPTIONAL 0x0080 /* for bus_alloc_resources() */ #define RF_UNMAPPED 0x0100 /* don't map resource when activating */ #define RF_ALIGNMENT_SHIFT 10 /* alignment size bit starts bit 10 */ #define RF_ALIGNMENT_MASK (0x003F << RF_ALIGNMENT_SHIFT) /* resource address alignment size bit mask */ #define RF_ALIGNMENT_LOG2(x) ((x) << RF_ALIGNMENT_SHIFT) #define RF_ALIGNMENT(x) (((x) & RF_ALIGNMENT_MASK) >> RF_ALIGNMENT_SHIFT) enum rman_type { RMAN_UNINIT = 0, RMAN_GAUGE, RMAN_ARRAY }; /* * String length exported to userspace for resource names, etc. */ #define RM_TEXTLEN 32 #define RM_MAX_END (~(rman_res_t)0) #define RMAN_IS_DEFAULT_RANGE(s,e) ((s) == 0 && (e) == RM_MAX_END) /* * Userspace-exported structures. 
struct u_resource { uintptr_t r_handle; /* resource uniquifier */ uintptr_t r_parent; /* parent rman */ uintptr_t r_device; /* device owning this resource */ char r_devname[RM_TEXTLEN]; /* device name XXX obsolete */ rman_res_t r_start; /* offset in resource space */ rman_res_t r_size; /* size in resource space */ u_int r_flags; /* RF_* flags */ }; struct u_rman { uintptr_t rm_handle; /* rman uniquifier */ char rm_descr[RM_TEXTLEN]; /* rman description */ rman_res_t rm_start; /* base of managed region */ rman_res_t rm_size; /* size of managed region */ enum rman_type rm_type; /* region type */ }; #ifdef _KERNEL /* * The public (kernel) view of struct resource * * NB: Changing the offset/size/type of existing fields in struct resource * NB: breaks the device driver ABI and is strongly FORBIDDEN. * NB: Appending new fields is probably just misguided. */ struct resource { struct resource_i *__r_i; bus_space_tag_t r_bustag; /* bus_space tag */ bus_space_handle_t r_bushandle; /* bus_space handle */ }; struct resource_i; struct resource_map; TAILQ_HEAD(resource_head, resource_i); struct rman { struct resource_head rm_list; struct mtx *rm_mtx; /* mutex used to protect rm_list */ TAILQ_ENTRY(rman) rm_link; /* link in list of all rmans */ rman_res_t rm_start; /* index of globally first entry */ rman_res_t rm_end; /* index of globally last entry */ enum rman_type rm_type; /* what type of resource this is */ const char *rm_descr; /* text description of this resource */ }; TAILQ_HEAD(rman_head, rman); int rman_activate_resource(struct resource *r); int rman_adjust_resource(struct resource *r, rman_res_t start, rman_res_t end); int rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end); bus_space_handle_t rman_get_bushandle(struct resource *); bus_space_tag_t rman_get_bustag(struct resource *); rman_res_t rman_get_end(struct resource *); -struct device *rman_get_device(struct resource *); +device_t rman_get_device(struct resource *); u_int rman_get_flags(struct resource *); void rman_get_mapping(struct resource *, struct resource_map *); int rman_get_rid(struct resource *); rman_res_t rman_get_size(struct resource *); rman_res_t rman_get_start(struct resource *); void *rman_get_virtual(struct resource *); int rman_deactivate_resource(struct resource *r); int rman_fini(struct rman *rm); int rman_init(struct rman *rm); int rman_init_from_resource(struct rman *rm, struct resource *r); int rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end); uint32_t rman_make_alignment_flags(uint32_t size); int rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end); int rman_is_region_manager(struct resource *r, struct rman *rm); int rman_release_resource(struct resource *r); struct resource *rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end, rman_res_t count, - u_int flags, struct device *dev); + u_int flags, device_t dev); struct resource *rman_reserve_resource_bound(struct rman *rm, rman_res_t start, rman_res_t end, rman_res_t count, rman_res_t bound, - u_int flags, struct device *dev); + u_int flags, device_t dev); void rman_set_bushandle(struct resource *_r, bus_space_handle_t _h); void rman_set_bustag(struct resource *_r, bus_space_tag_t _t); -void rman_set_device(struct resource *_r, struct device *_dev); +void rman_set_device(struct resource *_r, device_t _dev); void rman_set_end(struct resource *_r, rman_res_t _end); void rman_set_mapping(struct resource *, struct resource_map *); void rman_set_rid(struct resource *_r, int _rid);
void rman_set_start(struct resource *_r, rman_res_t _start); void rman_set_virtual(struct resource *_r, void *_v); extern struct rman_head rman_head; #endif /* _KERNEL */ #endif /* !_SYS_RMAN_H_ */
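
[Editor's note] The substance of this revision is a mechanical spelling change: each "struct device *" in the touched interfaces becomes device_t, which <sys/bus.h> declares as "typedef struct device *device_t;", so existing callers compile unchanged. A minimal sketch of a caller against the updated rman.h prototypes; mydrv_attach and my_rman are hypothetical names, and the rman is assumed to have been set up elsewhere with rman_init() and rman_manage_region():

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/bus.h>
	#include <sys/rman.h>
	#include <machine/bus.h>
	#include <machine/resource.h>

	static struct rman my_rman;	/* assumed initialized elsewhere */

	static int
	mydrv_attach(device_t dev)
	{
		struct resource *r;

		/* The final parameter is now spelled device_t; passing a
		 * driver's own device_t compiles before and after r303890. */
		r = rman_reserve_resource(&my_rman, 0, 0xffff, 0x100,
		    RF_ACTIVE, dev);
		if (r == NULL)
			return (ENXIO);

		/* rman_get_device() likewise now returns device_t. */
		device_printf(rman_get_device(r), "reserved %#jx-%#jx\n",
		    (uintmax_t)rman_get_start(r), (uintmax_t)rman_get_end(r));

		rman_release_resource(r);
		return (0);
	}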