Index: head/share/man/man9/bios.9 =================================================================== --- head/share/man/man9/bios.9 (revision 295879) +++ head/share/man/man9/bios.9 (revision 295880) @@ -1,182 +1,181 @@ .\" $FreeBSD$ .\" .\" Copyright (c) 1997 Michael Smith .\" All rights reserved. .\" .\" Redistribution and use in source and binary forms, with or without .\" modification, are permitted provided that the following conditions .\" are met: .\" 1. Redistributions of source code must retain the above copyright .\" notice, this list of conditions and the following disclaimer. .\" 2. Redistributions in binary form must reproduce the above copyright .\" notice, this list of conditions and the following disclaimer in the .\" documentation and/or other materials provided with the distribution. .\" .\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. .\" IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, .\" BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; .\" LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED .\" AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, .\" OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY .\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF .\" SUCH DAMAGE. .\" .Dd August 9, 2005 .Dt BIOS 9 .Os .Sh NAME .Nm bios_sigsearch , .Nm bios32_SDlookup , .Nm bios32 , .Nm bios_oem_strings .Nd interact with PC BIOS .Sh SYNOPSIS .In sys/param.h .In vm/vm.h .In vm/pmap.h .In machine/param.h -.In machine/pmap.h .In machine/pc/bios.h .Ft uint32_t .Fn bios_sigsearch "uint32_t start" "u_char *sig" "int siglen" "int paralen" "int sigofs" .Ft int .Fn bios32_SDlookup "struct bios32_SDentry *ent" .Ft int .Fn bios32 "struct bios_regs *br" "u_int offset" "u_short segment" .Fn BIOS_PADDRTOVADDR "addr" .Fn BIOS_VADDRTOPADDR "addr" .Vt extern struct bios32_SDentry PCIbios ; .Vt extern struct SMBIOS_table SMBIOStable ; .Vt extern struct DMI_table DMItable ; .Ft int .Fn bios_oem_strings "struct bios_oem *oem" "u_char *buffer" "size_t maxlen" .Bd -literal struct bios_oem_signature { char * anchor; /* search anchor string in BIOS memory */ size_t offset; /* offset from anchor (may be negative) */ size_t totlen; /* total length of BIOS string to copy */ }; struct bios_oem_range { u_int from; /* shouldn't be below 0xe0000 */ u_int to; /* shouldn't be above 0xfffff */ }; struct bios_oem { struct bios_oem_range range; struct bios_oem_signature signature[]; }; .Ed .Sh DESCRIPTION These functions provide a general-purpose interface for dealing with the BIOS functions and data encountered on x86 PC-architecture systems. .Bl -tag -width 20n .It Fn bios_sigsearch Searches the BIOS address space for a service signature, usually an uppercase ASCII sequence surrounded by underscores. The search begins at .Fa start , or at the beginning of the BIOS if .Fa start is zero. .Fa siglen bytes of the BIOS image and .Fa sig are compared at .Fa sigofs bytes offset from the current location. If no match is found, the current location is incremented by .Fa paralen bytes and the search repeated. If the signature is found, its effective physical address is returned. If no signature is found, zero is returned. 
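For example, a hypothetical caller could look for the BIOS32 Service
Directory header, whose signature is the string
.Dq _32_
aligned on a 16-byte paragraph boundary; the signature and alignment
values below are purely illustrative:
.Bd -literal -offset indent
uint32_t sigpa;

/* Scan the whole BIOS for "_32_" on 16-byte boundaries. */
sigpa = bios_sigsearch(0, (u_char *)"_32_", 4, 16, 0);
if (sigpa != 0)
	printf("BIOS32 Service Directory header at %#x\en", sigpa);
.Ed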
.It Fn bios_oem_strings Searches a given BIOS memory range for one or more strings, and composes a printable concatenation of those found. The routine expects a structure describing the BIOS address .Fa range (within .Li 0xe0000 - .Li 0xfffff ) , and a { .Dv NULL , Li 0 , 0 } -terminated array of .Vt bios_oem_signature structures which define the .Va anchor string, an .Va offset from the beginning of the match (which may be negative), and .Va totlen number of bytes to be collected from BIOS memory starting at that offset. Unmatched anchors are ignored, whereas matches are copied from BIOS memory starting at their corresponding .Vt offset with unprintable characters being replaced with space, and consecutive spaces being suppressed. This composed string is stored in .Fa buffer up to the given .Fa maxlen bytes (including trailing .Ql \e0 , and any trailing space suppressed). If an error is encountered, i.e.\& trying to read out of said BIOS range, other invalid input, or .Fa buffer overflow, a negative integer is returned, otherwise the length of the composed string is returned. In particular, a return value of 0 means that none of the given anchor strings were found in the specified BIOS memory range. .It Fn BIOS_VADDRTOPADDR Returns the effective physical address which corresponds to the kernel virtual address .Fa addr . .It Fn BIOS_PADDRTOVADDR Returns the kernel virtual address which corresponds to the effective physical address .Fa addr . .It SMBIOStable If not NULL, points to a .Ft struct SMBIOS_table structure containing information read from the System Management BIOS table during system startup. .It DMItable If not NULL, points to a .Ft struct DMI_table structure containing information read from the Desktop Management Interface parameter table during system startup. .El .Sh BIOS32 At system startup, the BIOS is scanned for the BIOS32 Service Directory (part of the PCI specification), and the existence of the directory is recorded. This can then be used to locate other services. .Bl -tag -width 20n .It Fn bios32_SDlookup Attempts to locate the BIOS32 service matching the 4-byte identifier passed in the .Fa ident field of the .Fa ent argument. .It Fn bios32 Calls a bios32 function. This presumes that the function is capable of working within the kernel segment (normally the case). The virtual address of the entrypoint is supplied in .Fa entry and the register arguments to the function are supplied in .Fa args . .It PCIbios If not NULL, points to a .Ft struct bios32_SDentry structure describing the PCI BIOS entrypoint which was found during system startup. .El Index: head/sys/amd64/cloudabi64/cloudabi64_sysvec.c =================================================================== --- head/sys/amd64/cloudabi64/cloudabi64_sysvec.c (revision 295879) +++ head/sys/amd64/cloudabi64/cloudabi64_sysvec.c (revision 295880) @@ -1,161 +1,160 @@ /*- * Copyright (c) 2015 Nuxi, https://nuxi.nl/ * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include -#include #include #include #include #include extern const char *cloudabi64_syscallnames[]; extern struct sysent cloudabi64_sysent[]; static int cloudabi64_fetch_syscall_args(struct thread *td, struct syscall_args *sa) { struct trapframe *frame = td->td_frame; /* Obtain system call number. */ sa->code = frame->tf_rax; if (sa->code >= CLOUDABI64_SYS_MAXSYSCALL) return (ENOSYS); sa->callp = &cloudabi64_sysent[sa->code]; /* Fetch system call arguments. */ sa->args[0] = frame->tf_rdi; sa->args[1] = frame->tf_rsi; sa->args[2] = frame->tf_rdx; sa->args[3] = frame->tf_rcx; /* Actually %r10. */ sa->args[4] = frame->tf_r8; sa->args[5] = frame->tf_r9; /* Default system call return values. */ td->td_retval[0] = 0; td->td_retval[1] = frame->tf_rdx; return (0); } static void cloudabi64_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame = td->td_frame; switch (error) { case 0: /* System call succeeded. */ frame->tf_rax = td->td_retval[0]; frame->tf_rdx = td->td_retval[1]; frame->tf_rflags &= ~PSL_C; break; case ERESTART: /* Restart system call. */ frame->tf_rip -= frame->tf_err; frame->tf_r10 = frame->tf_rcx; set_pcb_flags(td->td_pcb, PCB_FULL_IRET); break; case EJUSTRETURN: break; default: /* System call returned an error. */ frame->tf_rax = cloudabi_convert_errno(error); frame->tf_rflags |= PSL_C; break; } } static void cloudabi64_schedtail(struct thread *td) { struct trapframe *frame = td->td_frame; /* Initial register values for processes returning from fork. */ frame->tf_rax = CLOUDABI_PROCESS_CHILD; frame->tf_rdx = td->td_tid; } void cloudabi64_thread_setregs(struct thread *td, const cloudabi64_threadattr_t *attr) { struct trapframe *frame; stack_t stack; /* Perform standard register initialization. */ stack.ss_sp = (void *)attr->stack; stack.ss_size = attr->stack_size; cpu_set_upcall_kse(td, (void *)attr->entry_point, NULL, &stack); /* * Pass in the thread ID of the new thread and the argument * pointer provided by the parent thread in as arguments to the * entry point. 
*/ frame = td->td_frame; frame->tf_rdi = td->td_tid; frame->tf_rsi = attr->argument; } static struct sysentvec cloudabi64_elf_sysvec = { .sv_size = CLOUDABI64_SYS_MAXSYSCALL, .sv_table = cloudabi64_sysent, .sv_fixup = cloudabi64_fixup, .sv_name = "CloudABI ELF64", .sv_coredump = elf64_coredump, .sv_pagesize = PAGE_SIZE, .sv_minuser = VM_MIN_ADDRESS, .sv_maxuser = VM_MAXUSER_ADDRESS, .sv_usrstack = USRSTACK, .sv_stackprot = VM_PROT_READ | VM_PROT_WRITE, .sv_copyout_strings = cloudabi64_copyout_strings, .sv_flags = SV_ABI_CLOUDABI | SV_CAPSICUM, .sv_set_syscall_retval = cloudabi64_set_syscall_retval, .sv_fetch_syscall_args = cloudabi64_fetch_syscall_args, .sv_syscallnames = cloudabi64_syscallnames, .sv_schedtail = cloudabi64_schedtail, }; INIT_SYSENTVEC(elf_sysvec, &cloudabi64_elf_sysvec); Elf64_Brandinfo cloudabi64_brand = { .brand = ELFOSABI_CLOUDABI, .machine = EM_X86_64, .sysvec = &cloudabi64_elf_sysvec, .compat_3_brand = "CloudABI", }; Index: head/sys/amd64/vmm/amd/npt.c =================================================================== --- head/sys/amd64/vmm/amd/npt.c (revision 295879) +++ head/sys/amd64/vmm/amd/npt.c (revision 295880) @@ -1,87 +1,85 @@ /*- * Copyright (c) 2013 Anish Gupta (akgupt3@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include -#include - #include "npt.h" SYSCTL_DECL(_hw_vmm); SYSCTL_NODE(_hw_vmm, OID_AUTO, npt, CTLFLAG_RW, NULL, NULL); static int npt_flags; SYSCTL_INT(_hw_vmm_npt, OID_AUTO, pmap_flags, CTLFLAG_RD, &npt_flags, 0, NULL); #define NPT_IPIMASK 0xFF /* * AMD nested page table init. 
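 *
 * svm_npt_init() below keeps the host IPI vector in the low byte of
 * npt_flags and, unless the "hw.vmm.npt.enable_superpage" tunable is
 * set to 0, also sets PMAP_PDE_SUPERPAGE so that the nested page
 * tables may use superpage mappings.  npt_pinit() then passes these
 * flags to pmap_pinit_type() when a guest vmspace is created.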
*/ int svm_npt_init(int ipinum) { int enable_superpage = 1; npt_flags = ipinum & NPT_IPIMASK; TUNABLE_INT_FETCH("hw.vmm.npt.enable_superpage", &enable_superpage); if (enable_superpage) npt_flags |= PMAP_PDE_SUPERPAGE; return (0); } static int npt_pinit(pmap_t pmap) { return (pmap_pinit_type(pmap, PT_RVI, npt_flags)); } struct vmspace * svm_npt_alloc(vm_offset_t min, vm_offset_t max) { return (vmspace_alloc(min, max, npt_pinit)); } void svm_npt_free(struct vmspace *vmspace) { vmspace_free(vmspace); } Index: head/sys/amd64/vmm/amd/svm.c =================================================================== --- head/sys/amd64/vmm/amd/svm.c (revision 295879) +++ head/sys/amd64/vmm/amd/svm.c (revision 295880) @@ -1,2246 +1,2245 @@ /*- * Copyright (c) 2013, Anish Gupta (akgupt3@gmail.com) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include "vmm_lapic.h" #include "vmm_stat.h" #include "vmm_ktr.h" #include "vmm_ioport.h" #include "vatpic.h" #include "vlapic.h" #include "vlapic_priv.h" #include "x86.h" #include "vmcb.h" #include "svm.h" #include "svm_softc.h" #include "svm_msr.h" #include "npt.h" SYSCTL_DECL(_hw_vmm); SYSCTL_NODE(_hw_vmm, OID_AUTO, svm, CTLFLAG_RW, NULL, NULL); /* * SVM CPUID function 0x8000_000A, edx bit decoding. */ #define AMD_CPUID_SVM_NP BIT(0) /* Nested paging or RVI */ #define AMD_CPUID_SVM_LBR BIT(1) /* Last branch virtualization */ #define AMD_CPUID_SVM_SVML BIT(2) /* SVM lock */ #define AMD_CPUID_SVM_NRIP_SAVE BIT(3) /* Next RIP is saved */ #define AMD_CPUID_SVM_TSC_RATE BIT(4) /* TSC rate control. */ #define AMD_CPUID_SVM_VMCB_CLEAN BIT(5) /* VMCB state caching */ #define AMD_CPUID_SVM_FLUSH_BY_ASID BIT(6) /* Flush by ASID */ #define AMD_CPUID_SVM_DECODE_ASSIST BIT(7) /* Decode assist */ #define AMD_CPUID_SVM_PAUSE_INC BIT(10) /* Pause intercept filter. 
*/ #define AMD_CPUID_SVM_PAUSE_FTH BIT(12) /* Pause filter threshold */ #define AMD_CPUID_SVM_AVIC BIT(13) /* AVIC present */ #define VMCB_CACHE_DEFAULT (VMCB_CACHE_ASID | \ VMCB_CACHE_IOPM | \ VMCB_CACHE_I | \ VMCB_CACHE_TPR | \ VMCB_CACHE_CR2 | \ VMCB_CACHE_CR | \ VMCB_CACHE_DT | \ VMCB_CACHE_SEG | \ VMCB_CACHE_NP) static uint32_t vmcb_clean = VMCB_CACHE_DEFAULT; SYSCTL_INT(_hw_vmm_svm, OID_AUTO, vmcb_clean, CTLFLAG_RDTUN, &vmcb_clean, 0, NULL); static MALLOC_DEFINE(M_SVM, "svm", "svm"); static MALLOC_DEFINE(M_SVM_VLAPIC, "svm-vlapic", "svm-vlapic"); /* Per-CPU context area. */ extern struct pcpu __pcpu[]; static uint32_t svm_feature = ~0U; /* AMD SVM features. */ SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, features, CTLFLAG_RDTUN, &svm_feature, 0, "SVM features advertised by CPUID.8000000AH:EDX"); static int disable_npf_assist; SYSCTL_INT(_hw_vmm_svm, OID_AUTO, disable_npf_assist, CTLFLAG_RWTUN, &disable_npf_assist, 0, NULL); /* Maximum ASIDs supported by the processor */ static uint32_t nasid; SYSCTL_UINT(_hw_vmm_svm, OID_AUTO, num_asids, CTLFLAG_RDTUN, &nasid, 0, "Number of ASIDs supported by this processor"); /* Current ASID generation for each host cpu */ static struct asid asid[MAXCPU]; /* * SVM host state saved area of size 4KB for each core. */ static uint8_t hsave[MAXCPU][PAGE_SIZE] __aligned(PAGE_SIZE); static VMM_STAT_AMD(VCPU_EXITINTINFO, "VM exits during event delivery"); static VMM_STAT_AMD(VCPU_INTINFO_INJECTED, "Events pending at VM entry"); static VMM_STAT_AMD(VMEXIT_VINTR, "VM exits due to interrupt window"); static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val); static __inline int flush_by_asid(void) { return (svm_feature & AMD_CPUID_SVM_FLUSH_BY_ASID); } static __inline int decode_assist(void) { return (svm_feature & AMD_CPUID_SVM_DECODE_ASSIST); } static void svm_disable(void *arg __unused) { uint64_t efer; efer = rdmsr(MSR_EFER); efer &= ~EFER_SVM; wrmsr(MSR_EFER, efer); } /* * Disable SVM on all CPUs. */ static int svm_cleanup(void) { smp_rendezvous(NULL, svm_disable, NULL, NULL); return (0); } /* * Verify that all the features required by bhyve are available. */ static int check_svm_features(void) { u_int regs[4]; /* CPUID Fn8000_000A is for SVM */ do_cpuid(0x8000000A, regs); svm_feature &= regs[3]; /* * The number of ASIDs can be configured to be less than what is * supported by the hardware but not more. */ if (nasid == 0 || nasid > regs[1]) nasid = regs[1]; KASSERT(nasid > 1, ("Insufficient ASIDs for guests: %#x", nasid)); /* bhyve requires the Nested Paging feature */ if (!(svm_feature & AMD_CPUID_SVM_NP)) { printf("SVM: Nested Paging feature not available.\n"); return (ENXIO); } /* bhyve requires the NRIP Save feature */ if (!(svm_feature & AMD_CPUID_SVM_NRIP_SAVE)) { printf("SVM: NRIP Save feature not available.\n"); return (ENXIO); } return (0); } static void svm_enable(void *arg __unused) { uint64_t efer; efer = rdmsr(MSR_EFER); efer |= EFER_SVM; wrmsr(MSR_EFER, efer); wrmsr(MSR_VM_HSAVE_PA, vtophys(hsave[curcpu])); } /* * Return 1 if SVM is enabled on this processor and 0 otherwise. */ static int svm_available(void) { uint64_t msr; /* Section 15.4 Enabling SVM from APM2. 
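 *
 * SVM can be used only when CPUID advertises the SVM extension
 * (AMDID2_SVM) and the BIOS has not locked the feature out by setting
 * the SVMDIS bit in the VM_CR MSR; both conditions are checked below.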
*/ if ((amd_feature2 & AMDID2_SVM) == 0) { printf("SVM: not available.\n"); return (0); } msr = rdmsr(MSR_VM_CR); if ((msr & VM_CR_SVMDIS) != 0) { printf("SVM: disabled by BIOS.\n"); return (0); } return (1); } static int svm_init(int ipinum) { int error, cpu; if (!svm_available()) return (ENXIO); error = check_svm_features(); if (error) return (error); vmcb_clean &= VMCB_CACHE_DEFAULT; for (cpu = 0; cpu < MAXCPU; cpu++) { /* * Initialize the host ASIDs to their "highest" valid values. * * The next ASID allocation will rollover both 'gen' and 'num' * and start off the sequence at {1,1}. */ asid[cpu].gen = ~0UL; asid[cpu].num = nasid - 1; } svm_msr_init(); svm_npt_init(ipinum); /* Enable SVM on all CPUs */ smp_rendezvous(NULL, svm_enable, NULL, NULL); return (0); } static void svm_restore(void) { svm_enable(NULL); } /* Pentium compatible MSRs */ #define MSR_PENTIUM_START 0 #define MSR_PENTIUM_END 0x1FFF /* AMD 6th generation and Intel compatible MSRs */ #define MSR_AMD6TH_START 0xC0000000UL #define MSR_AMD6TH_END 0xC0001FFFUL /* AMD 7th and 8th generation compatible MSRs */ #define MSR_AMD7TH_START 0xC0010000UL #define MSR_AMD7TH_END 0xC0011FFFUL /* * Get the index and bit position for a MSR in permission bitmap. * Two bits are used for each MSR: lower bit for read and higher bit for write. */ static int svm_msr_index(uint64_t msr, int *index, int *bit) { uint32_t base, off; *index = -1; *bit = (msr % 4) * 2; base = 0; if (msr >= MSR_PENTIUM_START && msr <= MSR_PENTIUM_END) { *index = msr / 4; return (0); } base += (MSR_PENTIUM_END - MSR_PENTIUM_START + 1); if (msr >= MSR_AMD6TH_START && msr <= MSR_AMD6TH_END) { off = (msr - MSR_AMD6TH_START); *index = (off + base) / 4; return (0); } base += (MSR_AMD6TH_END - MSR_AMD6TH_START + 1); if (msr >= MSR_AMD7TH_START && msr <= MSR_AMD7TH_END) { off = (msr - MSR_AMD7TH_START); *index = (off + base) / 4; return (0); } return (EINVAL); } /* * Allow vcpu to read or write the 'msr' without trapping into the hypervisor. */ static void svm_msr_perm(uint8_t *perm_bitmap, uint64_t msr, bool read, bool write) { int index, bit, error; error = svm_msr_index(msr, &index, &bit); KASSERT(error == 0, ("%s: invalid msr %#lx", __func__, msr)); KASSERT(index >= 0 && index < SVM_MSR_BITMAP_SIZE, ("%s: invalid index %d for msr %#lx", __func__, index, msr)); KASSERT(bit >= 0 && bit <= 6, ("%s: invalid bit position %d " "msr %#lx", __func__, bit, msr)); if (read) perm_bitmap[index] &= ~(1UL << bit); if (write) perm_bitmap[index] &= ~(2UL << bit); } static void svm_msr_rw_ok(uint8_t *perm_bitmap, uint64_t msr) { svm_msr_perm(perm_bitmap, msr, true, true); } static void svm_msr_rd_ok(uint8_t *perm_bitmap, uint64_t msr) { svm_msr_perm(perm_bitmap, msr, true, false); } static __inline int svm_get_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask) { struct vmcb_ctrl *ctrl; KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); ctrl = svm_get_vmcb_ctrl(sc, vcpu); return (ctrl->intercept[idx] & bitmask ? 
1 : 0); } static __inline void svm_set_intercept(struct svm_softc *sc, int vcpu, int idx, uint32_t bitmask, int enabled) { struct vmcb_ctrl *ctrl; uint32_t oldval; KASSERT(idx >=0 && idx < 5, ("invalid intercept index %d", idx)); ctrl = svm_get_vmcb_ctrl(sc, vcpu); oldval = ctrl->intercept[idx]; if (enabled) ctrl->intercept[idx] |= bitmask; else ctrl->intercept[idx] &= ~bitmask; if (ctrl->intercept[idx] != oldval) { svm_set_dirty(sc, vcpu, VMCB_CACHE_I); VCPU_CTR3(sc->vm, vcpu, "intercept[%d] modified " "from %#x to %#x", idx, oldval, ctrl->intercept[idx]); } } static __inline void svm_disable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) { svm_set_intercept(sc, vcpu, off, bitmask, 0); } static __inline void svm_enable_intercept(struct svm_softc *sc, int vcpu, int off, uint32_t bitmask) { svm_set_intercept(sc, vcpu, off, bitmask, 1); } static void vmcb_init(struct svm_softc *sc, int vcpu, uint64_t iopm_base_pa, uint64_t msrpm_base_pa, uint64_t np_pml4) { struct vmcb_ctrl *ctrl; struct vmcb_state *state; uint32_t mask; int n; ctrl = svm_get_vmcb_ctrl(sc, vcpu); state = svm_get_vmcb_state(sc, vcpu); ctrl->iopm_base_pa = iopm_base_pa; ctrl->msrpm_base_pa = msrpm_base_pa; /* Enable nested paging */ ctrl->np_enable = 1; ctrl->n_cr3 = np_pml4; /* * Intercept accesses to the control registers that are not shadowed * in the VMCB - i.e. all except cr0, cr2, cr3, cr4 and cr8. */ for (n = 0; n < 16; n++) { mask = (BIT(n) << 16) | BIT(n); if (n == 0 || n == 2 || n == 3 || n == 4 || n == 8) svm_disable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); else svm_enable_intercept(sc, vcpu, VMCB_CR_INTCPT, mask); } /* * Intercept everything when tracing guest exceptions otherwise * just intercept machine check exception. */ if (vcpu_trace_exceptions(sc->vm, vcpu)) { for (n = 0; n < 32; n++) { /* * Skip unimplemented vectors in the exception bitmap. */ if (n == 2 || n == 9) { continue; } svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(n)); } } else { svm_enable_intercept(sc, vcpu, VMCB_EXC_INTCPT, BIT(IDT_MC)); } /* Intercept various events (for e.g. I/O, MSR and CPUID accesses) */ svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IO); svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_MSR); svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_CPUID); svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INTR); svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_INIT); svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_NMI); svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SMI); svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_SHUTDOWN); svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_FERR_FREEZE); svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MONITOR); svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_MWAIT); /* * From section "Canonicalization and Consistency Checks" in APMv2 * the VMRUN intercept bit must be set to pass the consistency check. */ svm_enable_intercept(sc, vcpu, VMCB_CTRL2_INTCPT, VMCB_INTCPT_VMRUN); /* * The ASID will be set to a non-zero value just before VMRUN. */ ctrl->asid = 0; /* * Section 15.21.1, Interrupt Masking in EFLAGS * Section 15.21.2, Virtualizing APIC.TPR * * This must be set for %rflag and %cr8 isolation of guest and host. 
*/ ctrl->v_intr_masking = 1; /* Enable Last Branch Record aka LBR for debugging */ ctrl->lbr_virt_en = 1; state->dbgctl = BIT(0); /* EFER_SVM must always be set when the guest is executing */ state->efer = EFER_SVM; /* Set up the PAT to power-on state */ state->g_pat = PAT_VALUE(0, PAT_WRITE_BACK) | PAT_VALUE(1, PAT_WRITE_THROUGH) | PAT_VALUE(2, PAT_UNCACHED) | PAT_VALUE(3, PAT_UNCACHEABLE) | PAT_VALUE(4, PAT_WRITE_BACK) | PAT_VALUE(5, PAT_WRITE_THROUGH) | PAT_VALUE(6, PAT_UNCACHED) | PAT_VALUE(7, PAT_UNCACHEABLE); } /* * Initialize a virtual machine. */ static void * svm_vminit(struct vm *vm, pmap_t pmap) { struct svm_softc *svm_sc; struct svm_vcpu *vcpu; vm_paddr_t msrpm_pa, iopm_pa, pml4_pa; int i; svm_sc = malloc(sizeof (struct svm_softc), M_SVM, M_WAITOK | M_ZERO); svm_sc->vm = vm; svm_sc->nptp = (vm_offset_t)vtophys(pmap->pm_pml4); /* * Intercept read and write accesses to all MSRs. */ memset(svm_sc->msr_bitmap, 0xFF, sizeof(svm_sc->msr_bitmap)); /* * Access to the following MSRs is redirected to the VMCB when the * guest is executing. Therefore it is safe to allow the guest to * read/write these MSRs directly without hypervisor involvement. */ svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_GSBASE); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_FSBASE); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_KGSBASE); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_STAR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_LSTAR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_CSTAR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SF_MASK); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_CS_MSR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_ESP_MSR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_SYSENTER_EIP_MSR); svm_msr_rw_ok(svm_sc->msr_bitmap, MSR_PAT); svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_TSC); /* * Intercept writes to make sure that the EFER_SVM bit is not cleared. */ svm_msr_rd_ok(svm_sc->msr_bitmap, MSR_EFER); /* Intercept access to all I/O ports. */ memset(svm_sc->iopm_bitmap, 0xFF, sizeof(svm_sc->iopm_bitmap)); iopm_pa = vtophys(svm_sc->iopm_bitmap); msrpm_pa = vtophys(svm_sc->msr_bitmap); pml4_pa = svm_sc->nptp; for (i = 0; i < VM_MAXCPU; i++) { vcpu = svm_get_vcpu(svm_sc, i); vcpu->nextrip = ~0; vcpu->lastcpu = NOCPU; vcpu->vmcb_pa = vtophys(&vcpu->vmcb); vmcb_init(svm_sc, i, iopm_pa, msrpm_pa, pml4_pa); svm_msr_guest_init(svm_sc, i); } return (svm_sc); } /* * Collateral for a generic SVM VM-exit. */ static void vm_exit_svm(struct vm_exit *vme, uint64_t code, uint64_t info1, uint64_t info2) { vme->exitcode = VM_EXITCODE_SVM; vme->u.svm.exitcode = code; vme->u.svm.exitinfo1 = info1; vme->u.svm.exitinfo2 = info2; } static int svm_cpl(struct vmcb_state *state) { /* * From APMv2: * "Retrieve the CPL from the CPL field in the VMCB, not * from any segment DPL" */ return (state->cpl); } static enum vm_cpu_mode svm_vcpu_mode(struct vmcb *vmcb) { struct vmcb_segment seg; struct vmcb_state *state; int error; state = &vmcb->state; if (state->efer & EFER_LMA) { error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); KASSERT(error == 0, ("%s: vmcb_seg(cs) error %d", __func__, error)); /* * Section 4.8.1 for APM2, check if Code Segment has * Long attribute set in descriptor. 
*/ if (seg.attrib & VMCB_CS_ATTRIB_L) return (CPU_MODE_64BIT); else return (CPU_MODE_COMPATIBILITY); } else if (state->cr0 & CR0_PE) { return (CPU_MODE_PROTECTED); } else { return (CPU_MODE_REAL); } } static enum vm_paging_mode svm_paging_mode(uint64_t cr0, uint64_t cr4, uint64_t efer) { if ((cr0 & CR0_PG) == 0) return (PAGING_MODE_FLAT); if ((cr4 & CR4_PAE) == 0) return (PAGING_MODE_32); if (efer & EFER_LME) return (PAGING_MODE_64); else return (PAGING_MODE_PAE); } /* * ins/outs utility routines */ static uint64_t svm_inout_str_index(struct svm_regctx *regs, int in) { uint64_t val; val = in ? regs->sctx_rdi : regs->sctx_rsi; return (val); } static uint64_t svm_inout_str_count(struct svm_regctx *regs, int rep) { uint64_t val; val = rep ? regs->sctx_rcx : 1; return (val); } static void svm_inout_str_seginfo(struct svm_softc *svm_sc, int vcpu, int64_t info1, int in, struct vm_inout_str *vis) { int error, s; if (in) { vis->seg_name = VM_REG_GUEST_ES; } else { /* The segment field has standard encoding */ s = (info1 >> 10) & 0x7; vis->seg_name = vm_segment_name(s); } error = vmcb_getdesc(svm_sc, vcpu, vis->seg_name, &vis->seg_desc); KASSERT(error == 0, ("%s: svm_getdesc error %d", __func__, error)); } static int svm_inout_str_addrsize(uint64_t info1) { uint32_t size; size = (info1 >> 7) & 0x7; switch (size) { case 1: return (2); /* 16 bit */ case 2: return (4); /* 32 bit */ case 4: return (8); /* 64 bit */ default: panic("%s: invalid size encoding %d", __func__, size); } } static void svm_paging_info(struct vmcb *vmcb, struct vm_guest_paging *paging) { struct vmcb_state *state; state = &vmcb->state; paging->cr3 = state->cr3; paging->cpl = svm_cpl(state); paging->cpu_mode = svm_vcpu_mode(vmcb); paging->paging_mode = svm_paging_mode(state->cr0, state->cr4, state->efer); } #define UNHANDLED 0 /* * Handle guest I/O intercept. */ static int svm_handle_io(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) { struct vmcb_ctrl *ctrl; struct vmcb_state *state; struct svm_regctx *regs; struct vm_inout_str *vis; uint64_t info1; int inout_string; state = svm_get_vmcb_state(svm_sc, vcpu); ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); regs = svm_get_guest_regctx(svm_sc, vcpu); info1 = ctrl->exitinfo1; inout_string = info1 & BIT(2) ? 1 : 0; /* * The effective segment number in EXITINFO1[12:10] is populated * only if the processor has the DecodeAssist capability. * * XXX this is not specified explicitly in APMv2 but can be verified * empirically. */ if (inout_string && !decode_assist()) return (UNHANDLED); vmexit->exitcode = VM_EXITCODE_INOUT; vmexit->u.inout.in = (info1 & BIT(0)) ? 1 : 0; vmexit->u.inout.string = inout_string; vmexit->u.inout.rep = (info1 & BIT(3)) ? 
1 : 0; vmexit->u.inout.bytes = (info1 >> 4) & 0x7; vmexit->u.inout.port = (uint16_t)(info1 >> 16); vmexit->u.inout.eax = (uint32_t)(state->rax); if (inout_string) { vmexit->exitcode = VM_EXITCODE_INOUT_STR; vis = &vmexit->u.inout_str; svm_paging_info(svm_get_vmcb(svm_sc, vcpu), &vis->paging); vis->rflags = state->rflags; vis->cr0 = state->cr0; vis->index = svm_inout_str_index(regs, vmexit->u.inout.in); vis->count = svm_inout_str_count(regs, vmexit->u.inout.rep); vis->addrsize = svm_inout_str_addrsize(info1); svm_inout_str_seginfo(svm_sc, vcpu, info1, vmexit->u.inout.in, vis); } return (UNHANDLED); } static int npf_fault_type(uint64_t exitinfo1) { if (exitinfo1 & VMCB_NPF_INFO1_W) return (VM_PROT_WRITE); else if (exitinfo1 & VMCB_NPF_INFO1_ID) return (VM_PROT_EXECUTE); else return (VM_PROT_READ); } static bool svm_npf_emul_fault(uint64_t exitinfo1) { if (exitinfo1 & VMCB_NPF_INFO1_ID) { return (false); } if (exitinfo1 & VMCB_NPF_INFO1_GPT) { return (false); } if ((exitinfo1 & VMCB_NPF_INFO1_GPA) == 0) { return (false); } return (true); } static void svm_handle_inst_emul(struct vmcb *vmcb, uint64_t gpa, struct vm_exit *vmexit) { struct vm_guest_paging *paging; struct vmcb_segment seg; struct vmcb_ctrl *ctrl; char *inst_bytes; int error, inst_len; ctrl = &vmcb->ctrl; paging = &vmexit->u.inst_emul.paging; vmexit->exitcode = VM_EXITCODE_INST_EMUL; vmexit->u.inst_emul.gpa = gpa; vmexit->u.inst_emul.gla = VIE_INVALID_GLA; svm_paging_info(vmcb, paging); error = vmcb_seg(vmcb, VM_REG_GUEST_CS, &seg); KASSERT(error == 0, ("%s: vmcb_seg(CS) error %d", __func__, error)); switch(paging->cpu_mode) { case CPU_MODE_REAL: vmexit->u.inst_emul.cs_base = seg.base; vmexit->u.inst_emul.cs_d = 0; break; case CPU_MODE_PROTECTED: case CPU_MODE_COMPATIBILITY: vmexit->u.inst_emul.cs_base = seg.base; /* * Section 4.8.1 of APM2, Default Operand Size or D bit. */ vmexit->u.inst_emul.cs_d = (seg.attrib & VMCB_CS_ATTRIB_D) ? 1 : 0; break; default: vmexit->u.inst_emul.cs_base = 0; vmexit->u.inst_emul.cs_d = 0; break; } /* * Copy the instruction bytes into 'vie' if available. */ if (decode_assist() && !disable_npf_assist) { inst_len = ctrl->inst_len; inst_bytes = ctrl->inst_bytes; } else { inst_len = 0; inst_bytes = NULL; } vie_init(&vmexit->u.inst_emul.vie, inst_bytes, inst_len); } #ifdef KTR static const char * intrtype_to_str(int intr_type) { switch (intr_type) { case VMCB_EVENTINJ_TYPE_INTR: return ("hwintr"); case VMCB_EVENTINJ_TYPE_NMI: return ("nmi"); case VMCB_EVENTINJ_TYPE_INTn: return ("swintr"); case VMCB_EVENTINJ_TYPE_EXCEPTION: return ("exception"); default: panic("%s: unknown intr_type %d", __func__, intr_type); } } #endif /* * Inject an event to vcpu as described in section 15.20, "Event injection". 
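 *
 * As encoded below, the low byte of the VMCB EVENTINJ field carries
 * the vector, the event type sits in the bits immediately above it
 * (intr_type << 8), the optional error code occupies the upper 32
 * bits, and the VMCB_EVENTINJ_VALID and VMCB_EVENTINJ_EC_VALID flags
 * mark the injection and its error code as valid.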
*/ static void svm_eventinject(struct svm_softc *sc, int vcpu, int intr_type, int vector, uint32_t error, bool ec_valid) { struct vmcb_ctrl *ctrl; ctrl = svm_get_vmcb_ctrl(sc, vcpu); KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event already pending %#lx", __func__, ctrl->eventinj)); KASSERT(vector >=0 && vector <= 255, ("%s: invalid vector %d", __func__, vector)); switch (intr_type) { case VMCB_EVENTINJ_TYPE_INTR: case VMCB_EVENTINJ_TYPE_NMI: case VMCB_EVENTINJ_TYPE_INTn: break; case VMCB_EVENTINJ_TYPE_EXCEPTION: if (vector >= 0 && vector <= 31 && vector != 2) break; /* FALLTHROUGH */ default: panic("%s: invalid intr_type/vector: %d/%d", __func__, intr_type, vector); } ctrl->eventinj = vector | (intr_type << 8) | VMCB_EVENTINJ_VALID; if (ec_valid) { ctrl->eventinj |= VMCB_EVENTINJ_EC_VALID; ctrl->eventinj |= (uint64_t)error << 32; VCPU_CTR3(sc->vm, vcpu, "Injecting %s at vector %d errcode %#x", intrtype_to_str(intr_type), vector, error); } else { VCPU_CTR2(sc->vm, vcpu, "Injecting %s at vector %d", intrtype_to_str(intr_type), vector); } } static void svm_update_virqinfo(struct svm_softc *sc, int vcpu) { struct vm *vm; struct vlapic *vlapic; struct vmcb_ctrl *ctrl; int pending; vm = sc->vm; vlapic = vm_lapic(vm, vcpu); ctrl = svm_get_vmcb_ctrl(sc, vcpu); /* Update %cr8 in the emulated vlapic */ vlapic_set_cr8(vlapic, ctrl->v_tpr); /* * If V_IRQ indicates that the interrupt injection attempted on then * last VMRUN was successful then update the vlapic accordingly. */ if (ctrl->v_intr_vector != 0) { pending = ctrl->v_irq; KASSERT(ctrl->v_intr_vector >= 16, ("%s: invalid " "v_intr_vector %d", __func__, ctrl->v_intr_vector)); KASSERT(!ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); VCPU_CTR2(vm, vcpu, "v_intr_vector %d %s", ctrl->v_intr_vector, pending ? "pending" : "accepted"); if (!pending) vlapic_intr_accepted(vlapic, ctrl->v_intr_vector); } } static void svm_save_intinfo(struct svm_softc *svm_sc, int vcpu) { struct vmcb_ctrl *ctrl; uint64_t intinfo; ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); intinfo = ctrl->exitintinfo; if (!VMCB_EXITINTINFO_VALID(intinfo)) return; /* * From APMv2, Section "Intercepts during IDT interrupt delivery" * * If a #VMEXIT happened during event delivery then record the event * that was being delivered. 
*/ VCPU_CTR2(svm_sc->vm, vcpu, "SVM:Pending INTINFO(0x%lx), vector=%d.\n", intinfo, VMCB_EXITINTINFO_VECTOR(intinfo)); vmm_stat_incr(svm_sc->vm, vcpu, VCPU_EXITINTINFO, 1); vm_exit_intinfo(svm_sc->vm, vcpu, intinfo); } static __inline int vintr_intercept_enabled(struct svm_softc *sc, int vcpu) { return (svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR)); } static __inline void enable_intr_window_exiting(struct svm_softc *sc, int vcpu) { struct vmcb_ctrl *ctrl; ctrl = svm_get_vmcb_ctrl(sc, vcpu); if (ctrl->v_irq && ctrl->v_intr_vector == 0) { KASSERT(ctrl->v_ign_tpr, ("%s: invalid v_ign_tpr", __func__)); KASSERT(vintr_intercept_enabled(sc, vcpu), ("%s: vintr intercept should be enabled", __func__)); return; } VCPU_CTR0(sc->vm, vcpu, "Enable intr window exiting"); ctrl->v_irq = 1; ctrl->v_ign_tpr = 1; ctrl->v_intr_vector = 0; svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); } static __inline void disable_intr_window_exiting(struct svm_softc *sc, int vcpu) { struct vmcb_ctrl *ctrl; ctrl = svm_get_vmcb_ctrl(sc, vcpu); if (!ctrl->v_irq && ctrl->v_intr_vector == 0) { KASSERT(!vintr_intercept_enabled(sc, vcpu), ("%s: vintr intercept should be disabled", __func__)); return; } #ifdef KTR if (ctrl->v_intr_vector == 0) VCPU_CTR0(sc->vm, vcpu, "Disable intr window exiting"); else VCPU_CTR0(sc->vm, vcpu, "Clearing V_IRQ interrupt injection"); #endif ctrl->v_irq = 0; ctrl->v_intr_vector = 0; svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_VINTR); } static int svm_modify_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t val) { struct vmcb_ctrl *ctrl; int oldval, newval; ctrl = svm_get_vmcb_ctrl(sc, vcpu); oldval = ctrl->intr_shadow; newval = val ? 1 : 0; if (newval != oldval) { ctrl->intr_shadow = newval; VCPU_CTR1(sc->vm, vcpu, "Setting intr_shadow to %d", newval); } return (0); } static int svm_get_intr_shadow(struct svm_softc *sc, int vcpu, uint64_t *val) { struct vmcb_ctrl *ctrl; ctrl = svm_get_vmcb_ctrl(sc, vcpu); *val = ctrl->intr_shadow; return (0); } /* * Once an NMI is injected it blocks delivery of further NMIs until the handler * executes an IRET. The IRET intercept is enabled when an NMI is injected to * to track when the vcpu is done handling the NMI. */ static int nmi_blocked(struct svm_softc *sc, int vcpu) { int blocked; blocked = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); return (blocked); } static void enable_nmi_blocking(struct svm_softc *sc, int vcpu) { KASSERT(!nmi_blocked(sc, vcpu), ("vNMI already blocked")); VCPU_CTR0(sc->vm, vcpu, "vNMI blocking enabled"); svm_enable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); } static void clear_nmi_blocking(struct svm_softc *sc, int vcpu) { int error; KASSERT(nmi_blocked(sc, vcpu), ("vNMI already unblocked")); VCPU_CTR0(sc->vm, vcpu, "vNMI blocking cleared"); /* * When the IRET intercept is cleared the vcpu will attempt to execute * the "iret" when it runs next. However, it is possible to inject * another NMI into the vcpu before the "iret" has actually executed. * * For e.g. if the "iret" encounters a #NPF when accessing the stack * it will trap back into the hypervisor. If an NMI is pending for * the vcpu it will be injected into the guest. * * XXX this needs to be fixed */ svm_disable_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_IRET); /* * Set 'intr_shadow' to prevent an NMI from being injected on the * immediate VMRUN. 
*/ error = svm_modify_intr_shadow(sc, vcpu, 1); KASSERT(!error, ("%s: error %d setting intr_shadow", __func__, error)); } #define EFER_MBZ_BITS 0xFFFFFFFFFFFF0200UL static int svm_write_efer(struct svm_softc *sc, int vcpu, uint64_t newval, bool *retu) { struct vm_exit *vme; struct vmcb_state *state; uint64_t changed, lma, oldval; int error; state = svm_get_vmcb_state(sc, vcpu); oldval = state->efer; VCPU_CTR2(sc->vm, vcpu, "wrmsr(efer) %#lx/%#lx", oldval, newval); newval &= ~0xFE; /* clear the Read-As-Zero (RAZ) bits */ changed = oldval ^ newval; if (newval & EFER_MBZ_BITS) goto gpf; /* APMv2 Table 14-5 "Long-Mode Consistency Checks" */ if (changed & EFER_LME) { if (state->cr0 & CR0_PG) goto gpf; } /* EFER.LMA = EFER.LME & CR0.PG */ if ((newval & EFER_LME) != 0 && (state->cr0 & CR0_PG) != 0) lma = EFER_LMA; else lma = 0; if ((newval & EFER_LMA) != lma) goto gpf; if (newval & EFER_NXE) { if (!vm_cpuid_capability(sc->vm, vcpu, VCC_NO_EXECUTE)) goto gpf; } /* * XXX bhyve does not enforce segment limits in 64-bit mode. Until * this is fixed flag guest attempt to set EFER_LMSLE as an error. */ if (newval & EFER_LMSLE) { vme = vm_exitinfo(sc->vm, vcpu); vm_exit_svm(vme, VMCB_EXIT_MSR, 1, 0); *retu = true; return (0); } if (newval & EFER_FFXSR) { if (!vm_cpuid_capability(sc->vm, vcpu, VCC_FFXSR)) goto gpf; } if (newval & EFER_TCE) { if (!vm_cpuid_capability(sc->vm, vcpu, VCC_TCE)) goto gpf; } error = svm_setreg(sc, vcpu, VM_REG_GUEST_EFER, newval); KASSERT(error == 0, ("%s: error %d updating efer", __func__, error)); return (0); gpf: vm_inject_gp(sc->vm, vcpu); return (0); } static int emulate_wrmsr(struct svm_softc *sc, int vcpu, u_int num, uint64_t val, bool *retu) { int error; if (lapic_msr(num)) error = lapic_wrmsr(sc->vm, vcpu, num, val, retu); else if (num == MSR_EFER) error = svm_write_efer(sc, vcpu, val, retu); else error = svm_wrmsr(sc, vcpu, num, val, retu); return (error); } static int emulate_rdmsr(struct svm_softc *sc, int vcpu, u_int num, bool *retu) { struct vmcb_state *state; struct svm_regctx *ctx; uint64_t result; int error; if (lapic_msr(num)) error = lapic_rdmsr(sc->vm, vcpu, num, &result, retu); else error = svm_rdmsr(sc, vcpu, num, &result, retu); if (error == 0) { state = svm_get_vmcb_state(sc, vcpu); ctx = svm_get_guest_regctx(sc, vcpu); state->rax = result & 0xffffffff; ctx->sctx_rdx = result >> 32; } return (error); } #ifdef KTR static const char * exit_reason_to_str(uint64_t reason) { static char reasonbuf[32]; switch (reason) { case VMCB_EXIT_INVALID: return ("invalvmcb"); case VMCB_EXIT_SHUTDOWN: return ("shutdown"); case VMCB_EXIT_NPF: return ("nptfault"); case VMCB_EXIT_PAUSE: return ("pause"); case VMCB_EXIT_HLT: return ("hlt"); case VMCB_EXIT_CPUID: return ("cpuid"); case VMCB_EXIT_IO: return ("inout"); case VMCB_EXIT_MC: return ("mchk"); case VMCB_EXIT_INTR: return ("extintr"); case VMCB_EXIT_NMI: return ("nmi"); case VMCB_EXIT_VINTR: return ("vintr"); case VMCB_EXIT_MSR: return ("msr"); case VMCB_EXIT_IRET: return ("iret"); case VMCB_EXIT_MONITOR: return ("monitor"); case VMCB_EXIT_MWAIT: return ("mwait"); default: snprintf(reasonbuf, sizeof(reasonbuf), "%#lx", reason); return (reasonbuf); } } #endif /* KTR */ /* * From section "State Saved on Exit" in APMv2: nRIP is saved for all #VMEXITs * that are due to instruction intercepts as well as MSR and IOIO intercepts * and exceptions caused by INT3, INTO and BOUND instructions. * * Return 1 if the nRIP is valid and 0 otherwise. */ static int nrip_valid(uint64_t exitcode) { switch (exitcode) { case 0x00 ... 
0x0F: /* read of CR0 through CR15 */ case 0x10 ... 0x1F: /* write of CR0 through CR15 */ case 0x20 ... 0x2F: /* read of DR0 through DR15 */ case 0x30 ... 0x3F: /* write of DR0 through DR15 */ case 0x43: /* INT3 */ case 0x44: /* INTO */ case 0x45: /* BOUND */ case 0x65 ... 0x7C: /* VMEXIT_CR0_SEL_WRITE ... VMEXIT_MSR */ case 0x80 ... 0x8D: /* VMEXIT_VMRUN ... VMEXIT_XSETBV */ return (1); default: return (0); } } static int svm_vmexit(struct svm_softc *svm_sc, int vcpu, struct vm_exit *vmexit) { struct vmcb *vmcb; struct vmcb_state *state; struct vmcb_ctrl *ctrl; struct svm_regctx *ctx; uint64_t code, info1, info2, val; uint32_t eax, ecx, edx; int error, errcode_valid, handled, idtvec, reflect; bool retu; ctx = svm_get_guest_regctx(svm_sc, vcpu); vmcb = svm_get_vmcb(svm_sc, vcpu); state = &vmcb->state; ctrl = &vmcb->ctrl; handled = 0; code = ctrl->exitcode; info1 = ctrl->exitinfo1; info2 = ctrl->exitinfo2; vmexit->exitcode = VM_EXITCODE_BOGUS; vmexit->rip = state->rip; vmexit->inst_length = nrip_valid(code) ? ctrl->nrip - state->rip : 0; vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_COUNT, 1); /* * #VMEXIT(INVALID) needs to be handled early because the VMCB is * in an inconsistent state and can trigger assertions that would * never happen otherwise. */ if (code == VMCB_EXIT_INVALID) { vm_exit_svm(vmexit, code, info1, info2); return (0); } KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) == 0, ("%s: event " "injection valid bit is set %#lx", __func__, ctrl->eventinj)); KASSERT(vmexit->inst_length >= 0 && vmexit->inst_length <= 15, ("invalid inst_length %d: code (%#lx), info1 (%#lx), info2 (%#lx)", vmexit->inst_length, code, info1, info2)); svm_update_virqinfo(svm_sc, vcpu); svm_save_intinfo(svm_sc, vcpu); switch (code) { case VMCB_EXIT_IRET: /* * Restart execution at "iret" but with the intercept cleared. */ vmexit->inst_length = 0; clear_nmi_blocking(svm_sc, vcpu); handled = 1; break; case VMCB_EXIT_VINTR: /* interrupt window exiting */ vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_VINTR, 1); handled = 1; break; case VMCB_EXIT_INTR: /* external interrupt */ vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXTINT, 1); handled = 1; break; case VMCB_EXIT_NMI: /* external NMI */ handled = 1; break; case 0x40 ... 0x5F: vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_EXCEPTION, 1); reflect = 1; idtvec = code - 0x40; switch (idtvec) { case IDT_MC: /* * Call the machine check handler by hand. Also don't * reflect the machine check back into the guest. */ reflect = 0; VCPU_CTR0(svm_sc->vm, vcpu, "Vectoring to MCE handler"); __asm __volatile("int $18"); break; case IDT_PF: error = svm_setreg(svm_sc, vcpu, VM_REG_GUEST_CR2, info2); KASSERT(error == 0, ("%s: error %d updating cr2", __func__, error)); /* fallthru */ case IDT_NP: case IDT_SS: case IDT_GP: case IDT_AC: case IDT_TS: errcode_valid = 1; break; case IDT_DF: errcode_valid = 1; info1 = 0; break; case IDT_BP: case IDT_OF: case IDT_BR: /* * The 'nrip' field is populated for INT3, INTO and * BOUND exceptions and this also implies that * 'inst_length' is non-zero. * * Reset 'inst_length' to zero so the guest %rip at * event injection is identical to what it was when * the exception originally happened. 
*/ VCPU_CTR2(svm_sc->vm, vcpu, "Reset inst_length from %d " "to zero before injecting exception %d", vmexit->inst_length, idtvec); vmexit->inst_length = 0; /* fallthru */ default: errcode_valid = 0; info1 = 0; break; } KASSERT(vmexit->inst_length == 0, ("invalid inst_length (%d) " "when reflecting exception %d into guest", vmexit->inst_length, idtvec)); if (reflect) { /* Reflect the exception back into the guest */ VCPU_CTR2(svm_sc->vm, vcpu, "Reflecting exception " "%d/%#x into the guest", idtvec, (int)info1); error = vm_inject_exception(svm_sc->vm, vcpu, idtvec, errcode_valid, info1, 0); KASSERT(error == 0, ("%s: vm_inject_exception error %d", __func__, error)); } handled = 1; break; case VMCB_EXIT_MSR: /* MSR access. */ eax = state->rax; ecx = ctx->sctx_rcx; edx = ctx->sctx_rdx; retu = false; if (info1) { vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_WRMSR, 1); val = (uint64_t)edx << 32 | eax; VCPU_CTR2(svm_sc->vm, vcpu, "wrmsr %#x val %#lx", ecx, val); if (emulate_wrmsr(svm_sc, vcpu, ecx, val, &retu)) { vmexit->exitcode = VM_EXITCODE_WRMSR; vmexit->u.msr.code = ecx; vmexit->u.msr.wval = val; } else if (!retu) { handled = 1; } else { KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, ("emulate_wrmsr retu with bogus exitcode")); } } else { VCPU_CTR1(svm_sc->vm, vcpu, "rdmsr %#x", ecx); vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_RDMSR, 1); if (emulate_rdmsr(svm_sc, vcpu, ecx, &retu)) { vmexit->exitcode = VM_EXITCODE_RDMSR; vmexit->u.msr.code = ecx; } else if (!retu) { handled = 1; } else { KASSERT(vmexit->exitcode != VM_EXITCODE_BOGUS, ("emulate_rdmsr retu with bogus exitcode")); } } break; case VMCB_EXIT_IO: handled = svm_handle_io(svm_sc, vcpu, vmexit); vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INOUT, 1); break; case VMCB_EXIT_CPUID: vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_CPUID, 1); handled = x86_emulate_cpuid(svm_sc->vm, vcpu, (uint32_t *)&state->rax, (uint32_t *)&ctx->sctx_rbx, (uint32_t *)&ctx->sctx_rcx, (uint32_t *)&ctx->sctx_rdx); break; case VMCB_EXIT_HLT: vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_HLT, 1); vmexit->exitcode = VM_EXITCODE_HLT; vmexit->u.hlt.rflags = state->rflags; break; case VMCB_EXIT_PAUSE: vmexit->exitcode = VM_EXITCODE_PAUSE; vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_PAUSE, 1); break; case VMCB_EXIT_NPF: /* EXITINFO2 contains the faulting guest physical address */ if (info1 & VMCB_NPF_INFO1_RSV) { VCPU_CTR2(svm_sc->vm, vcpu, "nested page fault with " "reserved bits set: info1(%#lx) info2(%#lx)", info1, info2); } else if (vm_mem_allocated(svm_sc->vm, vcpu, info2)) { vmexit->exitcode = VM_EXITCODE_PAGING; vmexit->u.paging.gpa = info2; vmexit->u.paging.fault_type = npf_fault_type(info1); vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_NESTED_FAULT, 1); VCPU_CTR3(svm_sc->vm, vcpu, "nested page fault " "on gpa %#lx/%#lx at rip %#lx", info2, info1, state->rip); } else if (svm_npf_emul_fault(info1)) { svm_handle_inst_emul(vmcb, info2, vmexit); vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_INST_EMUL, 1); VCPU_CTR3(svm_sc->vm, vcpu, "inst_emul fault " "for gpa %#lx/%#lx at rip %#lx", info2, info1, state->rip); } break; case VMCB_EXIT_MONITOR: vmexit->exitcode = VM_EXITCODE_MONITOR; break; case VMCB_EXIT_MWAIT: vmexit->exitcode = VM_EXITCODE_MWAIT; break; default: vmm_stat_incr(svm_sc->vm, vcpu, VMEXIT_UNKNOWN, 1); break; } VCPU_CTR4(svm_sc->vm, vcpu, "%s %s vmexit at %#lx/%d", handled ? 
"handled" : "unhandled", exit_reason_to_str(code), vmexit->rip, vmexit->inst_length); if (handled) { vmexit->rip += vmexit->inst_length; vmexit->inst_length = 0; state->rip = vmexit->rip; } else { if (vmexit->exitcode == VM_EXITCODE_BOGUS) { /* * If this VM exit was not claimed by anybody then * treat it as a generic SVM exit. */ vm_exit_svm(vmexit, code, info1, info2); } else { /* * The exitcode and collateral have been populated. * The VM exit will be processed further in userland. */ } } return (handled); } static void svm_inj_intinfo(struct svm_softc *svm_sc, int vcpu) { uint64_t intinfo; if (!vm_entry_intinfo(svm_sc->vm, vcpu, &intinfo)) return; KASSERT(VMCB_EXITINTINFO_VALID(intinfo), ("%s: entry intinfo is not " "valid: %#lx", __func__, intinfo)); svm_eventinject(svm_sc, vcpu, VMCB_EXITINTINFO_TYPE(intinfo), VMCB_EXITINTINFO_VECTOR(intinfo), VMCB_EXITINTINFO_EC(intinfo), VMCB_EXITINTINFO_EC_VALID(intinfo)); vmm_stat_incr(svm_sc->vm, vcpu, VCPU_INTINFO_INJECTED, 1); VCPU_CTR1(svm_sc->vm, vcpu, "Injected entry intinfo: %#lx", intinfo); } /* * Inject event to virtual cpu. */ static void svm_inj_interrupts(struct svm_softc *sc, int vcpu, struct vlapic *vlapic) { struct vmcb_ctrl *ctrl; struct vmcb_state *state; struct svm_vcpu *vcpustate; uint8_t v_tpr; int vector, need_intr_window, pending_apic_vector; state = svm_get_vmcb_state(sc, vcpu); ctrl = svm_get_vmcb_ctrl(sc, vcpu); vcpustate = svm_get_vcpu(sc, vcpu); need_intr_window = 0; pending_apic_vector = 0; if (vcpustate->nextrip != state->rip) { ctrl->intr_shadow = 0; VCPU_CTR2(sc->vm, vcpu, "Guest interrupt blocking " "cleared due to rip change: %#lx/%#lx", vcpustate->nextrip, state->rip); } /* * Inject pending events or exceptions for this vcpu. * * An event might be pending because the previous #VMEXIT happened * during event delivery (i.e. ctrl->exitintinfo). * * An event might also be pending because an exception was injected * by the hypervisor (e.g. #PF during instruction emulation). */ svm_inj_intinfo(sc, vcpu); /* NMI event has priority over interrupts. */ if (vm_nmi_pending(sc->vm, vcpu)) { if (nmi_blocked(sc, vcpu)) { /* * Can't inject another NMI if the guest has not * yet executed an "iret" after the last NMI. */ VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due " "to NMI-blocking"); } else if (ctrl->intr_shadow) { /* * Can't inject an NMI if the vcpu is in an intr_shadow. */ VCPU_CTR0(sc->vm, vcpu, "Cannot inject NMI due to " "interrupt shadow"); need_intr_window = 1; goto done; } else if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { /* * If there is already an exception/interrupt pending * then defer the NMI until after that. */ VCPU_CTR1(sc->vm, vcpu, "Cannot inject NMI due to " "eventinj %#lx", ctrl->eventinj); /* * Use self-IPI to trigger a VM-exit as soon as * possible after the event injection is completed. * * This works only if the external interrupt exiting * is at a lower priority than the event injection. * * Although not explicitly specified in APMv2 the * relative priorities were verified empirically. */ ipi_cpu(curcpu, IPI_AST); /* XXX vmm_ipinum? */ } else { vm_nmi_clear(sc->vm, vcpu); /* Inject NMI, vector number is not used */ svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_NMI, IDT_NMI, 0, false); /* virtual NMI blocking is now in effect */ enable_nmi_blocking(sc, vcpu); VCPU_CTR0(sc->vm, vcpu, "Injecting vNMI"); } } if (!vm_extint_pending(sc->vm, vcpu)) { /* * APIC interrupts are delivered using the V_IRQ offload. 
* * The primary benefit is that the hypervisor doesn't need to * deal with the various conditions that inhibit interrupts. * It also means that TPR changes via CR8 will be handled * without any hypervisor involvement. * * Note that the APIC vector must remain pending in the vIRR * until it is confirmed that it was delivered to the guest. * This can be confirmed based on the value of V_IRQ at the * next #VMEXIT (1 = pending, 0 = delivered). * * Also note that it is possible that another higher priority * vector can become pending before this vector is delivered * to the guest. This is alright because vcpu_notify_event() * will send an IPI and force the vcpu to trap back into the * hypervisor. The higher priority vector will be injected on * the next VMRUN. */ if (vlapic_pending_intr(vlapic, &vector)) { KASSERT(vector >= 16 && vector <= 255, ("invalid vector %d from local APIC", vector)); pending_apic_vector = vector; } goto done; } /* Ask the legacy pic for a vector to inject */ vatpic_pending_intr(sc->vm, &vector); KASSERT(vector >= 0 && vector <= 255, ("invalid vector %d from INTR", vector)); /* * If the guest has disabled interrupts or is in an interrupt shadow * then we cannot inject the pending interrupt. */ if ((state->rflags & PSL_I) == 0) { VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " "rflags %#lx", vector, state->rflags); need_intr_window = 1; goto done; } if (ctrl->intr_shadow) { VCPU_CTR1(sc->vm, vcpu, "Cannot inject vector %d due to " "interrupt shadow", vector); need_intr_window = 1; goto done; } if (ctrl->eventinj & VMCB_EVENTINJ_VALID) { VCPU_CTR2(sc->vm, vcpu, "Cannot inject vector %d due to " "eventinj %#lx", vector, ctrl->eventinj); need_intr_window = 1; goto done; } /* * Legacy PIC interrupts are delivered via the event injection * mechanism. */ svm_eventinject(sc, vcpu, VMCB_EVENTINJ_TYPE_INTR, vector, 0, false); vm_extint_clear(sc->vm, vcpu); vatpic_intr_accepted(sc->vm, vector); /* * Force a VM-exit as soon as the vcpu is ready to accept another * interrupt. This is done because the PIC might have another vector * that it wants to inject. Also, if the APIC has a pending interrupt * that was preempted by the ExtInt then it allows us to inject the * APIC vector as soon as possible. */ need_intr_window = 1; done: /* * The guest can modify the TPR by writing to %CR8. In guest mode * the processor reflects this write to V_TPR without hypervisor * intervention. * * The guest can also modify the TPR by writing to it via the memory * mapped APIC page. In this case, the write will be emulated by the * hypervisor. For this reason V_TPR must be updated before every * VMRUN. */ v_tpr = vlapic_get_cr8(vlapic); KASSERT(v_tpr <= 15, ("invalid v_tpr %#x", v_tpr)); if (ctrl->v_tpr != v_tpr) { VCPU_CTR2(sc->vm, vcpu, "VMCB V_TPR changed from %#x to %#x", ctrl->v_tpr, v_tpr); ctrl->v_tpr = v_tpr; svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); } if (pending_apic_vector) { /* * If an APIC vector is being injected then interrupt window * exiting is not possible on this VMRUN. */ KASSERT(!need_intr_window, ("intr_window exiting impossible")); VCPU_CTR1(sc->vm, vcpu, "Injecting vector %d using V_IRQ", pending_apic_vector); ctrl->v_irq = 1; ctrl->v_ign_tpr = 0; ctrl->v_intr_vector = pending_apic_vector; ctrl->v_intr_prio = pending_apic_vector >> 4; svm_set_dirty(sc, vcpu, VMCB_CACHE_TPR); } else if (need_intr_window) { /* * We use V_IRQ in conjunction with the VINTR intercept to * trap into the hypervisor as soon as a virtual interrupt * can be delivered. 
* * Since injected events are not subject to intercept checks * we need to ensure that the V_IRQ is not actually going to * be delivered on VM entry. The KASSERT below enforces this. */ KASSERT((ctrl->eventinj & VMCB_EVENTINJ_VALID) != 0 || (state->rflags & PSL_I) == 0 || ctrl->intr_shadow, ("Bogus intr_window_exiting: eventinj (%#lx), " "intr_shadow (%u), rflags (%#lx)", ctrl->eventinj, ctrl->intr_shadow, state->rflags)); enable_intr_window_exiting(sc, vcpu); } else { disable_intr_window_exiting(sc, vcpu); } } static __inline void restore_host_tss(void) { struct system_segment_descriptor *tss_sd; /* * The TSS descriptor was in use prior to launching the guest so it * has been marked busy. * * 'ltr' requires the descriptor to be marked available so change the * type to "64-bit available TSS". */ tss_sd = PCPU_GET(tss); tss_sd->sd_type = SDT_SYSTSS; ltr(GSEL(GPROC0_SEL, SEL_KPL)); } static void check_asid(struct svm_softc *sc, int vcpuid, pmap_t pmap, u_int thiscpu) { struct svm_vcpu *vcpustate; struct vmcb_ctrl *ctrl; long eptgen; bool alloc_asid; KASSERT(CPU_ISSET(thiscpu, &pmap->pm_active), ("%s: nested pmap not " "active on cpu %u", __func__, thiscpu)); vcpustate = svm_get_vcpu(sc, vcpuid); ctrl = svm_get_vmcb_ctrl(sc, vcpuid); /* * The TLB entries associated with the vcpu's ASID are not valid * if either of the following conditions is true: * * 1. The vcpu's ASID generation is different than the host cpu's * ASID generation. This happens when the vcpu migrates to a new * host cpu. It can also happen when the number of vcpus executing * on a host cpu is greater than the number of ASIDs available. * * 2. The pmap generation number is different than the value cached in * the 'vcpustate'. This happens when the host invalidates pages * belonging to the guest. * * asidgen eptgen Action * mismatch mismatch * 0 0 (a) * 0 1 (b1) or (b2) * 1 0 (c) * 1 1 (d) * * (a) There is no mismatch in eptgen or ASID generation and therefore * no further action is needed. * * (b1) If the cpu supports FlushByAsid then the vcpu's ASID is * retained and the TLB entries associated with this ASID * are flushed by VMRUN. * * (b2) If the cpu does not support FlushByAsid then a new ASID is * allocated. * * (c) A new ASID is allocated. * * (d) A new ASID is allocated. */ alloc_asid = false; eptgen = pmap->pm_eptgen; ctrl->tlb_ctrl = VMCB_TLB_FLUSH_NOTHING; if (vcpustate->asid.gen != asid[thiscpu].gen) { alloc_asid = true; /* (c) and (d) */ } else if (vcpustate->eptgen != eptgen) { if (flush_by_asid()) ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; /* (b1) */ else alloc_asid = true; /* (b2) */ } else { /* * This is the common case (a). */ KASSERT(!alloc_asid, ("ASID allocation not necessary")); KASSERT(ctrl->tlb_ctrl == VMCB_TLB_FLUSH_NOTHING, ("Invalid VMCB tlb_ctrl: %#x", ctrl->tlb_ctrl)); } if (alloc_asid) { if (++asid[thiscpu].num >= nasid) { asid[thiscpu].num = 1; if (++asid[thiscpu].gen == 0) asid[thiscpu].gen = 1; /* * If this cpu does not support "flush-by-asid" * then flush the entire TLB on a generation * bump. Subsequent ASID allocation in this * generation can be done without a TLB flush. */ if (!flush_by_asid()) ctrl->tlb_ctrl = VMCB_TLB_FLUSH_ALL; } vcpustate->asid.gen = asid[thiscpu].gen; vcpustate->asid.num = asid[thiscpu].num; ctrl->asid = vcpustate->asid.num; svm_set_dirty(sc, vcpuid, VMCB_CACHE_ASID); /* * If this cpu supports "flush-by-asid" then the TLB * was not flushed after the generation bump. The TLB * is flushed selectively after every new ASID allocation. 
*/ if (flush_by_asid()) ctrl->tlb_ctrl = VMCB_TLB_FLUSH_GUEST; } vcpustate->eptgen = eptgen; KASSERT(ctrl->asid != 0, ("Guest ASID must be non-zero")); KASSERT(ctrl->asid == vcpustate->asid.num, ("ASID mismatch: %u/%u", ctrl->asid, vcpustate->asid.num)); } static __inline void disable_gintr(void) { __asm __volatile("clgi"); } static __inline void enable_gintr(void) { __asm __volatile("stgi"); } /* * Start vcpu with specified RIP. */ static int svm_vmrun(void *arg, int vcpu, register_t rip, pmap_t pmap, struct vm_eventinfo *evinfo) { struct svm_regctx *gctx; struct svm_softc *svm_sc; struct svm_vcpu *vcpustate; struct vmcb_state *state; struct vmcb_ctrl *ctrl; struct vm_exit *vmexit; struct vlapic *vlapic; struct vm *vm; uint64_t vmcb_pa; int handled; svm_sc = arg; vm = svm_sc->vm; vcpustate = svm_get_vcpu(svm_sc, vcpu); state = svm_get_vmcb_state(svm_sc, vcpu); ctrl = svm_get_vmcb_ctrl(svm_sc, vcpu); vmexit = vm_exitinfo(vm, vcpu); vlapic = vm_lapic(vm, vcpu); gctx = svm_get_guest_regctx(svm_sc, vcpu); vmcb_pa = svm_sc->vcpu[vcpu].vmcb_pa; if (vcpustate->lastcpu != curcpu) { /* * Force new ASID allocation by invalidating the generation. */ vcpustate->asid.gen = 0; /* * Invalidate the VMCB state cache by marking all fields dirty. */ svm_set_dirty(svm_sc, vcpu, 0xffffffff); /* * XXX * Setting 'vcpustate->lastcpu' here is bit premature because * we may return from this function without actually executing * the VMRUN instruction. This could happen if a rendezvous * or an AST is pending on the first time through the loop. * * This works for now but any new side-effects of vcpu * migration should take this case into account. */ vcpustate->lastcpu = curcpu; vmm_stat_incr(vm, vcpu, VCPU_MIGRATIONS, 1); } svm_msr_guest_enter(svm_sc, vcpu); /* Update Guest RIP */ state->rip = rip; do { /* * Disable global interrupts to guarantee atomicity during * loading of guest state. This includes not only the state * loaded by the "vmrun" instruction but also software state * maintained by the hypervisor: suspended and rendezvous * state, NPT generation number, vlapic interrupts etc. */ disable_gintr(); if (vcpu_suspended(evinfo)) { enable_gintr(); vm_exit_suspended(vm, vcpu, state->rip); break; } if (vcpu_rendezvous_pending(evinfo)) { enable_gintr(); vm_exit_rendezvous(vm, vcpu, state->rip); break; } if (vcpu_reqidle(evinfo)) { enable_gintr(); vm_exit_reqidle(vm, vcpu, state->rip); break; } /* We are asked to give the cpu by scheduler. */ if (vcpu_should_yield(vm, vcpu)) { enable_gintr(); vm_exit_astpending(vm, vcpu, state->rip); break; } svm_inj_interrupts(svm_sc, vcpu, vlapic); /* Activate the nested pmap on 'curcpu' */ CPU_SET_ATOMIC_ACQ(curcpu, &pmap->pm_active); /* * Check the pmap generation and the ASID generation to * ensure that the vcpu does not use stale TLB mappings. */ check_asid(svm_sc, vcpu, pmap, curcpu); ctrl->vmcb_clean = vmcb_clean & ~vcpustate->dirty; vcpustate->dirty = 0; VCPU_CTR1(vm, vcpu, "vmcb clean %#x", ctrl->vmcb_clean); /* Launch Virtual Machine. */ VCPU_CTR1(vm, vcpu, "Resume execution at %#lx", state->rip); svm_launch(vmcb_pa, gctx, &__pcpu[curcpu]); CPU_CLR_ATOMIC(curcpu, &pmap->pm_active); /* * The host GDTR and IDTR is saved by VMRUN and restored * automatically on #VMEXIT. However, the host TSS needs * to be restored explicitly. */ restore_host_tss(); /* #VMEXIT disables interrupts so re-enable them here. */ enable_gintr(); /* Update 'nextrip' */ vcpustate->nextrip = state->rip; /* Handle #VMEXIT and if required return to user space. 
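* (Clarifying note: svm_vmexit() returns non-zero while the exit could be handled entirely in the kernel, so the enclosing loop keeps re-entering the guest until a return to user space is required.)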
*/ handled = svm_vmexit(svm_sc, vcpu, vmexit); } while (handled); svm_msr_guest_exit(svm_sc, vcpu); return (0); } static void svm_vmcleanup(void *arg) { struct svm_softc *sc = arg; free(sc, M_SVM); } static register_t * swctx_regptr(struct svm_regctx *regctx, int reg) { switch (reg) { case VM_REG_GUEST_RBX: return (&regctx->sctx_rbx); case VM_REG_GUEST_RCX: return (&regctx->sctx_rcx); case VM_REG_GUEST_RDX: return (&regctx->sctx_rdx); case VM_REG_GUEST_RDI: return (&regctx->sctx_rdi); case VM_REG_GUEST_RSI: return (&regctx->sctx_rsi); case VM_REG_GUEST_RBP: return (&regctx->sctx_rbp); case VM_REG_GUEST_R8: return (&regctx->sctx_r8); case VM_REG_GUEST_R9: return (&regctx->sctx_r9); case VM_REG_GUEST_R10: return (&regctx->sctx_r10); case VM_REG_GUEST_R11: return (&regctx->sctx_r11); case VM_REG_GUEST_R12: return (&regctx->sctx_r12); case VM_REG_GUEST_R13: return (&regctx->sctx_r13); case VM_REG_GUEST_R14: return (&regctx->sctx_r14); case VM_REG_GUEST_R15: return (&regctx->sctx_r15); default: return (NULL); } } static int svm_getreg(void *arg, int vcpu, int ident, uint64_t *val) { struct svm_softc *svm_sc; register_t *reg; svm_sc = arg; if (ident == VM_REG_GUEST_INTR_SHADOW) { return (svm_get_intr_shadow(svm_sc, vcpu, val)); } if (vmcb_read(svm_sc, vcpu, ident, val) == 0) { return (0); } reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); if (reg != NULL) { *val = *reg; return (0); } VCPU_CTR1(svm_sc->vm, vcpu, "svm_getreg: unknown register %#x", ident); return (EINVAL); } static int svm_setreg(void *arg, int vcpu, int ident, uint64_t val) { struct svm_softc *svm_sc; register_t *reg; svm_sc = arg; if (ident == VM_REG_GUEST_INTR_SHADOW) { return (svm_modify_intr_shadow(svm_sc, vcpu, val)); } if (vmcb_write(svm_sc, vcpu, ident, val) == 0) { return (0); } reg = swctx_regptr(svm_get_guest_regctx(svm_sc, vcpu), ident); if (reg != NULL) { *reg = val; return (0); } /* * XXX deal with CR3 and invalidate TLB entries tagged with the * vcpu's ASID. This needs to be treated differently depending on * whether 'running' is true/false.
*/ VCPU_CTR1(svm_sc->vm, vcpu, "svm_setreg: unknown register %#x", ident); return (EINVAL); } static int svm_setcap(void *arg, int vcpu, int type, int val) { struct svm_softc *sc; int error; sc = arg; error = 0; switch (type) { case VM_CAP_HALT_EXIT: svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT, val); break; case VM_CAP_PAUSE_EXIT: svm_set_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PAUSE, val); break; case VM_CAP_UNRESTRICTED_GUEST: /* Unrestricted guest execution cannot be disabled in SVM */ if (val == 0) error = EINVAL; break; default: error = ENOENT; break; } return (error); } static int svm_getcap(void *arg, int vcpu, int type, int *retval) { struct svm_softc *sc; int error; sc = arg; error = 0; switch (type) { case VM_CAP_HALT_EXIT: *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_HLT); break; case VM_CAP_PAUSE_EXIT: *retval = svm_get_intercept(sc, vcpu, VMCB_CTRL1_INTCPT, VMCB_INTCPT_PAUSE); break; case VM_CAP_UNRESTRICTED_GUEST: *retval = 1; /* unrestricted guest is always enabled */ break; default: error = ENOENT; break; } return (error); } static struct vlapic * svm_vlapic_init(void *arg, int vcpuid) { struct svm_softc *svm_sc; struct vlapic *vlapic; svm_sc = arg; vlapic = malloc(sizeof(struct vlapic), M_SVM_VLAPIC, M_WAITOK | M_ZERO); vlapic->vm = svm_sc->vm; vlapic->vcpuid = vcpuid; vlapic->apic_page = (struct LAPIC *)&svm_sc->apic_page[vcpuid]; vlapic_init(vlapic); return (vlapic); } static void svm_vlapic_cleanup(void *arg, struct vlapic *vlapic) { vlapic_cleanup(vlapic); free(vlapic, M_SVM_VLAPIC); } struct vmm_ops vmm_ops_amd = { svm_init, svm_cleanup, svm_restore, svm_vminit, svm_vmrun, svm_vmcleanup, svm_getreg, svm_setreg, vmcb_getdesc, vmcb_setdesc, svm_getcap, svm_setcap, svm_npt_alloc, svm_npt_free, svm_vlapic_init, svm_vlapic_cleanup }; Index: head/sys/arm/annapurna/alpine/alpine_machdep.c =================================================================== --- head/sys/arm/annapurna/alpine/alpine_machdep.c (revision 295879) +++ head/sys/arm/annapurna/alpine/alpine_machdep.c (revision 295880) @@ -1,148 +1,147 @@ /*- * Copyright (c) 2013 Ruslan Bukin * Copyright (c) 2015 Semihalf * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include __FBSDID("$FreeBSD$"); #define _ARM32_BUS_DMA_PRIVATE #include #include #include #include #include #include #include #include #include /* For trapframe_t, used in */ #include -#include #include #include #include #include #include "opt_ddb.h" #include "opt_platform.h" struct mtx al_dbg_lock; #define DEVMAP_MAX_VA_ADDRESS 0xF0000000 bus_addr_t al_devmap_pa; bus_addr_t al_devmap_size; #define AL_NB_SERVICE_OFFSET 0x70000 #define AL_NB_CCU_OFFSET 0x90000 #define AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET 0x4000 #define AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET 0x5000 #define AL_CCU_SPECULATION_CONTROL_OFFSET 0x4 #define AL_NB_ACF_MISC_OFFSET 0xD0 #define AL_NB_ACF_MISC_READ_BYPASS (1 << 30) int alpine_get_devmap_base(bus_addr_t *pa, bus_addr_t *size); vm_offset_t platform_lastaddr(void) { return (DEVMAP_MAX_VA_ADDRESS); } void platform_probe_and_attach(void) { } void platform_gpio_init(void) { } void platform_late_init(void) { bus_addr_t reg_baddr; uint32_t val; if (!mtx_initialized(&al_dbg_lock)) mtx_init(&al_dbg_lock, "ALDBG", "ALDBG", MTX_SPIN); /* configure system fabric */ if (bus_space_map(fdtbus_bs_tag, al_devmap_pa, al_devmap_size, 0, &reg_baddr)) panic("Couldn't map Register Space area"); /* do not allow reads to bypass writes to different addresses */ val = bus_space_read_4(fdtbus_bs_tag, reg_baddr, AL_NB_SERVICE_OFFSET + AL_NB_ACF_MISC_OFFSET); val &= ~AL_NB_ACF_MISC_READ_BYPASS; bus_space_write_4(fdtbus_bs_tag, reg_baddr, AL_NB_SERVICE_OFFSET + AL_NB_ACF_MISC_OFFSET, val); /* enable cache snoop */ bus_space_write_4(fdtbus_bs_tag, reg_baddr, AL_NB_CCU_OFFSET + AL_CCU_SNOOP_CONTROL_IOFAB_0_OFFSET, 1); bus_space_write_4(fdtbus_bs_tag, reg_baddr, AL_NB_CCU_OFFSET + AL_CCU_SNOOP_CONTROL_IOFAB_1_OFFSET, 1); /* disable speculative fetches from masters */ bus_space_write_4(fdtbus_bs_tag, reg_baddr, AL_NB_CCU_OFFSET + AL_CCU_SPECULATION_CONTROL_OFFSET, 7); bus_space_unmap(fdtbus_bs_tag, reg_baddr, al_devmap_size); } /* * Construct devmap table with DT-derived config data. */ int platform_devmap_init(void) { alpine_get_devmap_base(&al_devmap_pa, &al_devmap_size); arm_devmap_add_entry(al_devmap_pa, al_devmap_size); return (0); } struct arm32_dma_range * bus_dma_get_range(void) { return (NULL); } int bus_dma_get_range_nb(void) { return (0); } Index: head/sys/arm/arm/mp_machdep.c =================================================================== --- head/sys/arm/arm/mp_machdep.c (revision 295879) +++ head/sys/arm/arm/mp_machdep.c (revision 295880) @@ -1,525 +1,524 @@ /*- * Copyright (c) 2011 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #ifdef VFP #include #endif #ifdef CPU_MV_PJ4B #include #include #endif #include "opt_smp.h" extern struct pcpu __pcpu[]; /* used to hold the AP's until we are ready to release them */ struct mtx ap_boot_mtx; struct pcb stoppcbs[MAXCPU]; /* # of Applications processors */ volatile int mp_naps; /* Set to 1 once we're ready to let the APs out of the pen. */ volatile int aps_ready = 0; #ifndef ARM_INTRNG static int ipi_handler(void *arg); #endif void set_stackptrs(int cpu); /* Temporary variables for init_secondary() */ void *dpcpu[MAXCPU - 1]; /* Determine if we running MP machine */ int cpu_mp_probe(void) { CPU_SETOF(0, &all_cpus); return (platform_mp_probe()); } /* Start Application Processor via platform specific function */ static int check_ap(void) { uint32_t ms; for (ms = 0; ms < 2000; ++ms) { if ((mp_naps + 1) == mp_ncpus) return (0); /* success */ else DELAY(1000); } return (-2); } extern unsigned char _end[]; /* Initialize and fire up non-boot processors */ void cpu_mp_start(void) { int error, i; mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN); /* Reserve memory for application processors */ for(i = 0; i < (mp_ncpus - 1); i++) dpcpu[i] = (void *)kmem_malloc(kernel_arena, DPCPU_SIZE, M_WAITOK | M_ZERO); dcache_wbinv_poc_all(); /* Initialize boot code and start up processors */ platform_mp_start_ap(); /* Check if ap's started properly */ error = check_ap(); if (error) printf("WARNING: Some AP's failed to start\n"); else for (i = 1; i < mp_ncpus; i++) CPU_SET(i, &all_cpus); } /* Introduce rest of cores to the world */ void cpu_mp_announce(void) { } extern vm_paddr_t pmap_pa; void init_secondary(int cpu) { struct pcpu *pc; uint32_t loop_counter; #ifndef ARM_INTRNG int start = 0, end = 0; #endif uint32_t actlr_mask, actlr_set; pmap_set_tex(); cpuinfo_get_actlr_modifier(&actlr_mask, &actlr_set); reinit_mmu(pmap_kern_ttb, actlr_mask, actlr_set); cpu_setup(); /* Provide stack pointers for other processor modes. 
*/ set_stackptrs(cpu); enable_interrupts(PSR_A); pc = &__pcpu[cpu]; /* * pcpu_init() updates queue, so it should not be executed in parallel * on several cores */ while(mp_naps < (cpu - 1)) ; pcpu_init(pc, cpu, sizeof(struct pcpu)); dpcpu_init(dpcpu[cpu - 1], cpu); /* Signal our startup to BSP */ atomic_add_rel_32(&mp_naps, 1); /* Spin until the BSP releases the APs */ while (!atomic_load_acq_int(&aps_ready)) { #if __ARM_ARCH >= 7 __asm __volatile("wfe"); #endif } /* Initialize curthread */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); pc->pc_curthread = pc->pc_idlethread; pc->pc_curpcb = pc->pc_idlethread->td_pcb; set_curthread(pc->pc_idlethread); #ifdef VFP vfp_init(); #endif mtx_lock_spin(&ap_boot_mtx); atomic_add_rel_32(&smp_cpus, 1); if (smp_cpus == mp_ncpus) { /* enable IPI's, tlb shootdown, freezes etc */ atomic_store_rel_int(&smp_started, 1); } mtx_unlock_spin(&ap_boot_mtx); #ifndef ARM_INTRNG /* Enable ipi */ #ifdef IPI_IRQ_START start = IPI_IRQ_START; #ifdef IPI_IRQ_END end = IPI_IRQ_END; #else end = IPI_IRQ_START; #endif #endif for (int i = start; i <= end; i++) arm_unmask_irq(i); #endif /* INTRNG */ enable_interrupts(PSR_I); loop_counter = 0; while (smp_started == 0) { DELAY(100); loop_counter++; if (loop_counter == 1000) CTR0(KTR_SMP, "AP still wait for smp_started"); } /* Start per-CPU event timers. */ cpu_initclocks_ap(); CTR0(KTR_SMP, "go into scheduler"); platform_mp_init_secondary(); /* Enter the scheduler */ sched_throw(NULL); panic("scheduler returned us to %s", __func__); /* NOTREACHED */ } #ifdef ARM_INTRNG static void ipi_rendezvous(void *dummy __unused) { CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); } static void ipi_ast(void *dummy __unused) { CTR0(KTR_SMP, "IPI_AST"); } static void ipi_stop(void *dummy __unused) { u_int cpu; /* * IPI_STOP_HARD is mapped to IPI_STOP. */ CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD"); cpu = PCPU_GET(cpuid); savectx(&stoppcbs[cpu]); /* * CPUs are stopped when entering the debugger and at * system shutdown, both events which can precede a * panic dump. For the dump to be correct, all caches * must be flushed and invalidated, but on ARM there's * no way to broadcast a wbinv_all to other cores. * Instead, we have each core do the local wbinv_all as * part of stopping the core. The core requesting the * stop will do the l2 cache flush after all other cores * have done their l1 flushes and stopped. 
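* Hence the local dcache_wbinv_poc_all() below must complete before this core advertises itself in stopped_cpus.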
*/ dcache_wbinv_poc_all(); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); #ifdef DDB dbg_resume_dbreg(); #endif CTR0(KTR_SMP, "IPI_STOP (restart)"); } static void ipi_preempt(void *arg) { struct trapframe *oldframe; struct thread *td; critical_enter(); td = curthread; td->td_intr_nesting_level++; oldframe = td->td_intr_frame; td->td_intr_frame = (struct trapframe *)arg; CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(td); td->td_intr_frame = oldframe; td->td_intr_nesting_level--; critical_exit(); } static void ipi_hardclock(void *arg) { struct trapframe *oldframe; struct thread *td; critical_enter(); td = curthread; td->td_intr_nesting_level++; oldframe = td->td_intr_frame; td->td_intr_frame = (struct trapframe *)arg; CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); td->td_intr_frame = oldframe; td->td_intr_nesting_level--; critical_exit(); } #else static int ipi_handler(void *arg) { u_int cpu, ipi; cpu = PCPU_GET(cpuid); ipi = pic_ipi_read((int)arg); while ((ipi != 0x3ff)) { switch (ipi) { case IPI_RENDEZVOUS: CTR0(KTR_SMP, "IPI_RENDEZVOUS"); smp_rendezvous_action(); break; case IPI_AST: CTR0(KTR_SMP, "IPI_AST"); break; case IPI_STOP: /* * IPI_STOP_HARD is mapped to IPI_STOP so it is not * necessary to add it in the switch. */ CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD"); savectx(&stoppcbs[cpu]); /* * CPUs are stopped when entering the debugger and at * system shutdown, both events which can precede a * panic dump. For the dump to be correct, all caches * must be flushed and invalidated, but on ARM there's * no way to broadcast a wbinv_all to other cores. * Instead, we have each core do the local wbinv_all as * part of stopping the core. The core requesting the * stop will do the l2 cache flush after all other cores * have done their l1 flushes and stopped. */ dcache_wbinv_poc_all(); /* Indicate we are stopped */ CPU_SET_ATOMIC(cpu, &stopped_cpus); /* Wait for restart */ while (!CPU_ISSET(cpu, &started_cpus)) cpu_spinwait(); CPU_CLR_ATOMIC(cpu, &started_cpus); CPU_CLR_ATOMIC(cpu, &stopped_cpus); #ifdef DDB dbg_resume_dbreg(); #endif CTR0(KTR_SMP, "IPI_STOP (restart)"); break; case IPI_PREEMPT: CTR1(KTR_SMP, "%s: IPI_PREEMPT", __func__); sched_preempt(curthread); break; case IPI_HARDCLOCK: CTR1(KTR_SMP, "%s: IPI_HARDCLOCK", __func__); hardclockintr(); break; default: panic("Unknown IPI 0x%0x on cpu %d", ipi, curcpu); } pic_ipi_clear(ipi); ipi = pic_ipi_read(-1); } return (FILTER_HANDLED); } #endif static void release_aps(void *dummy __unused) { uint32_t loop_counter; #ifndef ARM_INTRNG int start = 0, end = 0; #endif if (mp_ncpus == 1) return; #ifdef ARM_INTRNG intr_ipi_set_handler(IPI_RENDEZVOUS, "rendezvous", ipi_rendezvous, NULL, 0); intr_ipi_set_handler(IPI_AST, "ast", ipi_ast, NULL, 0); intr_ipi_set_handler(IPI_STOP, "stop", ipi_stop, NULL, 0); intr_ipi_set_handler(IPI_PREEMPT, "preempt", ipi_preempt, NULL, 0); intr_ipi_set_handler(IPI_HARDCLOCK, "hardclock", ipi_hardclock, NULL, 0); #else #ifdef IPI_IRQ_START start = IPI_IRQ_START; #ifdef IPI_IRQ_END end = IPI_IRQ_END; #else end = IPI_IRQ_START; #endif #endif for (int i = start; i <= end; i++) { /* * IPI handler */ /* * Use 0xdeadbeef as the argument value for irq 0, * if we used 0, the intr code will give the trap frame * pointer instead. 
*/ arm_setup_irqhandler("ipi", ipi_handler, NULL, (void *)i, i, INTR_TYPE_MISC | INTR_EXCL, NULL); /* Enable ipi */ arm_unmask_irq(i); } #endif atomic_store_rel_int(&aps_ready, 1); /* Wake the other threads up */ #if __ARM_ARCH >= 7 armv7_sev(); #endif printf("Release APs\n"); for (loop_counter = 0; loop_counter < 2000; loop_counter++) { if (smp_started) return; DELAY(1000); } printf("AP's not started\n"); } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, release_aps, NULL); struct cpu_group * cpu_topo(void) { return (smp_topo_1level(CG_SHARE_L2, mp_ncpus, 0)); } void cpu_mp_setmaxid(void) { platform_mp_setmaxid(); } /* Sending IPI */ void ipi_all_but_self(u_int ipi) { cpuset_t other_cpus; other_cpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &other_cpus); CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); platform_ipi_send(other_cpus, ipi); } void ipi_cpu(int cpu, u_int ipi) { cpuset_t cpus; CPU_ZERO(&cpus); CPU_SET(cpu, &cpus); CTR3(KTR_SMP, "%s: cpu: %d, ipi: %x", __func__, cpu, ipi); platform_ipi_send(cpus, ipi); } void ipi_selected(cpuset_t cpus, u_int ipi) { CTR2(KTR_SMP, "%s: ipi: %x", __func__, ipi); platform_ipi_send(cpus, ipi); } Index: head/sys/arm64/arm64/minidump_machdep.c =================================================================== --- head/sys/arm64/arm64/minidump_machdep.c (revision 295879) +++ head/sys/arm64/arm64/minidump_machdep.c (revision 295880) @@ -1,467 +1,466 @@ /*- * Copyright (c) 2006 Peter Wemm * Copyright (c) 2015 The FreeBSD Foundation * All rights reserved. * * This software was developed by Andrew Turner under * sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_watchdog.h" #include "opt_watchdog.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); /* * Don't touch the first SIZEOF_METADATA bytes on the dump device. This * is to protect us from metadata and to protect metadata from us. */ #define SIZEOF_METADATA (64*1024) uint64_t *vm_page_dump; int vm_page_dump_size; static struct kerneldumpheader kdh; static off_t dumplo; /* Handle chunked writes. 
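* fragsz below tracks how many bytes are queued at dump_va; blk_flush() writes the pending fragment to the dump device at offset dumplo.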
*/ static size_t fragsz; static void *dump_va; static size_t counter, progress, dumpsize; static uint64_t tmpbuffer[PAGE_SIZE / sizeof(uint64_t)]; CTASSERT(sizeof(*vm_page_dump) == 8); static int is_dumpable(vm_paddr_t pa) { vm_page_t m; int i; if ((m = vm_phys_paddr_to_vm_page(pa)) != NULL) return ((m->flags & PG_NODUMP) == 0); for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) { if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) return (1); } return (0); } static int blk_flush(struct dumperinfo *di) { int error; if (fragsz == 0) return (0); error = dump_write(di, dump_va, 0, dumplo, fragsz); dumplo += fragsz; fragsz = 0; return (error); } static struct { int min_per; int max_per; int visited; } progress_track[10] = { { 0, 10, 0}, { 10, 20, 0}, { 20, 30, 0}, { 30, 40, 0}, { 40, 50, 0}, { 50, 60, 0}, { 60, 70, 0}, { 70, 80, 0}, { 80, 90, 0}, { 90, 100, 0} }; static void report_progress(size_t progress, size_t dumpsize) { int sofar, i; sofar = 100 - ((progress * 100) / dumpsize); for (i = 0; i < nitems(progress_track); i++) { if (sofar < progress_track[i].min_per || sofar > progress_track[i].max_per) continue; if (progress_track[i].visited) return; progress_track[i].visited = 1; printf("..%d%%", sofar); return; } } static int blk_write(struct dumperinfo *di, char *ptr, vm_paddr_t pa, size_t sz) { size_t len; int error, c; u_int maxdumpsz; maxdumpsz = min(di->maxiosize, MAXDUMPPGS * PAGE_SIZE); if (maxdumpsz == 0) /* seatbelt */ maxdumpsz = PAGE_SIZE; error = 0; if ((sz % PAGE_SIZE) != 0) { printf("size not page aligned\n"); return (EINVAL); } if (ptr != NULL && pa != 0) { printf("cant have both va and pa!\n"); return (EINVAL); } if ((((uintptr_t)pa) % PAGE_SIZE) != 0) { printf("address not page aligned %p\n", ptr); return (EINVAL); } if (ptr != NULL) { /* * If we're doing a virtual dump, flush any * pre-existing pa pages. */ error = blk_flush(di); if (error) return (error); } while (sz) { len = maxdumpsz - fragsz; if (len > sz) len = sz; counter += len; progress -= len; if (counter >> 22) { report_progress(progress, dumpsize); counter &= (1 << 22) - 1; } wdog_kern_pat(WD_LASTVAL); if (ptr) { error = dump_write(di, ptr, 0, dumplo, len); if (error) return (error); dumplo += len; ptr += len; sz -= len; } else { dump_va = (void *)PHYS_TO_DMAP(pa); fragsz += len; pa += len; sz -= len; error = blk_flush(di); if (error) return (error); } /* Check for user abort. */ c = cncheckc(); if (c == 0x03) return (ECANCELED); if (c != -1) printf(" (CTRL-C to abort) "); } return (0); } int minidumpsys(struct dumperinfo *di) { pd_entry_t *l1, *l2; pt_entry_t *l3; uint32_t pmapsize; vm_offset_t va; vm_paddr_t pa; int error; uint64_t bits; int i, bit; int retry_count; struct minidumphdr mdhdr; retry_count = 0; retry: retry_count++; error = 0; pmapsize = 0; for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) { pmapsize += PAGE_SIZE; if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3)) continue; /* We should always be using the l2 table for kvm */ if (l2 == NULL) continue; if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) { pa = *l2 & ~ATTR_MASK; for (i = 0; i < Ln_ENTRIES; i++, pa += PAGE_SIZE) { if (is_dumpable(pa)) dump_add_page(pa); } } else if ((*l2 & ATTR_DESCR_MASK) == L2_TABLE) { for (i = 0; i < Ln_ENTRIES; i++) { if ((l3[i] & ATTR_DESCR_MASK) != L3_PAGE) continue; pa = l3[i] & ~ATTR_MASK; if (is_dumpable(pa)) dump_add_page(pa); } } } /* Calculate dump size. 
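* The total is the page-table pages counted above, plus the message buffer, the dump bitmap, one page for every bit still set in vm_page_dump, and a final page for the minidump header.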
*/ dumpsize = pmapsize; dumpsize += round_page(msgbufp->msg_size); dumpsize += round_page(vm_page_dump_size); for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) { bits = vm_page_dump[i]; while (bits) { bit = ffsl(bits) - 1; pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE; /* Clear out undumpable pages now if needed */ if (is_dumpable(pa)) dumpsize += PAGE_SIZE; else dump_drop_page(pa); bits &= ~(1ul << bit); } } dumpsize += PAGE_SIZE; /* Determine dump offset on device. */ if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) { error = E2BIG; goto fail; } dumplo = di->mediaoffset + di->mediasize - dumpsize; dumplo -= sizeof(kdh) * 2; progress = dumpsize; /* Initialize mdhdr */ bzero(&mdhdr, sizeof(mdhdr)); strcpy(mdhdr.magic, MINIDUMP_MAGIC); mdhdr.version = MINIDUMP_VERSION; mdhdr.msgbufsize = msgbufp->msg_size; mdhdr.bitmapsize = vm_page_dump_size; mdhdr.pmapsize = pmapsize; mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS; mdhdr.dmapphys = DMAP_MIN_PHYSADDR; mdhdr.dmapbase = DMAP_MIN_ADDRESS; mdhdr.dmapend = DMAP_MAX_ADDRESS; mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_AARCH64_VERSION, dumpsize, di->blocksize); printf("Dumping %llu out of %ju MB:", (long long)dumpsize >> 20, ptoa((uintmax_t)physmem) / 1048576); /* Dump leader */ error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh)); if (error) goto fail; dumplo += sizeof(kdh); /* Dump my header */ bzero(&tmpbuffer, sizeof(tmpbuffer)); bcopy(&mdhdr, &tmpbuffer, sizeof(mdhdr)); error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* Dump msgbuf up front */ error = blk_write(di, (char *)msgbufp->msg_ptr, 0, round_page(msgbufp->msg_size)); if (error) goto fail; /* Dump bitmap */ error = blk_write(di, (char *)vm_page_dump, 0, round_page(vm_page_dump_size)); if (error) goto fail; /* Dump kernel page directory pages */ bzero(&tmpbuffer, sizeof(tmpbuffer)); for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += L2_SIZE) { if (!pmap_get_tables(pmap_kernel(), va, &l1, &l2, &l3)) { /* We always write a page, even if it is zero */ error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* flush, in case we reuse tmpbuffer in the same block*/ error = blk_flush(di); if (error) goto fail; } else if (l2 == NULL) { pa = (*l1 & ~ATTR_MASK) | (va & L1_OFFSET); /* Generate fake l3 entries based upon the l1 entry */ for (i = 0; i < Ln_ENTRIES; i++) { tmpbuffer[i] = pa + (i * PAGE_SIZE) | ATTR_DEFAULT | L3_PAGE; } /* We always write a page, even if it is zero */ error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* flush, in case we reuse tmpbuffer in the same block*/ error = blk_flush(di); if (error) goto fail; bzero(&tmpbuffer, sizeof(tmpbuffer)); } else if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK) { /* TODO: Handle an invalid L2 entry */ pa = (*l2 & ~ATTR_MASK) | (va & L2_OFFSET); /* Generate fake l3 entries based upon the l1 entry */ for (i = 0; i < Ln_ENTRIES; i++) { tmpbuffer[i] = pa + (i * PAGE_SIZE) | ATTR_DEFAULT | L3_PAGE; } /* We always write a page, even if it is zero */ error = blk_write(di, (char *)&tmpbuffer, 0, PAGE_SIZE); if (error) goto fail; /* flush, in case we reuse fakepd in the same block */ error = blk_flush(di); if (error) goto fail; bzero(&tmpbuffer, sizeof(tmpbuffer)); continue; } else { pa = *l2 & ~ATTR_MASK; /* We always write a page, even if it is zero */ error = blk_write(di, NULL, pa, PAGE_SIZE); if (error) goto fail; } } /* Dump memory chunks */ /* XXX cluster it up and use blk_dump() */ for (i = 0; i < 
vm_page_dump_size / sizeof(*vm_page_dump); i++) { bits = vm_page_dump[i]; while (bits) { bit = ffsl(bits) - 1; pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE; error = blk_write(di, 0, pa, PAGE_SIZE); if (error) goto fail; bits &= ~(1ul << bit); } } error = blk_flush(di); if (error) goto fail; /* Dump trailer */ error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh)); if (error) goto fail; dumplo += sizeof(kdh); /* Signal completion, signoff and exit stage left. */ dump_write(di, NULL, 0, 0, 0); printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; printf("\n"); if (error == ENOSPC) { printf("Dump map grown while dumping. "); if (retry_count < 5) { printf("Retrying...\n"); goto retry; } printf("Dump failed.\n"); } else if (error == ECANCELED) printf("Dump aborted\n"); else if (error == E2BIG) printf("Dump failed. Partition too small.\n"); else printf("** DUMP FAILED (ERROR %d) **\n", error); return (error); } void dump_add_page(vm_paddr_t pa) { int idx, bit; pa >>= PAGE_SHIFT; idx = pa >> 6; /* 2^6 = 64 */ bit = pa & 63; atomic_set_long(&vm_page_dump[idx], 1ul << bit); } void dump_drop_page(vm_paddr_t pa) { int idx, bit; pa >>= PAGE_SHIFT; idx = pa >> 6; /* 2^6 = 64 */ bit = pa & 63; atomic_clear_long(&vm_page_dump[idx], 1ul << bit); } Index: head/sys/arm64/cloudabi64/cloudabi64_sysvec.c =================================================================== --- head/sys/arm64/cloudabi64/cloudabi64_sysvec.c (revision 295879) +++ head/sys/arm64/cloudabi64/cloudabi64_sysvec.c (revision 295880) @@ -1,162 +1,161 @@ /*- * Copyright (c) 2015 Nuxi, https://nuxi.nl/ * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include -#include #include #include #include #include extern const char *cloudabi64_syscallnames[]; extern struct sysent cloudabi64_sysent[]; static int cloudabi64_fetch_syscall_args(struct thread *td, struct syscall_args *sa) { struct trapframe *frame = td->td_frame; int i; /* Obtain system call number. */ sa->code = frame->tf_x[8]; if (sa->code >= CLOUDABI64_SYS_MAXSYSCALL) return (ENOSYS); sa->callp = &cloudabi64_sysent[sa->code]; /* Fetch system call arguments. */ for (i = 0; i < MAXARGS; i++) sa->args[i] = frame->tf_x[i]; /* Default system call return values. 
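* td_retval[1] is seeded from the incoming x1 so that system calls which only produce one result leave the second return register unchanged.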
*/ td->td_retval[0] = 0; td->td_retval[1] = frame->tf_x[1]; return (0); } static void cloudabi64_set_syscall_retval(struct thread *td, int error) { struct trapframe *frame = td->td_frame; switch (error) { case 0: /* System call succeeded. */ frame->tf_x[0] = td->td_retval[0]; frame->tf_x[1] = td->td_retval[1]; frame->tf_spsr &= ~PSR_C; break; case ERESTART: /* Restart system call. */ frame->tf_elr -= 4; break; case EJUSTRETURN: break; default: /* System call returned an error. */ frame->tf_x[0] = cloudabi_convert_errno(error); frame->tf_spsr |= PSR_C; break; } } static void cloudabi64_schedtail(struct thread *td) { struct trapframe *frame = td->td_frame; /* * Initial register values for processes returning from fork. * Make sure that we only set these values when forking, not * when creating a new thread. */ if ((td->td_pflags & TDP_FORKING) != 0) { frame->tf_x[0] = CLOUDABI_PROCESS_CHILD; frame->tf_x[1] = td->td_tid; } } void cloudabi64_thread_setregs(struct thread *td, const cloudabi64_threadattr_t *attr) { struct trapframe *frame; stack_t stack; /* Perform standard register initialization. */ stack.ss_sp = (void *)attr->stack; stack.ss_size = attr->stack_size; cpu_set_upcall_kse(td, (void *)attr->entry_point, NULL, &stack); /* * Pass in the thread ID of the new thread and the argument * pointer provided by the parent thread in as arguments to the * entry point. */ frame = td->td_frame; frame->tf_x[0] = td->td_tid; frame->tf_x[1] = attr->argument; } static struct sysentvec cloudabi64_elf_sysvec = { .sv_size = CLOUDABI64_SYS_MAXSYSCALL, .sv_table = cloudabi64_sysent, .sv_fixup = cloudabi64_fixup, .sv_name = "CloudABI ELF64", .sv_coredump = elf64_coredump, .sv_pagesize = PAGE_SIZE, .sv_minuser = VM_MIN_ADDRESS, .sv_maxuser = VM_MAXUSER_ADDRESS, .sv_usrstack = USRSTACK, .sv_stackprot = VM_PROT_READ | VM_PROT_WRITE, .sv_copyout_strings = cloudabi64_copyout_strings, .sv_flags = SV_ABI_CLOUDABI | SV_CAPSICUM, .sv_set_syscall_retval = cloudabi64_set_syscall_retval, .sv_fetch_syscall_args = cloudabi64_fetch_syscall_args, .sv_syscallnames = cloudabi64_syscallnames, .sv_schedtail = cloudabi64_schedtail, }; INIT_SYSENTVEC(elf_sysvec, &cloudabi64_elf_sysvec); Elf64_Brandinfo cloudabi64_brand = { .brand = ELFOSABI_CLOUDABI, .machine = EM_AARCH64, .sysvec = &cloudabi64_elf_sysvec, .compat_3_brand = "CloudABI", }; Index: head/sys/compat/linuxkpi/common/include/linux/dma-mapping.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/dma-mapping.h (revision 295879) +++ head/sys/compat/linuxkpi/common/include/linux/dma-mapping.h (revision 295880) @@ -1,281 +1,280 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_DMA_MAPPING_H_ #define _LINUX_DMA_MAPPING_H_ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include enum dma_data_direction { DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2, DMA_NONE = 3, }; struct dma_map_ops { void* (*alloc_coherent)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp); void (*free_coherent)(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle); dma_addr_t (*map_page)(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs); int (*map_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs); void (*unmap_sg)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs); void (*sync_single_for_cpu)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir); void (*sync_single_for_device)(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction dir); void (*sync_single_range_for_cpu)(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction dir); void (*sync_single_range_for_device)(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction dir); void (*sync_sg_for_cpu)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir); void (*sync_sg_for_device)(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir); int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); int (*dma_supported)(struct device *dev, u64 mask); int is_phys; }; #define DMA_BIT_MASK(n) ((2ULL << ((n) - 1)) - 1ULL) static inline int dma_supported(struct device *dev, u64 mask) { /* XXX busdma takes care of this elsewhere. */ return (1); } static inline int dma_set_mask(struct device *dev, u64 dma_mask) { if (!dev->dma_mask || !dma_supported(dev, dma_mask)) return -EIO; *dev->dma_mask = dma_mask; return (0); } static inline int dma_set_coherent_mask(struct device *dev, u64 mask) { if (!dma_supported(dev, mask)) return -EIO; /* XXX Currently we don't support a seperate coherent mask. 
*/ return 0; } static inline void * dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { vm_paddr_t high; size_t align; void *mem; if (dev->dma_mask) high = *dev->dma_mask; else high = BUS_SPACE_MAXADDR_32BIT; align = PAGE_SIZE << get_order(size); mem = (void *)kmem_alloc_contig(kmem_arena, size, flag, 0, high, align, 0, VM_MEMATTR_DEFAULT); if (mem) *dma_handle = vtophys(mem); else *dma_handle = 0; return (mem); } static inline void * dma_zalloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag) { return (dma_alloc_coherent(dev, size, dma_handle, flag | __GFP_ZERO)); } static inline void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t dma_handle) { kmem_free(kmem_arena, (vm_offset_t)cpu_addr, size); } /* XXX This only works with no iommu. */ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { return vtophys(ptr); } static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs) { } static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; for_each_sg(sgl, sg, nents, i) sg_dma_address(sg) = sg_phys(sg); return (nents); } static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction dir, struct dma_attrs *attrs) { } static inline dma_addr_t dma_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction direction) { return VM_PAGE_TO_PHYS(page) + offset; } static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size, enum dma_data_direction direction) { } static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { } static inline void dma_sync_single(struct device *dev, dma_addr_t addr, size_t size, enum dma_data_direction dir) { dma_sync_single_for_cpu(dev, addr, size, dir); } static inline void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction) { } static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) { } static inline void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) { } static inline void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { } static inline void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { } static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) { return (0); } static inline unsigned int dma_set_max_seg_size(struct device *dev, unsigned int size) { return (0); } #define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL) #define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL) #define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL) #define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL) #define DEFINE_DMA_UNMAP_ADDR(name) dma_addr_t name #define DEFINE_DMA_UNMAP_LEN(name) __u32 name #define dma_unmap_addr(p, name) ((p)->name) #define 
dma_unmap_addr_set(p, name, v) (((p)->name) = (v)) #define dma_unmap_len(p, name) ((p)->name) #define dma_unmap_len_set(p, name, v) (((p)->name) = (v)) extern int uma_align_cache; #define dma_get_cache_alignment() uma_align_cache #endif /* _LINUX_DMA_MAPPING_H_ */ Index: head/sys/compat/linuxkpi/common/include/linux/list.h =================================================================== --- head/sys/compat/linuxkpi/common/include/linux/list.h (revision 295879) +++ head/sys/compat/linuxkpi/common/include/linux/list.h (revision 295880) @@ -1,439 +1,438 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _LINUX_LIST_H_ #define _LINUX_LIST_H_ /* * Since LIST_HEAD conflicts with the linux definition we must include any * FreeBSD header which requires it here so it is resolved with the correct * definition prior to the undef. 
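* LIST_HEAD is #undef'd and redefined with the Linux initializer further below, once all of these FreeBSD headers have been processed.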
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #define prefetch(x) struct list_head { struct list_head *next; struct list_head *prev; }; static inline void INIT_LIST_HEAD(struct list_head *list) { list->next = list->prev = list; } static inline int list_empty(const struct list_head *head) { return (head->next == head); } static inline void list_del(struct list_head *entry) { entry->next->prev = entry->prev; entry->prev->next = entry->next; } static inline void list_replace(struct list_head *old, struct list_head *new) { new->next = old->next; new->next->prev = new; new->prev = old->prev; new->prev->next = new; } static inline void linux_list_add(struct list_head *new, struct list_head *prev, struct list_head *next) { next->prev = new; new->next = next; new->prev = prev; prev->next = new; } static inline void list_del_init(struct list_head *entry) { list_del(entry); INIT_LIST_HEAD(entry); } #define list_entry(ptr, type, field) container_of(ptr, type, field) #define list_first_entry(ptr, type, member) \ list_entry((ptr)->next, type, member) #define list_next_entry(ptr, member) \ list_entry(((ptr)->member.next), typeof(*(ptr)), member) #define list_for_each(p, head) \ for (p = (head)->next; p != (head); p = (p)->next) #define list_for_each_safe(p, n, head) \ for (p = (head)->next, n = (p)->next; p != (head); p = n, n = (p)->next) #define list_for_each_entry(p, h, field) \ for (p = list_entry((h)->next, typeof(*p), field); &(p)->field != (h); \ p = list_entry((p)->field.next, typeof(*p), field)) #define list_for_each_entry_safe(p, n, h, field) \ for (p = list_entry((h)->next, typeof(*p), field), \ n = list_entry((p)->field.next, typeof(*p), field); &(p)->field != (h);\ p = n, n = list_entry(n->field.next, typeof(*n), field)) #define list_for_each_entry_from(p, h, field) \ for ( ; &(p)->field != (h); \ p = list_entry((p)->field.next, typeof(*p), field)) #define list_for_each_entry_continue(p, h, field) \ for (p = list_next_entry((p), field); &(p)->field != (h); \ p = list_next_entry((p), field)) #define list_for_each_entry_safe_from(pos, n, head, member) \ for (n = list_entry((pos)->member.next, typeof(*pos), member); \ &(pos)->member != (head); \ pos = n, n = list_entry(n->member.next, typeof(*n), member)) #define list_for_each_entry_reverse(p, h, field) \ for (p = list_entry((h)->prev, typeof(*p), field); &(p)->field != (h); \ p = list_entry((p)->field.prev, typeof(*p), field)) #define list_for_each_entry_continue_reverse(p, h, field) \ for (p = list_entry((p)->field.prev, typeof(*p), field); &(p)->field != (h); \ p = list_entry((p)->field.prev, typeof(*p), field)) #define list_for_each_prev(p, h) for (p = (h)->prev; p != (h); p = (p)->prev) static inline void list_add(struct list_head *new, struct list_head *head) { linux_list_add(new, head, head->next); } static inline void list_add_tail(struct list_head *new, struct list_head *head) { linux_list_add(new, head->prev, head); } static inline void list_move(struct list_head *list, struct list_head *head) { list_del(list); list_add(list, head); } static inline void list_move_tail(struct list_head *entry, struct list_head *head) { list_del(entry); list_add_tail(entry, head); } static inline void linux_list_splice(const struct list_head *list, struct list_head *prev, struct list_head *next) { struct list_head *first; struct list_head *last; 
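/* Detach the chain running from list->next to list->prev and stitch it in between 'prev' and 'next'. */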
if (list_empty(list)) return; first = list->next; last = list->prev; first->prev = prev; prev->next = first; last->next = next; next->prev = last; } static inline void list_splice(const struct list_head *list, struct list_head *head) { linux_list_splice(list, head, head->next); } static inline void list_splice_tail(struct list_head *list, struct list_head *head) { linux_list_splice(list, head->prev, head); } static inline void list_splice_init(struct list_head *list, struct list_head *head) { linux_list_splice(list, head, head->next); INIT_LIST_HEAD(list); } static inline void list_splice_tail_init(struct list_head *list, struct list_head *head) { linux_list_splice(list, head->prev, head); INIT_LIST_HEAD(list); } #undef LIST_HEAD #define LIST_HEAD(name) struct list_head name = { &(name), &(name) } struct hlist_head { struct hlist_node *first; }; struct hlist_node { struct hlist_node *next, **pprev; }; #define HLIST_HEAD_INIT { } #define HLIST_HEAD(name) struct hlist_head name = HLIST_HEAD_INIT #define INIT_HLIST_HEAD(head) (head)->first = NULL #define INIT_HLIST_NODE(node) \ do { \ (node)->next = NULL; \ (node)->pprev = NULL; \ } while (0) static inline int hlist_unhashed(const struct hlist_node *h) { return !h->pprev; } static inline int hlist_empty(const struct hlist_head *h) { return !h->first; } static inline void hlist_del(struct hlist_node *n) { if (n->next) n->next->pprev = n->pprev; *n->pprev = n->next; } static inline void hlist_del_init(struct hlist_node *n) { if (hlist_unhashed(n)) return; hlist_del(n); INIT_HLIST_NODE(n); } static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h) { n->next = h->first; if (h->first) h->first->pprev = &n->next; h->first = n; n->pprev = &h->first; } static inline void hlist_add_before(struct hlist_node *n, struct hlist_node *next) { n->pprev = next->pprev; n->next = next; next->pprev = &n->next; *(n->pprev) = n; } static inline void hlist_add_after(struct hlist_node *n, struct hlist_node *next) { next->next = n->next; n->next = next; next->pprev = &n->next; if (next->next) next->next->pprev = &next->next; } static inline void hlist_move_list(struct hlist_head *old, struct hlist_head *new) { new->first = old->first; if (new->first) new->first->pprev = &new->first; old->first = NULL; } /** * list_is_singular - tests whether a list has just one entry. * @head: the list to test. */ static inline int list_is_singular(const struct list_head *head) { return !list_empty(head) && (head->next == head->prev); } static inline void __list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) { struct list_head *new_first = entry->next; list->next = head->next; list->next->prev = list; list->prev = entry; entry->next = list; head->next = new_first; new_first->prev = head; } /** * list_cut_position - cut a list into two * @list: a new list to add all removed entries * @head: a list with entries * @entry: an entry within head, could be the head itself * and if so we won't cut the list * * This helper moves the initial part of @head, up to and * including @entry, from @head to @list. You should * pass on @entry an element you know is on @head. @list * should be an empty list or a list you do not care about * losing its data. 
* */ static inline void list_cut_position(struct list_head *list, struct list_head *head, struct list_head *entry) { if (list_empty(head)) return; if (list_is_singular(head) && (head->next != entry && head != entry)) return; if (entry == head) INIT_LIST_HEAD(list); else __list_cut_position(list, head, entry); } /** * list_is_last - tests whether @list is the last entry in list @head * @list: the entry to test * @head: the head of the list */ static inline int list_is_last(const struct list_head *list, const struct list_head *head) { return list->next == head; } #define hlist_entry(ptr, type, field) container_of(ptr, type, field) #define hlist_for_each(p, head) \ for (p = (head)->first; p; p = (p)->next) #define hlist_for_each_safe(p, n, head) \ for (p = (head)->first; p && ({ n = (p)->next; 1; }); p = n) #define hlist_entry_safe(ptr, type, member) \ ((ptr) ? hlist_entry(ptr, type, member) : NULL) #define hlist_for_each_entry(pos, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\ pos; \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) #define hlist_for_each_entry_continue(pos, member) \ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member); \ (pos); \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) #define hlist_for_each_entry_from(pos, member) \ for (; (pos); \ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) #define hlist_for_each_entry_safe(pos, n, head, member) \ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ (pos) && ({ n = (pos)->member.next; 1; }); \ pos = hlist_entry_safe(n, typeof(*(pos)), member)) #endif /* _LINUX_LIST_H_ */ Index: head/sys/compat/linuxkpi/common/src/linux_compat.c =================================================================== --- head/sys/compat/linuxkpi/common/src/linux_compat.c (revision 295879) +++ head/sys/compat/linuxkpi/common/src/linux_compat.c (revision 295880) @@ -1,1299 +1,1298 @@ /*- * Copyright (c) 2010 Isilon Systems, Inc. * Copyright (c) 2010 iX Systems, Inc. * Copyright (c) 2010 Panasas, Inc. * Copyright (c) 2013-2016 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE(M_KMALLOC, "linux", "Linux kmalloc compat"); #include /* Undo Linux compat changes. */ #undef RB_ROOT #undef file #undef cdev #define RB_ROOT(head) (head)->rbh_root struct kobject linux_class_root; struct device linux_root_device; struct class linux_class_misc; struct list_head pci_drivers; struct list_head pci_devices; struct net init_net; spinlock_t pci_lock; struct sx linux_global_rcu_lock; unsigned long linux_timer_hz_mask; int panic_cmp(struct rb_node *one, struct rb_node *two) { panic("no cmp"); } RB_GENERATE(linux_root, rb_node, __entry, panic_cmp); int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list args) { va_list tmp_va; int len; char *old; char *name; char dummy; old = kobj->name; if (old && fmt == NULL) return (0); /* compute length of string */ va_copy(tmp_va, args); len = vsnprintf(&dummy, 0, fmt, tmp_va); va_end(tmp_va); /* account for zero termination */ len++; /* check for error */ if (len < 1) return (-EINVAL); /* allocate memory for string */ name = kzalloc(len, GFP_KERNEL); if (name == NULL) return (-ENOMEM); vsnprintf(name, len, fmt, args); kobj->name = name; /* free old string */ kfree(old); /* filter new string */ for (; *name != '\0'; name++) if (*name == '/') *name = '!'; return (0); } int kobject_set_name(struct kobject *kobj, const char *fmt, ...) { va_list args; int error; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); return (error); } static int kobject_add_complete(struct kobject *kobj, struct kobject *parent) { const struct kobj_type *t; int error; kobj->parent = parent; error = sysfs_create_dir(kobj); if (error == 0 && kobj->ktype && kobj->ktype->default_attrs) { struct attribute **attr; t = kobj->ktype; for (attr = t->default_attrs; *attr != NULL; attr++) { error = sysfs_create_file(kobj, *attr); if (error) break; } if (error) sysfs_remove_dir(kobj); } return (error); } int kobject_add(struct kobject *kobj, struct kobject *parent, const char *fmt, ...) 
{ va_list args; int error; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); if (error) return (error); return kobject_add_complete(kobj, parent); } void linux_kobject_release(struct kref *kref) { struct kobject *kobj; char *name; kobj = container_of(kref, struct kobject, kref); sysfs_remove_dir(kobj); name = kobj->name; if (kobj->ktype && kobj->ktype->release) kobj->ktype->release(kobj); kfree(name); } static void linux_kobject_kfree(struct kobject *kobj) { kfree(kobj); } static void linux_kobject_kfree_name(struct kobject *kobj) { if (kobj) { kfree(kobj->name); } } const struct kobj_type linux_kfree_type = { .release = linux_kobject_kfree }; static void linux_device_release(struct device *dev) { pr_debug("linux_device_release: %s\n", dev_name(dev)); kfree(dev); } static ssize_t linux_class_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct class, kobj), dattr, buf); return (error); } static ssize_t linux_class_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct class_attribute *dattr; ssize_t error; dattr = container_of(attr, struct class_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct class, kobj), dattr, buf, count); return (error); } static void linux_class_release(struct kobject *kobj) { struct class *class; class = container_of(kobj, struct class, kobj); if (class->class_release) class->class_release(class); } static const struct sysfs_ops linux_class_sysfs = { .show = linux_class_show, .store = linux_class_store, }; const struct kobj_type linux_class_ktype = { .release = linux_class_release, .sysfs_ops = &linux_class_sysfs }; static void linux_dev_release(struct kobject *kobj) { struct device *dev; dev = container_of(kobj, struct device, kobj); /* This is the precedence defined by linux. */ if (dev->release) dev->release(dev); else if (dev->class && dev->class->dev_release) dev->class->dev_release(dev); } static ssize_t linux_dev_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->show) error = dattr->show(container_of(kobj, struct device, kobj), dattr, buf); return (error); } static ssize_t linux_dev_store(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count) { struct device_attribute *dattr; ssize_t error; dattr = container_of(attr, struct device_attribute, attr); error = -EIO; if (dattr->store) error = dattr->store(container_of(kobj, struct device, kobj), dattr, buf, count); return (error); } static const struct sysfs_ops linux_dev_sysfs = { .show = linux_dev_show, .store = linux_dev_store, }; const struct kobj_type linux_dev_ktype = { .release = linux_dev_release, .sysfs_ops = &linux_dev_sysfs }; struct device * device_create(struct class *class, struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...) 
{ struct device *dev; va_list args; dev = kzalloc(sizeof(*dev), M_WAITOK); dev->parent = parent; dev->class = class; dev->devt = devt; dev->driver_data = drvdata; dev->release = linux_device_release; va_start(args, fmt); kobject_set_name_vargs(&dev->kobj, fmt, args); va_end(args); device_register(dev); return (dev); } int kobject_init_and_add(struct kobject *kobj, const struct kobj_type *ktype, struct kobject *parent, const char *fmt, ...) { va_list args; int error; kobject_init(kobj, ktype); kobj->ktype = ktype; kobj->parent = parent; kobj->name = NULL; va_start(args, fmt); error = kobject_set_name_vargs(kobj, fmt, args); va_end(args); if (error) return (error); return kobject_add_complete(kobj, parent); } static void linux_file_dtor(void *cdp) { struct linux_file *filp; filp = cdp; filp->f_op->release(filp->f_vnode, filp); vdrop(filp->f_vnode); kfree(filp); } static int linux_dev_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct linux_cdev *ldev; struct linux_file *filp; struct file *file; int error; file = curthread->td_fpop; ldev = dev->si_drv1; if (ldev == NULL) return (ENODEV); filp = kzalloc(sizeof(*filp), GFP_KERNEL); filp->f_dentry = &filp->f_dentry_store; filp->f_op = ldev->ops; filp->f_flags = file->f_flag; vhold(file->f_vnode); filp->f_vnode = file->f_vnode; if (filp->f_op->open) { error = -filp->f_op->open(file->f_vnode, filp); if (error) { kfree(filp); return (error); } } error = devfs_set_cdevpriv(filp, linux_file_dtor); if (error) { filp->f_op->release(file->f_vnode, filp); kfree(filp); return (error); } return 0; } static int linux_dev_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct linux_cdev *ldev; struct linux_file *filp; struct file *file; int error; file = curthread->td_fpop; ldev = dev->si_drv1; if (ldev == NULL) return (0); if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) return (error); filp->f_flags = file->f_flag; devfs_clear_cdevpriv(); return (0); } static int linux_dev_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct linux_cdev *ldev; struct linux_file *filp; struct file *file; int error; file = curthread->td_fpop; ldev = dev->si_drv1; if (ldev == NULL) return (0); if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) return (error); filp->f_flags = file->f_flag; /* * Linux does not have a generic ioctl copyin/copyout layer. All * linux ioctls must be converted to void ioctls which pass a * pointer to the address of the data. We want the actual user * address so we dereference here. 
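	 * The driver's unlocked_ioctl() handler therefore receives that user
	 * address in its "arg" parameter and, as under Linux, is expected to
	 * perform its own copyin/copyout against it.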
*/ data = *(void **)data; if (filp->f_op->unlocked_ioctl) error = -filp->f_op->unlocked_ioctl(filp, cmd, (u_long)data); else error = ENOTTY; return (error); } static int linux_dev_read(struct cdev *dev, struct uio *uio, int ioflag) { struct linux_cdev *ldev; struct linux_file *filp; struct file *file; ssize_t bytes; int error; file = curthread->td_fpop; ldev = dev->si_drv1; if (ldev == NULL) return (0); if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) return (error); filp->f_flags = file->f_flag; if (uio->uio_iovcnt != 1) panic("linux_dev_read: uio %p iovcnt %d", uio, uio->uio_iovcnt); if (filp->f_op->read) { bytes = filp->f_op->read(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; } else error = -bytes; } else error = ENXIO; return (error); } static int linux_dev_write(struct cdev *dev, struct uio *uio, int ioflag) { struct linux_cdev *ldev; struct linux_file *filp; struct file *file; ssize_t bytes; int error; file = curthread->td_fpop; ldev = dev->si_drv1; if (ldev == NULL) return (0); if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) return (error); filp->f_flags = file->f_flag; if (uio->uio_iovcnt != 1) panic("linux_dev_write: uio %p iovcnt %d", uio, uio->uio_iovcnt); if (filp->f_op->write) { bytes = filp->f_op->write(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; } else error = -bytes; } else error = ENXIO; return (error); } static int linux_dev_poll(struct cdev *dev, int events, struct thread *td) { struct linux_cdev *ldev; struct linux_file *filp; struct file *file; int revents; int error; file = curthread->td_fpop; ldev = dev->si_drv1; if (ldev == NULL) return (0); if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) return (error); filp->f_flags = file->f_flag; if (filp->f_op->poll) revents = filp->f_op->poll(filp, NULL) & events; else revents = 0; return (revents); } static int linux_dev_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **object, int nprot) { struct linux_cdev *ldev; struct linux_file *filp; struct file *file; struct vm_area_struct vma; int error; file = curthread->td_fpop; ldev = dev->si_drv1; if (ldev == NULL) return (ENODEV); if ((error = devfs_get_cdevpriv((void **)&filp)) != 0) return (error); filp->f_flags = file->f_flag; vma.vm_start = 0; vma.vm_end = size; vma.vm_pgoff = *offset / PAGE_SIZE; vma.vm_pfn = 0; vma.vm_page_prot = VM_MEMATTR_DEFAULT; if (filp->f_op->mmap) { error = -filp->f_op->mmap(filp, &vma); if (error == 0) { struct sglist *sg; sg = sglist_alloc(1, M_WAITOK); sglist_append_phys(sg, (vm_paddr_t)vma.vm_pfn << PAGE_SHIFT, vma.vm_len); *object = vm_pager_allocate(OBJT_SG, sg, vma.vm_len, nprot, 0, curthread->td_ucred); if (*object == NULL) { sglist_free(sg); return (EINVAL); } *offset = 0; if (vma.vm_page_prot != VM_MEMATTR_DEFAULT) { VM_OBJECT_WLOCK(*object); vm_object_set_memattr(*object, vma.vm_page_prot); VM_OBJECT_WUNLOCK(*object); } } } else error = ENODEV; return (error); } struct cdevsw linuxcdevsw = { .d_version = D_VERSION, .d_flags = D_TRACKCLOSE, .d_open = linux_dev_open, .d_close = linux_dev_close, .d_read = linux_dev_read, .d_write = linux_dev_write, .d_ioctl = linux_dev_ioctl, .d_mmap_single = linux_dev_mmap_single, .d_poll = linux_dev_poll, }; 
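/*
 * Illustrative sketch of a hypothetical LinuxKPI consumer: a minimal
 * Linux-style file_operations whose handlers are reached through the
 * cdevsw above (linux_dev_read(), linux_dev_ioctl(), linux_file_dtor()).
 * The example_* names are assumptions for illustration only; a real
 * driver would hang such an operations table off a struct linux_cdev
 * (cdev->ops) before creating the device node.
 */
static ssize_t
example_read(struct linux_file *filp, char *buf, size_t len, loff_t *off)
{

	/* linux_dev_read() passes the uio's base pointer, length and offset. */
	return (0);
}

static long
example_ioctl(struct linux_file *filp, unsigned int cmd, unsigned long arg)
{

	/*
	 * "arg" is the raw user address recovered by linux_dev_ioctl(); the
	 * handler does its own copyin/copyout, as it would under Linux.
	 */
	return (0);
}

static int
example_release(struct vnode *v, struct linux_file *filp)
{

	/* Required: linux_file_dtor() invokes release unconditionally. */
	return (0);
}

static const struct file_operations example_fops = {
	.read = example_read,
	.unlocked_ioctl = example_ioctl,
	.release = example_release,
};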
static int linux_file_read(struct file *file, struct uio *uio, struct ucred *active_cred, int flags, struct thread *td) { struct linux_file *filp; ssize_t bytes; int error; error = 0; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; if (uio->uio_iovcnt != 1) panic("linux_file_read: uio %p iovcnt %d", uio, uio->uio_iovcnt); if (filp->f_op->read) { bytes = filp->f_op->read(filp, uio->uio_iov->iov_base, uio->uio_iov->iov_len, &uio->uio_offset); if (bytes >= 0) { uio->uio_iov->iov_base = ((uint8_t *)uio->uio_iov->iov_base) + bytes; uio->uio_iov->iov_len -= bytes; uio->uio_resid -= bytes; } else error = -bytes; } else error = ENXIO; return (error); } static int linux_file_poll(struct file *file, int events, struct ucred *active_cred, struct thread *td) { struct linux_file *filp; int revents; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; if (filp->f_op->poll) revents = filp->f_op->poll(filp, NULL) & events; else revents = 0; return (0); } static int linux_file_close(struct file *file, struct thread *td) { struct linux_file *filp; int error; filp = (struct linux_file *)file->f_data; filp->f_flags = file->f_flag; error = -filp->f_op->release(NULL, filp); funsetown(&filp->f_sigio); kfree(filp); return (error); } static int linux_file_ioctl(struct file *fp, u_long cmd, void *data, struct ucred *cred, struct thread *td) { struct linux_file *filp; int error; filp = (struct linux_file *)fp->f_data; filp->f_flags = fp->f_flag; error = 0; switch (cmd) { case FIONBIO: break; case FIOASYNC: if (filp->f_op->fasync == NULL) break; error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC); break; case FIOSETOWN: error = fsetown(*(int *)data, &filp->f_sigio); if (error == 0) error = filp->f_op->fasync(0, filp, fp->f_flag & FASYNC); break; case FIOGETOWN: *(int *)data = fgetown(&filp->f_sigio); break; default: error = ENOTTY; break; } return (error); } static int linux_file_stat(struct file *fp, struct stat *sb, struct ucred *active_cred, struct thread *td) { return (EOPNOTSUPP); } static int linux_file_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) { return (0); } struct fileops linuxfileops = { .fo_read = linux_file_read, .fo_write = invfo_rdwr, .fo_truncate = invfo_truncate, .fo_kqfilter = invfo_kqfilter, .fo_stat = linux_file_stat, .fo_fill_kinfo = linux_file_fill_kinfo, .fo_poll = linux_file_poll, .fo_close = linux_file_close, .fo_ioctl = linux_file_ioctl, .fo_chmod = invfo_chmod, .fo_chown = invfo_chown, .fo_sendfile = invfo_sendfile, }; /* * Hash of vmmap addresses. This is infrequently accessed and does not * need to be particularly large. This is done because we must store the * caller's idea of the map size to properly unmap. 
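 * For example, iounmap() and vunmap() are given only the address, so the
 * size recorded by vmmap_add() is what allows them to undo the mapping.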
*/ struct vmmap { LIST_ENTRY(vmmap) vm_next; void *vm_addr; unsigned long vm_size; }; struct vmmaphd { struct vmmap *lh_first; }; #define VMMAP_HASH_SIZE 64 #define VMMAP_HASH_MASK (VMMAP_HASH_SIZE - 1) #define VM_HASH(addr) ((uintptr_t)(addr) >> PAGE_SHIFT) & VMMAP_HASH_MASK static struct vmmaphd vmmaphead[VMMAP_HASH_SIZE]; static struct mtx vmmaplock; static void vmmap_add(void *addr, unsigned long size) { struct vmmap *vmmap; vmmap = kmalloc(sizeof(*vmmap), GFP_KERNEL); mtx_lock(&vmmaplock); vmmap->vm_size = size; vmmap->vm_addr = addr; LIST_INSERT_HEAD(&vmmaphead[VM_HASH(addr)], vmmap, vm_next); mtx_unlock(&vmmaplock); } static struct vmmap * vmmap_remove(void *addr) { struct vmmap *vmmap; mtx_lock(&vmmaplock); LIST_FOREACH(vmmap, &vmmaphead[VM_HASH(addr)], vm_next) if (vmmap->vm_addr == addr) break; if (vmmap) LIST_REMOVE(vmmap, vm_next); mtx_unlock(&vmmaplock); return (vmmap); } #if defined(__i386__) || defined(__amd64__) void * _ioremap_attr(vm_paddr_t phys_addr, unsigned long size, int attr) { void *addr; addr = pmap_mapdev_attr(phys_addr, size, attr); if (addr == NULL) return (NULL); vmmap_add(addr, size); return (addr); } #endif void iounmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; #if defined(__i386__) || defined(__amd64__) pmap_unmapdev((vm_offset_t)addr, vmmap->vm_size); #endif kfree(vmmap); } void * vmap(struct page **pages, unsigned int count, unsigned long flags, int prot) { vm_offset_t off; size_t size; size = count * PAGE_SIZE; off = kva_alloc(size); if (off == 0) return (NULL); vmmap_add((void *)off, size); pmap_qenter(off, pages, count); return ((void *)off); } void vunmap(void *addr) { struct vmmap *vmmap; vmmap = vmmap_remove(addr); if (vmmap == NULL) return; pmap_qremove((vm_offset_t)addr, vmmap->vm_size / PAGE_SIZE); kva_free((vm_offset_t)addr, vmmap->vm_size); kfree(vmmap); } char * kvasprintf(gfp_t gfp, const char *fmt, va_list ap) { unsigned int len; char *p; va_list aq; va_copy(aq, ap); len = vsnprintf(NULL, 0, fmt, aq); va_end(aq); p = kmalloc(len + 1, gfp); if (p != NULL) vsnprintf(p, len + 1, fmt, ap); return (p); } char * kasprintf(gfp_t gfp, const char *fmt, ...) 
{ va_list ap; char *p; va_start(ap, fmt); p = kvasprintf(gfp, fmt, ap); va_end(ap); return (p); } static int linux_timer_jiffies_until(unsigned long expires) { int delta = expires - jiffies; /* guard against already expired values */ if (delta < 1) delta = 1; return (delta); } static void linux_timer_callback_wrapper(void *context) { struct timer_list *timer; timer = context; timer->function(timer->data); } void mod_timer(struct timer_list *timer, unsigned long expires) { timer->expires = expires; callout_reset(&timer->timer_callout, linux_timer_jiffies_until(expires), &linux_timer_callback_wrapper, timer); } void add_timer(struct timer_list *timer) { callout_reset(&timer->timer_callout, linux_timer_jiffies_until(timer->expires), &linux_timer_callback_wrapper, timer); } static void linux_timer_init(void *arg) { /* * Compute an internal HZ value which can divide 2**32 to * avoid timer rounding problems when the tick value wraps * around 2**32: */ linux_timer_hz_mask = 1; while (linux_timer_hz_mask < (unsigned long)hz) linux_timer_hz_mask *= 2; linux_timer_hz_mask--; } SYSINIT(linux_timer, SI_SUB_DRIVERS, SI_ORDER_FIRST, linux_timer_init, NULL); void linux_complete_common(struct completion *c, int all) { int wakeup_swapper; sleepq_lock(c); c->done++; if (all) wakeup_swapper = sleepq_broadcast(c, SLEEPQ_SLEEP, 0, 0); else wakeup_swapper = sleepq_signal(c, SLEEPQ_SLEEP, 0, 0); sleepq_release(c); if (wakeup_swapper) kick_proc0(); } /* * Indefinite wait for done != 0 with or without signals. */ long linux_wait_for_common(struct completion *c, int flags) { if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; for (;;) { sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); if (flags & SLEEPQ_INTERRUPTIBLE) { if (sleepq_wait_sig(c, 0) != 0) return (-ERESTARTSYS); } else sleepq_wait(c, 0); } c->done--; sleepq_release(c); return (0); } /* * Time limited wait for done != 0 with or without signals. 
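 * Returns 0 on timeout, -ERESTARTSYS if interrupted by a signal, and
 * otherwise the number of jiffies (at least 1) remaining before "timeout".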
*/ long linux_wait_for_timeout_common(struct completion *c, long timeout, int flags) { long end = jiffies + timeout; if (flags != 0) flags = SLEEPQ_INTERRUPTIBLE | SLEEPQ_SLEEP; else flags = SLEEPQ_SLEEP; for (;;) { int ret; sleepq_lock(c); if (c->done) break; sleepq_add(c, NULL, "completion", flags, 0); sleepq_set_timeout(c, linux_timer_jiffies_until(end)); if (flags & SLEEPQ_INTERRUPTIBLE) ret = sleepq_timedwait_sig(c, 0); else ret = sleepq_timedwait(c, 0); if (ret != 0) { /* check for timeout or signal */ if (ret == EWOULDBLOCK) return (0); else return (-ERESTARTSYS); } } c->done--; sleepq_release(c); /* return how many jiffies are left */ return (linux_timer_jiffies_until(end)); } int linux_try_wait_for_completion(struct completion *c) { int isdone; isdone = 1; sleepq_lock(c); if (c->done) c->done--; else isdone = 0; sleepq_release(c); return (isdone); } int linux_completion_done(struct completion *c) { int isdone; isdone = 1; sleepq_lock(c); if (c->done == 0) isdone = 0; sleepq_release(c); return (isdone); } void linux_delayed_work_fn(void *arg) { struct delayed_work *work; work = arg; taskqueue_enqueue(work->work.taskqueue, &work->work.work_task); } void linux_work_fn(void *context, int pending) { struct work_struct *work; work = context; work->fn(work); } void linux_flush_fn(void *context, int pending) { } struct workqueue_struct * linux_create_workqueue_common(const char *name, int cpus) { struct workqueue_struct *wq; wq = kmalloc(sizeof(*wq), M_WAITOK); wq->taskqueue = taskqueue_create(name, M_WAITOK, taskqueue_thread_enqueue, &wq->taskqueue); atomic_set(&wq->draining, 0); taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name); return (wq); } void destroy_workqueue(struct workqueue_struct *wq) { taskqueue_free(wq->taskqueue); kfree(wq); } static void linux_cdev_release(struct kobject *kobj) { struct linux_cdev *cdev; struct kobject *parent; cdev = container_of(kobj, struct linux_cdev, kobj); parent = kobj->parent; if (cdev->cdev) destroy_dev(cdev->cdev); kfree(cdev); kobject_put(parent); } static void linux_cdev_static_release(struct kobject *kobj) { struct linux_cdev *cdev; struct kobject *parent; cdev = container_of(kobj, struct linux_cdev, kobj); parent = kobj->parent; if (cdev->cdev) destroy_dev(cdev->cdev); kobject_put(parent); } const struct kobj_type linux_cdev_ktype = { .release = linux_cdev_release, }; const struct kobj_type linux_cdev_static_ktype = { .release = linux_cdev_static_release, }; static void linux_handle_ifnet_link_event(void *arg, struct ifnet *ifp, int linkstate) { struct notifier_block *nb; nb = arg; if (linkstate == LINK_STATE_UP) nb->notifier_call(nb, NETDEV_UP, ifp); else nb->notifier_call(nb, NETDEV_DOWN, ifp); } static void linux_handle_ifnet_arrival_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_REGISTER, ifp); } static void linux_handle_ifnet_departure_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_UNREGISTER, ifp); } static void linux_handle_iflladdr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_CHANGEADDR, ifp); } static void linux_handle_ifaddr_event(void *arg, struct ifnet *ifp) { struct notifier_block *nb; nb = arg; nb->notifier_call(nb, NETDEV_CHANGEIFADDR, ifp); } int register_netdevice_notifier(struct notifier_block *nb) { nb->tags[NETDEV_UP] = EVENTHANDLER_REGISTER( ifnet_link_event, linux_handle_ifnet_link_event, nb, 0); nb->tags[NETDEV_REGISTER] = 
EVENTHANDLER_REGISTER( ifnet_arrival_event, linux_handle_ifnet_arrival_event, nb, 0); nb->tags[NETDEV_UNREGISTER] = EVENTHANDLER_REGISTER( ifnet_departure_event, linux_handle_ifnet_departure_event, nb, 0); nb->tags[NETDEV_CHANGEADDR] = EVENTHANDLER_REGISTER( iflladdr_event, linux_handle_iflladdr_event, nb, 0); return (0); } int register_inetaddr_notifier(struct notifier_block *nb) { nb->tags[NETDEV_CHANGEIFADDR] = EVENTHANDLER_REGISTER( ifaddr_event, linux_handle_ifaddr_event, nb, 0); return (0); } int unregister_netdevice_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(ifnet_link_event, nb->tags[NETDEV_UP]); EVENTHANDLER_DEREGISTER(ifnet_arrival_event, nb->tags[NETDEV_REGISTER]); EVENTHANDLER_DEREGISTER(ifnet_departure_event, nb->tags[NETDEV_UNREGISTER]); EVENTHANDLER_DEREGISTER(iflladdr_event, nb->tags[NETDEV_CHANGEADDR]); return (0); } int unregister_inetaddr_notifier(struct notifier_block *nb) { EVENTHANDLER_DEREGISTER(ifaddr_event, nb->tags[NETDEV_CHANGEIFADDR]); return (0); } void linux_irq_handler(void *ent) { struct irq_ent *irqe; irqe = ent; irqe->handler(irqe->irq, irqe->arg); } static void linux_compat_init(void *arg) { struct sysctl_oid *rootoid; int i; sx_init(&linux_global_rcu_lock, "LinuxGlobalRCU"); rootoid = SYSCTL_ADD_ROOT_NODE(NULL, OID_AUTO, "sys", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "sys"); kobject_init(&linux_class_root, &linux_class_ktype); kobject_set_name(&linux_class_root, "class"); linux_class_root.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "class", CTLFLAG_RD|CTLFLAG_MPSAFE, NULL, "class"); kobject_init(&linux_root_device.kobj, &linux_dev_ktype); kobject_set_name(&linux_root_device.kobj, "device"); linux_root_device.kobj.oidp = SYSCTL_ADD_NODE(NULL, SYSCTL_CHILDREN(rootoid), OID_AUTO, "device", CTLFLAG_RD, NULL, "device"); linux_root_device.bsddev = root_bus; linux_class_misc.name = "misc"; class_register(&linux_class_misc); INIT_LIST_HEAD(&pci_drivers); INIT_LIST_HEAD(&pci_devices); spin_lock_init(&pci_lock); mtx_init(&vmmaplock, "IO Map lock", NULL, MTX_DEF); for (i = 0; i < VMMAP_HASH_SIZE; i++) LIST_INIT(&vmmaphead[i]); } SYSINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_init, NULL); static void linux_compat_uninit(void *arg) { linux_kobject_kfree_name(&linux_class_root); linux_kobject_kfree_name(&linux_root_device.kobj); linux_kobject_kfree_name(&linux_class_misc.kobj); synchronize_rcu(); sx_destroy(&linux_global_rcu_lock); } SYSUNINIT(linux_compat, SI_SUB_DRIVERS, SI_ORDER_SECOND, linux_compat_uninit, NULL); /* * NOTE: Linux frequently uses "unsigned long" for pointer to integer * conversion and vice versa, where in FreeBSD "uintptr_t" would be * used. Assert these types have the same size, else some parts of the * LinuxKPI may not work like expected: */ CTASSERT(sizeof(unsigned long) == sizeof(uintptr_t)); Index: head/sys/compat/linuxkpi/common/src/linux_pci.c =================================================================== --- head/sys/compat/linuxkpi/common/src/linux_pci.c (revision 295879) +++ head/sys/compat/linuxkpi/common/src/linux_pci.c (revision 295880) @@ -1,254 +1,253 @@ /*- * Copyright (c) 2015 Mellanox Technologies, Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include #include #include #include static device_probe_t linux_pci_probe; static device_attach_t linux_pci_attach; static device_detach_t linux_pci_detach; static device_suspend_t linux_pci_suspend; static device_resume_t linux_pci_resume; static device_shutdown_t linux_pci_shutdown; static device_method_t pci_methods[] = { DEVMETHOD(device_probe, linux_pci_probe), DEVMETHOD(device_attach, linux_pci_attach), DEVMETHOD(device_detach, linux_pci_detach), DEVMETHOD(device_suspend, linux_pci_suspend), DEVMETHOD(device_resume, linux_pci_resume), DEVMETHOD(device_shutdown, linux_pci_shutdown), DEVMETHOD_END }; static struct pci_driver * linux_pci_find(device_t dev, const struct pci_device_id **idp) { const struct pci_device_id *id; struct pci_driver *pdrv; uint16_t vendor; uint16_t device; vendor = pci_get_vendor(dev); device = pci_get_device(dev); spin_lock(&pci_lock); list_for_each_entry(pdrv, &pci_drivers, links) { for (id = pdrv->id_table; id->vendor != 0; id++) { if (vendor == id->vendor && device == id->device) { *idp = id; spin_unlock(&pci_lock); return (pdrv); } } } spin_unlock(&pci_lock); return (NULL); } static int linux_pci_probe(device_t dev) { const struct pci_device_id *id; struct pci_driver *pdrv; if ((pdrv = linux_pci_find(dev, &id)) == NULL) return (ENXIO); if (device_get_driver(dev) != &pdrv->driver) return (ENXIO); device_set_desc(dev, pdrv->name); return (0); } static int linux_pci_attach(device_t dev) { struct resource_list_entry *rle; struct pci_dev *pdev; struct pci_driver *pdrv; const struct pci_device_id *id; int error; pdrv = linux_pci_find(dev, &id); pdev = device_get_softc(dev); pdev->dev.parent = &linux_root_device; pdev->dev.bsddev = dev; INIT_LIST_HEAD(&pdev->dev.irqents); pdev->device = id->device; pdev->vendor = id->vendor; pdev->dev.dma_mask = &pdev->dma_mask; pdev->pdrv = pdrv; kobject_init(&pdev->dev.kobj, &linux_dev_ktype); kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev)); kobject_add(&pdev->dev.kobj, &linux_root_device.kobj, kobject_name(&pdev->dev.kobj)); rle = _pci_get_rle(pdev, SYS_RES_IRQ, 0); if (rle) pdev->dev.irq = rle->start; else pdev->dev.irq = 0; pdev->irq = pdev->dev.irq; mtx_unlock(&Giant); spin_lock(&pci_lock); list_add(&pdev->links, &pci_devices); spin_unlock(&pci_lock); error = pdrv->probe(pdev, id); mtx_lock(&Giant); if (error) { spin_lock(&pci_lock); list_del(&pdev->links); 
spin_unlock(&pci_lock); put_device(&pdev->dev); return (-error); } return (0); } static int linux_pci_detach(device_t dev) { struct pci_dev *pdev; pdev = device_get_softc(dev); mtx_unlock(&Giant); pdev->pdrv->remove(pdev); mtx_lock(&Giant); spin_lock(&pci_lock); list_del(&pdev->links); spin_unlock(&pci_lock); put_device(&pdev->dev); return (0); } static int linux_pci_suspend(device_t dev) { struct pm_message pm = { }; struct pci_dev *pdev; int err; pdev = device_get_softc(dev); if (pdev->pdrv->suspend != NULL) err = -pdev->pdrv->suspend(pdev, pm); else err = 0; return (err); } static int linux_pci_resume(device_t dev) { struct pci_dev *pdev; int err; pdev = device_get_softc(dev); if (pdev->pdrv->resume != NULL) err = -pdev->pdrv->resume(pdev); else err = 0; return (err); } static int linux_pci_shutdown(device_t dev) { struct pci_dev *pdev; pdev = device_get_softc(dev); if (pdev->pdrv->shutdown != NULL) pdev->pdrv->shutdown(pdev); return (0); } int pci_register_driver(struct pci_driver *pdrv) { devclass_t bus; int error = 0; bus = devclass_find("pci"); spin_lock(&pci_lock); list_add(&pdrv->links, &pci_drivers); spin_unlock(&pci_lock); pdrv->driver.name = pdrv->name; pdrv->driver.methods = pci_methods; pdrv->driver.size = sizeof(struct pci_dev); mtx_lock(&Giant); if (bus != NULL) { error = devclass_add_driver(bus, &pdrv->driver, BUS_PASS_DEFAULT, &pdrv->bsdclass); } mtx_unlock(&Giant); return (-error); } void pci_unregister_driver(struct pci_driver *pdrv) { devclass_t bus; bus = devclass_find("pci"); list_del(&pdrv->links); mtx_lock(&Giant); if (bus != NULL) devclass_delete_driver(bus, &pdrv->driver); mtx_unlock(&Giant); } Index: head/sys/dev/ce/if_ce.c =================================================================== --- head/sys/dev/ce/if_ce.c (revision 295879) +++ head/sys/dev/ce/if_ce.c (revision 295880) @@ -1,2644 +1,2643 @@ /* * Cronyx-Tau32-PCI adapter driver for FreeBSD. * * Copyright (C) 2003-2005 Cronyx Engineering. * Copyright (C) 2003-2005 Kurakin Roman, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations a permission to use, * modify and redistribute this software in source and binary forms, * as long as this message is kept with the software, all derivative * works or modified versions. 
* * $Cronyx: if_ce.c,v 1.9.2.8 2005/11/21 14:17:44 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #if __FreeBSD_version >= 500000 # define NPCI 1 #else # include "pci.h" #endif #if NPCI > 0 #include #include #include #include #include #include #include #include #include #include #include #if __FreeBSD_version >= 504000 #include #endif #include #include #include #include #include #include #if __FreeBSD_version > 501000 # include # include #else # include # include #endif #include #include #include "opt_ng_cronyx.h" #ifdef NETGRAPH_CRONYX # include "opt_netgraph.h" # ifndef NETGRAPH # error #option NETGRAPH missed from configuration # endif # include # include # include #else # include # include # define PP_CISCO IFF_LINK2 # include #endif #include #include #include #include -#include /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR #define PP_FR 0 #endif #ifndef IFP2SP #define IFP2SP(ifp) ((struct sppp*)ifp) #endif #ifndef SP2IFP #define SP2IFP(sp) ((struct ifnet*)sp) #endif #ifndef PCIR_BAR #define PCIR_BAR(x) (PCIR_MAPS + (x) * 4) #endif /* define as our previous return value */ #ifndef BUS_PROBE_DEFAULT #define BUS_PROBE_DEFAULT 0 #endif #define CE_DEBUG(d,s) ({if (d->chan->debug) {\ printf ("%s: ", d->name); printf s;}}) #define CE_DEBUG2(d,s) ({if (d->chan->debug>1) {\ printf ("%s: ", d->name); printf s;}}) #ifndef IF_DRAIN #define IF_DRAIN(ifq) do { \ struct mbuf *m; \ for (;;) { \ IF_DEQUEUE(ifq, m); \ if (m == NULL) \ break; \ m_freem(m); \ } \ } while (0) #endif #ifndef _IF_QLEN #define _IF_QLEN(ifq) ((ifq)->ifq_len) #endif #ifndef callout_drain #define callout_drain callout_stop #endif #define CE_LOCK_NAME "ceX" #define CE_LOCK(_bd) mtx_lock (&(_bd)->ce_mtx) #define CE_UNLOCK(_bd) mtx_unlock (&(_bd)->ce_mtx) #define CE_LOCK_ASSERT(_bd) mtx_assert (&(_bd)->ce_mtx, MA_OWNED) #define CDEV_MAJOR 185 static int ce_probe __P((device_t)); static int ce_attach __P((device_t)); static int ce_detach __P((device_t)); static device_method_t ce_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ce_probe), DEVMETHOD(device_attach, ce_attach), DEVMETHOD(device_detach, ce_detach), DEVMETHOD_END }; typedef struct _ce_dma_mem_t { unsigned long phys; void *virt; size_t size; #if __FreeBSD_version >= 500000 bus_dma_tag_t dmat; bus_dmamap_t mapp; #endif } ce_dma_mem_t; typedef struct _drv_t { char name [8]; int running; ce_board_t *board; ce_chan_t *chan; struct ifqueue rqueue; #ifdef NETGRAPH char nodename [NG_NODESIZE]; hook_p hook; hook_p debug_hook; node_p node; struct ifqueue queue; struct ifqueue hi_queue; #else struct ifnet *ifp; #endif short timeout; struct callout timeout_handle; #if __FreeBSD_version >= 500000 struct cdev *devt; #else /* __FreeBSD_version < 500000 */ dev_t devt; #endif ce_dma_mem_t dmamem; } drv_t; typedef struct _bdrv_t { ce_board_t *board; struct resource *ce_res; struct resource *ce_irq; void *ce_intrhand; ce_dma_mem_t dmamem; drv_t channel [NCHAN]; #if __FreeBSD_version >= 504000 struct mtx ce_mtx; #endif } bdrv_t; static driver_t ce_driver = { "ce", ce_methods, sizeof(bdrv_t), }; static devclass_t ce_devclass; static void ce_receive (ce_chan_t *c, unsigned char *data, int len); static void ce_transmit (ce_chan_t *c, void *attachment, int len); static void ce_error (ce_chan_t *c, int data); static void ce_up (drv_t *d); static void ce_start (drv_t *d); static void ce_down (drv_t *d); static void ce_watchdog (drv_t *d); static void ce_watchdog_timer (void *arg); #ifdef NETGRAPH extern struct ng_type typestruct; 
#else static void ce_ifstart (struct ifnet *ifp); static void ce_tlf (struct sppp *sp); static void ce_tls (struct sppp *sp); static int ce_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data); static void ce_initialize (void *softc); #endif static ce_board_t *adapter [NBRD]; static drv_t *channel [NBRD*NCHAN]; static struct callout led_timo [NBRD]; static struct callout timeout_handle; static int ce_destroy = 0; #if __FreeBSD_version < 500000 static int ce_open (dev_t dev, int oflags, int devtype, struct proc *p); static int ce_close (dev_t dev, int fflag, int devtype, struct proc *p); static int ce_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p); #else static int ce_open (struct cdev *dev, int oflags, int devtype, struct thread *td); static int ce_close (struct cdev *dev, int fflag, int devtype, struct thread *td); static int ce_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td); #endif #if __FreeBSD_version < 500000 static struct cdevsw ce_cdevsw = { ce_open, ce_close, noread, nowrite, ce_ioctl, nopoll, nommap, nostrategy, "ce", CDEV_MAJOR, nodump, nopsize, D_NAGGED, -1 }; #elif __FreeBSD_version == 500000 static struct cdevsw ce_cdevsw = { ce_open, ce_close, noread, nowrite, ce_ioctl, nopoll, nommap, nostrategy, "ce", CDEV_MAJOR, nodump, nopsize, D_NAGGED, }; #elif __FreeBSD_version <= 501000 static struct cdevsw ce_cdevsw = { .d_open = ce_open, .d_close = ce_close, .d_read = noread, .d_write = nowrite, .d_ioctl = ce_ioctl, .d_poll = nopoll, .d_mmap = nommap, .d_strategy = nostrategy, .d_name = "ce", .d_maj = CDEV_MAJOR, .d_dump = nodump, .d_flags = D_NAGGED, }; #elif __FreeBSD_version < 502103 static struct cdevsw ce_cdevsw = { .d_open = ce_open, .d_close = ce_close, .d_ioctl = ce_ioctl, .d_name = "ce", .d_maj = CDEV_MAJOR, .d_flags = D_NAGGED, }; #elif __FreeBSD_version < 600000 static struct cdevsw ce_cdevsw = { .d_version = D_VERSION, .d_open = ce_open, .d_close = ce_close, .d_ioctl = ce_ioctl, .d_name = "ce", .d_maj = CDEV_MAJOR, .d_flags = D_NEEDGIANT, }; #else /* __FreeBSD_version >= 600000 */ static struct cdevsw ce_cdevsw = { .d_version = D_VERSION, .d_open = ce_open, .d_close = ce_close, .d_ioctl = ce_ioctl, .d_name = "ce", }; #endif /* * Make an mbuf from data. */ static struct mbuf *makembuf (void *buf, unsigned len) { struct mbuf *m; MGETHDR (m, M_NOWAIT, MT_DATA); if (! 
m) return 0; if (!(MCLGET(m, M_NOWAIT))) { m_freem (m); return 0; } m->m_pkthdr.len = m->m_len = len; bcopy (buf, mtod (m, caddr_t), len); return m; } static int ce_probe (device_t dev) { if ((pci_get_vendor (dev) == TAU32_PCI_VENDOR_ID) && (pci_get_device (dev) == TAU32_PCI_DEVICE_ID)) { device_set_desc (dev, "Cronyx-Tau32-PCI serial adapter"); return BUS_PROBE_DEFAULT; } return ENXIO; } static void ce_timeout (void *arg) { drv_t *d; int s, i, k; for (i = 0; i < NBRD; ++i) { if (adapter[i] == NULL) continue; for (k = 0; k < NCHAN; ++k) { s = splimp (); if (ce_destroy) { splx (s); return; } d = channel[i * NCHAN + k]; if (!d) { splx (s); continue; } CE_LOCK ((bdrv_t *)d->board->sys); switch (d->chan->type) { case T_E1: ce_e1_timer (d->chan); break; default: break; } CE_UNLOCK ((bdrv_t *)d->board->sys); splx (s); } } s = splimp (); if (!ce_destroy) callout_reset (&timeout_handle, hz, ce_timeout, 0); splx (s); } static void ce_led_off (void *arg) { ce_board_t *b = arg; bdrv_t *bd = (bdrv_t *) b->sys; int s; s = splimp (); if (ce_destroy) { splx (s); return; } CE_LOCK (bd); TAU32_LedSet (b->ddk.pControllerObject, 0); CE_UNLOCK (bd); splx (s); } static void ce_intr (void *arg) { bdrv_t *bd = arg; ce_board_t *b = bd->board; int s; int i; #if __FreeBSD_version >= 500000 && defined NETGRAPH int error; #endif s = splimp (); if (ce_destroy) { splx (s); return; } CE_LOCK (bd); /* Turn LED on. */ TAU32_LedSet (b->ddk.pControllerObject, 1); TAU32_HandleInterrupt (b->ddk.pControllerObject); /* Turn LED off 50 msec later. */ callout_reset (&led_timo[b->num], hz/20, ce_led_off, b); CE_UNLOCK (bd); splx (s); /* Pass packets in a lock-free state */ for (i = 0; i < NCHAN && b->chan[i].type; i++) { drv_t *d = b->chan[i].sys; struct mbuf *m; if (!d || !d->running) continue; while (_IF_QLEN(&d->rqueue)) { IF_DEQUEUE (&d->rqueue,m); if (!m) continue; #ifdef NETGRAPH if (d->hook) { #if __FreeBSD_version >= 500000 NG_SEND_DATA_ONLY (error, d->hook, m); #else ng_queue_data (d->hook, m, 0); #endif } else { IF_DRAIN (&d->rqueue); } #else sppp_input (d->ifp, m); #endif } } } #if __FreeBSD_version >= 500000 static void ce_bus_dmamap_addr (void *arg, bus_dma_segment_t *segs, int nseg, int error) { unsigned long *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } #ifndef BUS_DMA_ZERO #define BUS_DMA_ZERO 0 #endif static int ce_bus_dma_mem_alloc (int bnum, int cnum, ce_dma_mem_t *dmem) { int error; error = bus_dma_tag_create (NULL, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, dmem->size, 1, dmem->size, 0, #if __FreeBSD_version >= 502000 NULL, NULL, #endif &dmem->dmat); if (error) { if (cnum >= 0) printf ("ce%d-%d: ", bnum, cnum); else printf ("ce%d: ", bnum); printf ("couldn't allocate tag for dma memory\n"); return 0; } error = bus_dmamem_alloc (dmem->dmat, (void **)&dmem->virt, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dmem->mapp); if (error) { if (cnum >= 0) printf ("ce%d-%d: ", bnum, cnum); else printf ("ce%d: ", bnum); printf ("couldn't allocate mem for dma memory\n"); bus_dma_tag_destroy (dmem->dmat); return 0; } error = bus_dmamap_load (dmem->dmat, dmem->mapp, dmem->virt, dmem->size, ce_bus_dmamap_addr, &dmem->phys, 0); if (error) { if (cnum >= 0) printf ("ce%d-%d: ", bnum, cnum); else printf ("ce%d: ", bnum); printf ("couldn't load mem map for dma memory\n"); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); return 0; } #if __FreeBSD_version >= 502000 bzero (dmem->virt, dmem->size); #endif 
return 1; } static void ce_bus_dma_mem_free (ce_dma_mem_t *dmem) { bus_dmamap_unload (dmem->dmat, dmem->mapp); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); } #else static int ce_bus_dma_mem_alloc (int bnum, int cnum, ce_dma_mem_t *dmem) { dmem->virt = contigmalloc (dmem->size, M_DEVBUF, M_WAITOK, 0x100000, 0xffffffff, 16, 0); if (dmem->virt == NULL) { if (cnum >= 0) printf ("ce%d-%d: ", bnum, cnum); else printf ("ce%d: ", bnum); printf ("couldn't allocate dma memory\n"); return 0; } dmem->phys = vtophys (dmem->virt); bzero (dmem->virt, dmem->size); return 1; } static void ce_bus_dma_mem_free (ce_dma_mem_t *dmem) { contigfree (dmem->virt, dmem->size, M_DEVBUF); } #endif /* * Called if the probe succeeded. */ static int ce_attach (device_t dev) { bdrv_t *bd = device_get_softc (dev); int unit = device_get_unit (dev); #if __FreeBSD_version >= 504000 char *ce_ln = CE_LOCK_NAME; #endif vm_offset_t vbase; int rid, error; ce_board_t *b; ce_chan_t *c; drv_t *d; int s; b = malloc (sizeof(ce_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("ce%d: couldn't allocate memory\n", unit); return (ENXIO); } bzero (b, sizeof(ce_board_t)); b->ddk.sys = &b; #if __FreeBSD_version >= 440000 pci_enable_busmaster (dev); #endif bd->dmamem.size = TAU32_ControllerObjectSize; if (! ce_bus_dma_mem_alloc (unit, -1, &bd->dmamem)) { free (b, M_DEVBUF); return (ENXIO); } b->ddk.pControllerObject = bd->dmamem.virt; bd->board = b; b->sys = bd; rid = PCIR_BAR(0); bd->ce_res = bus_alloc_resource (dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (! bd->ce_res) { printf ("ce%d: cannot map memory\n", unit); ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); return (ENXIO); } vbase = (vm_offset_t) rman_get_virtual (bd->ce_res); b->ddk.PciBar1VirtualAddress = (void *)vbase; b->ddk.ControllerObjectPhysicalAddress = bd->dmamem.phys; b->ddk.pErrorNotifyCallback = ce_error_callback; b->ddk.pStatusNotifyCallback = ce_status_callback; b->num = unit; TAU32_BeforeReset(&b->ddk); pci_write_config (dev, TAU32_PCI_RESET_ADDRESS, TAU32_PCI_RESET_ON, 4); pci_write_config (dev, TAU32_PCI_RESET_ADDRESS, TAU32_PCI_RESET_OFF, 4); if(!TAU32_Initialize(&b->ddk, 0)) { printf ("ce%d: init adapter error 0x%08x, bus dead bits 0x%08lx\n", unit, b->ddk.InitErrors, b->ddk.DeadBits); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->ce_res); ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); return (ENXIO); } s = splimp (); ce_init_board (b); rid = 0; bd->ce_irq = bus_alloc_resource (dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (! 
bd->ce_irq) { printf ("ce%d: cannot map interrupt\n", unit); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->ce_res); ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); splx (s); return (ENXIO); } #if __FreeBSD_version >= 500000 callout_init (&led_timo[unit], 1); #else callout_init (&led_timo[unit]); #endif error = bus_setup_intr (dev, bd->ce_irq, #if __FreeBSD_version >= 500013 INTR_TYPE_NET|INTR_MPSAFE, #else INTR_TYPE_NET, #endif NULL, ce_intr, bd, &bd->ce_intrhand); if (error) { printf ("ce%d: cannot set up irq\n", unit); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->ce_irq); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->ce_res); ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); splx (s); return (ENXIO); } switch (b->ddk.Model) { case 1: strcpy (b->name, TAU32_BASE_NAME); break; case 2: strcpy (b->name, TAU32_LITE_NAME); break; case 3: strcpy (b->name, TAU32_ADPCM_NAME); break; default: strcpy (b->name, TAU32_UNKNOWN_NAME); break; } printf ("ce%d: %s\n", unit, b->name); for (c = b->chan; c < b->chan + NCHAN; ++c) { c->num = (c - b->chan); c->board = b; d = &bd->channel[c->num]; d->dmamem.size = sizeof(ce_buf_t); if (! ce_bus_dma_mem_alloc (unit, c->num, &d->dmamem)) continue; channel [b->num * NCHAN + c->num] = d; sprintf (d->name, "ce%d.%d", b->num, c->num); d->board = b; d->chan = c; c->sys = d; } for (c = b->chan; c < b->chan + NCHAN; ++c) { if (c->sys == NULL) continue; d = c->sys; callout_init (&d->timeout_handle, 1); #ifdef NETGRAPH if (ng_make_node_common (&typestruct, &d->node) != 0) { printf ("%s: cannot make common node\n", d->name); d->node = NULL; continue; } #if __FreeBSD_version >= 500000 NG_NODE_SET_PRIVATE (d->node, d); #else d->node->private = d; #endif sprintf (d->nodename, "%s%d", NG_CE_NODE_TYPE, c->board->num * NCHAN + c->num); if (ng_name_node (d->node, d->nodename)) { printf ("%s: cannot name node\n", d->nodename); #if __FreeBSD_version >= 500000 NG_NODE_UNREF (d->node); #else ng_rmnode (d->node); ng_unref (d->node); #endif continue; } d->queue.ifq_maxlen = ifqmaxlen; d->hi_queue.ifq_maxlen = ifqmaxlen; d->rqueue.ifq_maxlen = ifqmaxlen; #if __FreeBSD_version >= 500000 mtx_init (&d->queue.ifq_mtx, "ce_queue", NULL, MTX_DEF); mtx_init (&d->hi_queue.ifq_mtx, "ce_queue_hi", NULL, MTX_DEF); mtx_init (&d->rqueue.ifq_mtx, "ce_rqueue", NULL, MTX_DEF); #endif #else /*NETGRAPH*/ #if __FreeBSD_version >= 600031 d->ifp = if_alloc(IFT_PPP); #else d->ifp = malloc (sizeof(struct sppp), M_DEVBUF, M_WAITOK); bzero (d->ifp, sizeof(struct sppp)); #endif if (!d->ifp) { printf ("%s: cannot if_alloc() interface\n", d->name); continue; } d->ifp->if_softc = d; #if __FreeBSD_version > 501000 if_initname (d->ifp, "ce", b->num * NCHAN + c->num); #else d->ifp->if_unit = b->num * NCHAN + c->num; d->ifp->if_name = "ce"; #endif d->ifp->if_mtu = PP_MTU; d->ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; d->ifp->if_ioctl = ce_sioctl; d->ifp->if_start = ce_ifstart; d->ifp->if_init = ce_initialize; d->rqueue.ifq_maxlen = ifqmaxlen; #if __FreeBSD_version >= 500000 mtx_init (&d->rqueue.ifq_mtx, "ce_rqueue", NULL, MTX_DEF); #endif sppp_attach (d->ifp); if_attach (d->ifp); IFP2SP(d->ifp)->pp_tlf = ce_tlf; IFP2SP(d->ifp)->pp_tls = ce_tls; /* If BPF is in the kernel, call the attach for it. * The header size of PPP or Cisco/HDLC is 4 bytes. */ bpfattach (d->ifp, DLT_PPP, 4); #endif /*NETGRAPH*/ ce_start_chan (c, 1, 1, d->dmamem.virt, d->dmamem.phys); /* Register callback functions. 
*/ ce_register_transmit (c, &ce_transmit); ce_register_receive (c, &ce_receive); ce_register_error (c, &ce_error); d->devt = make_dev (&ce_cdevsw, b->num*NCHAN+c->num, UID_ROOT, GID_WHEEL, 0600, "ce%d", b->num*NCHAN+c->num); } #if __FreeBSD_version >= 504000 ce_ln[2] = '0' + unit; mtx_init (&bd->ce_mtx, ce_ln, MTX_NETWORK_LOCK, MTX_DEF|MTX_RECURSE); #endif CE_LOCK (bd); TAU32_EnableInterrupts(b->ddk.pControllerObject); adapter[unit] = b; CE_UNLOCK (bd); splx (s); return 0; } static int ce_detach (device_t dev) { bdrv_t *bd = device_get_softc (dev); ce_board_t *b = bd->board; ce_chan_t *c; int s; #if __FreeBSD_version >= 504000 KASSERT (mtx_initialized (&bd->ce_mtx), ("ce mutex not initialized")); #endif s = splimp (); CE_LOCK (bd); /* Check if the device is busy (open). */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; /* XXX Non existen chan! */ if (! d || ! d->chan) continue; if (d->running) { CE_UNLOCK (bd); splx (s); return EBUSY; } } /* Ok, we can unload driver */ /* At first we should disable interrupts */ ce_destroy = 1; TAU32_DisableInterrupts(b->ddk.pControllerObject); callout_stop (&led_timo[b->num]); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan) continue; callout_stop (&d->timeout_handle); #ifndef NETGRAPH /* Detach from the packet filter list of interfaces. */ bpfdetach (d->ifp); /* Detach from the sync PPP list. */ sppp_detach (d->ifp); /* Detach from the system list of interfaces. */ if_detach (d->ifp); #if __FreeBSD_version > 600031 if_free(d->ifp); #else free (d->ifp, M_DEVBUF); #endif IF_DRAIN (&d->rqueue); #if __FreeBSD_version >= 500000 mtx_destroy (&d->rqueue.ifq_mtx); #endif #else #if __FreeBSD_version >= 500000 if (d->node) { ng_rmnode_self (d->node); NG_NODE_UNREF (d->node); d->node = NULL; } IF_DRAIN (&d->rqueue); mtx_destroy (&d->queue.ifq_mtx); mtx_destroy (&d->hi_queue.ifq_mtx); mtx_destroy (&d->rqueue.ifq_mtx); #else ng_rmnode (d->node); d->node = 0; #endif #endif destroy_dev (d->devt); } CE_UNLOCK (bd); splx (s); callout_drain (&led_timo[b->num]); /* Disable the interrupt request. */ bus_teardown_intr (dev, bd->ce_irq, bd->ce_intrhand); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->ce_irq); TAU32_DestructiveHalt (b->ddk.pControllerObject, 0); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->ce_res); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan) continue; callout_drain (&d->timeout_handle); channel [b->num * NCHAN + c->num] = 0; /* Deallocate buffers. */ ce_bus_dma_mem_free (&d->dmamem); } adapter [b->num] = 0; ce_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); #if __FreeBSD_version >= 504000 mtx_destroy (&bd->ce_mtx); #endif return 0; } #ifndef NETGRAPH static void ce_ifstart (struct ifnet *ifp) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; CE_LOCK (bd); ce_start (d); CE_UNLOCK (bd); } static void ce_tlf (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CE_DEBUG2 (d, ("ce_tlf\n")); sp->pp_down (sp); } static void ce_tls (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CE_DEBUG2 (d, ("ce_tls\n")); sp->pp_up (sp); } /* * Process an ioctl request. 
*/ static int ce_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; int error, s, was_up, should_be_up; #if __FreeBSD_version >= 600034 was_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; #else was_up = (ifp->if_flags & IFF_RUNNING) != 0; #endif error = sppp_ioctl (ifp, cmd, data); if (error) return error; if (! (ifp->if_flags & IFF_DEBUG)) d->chan->debug = 0; else d->chan->debug = d->chan->debug_shadow; switch (cmd) { default: CE_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)); return 0; case SIOCADDMULTI: CE_DEBUG2 (d, ("ioctl SIOCADDMULTI\n")); return 0; case SIOCDELMULTI: CE_DEBUG2 (d, ("ioctl SIOCDELMULTI\n")); return 0; case SIOCSIFFLAGS: CE_DEBUG2 (d, ("ioctl SIOCSIFFLAGS\n")); break; case SIOCSIFADDR: CE_DEBUG2 (d, ("ioctl SIOCSIFADDR\n")); break; } /* We get here only in case of SIFFLAGS or SIFADDR. */ s = splimp (); CE_LOCK (bd); #if __FreeBSD_version >= 600034 should_be_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; #else should_be_up = (ifp->if_flags & IFF_RUNNING) != 0; #endif if (! was_up && should_be_up) { /* Interface goes up -- start it. */ ce_up (d); ce_start (d); } else if (was_up && ! should_be_up) { /* Interface is going down -- stop it. */ /* if ((IFP2SP(ifp)->pp_flags & PP_FR) || (ifp->if_flags & PP_CISCO))*/ ce_down (d); } CE_DEBUG (d, ("ioctl 0x%lx p4\n", cmd)); CE_UNLOCK (bd); splx (s); return 0; } /* * Initialization of interface. * It seems to be never called by upper level? */ static void ce_initialize (void *softc) { drv_t *d = softc; CE_DEBUG (d, ("ce_initialize\n")); } #endif /*NETGRAPH*/ /* * Stop the interface. Called on splimp(). */ static void ce_down (drv_t *d) { CE_DEBUG (d, ("ce_down\n")); /* Interface is going down -- stop it. */ ce_set_dtr (d->chan, 0); ce_set_rts (d->chan, 0); d->running = 0; callout_stop (&d->timeout_handle); } /* * Start the interface. Called on splimp(). */ static void ce_up (drv_t *d) { CE_DEBUG (d, ("ce_up\n")); ce_set_dtr (d->chan, 1); ce_set_rts (d->chan, 1); d->running = 1; } /* * Start output on the interface. Get another datagram to send * off of the interface queue, and copy it to the interface * before starting the output. */ static void ce_send (drv_t *d) { struct mbuf *m; u_short len; CE_DEBUG2 (d, ("ce_send\n")); /* No output if the interface is down. */ if (! d->running) return; while (ce_transmit_space (d->chan)) { /* Get the packet to send. */ #ifdef NETGRAPH IF_DEQUEUE (&d->hi_queue, m); if (! m) IF_DEQUEUE (&d->queue, m); #else m = sppp_dequeue (d->ifp); #endif if (! m) return; #ifndef NETGRAPH #if __FreeBSD_version >= 500000 BPF_MTAP (d->ifp, m); #else if (d->ifp->if_bpf) bpf_mtap (d->ifp, m); #endif #endif #if __FreeBSD_version >= 490000 len = m_length (m, NULL); #else len = m->m_pkthdr.len; #endif if (len >= BUFSZ) printf ("%s: too long packet: %d bytes: ", d->name, len); else if (! m->m_next) ce_send_packet (d->chan, (u_char*) mtod (m, caddr_t), len, 0); else { ce_buf_item_t *item = (ce_buf_item_t*)d->chan->tx_queue; m_copydata (m, 0, len, item->buf); ce_send_packet (d->chan, item->buf, len, 0); } m_freem (m); /* Set up transmit timeout, if the transmit ring is not empty.*/ d->timeout = 10; } #ifndef NETGRAPH #if __FreeBSD_version >= 600034 d->ifp->if_flags |= IFF_DRV_OACTIVE; #else d->ifp->if_flags |= IFF_OACTIVE; #endif #endif } /* * Start output on the interface. * Always called on splimp(). */ static void ce_start (drv_t *d) { if (d->running) { if (! d->chan->dtr) ce_set_dtr (d->chan, 1); if (! 
d->chan->rts) ce_set_rts (d->chan, 1); ce_send (d); callout_reset (&d->timeout_handle, hz, ce_watchdog_timer, d); } } /* * Handle transmit timeouts. * Recover after lost transmit interrupts. * Always called on splimp(). */ static void ce_watchdog (drv_t *d) { CE_DEBUG (d, ("device timeout\n")); if (d->running) { ce_set_dtr (d->chan, 0); ce_set_rts (d->chan, 0); /* ce_stop_chan (d->chan);*/ /* ce_start_chan (d->chan, 1, 1, 0, 0);*/ ce_set_dtr (d->chan, 1); ce_set_rts (d->chan, 1); ce_start (d); } } static void ce_watchdog_timer (void *arg) { drv_t *d = arg; bdrv_t *bd = d->board->sys; CE_LOCK(bd); if (d->timeout == 1) ce_watchdog (d); if (d->timeout) d->timeout--; callout_reset (&d->timeout_handle, hz, ce_watchdog_timer, d); CE_UNLOCK(bd); } static void ce_transmit (ce_chan_t *c, void *attachment, int len) { drv_t *d = c->sys; d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OPACKETS, 1); #if __FreeBSD_version >= 600034 d->ifp->if_flags &= ~IFF_DRV_OACTIVE; #else d->ifp->if_flags &= ~IFF_OACTIVE; #endif #endif ce_start (d); } static void ce_receive (ce_chan_t *c, unsigned char *data, int len) { drv_t *d = c->sys; struct mbuf *m; if (! d->running) return; m = makembuf (data, len); if (! m) { CE_DEBUG (d, ("no memory for packet\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IQDROPS, 1); #endif return; } if (c->debug > 1) m_print (m, 0); #ifdef NETGRAPH m->m_pkthdr.rcvif = 0; IF_ENQUEUE(&d->rqueue, m); #else if_inc_counter(d->ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = d->ifp; /* Check if there's a BPF listener on this interface. * If so, hand off the raw packet to bpf. */ BPF_MTAP(d->ifp, m); IF_ENQUEUE(&d->rqueue, m); #endif } static void ce_error (ce_chan_t *c, int data) { drv_t *d = c->sys; switch (data) { case CE_FRAME: CE_DEBUG (d, ("frame error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CE_CRC: CE_DEBUG (d, ("crc error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CE_OVERRUN: CE_DEBUG (d, ("overrun error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_COLLISIONS, 1); if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CE_OVERFLOW: CE_DEBUG (d, ("overflow error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CE_UNDERRUN: CE_DEBUG (d, ("underrun error\n")); d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OERRORS, 1); #if __FreeBSD_version >= 600034 d->ifp->if_flags &= ~IFF_DRV_OACTIVE; #else d->ifp->if_flags &= ~IFF_OACTIVE; #endif #endif ce_start (d); break; default: CE_DEBUG (d, ("error #%d\n", data)); break; } } /* * You also need read, write, open, close routines. * This should get you started */ #if __FreeBSD_version < 500000 static int ce_open (dev_t dev, int oflags, int devtype, struct proc *p) #else static int ce_open (struct cdev *dev, int oflags, int devtype, struct thread *td) #endif { int unit = dev2unit (dev); drv_t *d; if (unit >= NBRD*NCHAN || ! (d = channel[unit])) return ENXIO; CE_DEBUG2 (d, ("ce_open\n")); return 0; } /* * Only called on the LAST close. */ #if __FreeBSD_version < 500000 static int ce_close (dev_t dev, int fflag, int devtype, struct proc *p) #else static int ce_close (struct cdev *dev, int fflag, int devtype, struct thread *td) #endif { drv_t *d = channel [dev2unit (dev)]; CE_DEBUG2 (d, ("ce_close\n")); return 0; } static int ce_modem_status (ce_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd = d->board->sys; int status, s; status = d->running ? 
TIOCM_LE : 0; s = splimp (); CE_LOCK (bd); if (ce_get_cd (c)) status |= TIOCM_CD; if (ce_get_cts (c)) status |= TIOCM_CTS; if (ce_get_dsr (c)) status |= TIOCM_DSR; if (c->dtr) status |= TIOCM_DTR; if (c->rts) status |= TIOCM_RTS; CE_UNLOCK (bd); splx (s); return status; } #if __FreeBSD_version < 500000 static int ce_ioctl (dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) #else static int ce_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) #endif { drv_t *d = channel [dev2unit (dev)]; bdrv_t *bd = d->board->sys; ce_chan_t *c = d->chan; struct serial_statistics *st; struct e1_statistics *opte1; int error, s; char mask[16]; switch (cmd) { case SERIAL_GETREGISTERED: CE_DEBUG2 (d, ("ioctl: getregistered\n")); bzero (mask, sizeof(mask)); for (s=0; sifp)->pp_flags & PP_FR) ? "fr" : (d->ifp->if_flags & PP_CISCO) ? "cisco" : "ppp"); return 0; case SERIAL_SETPROTO: CE_DEBUG2 (d, ("ioctl: setproto\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; #if __FreeBSD_version >= 600034 if (d->ifp->if_flags & IFF_DRV_RUNNING) #else if (d->ifp->if_flags & IFF_RUNNING) #endif return EBUSY; if (! strcmp ("cisco", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~(PP_FR); IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; d->ifp->if_flags |= PP_CISCO; #if PP_FR != 0 } else if (! strcmp ("fr", (char*)data)) { d->ifp->if_flags &= ~(PP_CISCO); IFP2SP(d->ifp)->pp_flags |= PP_FR | PP_KEEPALIVE; #endif } else if (! strcmp ("ppp", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~PP_FR; IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; d->ifp->if_flags &= ~(PP_CISCO); } else return EINVAL; return 0; case SERIAL_GETKEEPALIVE: CE_DEBUG2 (d, ("ioctl: getkeepalive\n")); if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; *(int*)data = (IFP2SP(d->ifp)->pp_flags & PP_KEEPALIVE) ? 1 : 0; return 0; case SERIAL_SETKEEPALIVE: CE_DEBUG2 (d, ("ioctl: setkeepalive\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; s = splimp (); CE_LOCK (bd); if (*(int*)data) IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; else IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; CE_UNLOCK (bd); splx (s); return 0; #endif /*NETGRAPH*/ case SERIAL_GETMODE: CE_DEBUG2 (d, ("ioctl: getmode\n")); *(int*)data = SERIAL_HDLC; return 0; case SERIAL_SETMODE: /* Only for superuser! 
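The "Only for superuser!" test repeated throughout ce_ioctl() is the same three-way version ladder each time: suser() took a struct proc before FreeBSD 5.0, a struct thread up to 7.0, and priv_check(9) with PRIV_DRIVER replaces it afterwards. For illustration only -- the helper below does not exist in the driver -- the idiom reads in isolation as:

#if __FreeBSD_version < 500000
static int
ce_priv_check (struct proc *p)
{
	/* Pre-5.x: process-based superuser check. */
	return suser (p);
}
#elif __FreeBSD_version < 700000
static int
ce_priv_check (struct thread *td)
{
	/* 5.x/6.x: thread-based suser(). */
	return suser (td);
}
#else
static int
ce_priv_check (struct thread *td)
{
	/* 7.x and later: fine-grained privilege check. */
	return priv_check (td, PRIV_DRIVER);
}
#endif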
*/ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (*(int*)data != SERIAL_HDLC) return EINVAL; return 0; case SERIAL_GETCFG: CE_DEBUG2 (d, ("ioctl: getcfg\n")); *(char*)data = 'c'; return 0; case SERIAL_SETCFG: CE_DEBUG2 (d, ("ioctl: setcfg\n")); #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (*((char*)data) != 'c') return EINVAL; return 0; case SERIAL_GETSTAT: CE_DEBUG2 (d, ("ioctl: getstat\n")); st = (struct serial_statistics*) data; st->rintr = c->rintr; st->tintr = c->tintr; st->mintr = 0; st->ibytes = c->ibytes; st->ipkts = c->ipkts; st->obytes = c->obytes; st->opkts = c->opkts; st->ierrs = c->overrun + c->frame + c->crc; st->oerrs = c->underrun; return 0; case SERIAL_GETESTAT: CE_DEBUG2 (d, ("ioctl: getestat\n")); if (c->type != T_E1) return EINVAL; opte1 = (struct e1_statistics*) data; opte1->status = 0; if (c->status & ESTS_NOALARM) opte1->status |= E1_NOALARM; if (c->status & ESTS_LOS) opte1->status |= E1_LOS; if (c->status & ESTS_LOF) opte1->status |= E1_LOF; if (c->status & ESTS_AIS) opte1->status |= E1_AIS; if (c->status & ESTS_LOMF) opte1->status |= E1_LOMF; if (c->status & ESTS_AIS16) opte1->status |= E1_AIS16; if (c->status & ESTS_FARLOF) opte1->status |= E1_FARLOF; if (c->status & ESTS_FARLOMF) opte1->status |= E1_FARLOMF; if (c->status & ESTS_TSTREQ) opte1->status |= E1_TSTREQ; if (c->status & ESTS_TSTERR) opte1->status |= E1_TSTERR; opte1->cursec = c->cursec; opte1->totsec = c->totsec + c->cursec; opte1->currnt.bpv = c->currnt.bpv; opte1->currnt.fse = c->currnt.fse; opte1->currnt.crce = c->currnt.crce; opte1->currnt.rcrce = c->currnt.rcrce; opte1->currnt.uas = c->currnt.uas; opte1->currnt.les = c->currnt.les; opte1->currnt.es = c->currnt.es; opte1->currnt.bes = c->currnt.bes; opte1->currnt.ses = c->currnt.ses; opte1->currnt.oofs = c->currnt.oofs; opte1->currnt.css = c->currnt.css; opte1->currnt.dm = c->currnt.dm; opte1->total.bpv = c->total.bpv + c->currnt.bpv; opte1->total.fse = c->total.fse + c->currnt.fse; opte1->total.crce = c->total.crce + c->currnt.crce; opte1->total.rcrce = c->total.rcrce + c->currnt.rcrce; opte1->total.uas = c->total.uas + c->currnt.uas; opte1->total.les = c->total.les + c->currnt.les; opte1->total.es = c->total.es + c->currnt.es; opte1->total.bes = c->total.bes + c->currnt.bes; opte1->total.ses = c->total.ses + c->currnt.ses; opte1->total.oofs = c->total.oofs + c->currnt.oofs; opte1->total.css = c->total.css + c->currnt.css; opte1->total.dm = c->total.dm + c->currnt.dm; for (s=0; s<48; ++s) { opte1->interval[s].bpv = c->interval[s].bpv; opte1->interval[s].fse = c->interval[s].fse; opte1->interval[s].crce = c->interval[s].crce; opte1->interval[s].rcrce = c->interval[s].rcrce; opte1->interval[s].uas = c->interval[s].uas; opte1->interval[s].les = c->interval[s].les; opte1->interval[s].es = c->interval[s].es; opte1->interval[s].bes = c->interval[s].bes; opte1->interval[s].ses = c->interval[s].ses; opte1->interval[s].oofs = c->interval[s].oofs; opte1->interval[s].css = c->interval[s].css; opte1->interval[s].dm = c->interval[s].dm; } return 0; case SERIAL_CLRSTAT: CE_DEBUG2 (d, ("ioctl: clrstat\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; c->rintr = 0; c->tintr = 0; c->ibytes = 0; c->obytes = 0; c->ipkts = 0; c->opkts = 0; c->overrun = 0; c->frame = 0; c->crc = 0; c->underrun = 0; bzero (&c->currnt, sizeof (c->currnt)); bzero (&c->total, sizeof (c->total)); bzero (c->interval, sizeof (c->interval)); return 0; case SERIAL_GETLOOP: CE_DEBUG2 (d, ("ioctl: getloop\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->lloop; return 0; case SERIAL_SETLOOP: CE_DEBUG2 (d, ("ioctl: setloop\n")); if (c->type != T_E1) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_lloop (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETRLOOP: CE_DEBUG2 (d, ("ioctl: getrloop\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->rloop; return 0; case SERIAL_SETRLOOP: CE_DEBUG2 (d, ("ioctl: setloop\n")); if (c->type != T_E1) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_rloop (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDEBUG: CE_DEBUG2 (d, ("ioctl: getdebug\n")); *(int*)data = d->chan->debug; return 0; case SERIAL_SETDEBUG: CE_DEBUG2 (d, ("ioctl: setdebug\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; #ifndef NETGRAPH /* * The debug_shadow is always greater than zero for logic * simplicity. For switching debug off the IFF_DEBUG is * responsible. */ d->chan->debug_shadow = (*(int*)data) ? (*(int*)data) : 1; if (d->ifp->if_flags & IFF_DEBUG) d->chan->debug = d->chan->debug_shadow; #else d->chan->debug = *(int*)data; #endif return 0; case SERIAL_GETBAUD: CE_DEBUG2 (d, ("ioctl: getbaud\n")); *(long*)data = c->baud; return 0; case SERIAL_SETBAUD: CE_DEBUG2 (d, ("ioctl: setbaud\n")); if (c->type != T_E1 || !c->unfram) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_baud (c, *(long*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETTIMESLOTS: CE_DEBUG2 (d, ("ioctl: gettimeslots\n")); if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; *(u_long*)data = c->ts; return 0; case SERIAL_SETTIMESLOTS: CE_DEBUG2 (d, ("ioctl: settimeslots\n")); /* Only for superuser! 
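The SERIAL_SETDEBUG handler above keeps two copies of the debug level: debug_shadow always holds a non-zero value, and it is copied into chan->debug only while IFF_DEBUG is set on the interface; the SIOCSIFFLAGS path in ce_sioctl() clears chan->debug again when IFF_DEBUG is dropped. A stand-alone userland sketch of that interplay (all names here are invented, not driver API):

#include <stdio.h>

#define IFF_DEBUG_MOCK 0x1

struct mock_chan { int debug, debug_shadow; };

/* SERIAL_SETDEBUG: remember the level, apply it only if IFF_DEBUG is on. */
static void set_debug(struct mock_chan *c, int if_flags, int level)
{
	c->debug_shadow = level ? level : 1;	/* shadow is always > 0 */
	if (if_flags & IFF_DEBUG_MOCK)
		c->debug = c->debug_shadow;
}

/* The SIOCSIFFLAGS path: IFF_DEBUG switches debugging on or off. */
static void flags_changed(struct mock_chan *c, int if_flags)
{
	c->debug = (if_flags & IFF_DEBUG_MOCK) ? c->debug_shadow : 0;
}

int main(void)
{
	struct mock_chan c = { 0, 1 };

	set_debug(&c, 0, 2);			/* level stored, IFF_DEBUG off */
	printf("debug=%d shadow=%d\n", c.debug, c.debug_shadow);	/* 0 2 */
	flags_changed(&c, IFF_DEBUG_MOCK);	/* ifconfig ... debug */
	printf("debug=%d shadow=%d\n", c.debug, c.debug_shadow);	/* 2 2 */
	flags_changed(&c, 0);			/* ifconfig ... -debug */
	printf("debug=%d shadow=%d\n", c.debug, c.debug_shadow);	/* 0 2 */
	return 0;
}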
*/ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_ts (c, *(u_long*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETHIGAIN: CE_DEBUG2 (d, ("ioctl: gethigain\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->higain; return 0; case SERIAL_SETHIGAIN: CE_DEBUG2 (d, ("ioctl: sethigain\n")); if (c->type != T_E1) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_higain (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETPHONY: CE_DEBUG2 (d, ("ioctl: getphony\n")); *(int*)data = c->phony; return 0; case SERIAL_SETPHONY: CE_DEBUG2 (d, ("ioctl: setphony\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_phony (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETUNFRAM: CE_DEBUG2 (d, ("ioctl: getunfram\n")); if (c->type != T_E1 || c->num != 0) return EINVAL; *(int*)data = c->unfram; return 0; case SERIAL_SETUNFRAM: CE_DEBUG2 (d, ("ioctl: setunfram\n")); if (c->type != T_E1 || c->num != 0) return EINVAL; /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_unfram (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSCRAMBLER: CE_DEBUG2 (d, ("ioctl: getscrambler\n")); if (!c->unfram) return EINVAL; *(int*)data = c->scrambler; return 0; case SERIAL_SETSCRAMBLER: CE_DEBUG2 (d, ("ioctl: setscrambler\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (!c->unfram) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_scrambler (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETMONITOR: CE_DEBUG2 (d, ("ioctl: getmonitor\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->monitor; return 0; case SERIAL_SETMONITOR: CE_DEBUG2 (d, ("ioctl: setmonitor\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_monitor (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETUSE16: CE_DEBUG2 (d, ("ioctl: getuse16\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->use16; return 0; case SERIAL_SETUSE16: CE_DEBUG2 (d, ("ioctl: setuse16\n")); /* Only for superuser! 
*/ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_use16 (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCRC4: CE_DEBUG2 (d, ("ioctl: getcrc4\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->crc4; return 0; case SERIAL_SETCRC4: CE_DEBUG2 (d, ("ioctl: setcrc4\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (c->type != T_E1 || c->unfram) return EINVAL; s = splimp (); CE_LOCK (bd); ce_set_crc4 (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCLK: CE_DEBUG2 (d, ("ioctl: getclk\n")); if (c->type != T_E1) return EINVAL; switch (c->gsyn) { default: *(int*)data = E1CLK_INTERNAL; break; case GSYN_RCV: *(int*)data = E1CLK_RECEIVE; break; case GSYN_RCV0: *(int*)data = E1CLK_RECEIVE_CHAN0; break; case GSYN_RCV1: *(int*)data = E1CLK_RECEIVE_CHAN1; break; } return 0; case SERIAL_SETCLK: CE_DEBUG2 (d, ("ioctl: setclk\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CE_LOCK (bd); switch (*(int*)data) { default: ce_set_gsyn (c, GSYN_INT); break; case E1CLK_RECEIVE: ce_set_gsyn (c, GSYN_RCV); break; case E1CLK_RECEIVE_CHAN0: ce_set_gsyn (c, GSYN_RCV0); break; case E1CLK_RECEIVE_CHAN1: ce_set_gsyn (c, GSYN_RCV1); break; } CE_UNLOCK (bd); splx (s); return 0; #if 0 case SERIAL_RESET: CE_DEBUG2 (d, ("ioctl: reset\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); /* ce_reset (c->board, 0, 0);*/ CE_UNLOCK (bd); splx (s); return 0; case SERIAL_HARDRESET: CE_DEBUG2 (d, ("ioctl: hardreset\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); /* hard_reset (c->board); */ CE_UNLOCK (bd); splx (s); return 0; #endif case SERIAL_GETCABLE: CE_DEBUG2 (d, ("ioctl: getcable\n")); if (c->type != T_E1) return EINVAL; s = splimp (); CE_LOCK (bd); *(int*)data = CABLE_TP; CE_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDIR: CE_DEBUG2 (d, ("ioctl: getdir\n")); if (c->type != T_E1 && c->type != T_DATA) return EINVAL; *(int*)data = c->dir; return 0; case SERIAL_SETDIR: CE_DEBUG2 (d, ("ioctl: setdir\n")); /* Only for superuser! */ #if __FreeBSD_version < 500000 error = suser (p); #elif __FreeBSD_version < 700000 error = suser (td); #else error = priv_check (td, PRIV_DRIVER); #endif if (error) return error; s = splimp (); CE_LOCK (bd); ce_set_dir (c, *(int*)data); CE_UNLOCK (bd); splx (s); return 0; case TIOCSDTR: /* Set DTR */ s = splimp (); CE_LOCK (bd); ce_set_dtr (c, 1); CE_UNLOCK (bd); splx (s); return 0; case TIOCCDTR: /* Clear DTR */ s = splimp (); CE_LOCK (bd); ce_set_dtr (c, 0); CE_UNLOCK (bd); splx (s); return 0; case TIOCMSET: /* Set DTR/RTS */ s = splimp (); CE_LOCK (bd); ce_set_dtr (c, (*(int*)data & TIOCM_DTR) ? 
1 : 0); ce_set_rts (c, (*(int*)data & TIOCM_RTS) ? 1 : 0); CE_UNLOCK (bd); splx (s); return 0; case TIOCMBIS: /* Add DTR/RTS */ s = splimp (); CE_LOCK (bd); if (*(int*)data & TIOCM_DTR) ce_set_dtr (c, 1); if (*(int*)data & TIOCM_RTS) ce_set_rts (c, 1); CE_UNLOCK (bd); splx (s); return 0; case TIOCMBIC: /* Clear DTR/RTS */ s = splimp (); CE_LOCK (bd); if (*(int*)data & TIOCM_DTR) ce_set_dtr (c, 0); if (*(int*)data & TIOCM_RTS) ce_set_rts (c, 0); CE_UNLOCK (bd); splx (s); return 0; case TIOCMGET: /* Get modem status */ *(int*)data = ce_modem_status (c); return 0; } return ENOTTY; } #ifdef NETGRAPH #if __FreeBSD_version >= 500000 static int ng_ce_constructor (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); #else static int ng_ce_constructor (node_p *node) { drv_t *d = (*node)->private; #endif CE_DEBUG (d, ("Constructor\n")); return EINVAL; } static int ng_ce_newhook (node_p node, hook_p hook, const char *name) { int s; #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (node); #else drv_t *d = node->private; #endif bdrv_t *bd = d->board->sys; CE_DEBUG (d, ("Newhook\n")); /* Attach debug hook */ if (strcmp (name, NG_CE_HOOK_DEBUG) == 0) { #if __FreeBSD_version >= 500000 NG_HOOK_SET_PRIVATE (hook, NULL); #else hook->private = 0; #endif d->debug_hook = hook; return 0; } /* Check for raw hook */ if (strcmp (name, NG_CE_HOOK_RAW) != 0) return EINVAL; #if __FreeBSD_version >= 500000 NG_HOOK_SET_PRIVATE (hook, d); #else hook->private = d; #endif d->hook = hook; s = splimp (); CE_LOCK (bd); ce_up (d); CE_UNLOCK (bd); splx (s); return 0; } static char *format_timeslots (u_long s) { static char buf [100]; char *p = buf; int i; for (i=1; i<32; ++i) if ((s >> i) & 1) { int prev = (i > 1) & (s >> (i-1)); int next = (i < 31) & (s >> (i+1)); if (prev) { if (next) continue; *p++ = '-'; } else if (p > buf) *p++ = ','; if (i >= 10) *p++ = '0' + i / 10; *p++ = '0' + i % 10; } *p = 0; return buf; } static int print_modems (char *s, ce_chan_t *c, int need_header) { int status = ce_modem_status (c); int length = 0; if (need_header) length += sprintf (s + length, " LE DTR DSR RTS CTS CD\n"); length += sprintf (s + length, "%4s %4s %4s %4s %4s %4s\n", status & TIOCM_LE ? "On" : "-", status & TIOCM_DTR ? "On" : "-", status & TIOCM_DSR ? "On" : "-", status & TIOCM_RTS ? "On" : "-", status & TIOCM_CTS ? "On" : "-", status & TIOCM_CD ? 
"On" : "-"); return length; } static int print_stats (char *s, ce_chan_t *c, int need_header) { int length = 0; if (need_header) length += sprintf (s + length, " Rintr Tintr Mintr Ibytes Ipkts Ierrs Obytes Opkts Oerrs\n"); length += sprintf (s + length, "%7ld %7ld %7ld %8lu %7ld %7ld %8lu %7ld %7ld\n", c->rintr, c->tintr, 0l, (unsigned long) c->ibytes, c->ipkts, c->overrun + c->frame + c->crc, (unsigned long) c->obytes, c->opkts, c->underrun); return length; } static char *format_e1_status (u_char status) { static char buf [80]; if (status & E1_NOALARM) return "Ok"; buf[0] = 0; if (status & E1_LOS) strcat (buf, ",LOS"); if (status & E1_AIS) strcat (buf, ",AIS"); if (status & E1_LOF) strcat (buf, ",LOF"); if (status & E1_LOMF) strcat (buf, ",LOMF"); if (status & E1_FARLOF) strcat (buf, ",FARLOF"); if (status & E1_AIS16) strcat (buf, ",AIS16"); if (status & E1_FARLOMF) strcat (buf, ",FARLOMF"); if (status & E1_TSTREQ) strcat (buf, ",TSTREQ"); if (status & E1_TSTERR) strcat (buf, ",TSTERR"); if (buf[0] == ',') return buf+1; return "Unknown"; } static int print_frac (char *s, int leftalign, u_long numerator, u_long divider) { int n, length = 0; if (numerator < 1 || divider < 1) { length += sprintf (s+length, leftalign ? "/- " : " -"); return length; } n = (int) (0.5 + 1000.0 * numerator / divider); if (n < 1000) { length += sprintf (s+length, leftalign ? "/.%-3d" : " .%03d", n); return length; } *(s + length) = leftalign ? '/' : ' '; length ++; if (n >= 1000000) n = (n+500) / 1000 * 1000; else if (n >= 100000) n = (n+50) / 100 * 100; else if (n >= 10000) n = (n+5) / 10 * 10; switch (n) { case 1000: length += printf (s+length, ".999"); return length; case 10000: n = 9990; break; case 100000: n = 99900; break; case 1000000: n = 999000; break; } if (n < 10000) length += sprintf (s+length, "%d.%d", n/1000, n/10%100); else if (n < 100000) length += sprintf (s+length, "%d.%d", n/1000, n/100%10); else if (n < 1000000) length += sprintf (s+length, "%d.", n/1000); else length += sprintf (s+length, "%d", n/1000); return length; } static int print_e1_stats (char *s, ce_chan_t *c) { struct e1_counters total; u_long totsec; int length = 0; totsec = c->totsec + c->cursec; total.bpv = c->total.bpv + c->currnt.bpv; total.fse = c->total.fse + c->currnt.fse; total.crce = c->total.crce + c->currnt.crce; total.rcrce = c->total.rcrce + c->currnt.rcrce; total.uas = c->total.uas + c->currnt.uas; total.les = c->total.les + c->currnt.les; total.es = c->total.es + c->currnt.es; total.bes = c->total.bes + c->currnt.bes; total.ses = c->total.ses + c->currnt.ses; total.oofs = c->total.oofs + c->currnt.oofs; total.css = c->total.css + c->currnt.css; total.dm = c->total.dm + c->currnt.dm; length += sprintf (s + length, " Unav/Degr Bpv/Fsyn CRC/RCRC Err/Lerr Sev/Bur Oof/Slp Status\n"); /* Unavailable seconds, degraded minutes */ length += print_frac (s + length, 0, c->currnt.uas, c->cursec); length += print_frac (s + length, 1, 60 * c->currnt.dm, c->cursec); /* Bipolar violations, frame sync errors */ length += print_frac (s + length, 0, c->currnt.bpv, c->cursec); length += print_frac (s + length, 1, c->currnt.fse, c->cursec); /* CRC errors, remote CRC errors (E-bit) */ length += print_frac (s + length, 0, c->currnt.crce, c->cursec); length += print_frac (s + length, 1, c->currnt.rcrce, c->cursec); /* Errored seconds, line errored seconds */ length += print_frac (s + length, 0, c->currnt.es, c->cursec); length += print_frac (s + length, 1, c->currnt.les, c->cursec); /* Severely errored seconds, burst errored seconds */ 
length += print_frac (s + length, 0, c->currnt.ses, c->cursec); length += print_frac (s + length, 1, c->currnt.bes, c->cursec); /* Out of frame seconds, controlled slip seconds */ length += print_frac (s + length, 0, c->currnt.oofs, c->cursec); length += print_frac (s + length, 1, c->currnt.css, c->cursec); length += sprintf (s + length, " %s\n", format_e1_status (c->status)); /* Print total statistics. */ length += print_frac (s + length, 0, total.uas, totsec); length += print_frac (s + length, 1, 60 * total.dm, totsec); length += print_frac (s + length, 0, total.bpv, totsec); length += print_frac (s + length, 1, total.fse, totsec); length += print_frac (s + length, 0, total.crce, totsec); length += print_frac (s + length, 1, total.rcrce, totsec); length += print_frac (s + length, 0, total.es, totsec); length += print_frac (s + length, 1, total.les, totsec); length += print_frac (s + length, 0, total.ses, totsec); length += print_frac (s + length, 1, total.bes, totsec); length += print_frac (s + length, 0, total.oofs, totsec); length += print_frac (s + length, 1, total.css, totsec); length += sprintf (s + length, " -- Total\n"); return length; } static int print_chan (char *s, ce_chan_t *c) { drv_t *d = c->sys; int length = 0; length += sprintf (s + length, "ce%d", c->board->num * NCHAN + c->num); if (d->chan->debug) length += sprintf (s + length, " debug=%d", d->chan->debug); if (c->board->mux) { length += sprintf (s + length, " cfg=C"); } else { length += sprintf (s + length, " cfg=A"); } if (c->baud) length += sprintf (s + length, " %ld", c->baud); else length += sprintf (s + length, " extclock"); if (c->type == T_E1) switch (c->gsyn) { case GSYN_INT : length += sprintf (s + length, " syn=int"); break; case GSYN_RCV : length += sprintf (s + length, " syn=rcv"); break; case GSYN_RCV0 : length += sprintf (s + length, " syn=rcv0"); break; case GSYN_RCV1 : length += sprintf (s + length, " syn=rcv1"); break; } if (c->type == T_E1) length += sprintf (s + length, " higain=%s", c->higain ? "on" : "off"); length += sprintf (s + length, " loop=%s", c->lloop ? "on" : "off"); if (c->type == T_E1) length += sprintf (s + length, " ts=%s", format_timeslots (c->ts)); length += sprintf (s + length, "\n"); return length; } #if __FreeBSD_version >= 500000 static int ng_ce_rcvmsg (node_p node, item_p item, hook_p lasthook) { drv_t *d = NG_NODE_PRIVATE (node); struct ng_mesg *msg; #else static int ng_ce_rcvmsg (node_p node, struct ng_mesg *msg, const char *retaddr, struct ng_mesg **rptr) { drv_t *d = node->private; #endif struct ng_mesg *resp = NULL; int error = 0; CE_DEBUG (d, ("Rcvmsg\n")); #if __FreeBSD_version >= 500000 NGI_GET_MSG (item, msg); #endif switch (msg->header.typecookie) { default: error = EINVAL; break; case NGM_CE_COOKIE: printf ("Not implemented yet\n"); error = EINVAL; break; case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { default: error = EINVAL; break; case NGM_TEXT_STATUS: { char *s; int l = 0; int dl = sizeof (struct ng_mesg) + 730; #if __FreeBSD_version >= 500000 NG_MKRESPONSE (resp, msg, dl, M_NOWAIT); if (! resp) { error = ENOMEM; break; } #else resp = malloc (M_NETGRAPH, M_NOWAIT); if (! 
resp) { error = ENOMEM; break; } bzero (resp, dl); #endif s = (resp)->data; if (d) { l += print_chan (s + l, d->chan); l += print_stats (s + l, d->chan, 1); l += print_modems (s + l, d->chan, 1); l += print_e1_stats (s + l, d->chan); } else l += sprintf (s + l, "Error: node not connect to channel"); #if __FreeBSD_version < 500000 (resp)->header.version = NG_VERSION; (resp)->header.arglen = strlen (s) + 1; (resp)->header.token = msg->header.token; (resp)->header.typecookie = NGM_CE_COOKIE; (resp)->header.cmd = msg->header.cmd; #endif strncpy ((resp)->header.cmdstr, "status", NG_CMDSTRSIZ); } break; } break; } #if __FreeBSD_version >= 500000 NG_RESPOND_MSG (error, node, item, resp); NG_FREE_MSG (msg); #else *rptr = resp; free (msg, M_NETGRAPH); #endif return error; } #if __FreeBSD_version >= 500000 static int ng_ce_rcvdata (hook_p hook, item_p item) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE(hook)); struct mbuf *m; #if __FreeBSD_version < 502120 meta_p meta; #else struct ng_tag_prio *ptag; #endif #else static int ng_ce_rcvdata (hook_p hook, struct mbuf *m, meta_p meta) { drv_t *d = hook->node->private; #endif bdrv_t *bd = d->board->sys; struct ifqueue *q; int s; CE_DEBUG2 (d, ("Rcvdata\n")); #if __FreeBSD_version >= 500000 NGI_GET_M (item, m); #if __FreeBSD_version < 502120 NGI_GET_META (item, meta); #endif NG_FREE_ITEM (item); if (! NG_HOOK_PRIVATE (hook) || ! d) { NG_FREE_M (m); #if __FreeBSD_version < 502120 NG_FREE_META (meta); #endif #else if (! hook->private || ! d) { NG_FREE_DATA (m,meta); #endif return ENETDOWN; } #if __FreeBSD_version >= 502120 /* Check for high priority data */ if ((ptag = (struct ng_tag_prio *)m_tag_locate(m, NGM_GENERIC_COOKIE, NG_TAG_PRIO, NULL)) != NULL && (ptag->priority > NG_PRIO_CUTOFF) ) q = &d->hi_queue; else q = &d->queue; #else q = (meta && meta->priority > 0) ? 
&d->hi_queue : &d->queue; #endif s = splimp (); CE_LOCK (bd); #if __FreeBSD_version >= 500000 IF_LOCK (q); if (_IF_QFULL (q)) { IF_UNLOCK (q); CE_UNLOCK (bd); splx (s); NG_FREE_M (m); #if __FreeBSD_version < 502120 NG_FREE_META (meta); #endif return ENOBUFS; } _IF_ENQUEUE (q, m); IF_UNLOCK (q); #else if (IF_QFULL (q)) { IF_DROP (q); CE_UNLOCK (bd); splx (s); NG_FREE_DATA (m, meta); return ENOBUFS; } IF_ENQUEUE (q, m); #endif ce_start (d); CE_UNLOCK (bd); splx (s); return 0; } static int ng_ce_rmnode (node_p node) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (node); CE_DEBUG (d, ("Rmnode\n")); if (d && d->running) { bdrv_t *bd = d->board->sys; int s = splimp (); CE_LOCK (bd); ce_down (d); CE_UNLOCK (bd); splx (s); } #ifdef KLD_MODULE #if __FreeBSD_version >= 502120 if (node->nd_flags & NGF_REALLY_DIE) { #else if (node->nd_flags & NG_REALLY_DIE) { #endif NG_NODE_SET_PRIVATE (node, NULL); NG_NODE_UNREF (node); } #if __FreeBSD_version >= 502120 NG_NODE_REVIVE(node); /* Persistant node */ #else node->nd_flags &= ~NG_INVALID; #endif #endif #else /* __FreeBSD_version < 500000 */ drv_t *d = node->private; if (d && d->running) { bdrv_t *bd = d->board->sys; int s = splimp (); CE_LOCK (bd); ce_down (d); CE_UNLOCK (bd); splx (s); } node->flags |= NG_INVALID; ng_cutlinks (node); #ifdef KLD_MODULE ng_unname (node); ng_unref (node); #endif #endif return 0; } static int ng_ce_connect (hook_p hook) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); #else drv_t *d = hook->node->private; #endif if (d) { CE_DEBUG (d, ("Connect\n")); callout_reset (&d->timeout_handle, hz, ce_watchdog_timer, d); } return 0; } static int ng_ce_disconnect (hook_p hook) { #if __FreeBSD_version >= 500000 drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); #else drv_t *d = hook->node->private; #endif if (d) { CE_DEBUG (d, ("Disconnect\n")); #if __FreeBSD_version >= 500000 if (NG_HOOK_PRIVATE (hook)) #else if (hook->private) #endif { bdrv_t *bd = d->board->sys; int s = splimp (); CE_LOCK (bd); ce_down (d); CE_UNLOCK (bd); splx (s); } /* If we were wait it than it reasserted now, just stop it. */ if (!callout_drain (&d->timeout_handle)) callout_stop (&d->timeout_handle); } return 0; } #endif static int ce_modevent (module_t mod, int type, void *unused) { #if __FreeBSD_version < 500000 dev_t dev; struct cdevsw *cdsw; #endif static int load_count = 0; #if __FreeBSD_version < 500000 dev = makedev (CDEV_MAJOR, 0); #endif switch (type) { case MOD_LOAD: #if __FreeBSD_version < 500000 if (dev != NODEV && (cdsw = devsw (dev)) && cdsw->d_maj == CDEV_MAJOR) { printf ("Tau32-PCI driver is already in system\n"); return (ENXIO); } #endif #if __FreeBSD_version >= 500000 && defined NETGRAPH if (ng_newtype (&typestruct)) printf ("Failed to register ng_ce\n"); #endif ++load_count; #if __FreeBSD_version <= 500000 cdevsw_add (&ce_cdevsw); #endif #if __FreeBSD_version >= 500000 callout_init (&timeout_handle, 1); #else callout_init (&timeout_handle); #endif callout_reset (&timeout_handle, hz*5, ce_timeout, 0); break; case MOD_UNLOAD: if (load_count == 1) { printf ("Removing device entry for Tau32-PCI\n"); #if __FreeBSD_version <= 500000 cdevsw_remove (&ce_cdevsw); #endif #if __FreeBSD_version >= 500000 && defined NETGRAPH ng_rmtype (&typestruct); #endif } /* If we were wait it than it reasserted now, just stop it. * Actually we shouldn't get this condition. But code could be * changed in the future, so just be a litle paranoid. 
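Both ng_ce_disconnect() and the MOD_UNLOAD path above shut the watchdog callout down with the same idiom, and the accompanying comment ("If we were wait it than it reasserted now, just stop it") is hard to parse. Restated: callout_drain(9) waits for a running handler and returns non-zero only if it removed a pending invocation; when it returns zero, the handler that just ran may have re-armed the callout, so it is stopped once more. A sketch of the idiom as a stand-alone helper (kernel callout(9) context assumed; the helper itself is hypothetical):

/*
 * Illustrative helper, not part of the driver: tear down a callout
 * that may re-arm itself from its own handler.
 */
static void
drain_selfrearming_callout (struct callout *co)
{
	/*
	 * callout_drain() waits for any running handler to finish and
	 * returns non-zero if it removed a pending invocation.  When it
	 * returns zero, the handler we waited for may have rescheduled
	 * the callout, so cancel that new invocation as well.
	 */
	if (!callout_drain (co))
		callout_stop (co);
}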
*/ if (!callout_drain (&timeout_handle)) callout_stop (&timeout_handle); --load_count; break; case MOD_SHUTDOWN: break; } return 0; } #ifdef NETGRAPH #if __FreeBSD_version >= 502100 static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_CE_NODE_TYPE, .constructor = ng_ce_constructor, .rcvmsg = ng_ce_rcvmsg, .shutdown = ng_ce_rmnode, .newhook = ng_ce_newhook, .connect = ng_ce_connect, .rcvdata = ng_ce_rcvdata, .disconnect = ng_ce_disconnect, }; #else /* __FreeBSD_version < 502100 */ static struct ng_type typestruct = { #if __FreeBSD_version >= 500000 NG_ABI_VERSION, #else NG_VERSION, #endif NG_CE_NODE_TYPE, ce_modevent, ng_ce_constructor, ng_ce_rcvmsg, ng_ce_rmnode, ng_ce_newhook, NULL, ng_ce_connect, ng_ce_rcvdata, #if __FreeBSD_version < 500000 NULL, #endif ng_ce_disconnect, NULL }; #endif /* __FreeBSD_version < 502100 */ #endif /*NETGRAPH*/ #if __FreeBSD_version >= 500000 #ifdef NETGRAPH MODULE_DEPEND (ng_ce, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); #else MODULE_DEPEND (ce, sppp, 1, 1, 1); #endif #ifdef KLD_MODULE DRIVER_MODULE (cemod, pci, ce_driver, ce_devclass, ce_modevent, NULL); #else DRIVER_MODULE (ce, pci, ce_driver, ce_devclass, ce_modevent, NULL); #endif #else /* if __FreeBSD_version < 500000*/ #ifdef NETGRAPH DRIVER_MODULE (ce, pci, ce_driver, ce_devclass, ng_mod_event, &typestruct); #else DRIVER_MODULE (ce, pci, ce_driver, ce_devclass, ce_modevent, NULL); #endif #endif /* __FreeBSD_version < 500000 */ #endif /* NPCI */ Index: head/sys/dev/cp/if_cp.c =================================================================== --- head/sys/dev/cp/if_cp.c (revision 295879) +++ head/sys/dev/cp/if_cp.c (revision 295880) @@ -1,2270 +1,2269 @@ /*- * Cronyx-Tau-PCI adapter driver for FreeBSD. * Supports PPP/HDLC, Cisco/HDLC and FrameRelay protocol in synchronous mode, * and asynchronous channels with full modem control. * Keepalive protocol implemented in both Cisco and PPP modes. * * Copyright (C) 1999-2004 Cronyx Engineering. * Author: Kurakin Roman, * * Copyright (C) 1999-2002 Cronyx Engineering. * Author: Serge Vakulenko, * * This software is distributed with NO WARRANTIES, not even the implied * warranties for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Authors grant any other persons or organisations a permission to use, * modify and redistribute this software in source and binary forms, * as long as this message is kept with the software, all derivative * works or modified versions. 
* * Cronyx Id: if_cp.c,v 1.1.2.41 2004/06/23 17:09:13 rik Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_ng_cronyx.h" #ifdef NETGRAPH_CRONYX # include "opt_netgraph.h" # ifndef NETGRAPH # error #option NETGRAPH missed from configuration # endif # include # include # include #else # include # include #include # define PP_CISCO IFF_LINK2 # include #endif #include #include #include #include -#include /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR #define PP_FR 0 #endif #define CP_DEBUG(d,s) ({if (d->chan->debug) {\ printf ("%s: ", d->name); printf s;}}) #define CP_DEBUG2(d,s) ({if (d->chan->debug>1) {\ printf ("%s: ", d->name); printf s;}}) #define CP_LOCK_NAME "cpX" #define CP_LOCK(_bd) mtx_lock (&(_bd)->cp_mtx) #define CP_UNLOCK(_bd) mtx_unlock (&(_bd)->cp_mtx) #define CP_LOCK_ASSERT(_bd) mtx_assert (&(_bd)->cp_mtx, MA_OWNED) static int cp_probe __P((device_t)); static int cp_attach __P((device_t)); static int cp_detach __P((device_t)); static device_method_t cp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cp_probe), DEVMETHOD(device_attach, cp_attach), DEVMETHOD(device_detach, cp_detach), DEVMETHOD_END }; typedef struct _cp_dma_mem_t { unsigned long phys; void *virt; size_t size; bus_dma_tag_t dmat; bus_dmamap_t mapp; } cp_dma_mem_t; typedef struct _drv_t { char name [8]; int running; cp_chan_t *chan; cp_board_t *board; cp_dma_mem_t dmamem; #ifdef NETGRAPH char nodename [NG_NODESIZE]; hook_p hook; hook_p debug_hook; node_p node; struct ifqueue queue; struct ifqueue hi_queue; #else struct ifqueue queue; struct ifnet *ifp; #endif short timeout; struct callout timeout_handle; struct cdev *devt; } drv_t; typedef struct _bdrv_t { cp_board_t *board; struct resource *cp_res; struct resource *cp_irq; void *cp_intrhand; cp_dma_mem_t dmamem; drv_t channel [NCHAN]; struct mtx cp_mtx; } bdrv_t; static driver_t cp_driver = { "cp", cp_methods, sizeof(bdrv_t), }; static devclass_t cp_devclass; static void cp_receive (cp_chan_t *c, unsigned char *data, int len); static void cp_transmit (cp_chan_t *c, void *attachment, int len); static void cp_error (cp_chan_t *c, int data); static void cp_up (drv_t *d); static void cp_start (drv_t *d); static void cp_down (drv_t *d); static void cp_watchdog (drv_t *d); static void cp_watchdog_timer (void *arg); #ifdef NETGRAPH extern struct ng_type typestruct; #else static void cp_ifstart (struct ifnet *ifp); static void cp_tlf (struct sppp *sp); static void cp_tls (struct sppp *sp); static int cp_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data); static void cp_initialize (void *softc); #endif static cp_board_t *adapter [NBRD]; static drv_t *channel [NBRD*NCHAN]; static struct callout led_timo [NBRD]; static struct callout timeout_handle; static int cp_destroy = 0; static int cp_open (struct cdev *dev, int oflags, int devtype, struct thread *td); static int cp_close (struct cdev *dev, int fflag, int devtype, struct thread *td); static int cp_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td); static struct cdevsw cp_cdevsw = { .d_version = D_VERSION, .d_open = cp_open, .d_close = cp_close, .d_ioctl = cp_ioctl, .d_name = "cp", }; /* * Make an mbuf from data. 
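The CP_DEBUG()/CP_DEBUG2() macros defined above take the whole message, format string and arguments, as one parenthesised macro argument, which is why every call site uses double parentheses, e.g. CP_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)). A stand-alone userland sketch of the same trick (simplified to a do/while body, names invented):

#include <stdio.h>

struct mock_dev { const char *name; int debug; };

/*
 * "msg" expands to a full printf argument list, so the caller wraps
 * it in its own parentheses: MOCK_DEBUG (d, ("len=%d\n", len));
 */
#define MOCK_DEBUG(d, msg) do {				\
	if ((d)->debug) {				\
		printf ("%s: ", (d)->name);		\
		printf msg;				\
	}						\
} while (0)

int main(void)
{
	struct mock_dev d = { "cp0.0", 1 };
	int len = 42;

	MOCK_DEBUG (&d, ("too long packet: %d bytes\n", len));
	d.debug = 0;
	MOCK_DEBUG (&d, ("this one is suppressed\n"));
	return 0;
}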
*/ static struct mbuf *makembuf (void *buf, unsigned len) { struct mbuf *m; MGETHDR (m, M_NOWAIT, MT_DATA); if (! m) return 0; if (!(MCLGET (m, M_NOWAIT))) { m_freem (m); return 0; } m->m_pkthdr.len = m->m_len = len; bcopy (buf, mtod (m, caddr_t), len); return m; } static int cp_probe (device_t dev) { if ((pci_get_vendor (dev) == cp_vendor_id) && (pci_get_device (dev) == cp_device_id)) { device_set_desc (dev, "Cronyx-Tau-PCI serial adapter"); return BUS_PROBE_DEFAULT; } return ENXIO; } static void cp_timeout (void *arg) { drv_t *d; int s, i, k; for (i = 0; i < NBRD; ++i) { if (adapter[i] == NULL) continue; for (k = 0; k < NCHAN; ++k) { s = splimp (); if (cp_destroy) { splx (s); return; } d = channel[i * NCHAN + k]; if (!d) { splx (s); continue; } CP_LOCK ((bdrv_t *)d->board->sys); switch (d->chan->type) { case T_G703: cp_g703_timer (d->chan); break; case T_E1: cp_e1_timer (d->chan); break; case T_E3: case T_T3: case T_STS1: cp_e3_timer (d->chan); break; default: break; } CP_UNLOCK ((bdrv_t *)d->board->sys); splx (s); } } s = splimp (); if (!cp_destroy) callout_reset (&timeout_handle, hz, cp_timeout, 0); splx (s); } static void cp_led_off (void *arg) { cp_board_t *b = arg; bdrv_t *bd = (bdrv_t *) b->sys; int s; s = splimp (); if (cp_destroy) { splx (s); return; } CP_LOCK (bd); cp_led (b, 0); CP_UNLOCK (bd); splx (s); } static void cp_intr (void *arg) { bdrv_t *bd = arg; cp_board_t *b = bd->board; #ifndef NETGRAPH int i; #endif int s = splimp (); if (cp_destroy) { splx (s); return; } CP_LOCK (bd); /* Check if we are ready */ if (b->sys == NULL) { /* Not we are not, just cleanup. */ cp_interrupt_poll (b, 1); CP_UNLOCK (bd); return; } /* Turn LED on. */ cp_led (b, 1); cp_interrupt (b); /* Turn LED off 50 msec later. */ callout_reset (&led_timo[b->num], hz/20, cp_led_off, b); CP_UNLOCK (bd); splx (s); #ifndef NETGRAPH /* Pass packets in a lock-free state */ for (i = 0; i < NCHAN && b->chan[i].type; i++) { drv_t *d = b->chan[i].sys; struct mbuf *m; if (!d || !d->running) continue; while (_IF_QLEN(&d->queue)) { IF_DEQUEUE (&d->queue,m); if (!m) continue; sppp_input (d->ifp, m); } } #endif } static void cp_bus_dmamap_addr (void *arg, bus_dma_segment_t *segs, int nseg, int error) { unsigned long *addr; if (error) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); addr = arg; *addr = segs->ds_addr; } static int cp_bus_dma_mem_alloc (int bnum, int cnum, cp_dma_mem_t *dmem) { int error; error = bus_dma_tag_create (NULL, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, dmem->size, 1, dmem->size, 0, NULL, NULL, &dmem->dmat); if (error) { if (cnum >= 0) printf ("cp%d-%d: ", bnum, cnum); else printf ("cp%d: ", bnum); printf ("couldn't allocate tag for dma memory\n"); return 0; } error = bus_dmamem_alloc (dmem->dmat, (void **)&dmem->virt, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &dmem->mapp); if (error) { if (cnum >= 0) printf ("cp%d-%d: ", bnum, cnum); else printf ("cp%d: ", bnum); printf ("couldn't allocate mem for dma memory\n"); bus_dma_tag_destroy (dmem->dmat); return 0; } error = bus_dmamap_load (dmem->dmat, dmem->mapp, dmem->virt, dmem->size, cp_bus_dmamap_addr, &dmem->phys, 0); if (error) { if (cnum >= 0) printf ("cp%d-%d: ", bnum, cnum); else printf ("cp%d: ", bnum); printf ("couldn't load mem map for dma memory\n"); bus_dmamem_free (dmem->dmat, dmem->virt, dmem->mapp); bus_dma_tag_destroy (dmem->dmat); return 0; } return 1; } static void cp_bus_dma_mem_free (cp_dma_mem_t *dmem) { bus_dmamap_unload (dmem->dmat, dmem->mapp); bus_dmamem_free (dmem->dmat, dmem->virt, 
dmem->mapp); bus_dma_tag_destroy (dmem->dmat); } /* * Called if the probe succeeded. */ static int cp_attach (device_t dev) { bdrv_t *bd = device_get_softc (dev); int unit = device_get_unit (dev); char *cp_ln = CP_LOCK_NAME; unsigned short res; vm_offset_t vbase; int rid, error; cp_board_t *b; cp_chan_t *c; drv_t *d; int s = splimp (); b = malloc (sizeof(cp_board_t), M_DEVBUF, M_WAITOK); if (!b) { printf ("cp%d: couldn't allocate memory\n", unit); splx (s); return (ENXIO); } bzero (b, sizeof(cp_board_t)); bd->board = b; rid = PCIR_BAR(0); bd->cp_res = bus_alloc_resource (dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); if (! bd->cp_res) { printf ("cp%d: cannot map memory\n", unit); free (b, M_DEVBUF); splx (s); return (ENXIO); } vbase = (vm_offset_t) rman_get_virtual (bd->cp_res); cp_ln[2] = '0' + unit; mtx_init (&bd->cp_mtx, cp_ln, MTX_NETWORK_LOCK, MTX_DEF|MTX_RECURSE); res = cp_init (b, unit, (u_char*) vbase); if (res) { printf ("cp%d: can't init, error code:%x\n", unit, res); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); free (b, M_DEVBUF); splx (s); return (ENXIO); } bd->dmamem.size = sizeof(cp_qbuf_t); if (! cp_bus_dma_mem_alloc (unit, -1, &bd->dmamem)) { free (b, M_DEVBUF); splx (s); return (ENXIO); } CP_LOCK (bd); cp_reset (b, bd->dmamem.virt, bd->dmamem.phys); CP_UNLOCK (bd); rid = 0; bd->cp_irq = bus_alloc_resource (dev, SYS_RES_IRQ, &rid, 0, ~0, 1, RF_SHAREABLE | RF_ACTIVE); if (! bd->cp_irq) { cp_destroy = 1; printf ("cp%d: cannot map interrupt\n", unit); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); mtx_destroy (&bd->cp_mtx); free (b, M_DEVBUF); splx (s); return (ENXIO); } callout_init (&led_timo[unit], 1); error = bus_setup_intr (dev, bd->cp_irq, INTR_TYPE_NET|INTR_MPSAFE, NULL, cp_intr, bd, &bd->cp_intrhand); if (error) { cp_destroy = 1; printf ("cp%d: cannot set up irq\n", unit); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->cp_irq); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); mtx_destroy (&bd->cp_mtx); free (b, M_DEVBUF); splx (s); return (ENXIO); } printf ("cp%d: %s, clock %ld MHz\n", unit, b->name, b->osc / 1000000); for (c = b->chan; c < b->chan + NCHAN; ++c) { if (! c->type) continue; d = &bd->channel[c->num]; d->dmamem.size = sizeof(cp_buf_t); if (! 
cp_bus_dma_mem_alloc (unit, c->num, &d->dmamem)) continue; channel [b->num*NCHAN + c->num] = d; sprintf (d->name, "cp%d.%d", b->num, c->num); d->board = b; d->chan = c; c->sys = d; callout_init (&d->timeout_handle, 1); #ifdef NETGRAPH if (ng_make_node_common (&typestruct, &d->node) != 0) { printf ("%s: cannot make common node\n", d->name); d->node = NULL; continue; } NG_NODE_SET_PRIVATE (d->node, d); sprintf (d->nodename, "%s%d", NG_CP_NODE_TYPE, c->board->num*NCHAN + c->num); if (ng_name_node (d->node, d->nodename)) { printf ("%s: cannot name node\n", d->nodename); NG_NODE_UNREF (d->node); continue; } d->queue.ifq_maxlen = ifqmaxlen; d->hi_queue.ifq_maxlen = ifqmaxlen; mtx_init (&d->queue.ifq_mtx, "cp_queue", NULL, MTX_DEF); mtx_init (&d->hi_queue.ifq_mtx, "cp_queue_hi", NULL, MTX_DEF); #else /*NETGRAPH*/ d->ifp = if_alloc(IFT_PPP); if (d->ifp == NULL) { printf ("%s: cannot if_alloc() interface\n", d->name); continue; } d->ifp->if_softc = d; if_initname (d->ifp, "cp", b->num * NCHAN + c->num); d->ifp->if_mtu = PP_MTU; d->ifp->if_flags = IFF_POINTOPOINT | IFF_MULTICAST; d->ifp->if_ioctl = cp_sioctl; d->ifp->if_start = cp_ifstart; d->ifp->if_init = cp_initialize; d->queue.ifq_maxlen = NRBUF; mtx_init (&d->queue.ifq_mtx, "cp_queue", NULL, MTX_DEF); sppp_attach (d->ifp); if_attach (d->ifp); IFP2SP(d->ifp)->pp_tlf = cp_tlf; IFP2SP(d->ifp)->pp_tls = cp_tls; /* If BPF is in the kernel, call the attach for it. * The header size of PPP or Cisco/HDLC is 4 bytes. */ bpfattach (d->ifp, DLT_PPP, 4); #endif /*NETGRAPH*/ cp_start_e1 (c); cp_start_chan (c, 1, 1, d->dmamem.virt, d->dmamem.phys); /* Register callback functions. */ cp_register_transmit (c, &cp_transmit); cp_register_receive (c, &cp_receive); cp_register_error (c, &cp_error); d->devt = make_dev (&cp_cdevsw, b->num*NCHAN+c->num, UID_ROOT, GID_WHEEL, 0600, "cp%d", b->num*NCHAN+c->num); } CP_LOCK (bd); b->sys = bd; adapter[unit] = b; CP_UNLOCK (bd); splx (s); return 0; } static int cp_detach (device_t dev) { bdrv_t *bd = device_get_softc (dev); cp_board_t *b = bd->board; cp_chan_t *c; int s; KASSERT (mtx_initialized (&bd->cp_mtx), ("cp mutex not initialized")); s = splimp (); CP_LOCK (bd); /* Check if the device is busy (open). */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; if (d->running) { CP_UNLOCK (bd); splx (s); return EBUSY; } } /* Ok, we can unload driver */ /* At first we should stop all channels */ for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; cp_stop_chan (c); cp_stop_e1 (c); cp_set_dtr (d->chan, 0); cp_set_rts (d->chan, 0); } /* Reset the adapter. */ cp_destroy = 1; cp_interrupt_poll (b, 1); cp_led_off (b); cp_reset (b, 0 ,0); callout_stop (&led_timo[b->num]); /* Disable the interrupt request. */ bus_teardown_intr (dev, bd->cp_irq, bd->cp_intrhand); for (c=b->chan; cchan+NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; callout_stop (&d->timeout_handle); #ifndef NETGRAPH /* Detach from the packet filter list of interfaces. */ bpfdetach (d->ifp); /* Detach from the sync PPP list. */ sppp_detach (d->ifp); /* Detach from the system list of interfaces. 
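cp_attach() above flattens the (board, channel) pair into one unit number, b->num*NCHAN + c->num, and uses it consistently: as the index into the channel[] array, as the character-device minor handed to make_dev(), and as the cpN interface unit; cp_open() and cp_ioctl() later recover the softc simply as channel[dev2unit(dev)]. A tiny userland sketch of the mapping (the NCHAN value below is only an example, the real constant comes from the driver headers):

#include <stdio.h>

#define NCHAN 4		/* channels per board (illustrative value only) */

static int to_unit(int board, int chan)	{ return board * NCHAN + chan; }
static int unit_board(int unit)		{ return unit / NCHAN; }
static int unit_chan(int unit)		{ return unit % NCHAN; }

int main(void)
{
	int unit = to_unit(1, 2);	/* board 1, channel 2 */

	printf("cp%d.%d -> /dev/cp%d -> board %d chan %d\n",
	    1, 2, unit, unit_board(unit), unit_chan(unit));
	return 0;
}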
*/ if_detach (d->ifp); if_free (d->ifp); IF_DRAIN (&d->queue); mtx_destroy (&d->queue.ifq_mtx); #else if (d->node) { ng_rmnode_self (d->node); NG_NODE_UNREF (d->node); d->node = NULL; } mtx_destroy (&d->queue.ifq_mtx); mtx_destroy (&d->hi_queue.ifq_mtx); #endif destroy_dev (d->devt); } b->sys = NULL; CP_UNLOCK (bd); bus_release_resource (dev, SYS_RES_IRQ, 0, bd->cp_irq); bus_release_resource (dev, SYS_RES_MEMORY, PCIR_BAR(0), bd->cp_res); CP_LOCK (bd); cp_led_off (b); CP_UNLOCK (bd); callout_drain (&led_timo[b->num]); splx (s); for (c = b->chan; c < b->chan + NCHAN; ++c) { drv_t *d = (drv_t*) c->sys; if (! d || ! d->chan->type) continue; callout_drain (&d->timeout_handle); channel [b->num*NCHAN + c->num] = 0; /* Deallocate buffers. */ cp_bus_dma_mem_free (&d->dmamem); } adapter [b->num] = 0; cp_bus_dma_mem_free (&bd->dmamem); free (b, M_DEVBUF); mtx_destroy (&bd->cp_mtx); return 0; } #ifndef NETGRAPH static void cp_ifstart (struct ifnet *ifp) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; CP_LOCK (bd); cp_start (d); CP_UNLOCK (bd); } static void cp_tlf (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CP_DEBUG2 (d, ("cp_tlf\n")); /* XXXRIK: Don't forget to protect them by LOCK, or kill them. */ /* cp_set_dtr (d->chan, 0);*/ /* cp_set_rts (d->chan, 0);*/ if (!(sp->pp_flags & PP_FR) && !(d->ifp->if_flags & PP_CISCO)) sp->pp_down (sp); } static void cp_tls (struct sppp *sp) { drv_t *d = SP2IFP(sp)->if_softc; CP_DEBUG2 (d, ("cp_tls\n")); if (!(sp->pp_flags & PP_FR) && !(d->ifp->if_flags & PP_CISCO)) sp->pp_up (sp); } /* * Process an ioctl request. */ static int cp_sioctl (struct ifnet *ifp, u_long cmd, caddr_t data) { drv_t *d = ifp->if_softc; bdrv_t *bd = d->board->sys; int error, s, was_up, should_be_up; was_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; error = sppp_ioctl (ifp, cmd, data); if (error) return error; if (! (ifp->if_flags & IFF_DEBUG)) d->chan->debug = 0; else d->chan->debug = d->chan->debug_shadow; switch (cmd) { default: CP_DEBUG2 (d, ("ioctl 0x%lx\n", cmd)); return 0; case SIOCADDMULTI: CP_DEBUG2 (d, ("ioctl SIOCADDMULTI\n")); return 0; case SIOCDELMULTI: CP_DEBUG2 (d, ("ioctl SIOCDELMULTI\n")); return 0; case SIOCSIFFLAGS: CP_DEBUG2 (d, ("ioctl SIOCSIFFLAGS\n")); break; case SIOCSIFADDR: CP_DEBUG2 (d, ("ioctl SIOCSIFADDR\n")); break; } /* We get here only in case of SIFFLAGS or SIFADDR. */ s = splimp (); CP_LOCK (bd); should_be_up = (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0; if (! was_up && should_be_up) { /* Interface goes up -- start it. */ cp_up (d); cp_start (d); } else if (was_up && ! should_be_up) { /* Interface is going down -- stop it. */ /* if ((IFP2SP(ifp)->pp_flags & PP_FR) || (ifp->if_flags & PP_CISCO))*/ cp_down (d); } CP_DEBUG (d, ("ioctl 0x%lx p4\n", cmd)); CP_UNLOCK (bd); splx (s); return 0; } /* * Initialization of interface. * It seems to be never called by upper level? */ static void cp_initialize (void *softc) { drv_t *d = softc; CP_DEBUG (d, ("cp_initialize\n")); } #endif /*NETGRAPH*/ /* * Stop the interface. Called on splimp(). */ static void cp_down (drv_t *d) { CP_DEBUG (d, ("cp_down\n")); /* Interface is going down -- stop it. */ cp_set_dtr (d->chan, 0); cp_set_rts (d->chan, 0); d->running = 0; callout_stop (&d->timeout_handle); } /* * Start the interface. Called on splimp(). */ static void cp_up (drv_t *d) { CP_DEBUG (d, ("cp_up\n")); cp_set_dtr (d->chan, 1); cp_set_rts (d->chan, 1); d->running = 1; } /* * Start output on the interface. 
Get another datagram to send * off of the interface queue, and copy it to the interface * before starting the output. */ static void cp_send (drv_t *d) { struct mbuf *m; u_short len; CP_DEBUG2 (d, ("cp_send, tn=%d te=%d\n", d->chan->tn, d->chan->te)); /* No output if the interface is down. */ if (! d->running) return; /* No output if the modem is off. */ if (! (d->chan->lloop || d->chan->type != T_SERIAL || cp_get_dsr (d->chan))) return; while (cp_transmit_space (d->chan)) { /* Get the packet to send. */ #ifdef NETGRAPH IF_DEQUEUE (&d->hi_queue, m); if (! m) IF_DEQUEUE (&d->queue, m); #else m = sppp_dequeue (d->ifp); #endif if (! m) return; #ifndef NETGRAPH BPF_MTAP (d->ifp, m); #endif len = m_length (m, NULL); if (len >= BUFSZ) printf ("%s: too long packet: %d bytes: ", d->name, len); else if (! m->m_next) cp_send_packet (d->chan, (u_char*) mtod (m, caddr_t), len, 0); else { u_char *buf = d->chan->tbuf[d->chan->te]; m_copydata (m, 0, len, buf); cp_send_packet (d->chan, buf, len, 0); } m_freem (m); /* Set up transmit timeout, if the transmit ring is not empty.*/ d->timeout = 10; } #ifndef NETGRAPH d->ifp->if_drv_flags |= IFF_DRV_OACTIVE; #endif } /* * Start output on the interface. * Always called on splimp(). */ static void cp_start (drv_t *d) { if (d->running) { if (! d->chan->dtr) cp_set_dtr (d->chan, 1); if (! d->chan->rts) cp_set_rts (d->chan, 1); cp_send (d); callout_reset (&d->timeout_handle, hz, cp_watchdog_timer, d); } } /* * Handle transmit timeouts. * Recover after lost transmit interrupts. * Always called on splimp(). */ static void cp_watchdog (drv_t *d) { CP_DEBUG (d, ("device timeout\n")); if (d->running) { cp_stop_chan (d->chan); cp_stop_e1 (d->chan); cp_start_e1 (d->chan); cp_start_chan (d->chan, 1, 1, 0, 0); cp_set_dtr (d->chan, 1); cp_set_rts (d->chan, 1); cp_start (d); } } static void cp_watchdog_timer (void *arg) { drv_t *d = arg; bdrv_t *bd = d->board->sys; CP_LOCK (bd); if (d->timeout == 1) cp_watchdog (d); if (d->timeout) d->timeout--; callout_reset (&d->timeout_handle, hz, cp_watchdog_timer, d); CP_UNLOCK (bd); } static void cp_transmit (cp_chan_t *c, void *attachment, int len) { drv_t *d = c->sys; d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OPACKETS, 1); d->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #endif cp_start (d); } static void cp_receive (cp_chan_t *c, unsigned char *data, int len) { drv_t *d = c->sys; struct mbuf *m; #ifdef NETGRAPH int error; #endif if (! d->running) return; m = makembuf (data, len); if (! m) { CP_DEBUG (d, ("no memory for packet\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IQDROPS, 1); #endif return; } if (c->debug > 1) m_print (m, 0); #ifdef NETGRAPH m->m_pkthdr.rcvif = 0; NG_SEND_DATA_ONLY (error, d->hook, m); #else if_inc_counter(d->ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = d->ifp; /* Check if there's a BPF listener on this interface. * If so, hand off the raw packet to bpf. 
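The "no output if the modem is off" test near the top of cp_send() above is a negated disjunction and easy to misread: output proceeds when local loopback is enabled, or the channel is not a plain serial channel, or DSR is asserted, and is suppressed only when all three conditions are false. The same gate, restated as a hypothetical predicate that is not in the source (it reuses the driver's own types and getters):

/*
 * Hypothetical restatement of the transmit gate in cp_send().
 * Returns non-zero when output may proceed.
 */
static int
cp_may_transmit (cp_chan_t *c)
{
	if (c->lloop)			/* local loopback: no modem needed */
		return 1;
	if (c->type != T_SERIAL)	/* E1/E3/G.703 etc.: no DSR to check */
		return 1;
	return cp_get_dsr (c);		/* plain serial: require DSR */
}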
*/ BPF_MTAP(d->ifp, m); IF_ENQUEUE (&d->queue, m); #endif } static void cp_error (cp_chan_t *c, int data) { drv_t *d = c->sys; switch (data) { case CP_FRAME: CP_DEBUG (d, ("frame error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CP_CRC: CP_DEBUG (d, ("crc error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CP_OVERRUN: CP_DEBUG (d, ("overrun error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_COLLISIONS, 1); if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CP_OVERFLOW: CP_DEBUG (d, ("overflow error\n")); #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_IERRORS, 1); #endif break; case CP_UNDERRUN: CP_DEBUG (d, ("underrun error\n")); d->timeout = 0; #ifndef NETGRAPH if_inc_counter(d->ifp, IFCOUNTER_OERRORS, 1); d->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; #endif cp_start (d); break; default: CP_DEBUG (d, ("error #%d\n", data)); break; } } /* * You also need read, write, open, close routines. * This should get you started */ static int cp_open (struct cdev *dev, int oflags, int devtype, struct thread *td) { int unit = dev2unit (dev); drv_t *d; if (unit >= NBRD*NCHAN || ! (d = channel[unit])) return ENXIO; CP_DEBUG2 (d, ("cp_open\n")); return 0; } /* * Only called on the LAST close. */ static int cp_close (struct cdev *dev, int fflag, int devtype, struct thread *td) { drv_t *d = channel [dev2unit (dev)]; CP_DEBUG2 (d, ("cp_close\n")); return 0; } static int cp_modem_status (cp_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd = d->board->sys; int status, s; status = d->running ? TIOCM_LE : 0; s = splimp (); CP_LOCK (bd); if (cp_get_cd (c)) status |= TIOCM_CD; if (cp_get_cts (c)) status |= TIOCM_CTS; if (cp_get_dsr (c)) status |= TIOCM_DSR; if (c->dtr) status |= TIOCM_DTR; if (c->rts) status |= TIOCM_RTS; CP_UNLOCK (bd); splx (s); return status; } static int cp_ioctl (struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td) { drv_t *d = channel [dev2unit (dev)]; bdrv_t *bd = d->board->sys; cp_chan_t *c = d->chan; struct serial_statistics *st; struct e1_statistics *opte1; struct e3_statistics *opte3; int error, s; char mask[16]; switch (cmd) { case SERIAL_GETREGISTERED: CP_DEBUG2 (d, ("ioctl: getregistered\n")); bzero (mask, sizeof(mask)); for (s=0; sifp)->pp_flags & PP_FR) ? "fr" : (d->ifp->if_flags & PP_CISCO) ? "cisco" : "ppp"); return 0; case SERIAL_SETPROTO: CP_DEBUG2 (d, ("ioctl: setproto\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (d->ifp->if_drv_flags & IFF_DRV_RUNNING) return EBUSY; if (! strcmp ("cisco", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~(PP_FR); IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; d->ifp->if_flags |= PP_CISCO; #if PP_FR != 0 } else if (! strcmp ("fr", (char*)data)) { d->ifp->if_flags &= ~(PP_CISCO); IFP2SP(d->ifp)->pp_flags |= PP_FR | PP_KEEPALIVE; #endif } else if (! strcmp ("ppp", (char*)data)) { IFP2SP(d->ifp)->pp_flags &= ~PP_FR; IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; d->ifp->if_flags &= ~(PP_CISCO); } else return EINVAL; return 0; case SERIAL_GETKEEPALIVE: CP_DEBUG2 (d, ("ioctl: getkeepalive\n")); if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; *(int*)data = (IFP2SP(d->ifp)->pp_flags & PP_KEEPALIVE) ? 1 : 0; return 0; case SERIAL_SETKEEPALIVE: CP_DEBUG2 (d, ("ioctl: setkeepalive\n")); /* Only for superuser! 
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; if ((IFP2SP(d->ifp)->pp_flags & PP_FR) || (d->ifp->if_flags & PP_CISCO)) return EINVAL; s = splimp (); CP_LOCK (bd); if (*(int*)data) IFP2SP(d->ifp)->pp_flags |= PP_KEEPALIVE; else IFP2SP(d->ifp)->pp_flags &= ~PP_KEEPALIVE; CP_UNLOCK (bd); splx (s); return 0; #endif /*NETGRAPH*/ case SERIAL_GETMODE: CP_DEBUG2 (d, ("ioctl: getmode\n")); *(int*)data = SERIAL_HDLC; return 0; case SERIAL_SETMODE: /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (*(int*)data != SERIAL_HDLC) return EINVAL; return 0; case SERIAL_GETCFG: CP_DEBUG2 (d, ("ioctl: getcfg\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(char*)data = c->board->mux ? 'c' : 'a'; return 0; case SERIAL_SETCFG: CP_DEBUG2 (d, ("ioctl: setcfg\n")); error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_mux (c->board, *((char*)data) == 'c'); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSTAT: CP_DEBUG2 (d, ("ioctl: getstat\n")); st = (struct serial_statistics*) data; st->rintr = c->rintr; st->tintr = c->tintr; st->mintr = 0; st->ibytes = c->ibytes; st->ipkts = c->ipkts; st->obytes = c->obytes; st->opkts = c->opkts; st->ierrs = c->overrun + c->frame + c->crc; st->oerrs = c->underrun; return 0; case SERIAL_GETESTAT: CP_DEBUG2 (d, ("ioctl: getestat\n")); if (c->type != T_E1 && c->type != T_G703) return EINVAL; opte1 = (struct e1_statistics*) data; opte1->status = c->status; opte1->cursec = c->cursec; opte1->totsec = c->totsec + c->cursec; opte1->currnt.bpv = c->currnt.bpv; opte1->currnt.fse = c->currnt.fse; opte1->currnt.crce = c->currnt.crce; opte1->currnt.rcrce = c->currnt.rcrce; opte1->currnt.uas = c->currnt.uas; opte1->currnt.les = c->currnt.les; opte1->currnt.es = c->currnt.es; opte1->currnt.bes = c->currnt.bes; opte1->currnt.ses = c->currnt.ses; opte1->currnt.oofs = c->currnt.oofs; opte1->currnt.css = c->currnt.css; opte1->currnt.dm = c->currnt.dm; opte1->total.bpv = c->total.bpv + c->currnt.bpv; opte1->total.fse = c->total.fse + c->currnt.fse; opte1->total.crce = c->total.crce + c->currnt.crce; opte1->total.rcrce = c->total.rcrce + c->currnt.rcrce; opte1->total.uas = c->total.uas + c->currnt.uas; opte1->total.les = c->total.les + c->currnt.les; opte1->total.es = c->total.es + c->currnt.es; opte1->total.bes = c->total.bes + c->currnt.bes; opte1->total.ses = c->total.ses + c->currnt.ses; opte1->total.oofs = c->total.oofs + c->currnt.oofs; opte1->total.css = c->total.css + c->currnt.css; opte1->total.dm = c->total.dm + c->currnt.dm; for (s=0; s<48; ++s) { opte1->interval[s].bpv = c->interval[s].bpv; opte1->interval[s].fse = c->interval[s].fse; opte1->interval[s].crce = c->interval[s].crce; opte1->interval[s].rcrce = c->interval[s].rcrce; opte1->interval[s].uas = c->interval[s].uas; opte1->interval[s].les = c->interval[s].les; opte1->interval[s].es = c->interval[s].es; opte1->interval[s].bes = c->interval[s].bes; opte1->interval[s].ses = c->interval[s].ses; opte1->interval[s].oofs = c->interval[s].oofs; opte1->interval[s].css = c->interval[s].css; opte1->interval[s].dm = c->interval[s].dm; } return 0; case SERIAL_GETE3STAT: CP_DEBUG2 (d, ("ioctl: gete3stat\n")); if (c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; opte3 = (struct e3_statistics*) data; opte3->status = c->e3status; opte3->cursec = (c->e3csec_5 * 2 + 1) / 10; opte3->totsec = c->e3tsec + opte3->cursec; opte3->ccv = c->e3ccv; opte3->tcv = c->e3tcv + opte3->ccv; 
for (s = 0; s < 48; ++s) { opte3->icv[s] = c->e3icv[s]; } return 0; case SERIAL_CLRSTAT: CP_DEBUG2 (d, ("ioctl: clrstat\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; c->rintr = 0; c->tintr = 0; c->ibytes = 0; c->obytes = 0; c->ipkts = 0; c->opkts = 0; c->overrun = 0; c->frame = 0; c->crc = 0; c->underrun = 0; bzero (&c->currnt, sizeof (c->currnt)); bzero (&c->total, sizeof (c->total)); bzero (c->interval, sizeof (c->interval)); c->e3ccv = 0; c->e3tcv = 0; bzero (c->e3icv, sizeof (c->e3icv)); return 0; case SERIAL_GETBAUD: CP_DEBUG2 (d, ("ioctl: getbaud\n")); *(long*)data = c->baud; return 0; case SERIAL_SETBAUD: CP_DEBUG2 (d, ("ioctl: setbaud\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_baud (c, *(long*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETLOOP: CP_DEBUG2 (d, ("ioctl: getloop\n")); *(int*)data = c->lloop; return 0; case SERIAL_SETLOOP: CP_DEBUG2 (d, ("ioctl: setloop\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_lloop (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDPLL: CP_DEBUG2 (d, ("ioctl: getdpll\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->dpll; return 0; case SERIAL_SETDPLL: CP_DEBUG2 (d, ("ioctl: setdpll\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_dpll (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETNRZI: CP_DEBUG2 (d, ("ioctl: getnrzi\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->nrzi; return 0; case SERIAL_SETNRZI: CP_DEBUG2 (d, ("ioctl: setnrzi\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_nrzi (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDEBUG: CP_DEBUG2 (d, ("ioctl: getdebug\n")); *(int*)data = d->chan->debug; return 0; case SERIAL_SETDEBUG: CP_DEBUG2 (d, ("ioctl: setdebug\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; #ifndef NETGRAPH /* * The debug_shadow is always greater than zero for logic * simplicity. For switching debug off the IFF_DEBUG is * responsible. */ d->chan->debug_shadow = (*(int*)data) ? (*(int*)data) : 1; if (d->ifp->if_flags & IFF_DEBUG) d->chan->debug = d->chan->debug_shadow; #else d->chan->debug = *(int*)data; #endif return 0; case SERIAL_GETHIGAIN: CP_DEBUG2 (d, ("ioctl: gethigain\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->higain; return 0; case SERIAL_SETHIGAIN: CP_DEBUG2 (d, ("ioctl: sethigain\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_higain (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETPHONY: CP_DEBUG2 (d, ("ioctl: getphony\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->phony; return 0; case SERIAL_SETPHONY: CP_DEBUG2 (d, ("ioctl: setphony\n")); /* Only for superuser! 
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_phony (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETUNFRAM: CP_DEBUG2 (d, ("ioctl: getunfram\n")); if (c->type != T_E1) return EINVAL; *(int*)data = c->unfram; return 0; case SERIAL_SETUNFRAM: CP_DEBUG2 (d, ("ioctl: setunfram\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_unfram (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETSCRAMBLER: CP_DEBUG2 (d, ("ioctl: getscrambler\n")); if (c->type != T_G703 && !c->unfram) return EINVAL; *(int*)data = c->scrambler; return 0; case SERIAL_SETSCRAMBLER: CP_DEBUG2 (d, ("ioctl: setscrambler\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_G703 && !c->unfram) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_scrambler (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETMONITOR: CP_DEBUG2 (d, ("ioctl: getmonitor\n")); if (c->type != T_E1 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; *(int*)data = c->monitor; return 0; case SERIAL_SETMONITOR: CP_DEBUG2 (d, ("ioctl: setmonitor\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_monitor (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETUSE16: CP_DEBUG2 (d, ("ioctl: getuse16\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->use16; return 0; case SERIAL_SETUSE16: CP_DEBUG2 (d, ("ioctl: setuse16\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_use16 (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCRC4: CP_DEBUG2 (d, ("ioctl: getcrc4\n")); if (c->type != T_E1 || c->unfram) return EINVAL; *(int*)data = c->crc4; return 0; case SERIAL_SETCRC4: CP_DEBUG2 (d, ("ioctl: setcrc4\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_crc4 (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCLK: CP_DEBUG2 (d, ("ioctl: getclk\n")); if (c->type != T_E1 && c->type != T_G703 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; switch (c->gsyn) { default: *(int*)data = E1CLK_INTERNAL; break; case GSYN_RCV: *(int*)data = E1CLK_RECEIVE; break; case GSYN_RCV0: *(int*)data = E1CLK_RECEIVE_CHAN0; break; case GSYN_RCV1: *(int*)data = E1CLK_RECEIVE_CHAN1; break; case GSYN_RCV2: *(int*)data = E1CLK_RECEIVE_CHAN2; break; case GSYN_RCV3: *(int*)data = E1CLK_RECEIVE_CHAN3; break; } return 0; case SERIAL_SETCLK: CP_DEBUG2 (d, ("ioctl: setclk\n")); /* Only for superuser! 
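 * The E1CLK_* value supplied by the caller is mapped to the matching
 * GSYN_* transmit clock source below; unrecognized values fall back to
 * the internal clock (GSYN_INT).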
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_E1 && c->type != T_G703 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; s = splimp (); CP_LOCK (bd); switch (*(int*)data) { default: cp_set_gsyn (c, GSYN_INT); break; case E1CLK_RECEIVE: cp_set_gsyn (c, GSYN_RCV); break; case E1CLK_RECEIVE_CHAN0: cp_set_gsyn (c, GSYN_RCV0); break; case E1CLK_RECEIVE_CHAN1: cp_set_gsyn (c, GSYN_RCV1); break; case E1CLK_RECEIVE_CHAN2: cp_set_gsyn (c, GSYN_RCV2); break; case E1CLK_RECEIVE_CHAN3: cp_set_gsyn (c, GSYN_RCV3); break; } CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETTIMESLOTS: CP_DEBUG2 (d, ("ioctl: gettimeslots\n")); if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; *(u_long*)data = c->ts; return 0; case SERIAL_SETTIMESLOTS: CP_DEBUG2 (d, ("ioctl: settimeslots\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if ((c->type != T_E1 || c->unfram) && c->type != T_DATA) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_ts (c, *(u_long*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETINVCLK: CP_DEBUG2 (d, ("ioctl: getinvclk\n")); #if 1 return EINVAL; #else if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->invtxc; return 0; #endif case SERIAL_SETINVCLK: CP_DEBUG2 (d, ("ioctl: setinvclk\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_invtxc (c, *(int*)data); cp_set_invrxc (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETINVTCLK: CP_DEBUG2 (d, ("ioctl: getinvtclk\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->invtxc; return 0; case SERIAL_SETINVTCLK: CP_DEBUG2 (d, ("ioctl: setinvtclk\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_invtxc (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETINVRCLK: CP_DEBUG2 (d, ("ioctl: getinvrclk\n")); if (c->type != T_SERIAL) return EINVAL; *(int*)data = c->invrxc; return 0; case SERIAL_SETINVRCLK: CP_DEBUG2 (d, ("ioctl: setinvrclk\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); cp_set_invrxc (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETLEVEL: CP_DEBUG2 (d, ("ioctl: getlevel\n")); if (c->type != T_G703) return EINVAL; s = splimp (); CP_LOCK (bd); *(int*)data = cp_get_lq (c); CP_UNLOCK (bd); splx (s); return 0; #if 0 case SERIAL_RESET: CP_DEBUG2 (d, ("ioctl: reset\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_reset (c->board, 0, 0); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_HARDRESET: CP_DEBUG2 (d, ("ioctl: hardreset\n")); /* Only for superuser! 
*/ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); /* hard_reset (c->board); */ CP_UNLOCK (bd); splx (s); return 0; #endif case SERIAL_GETCABLE: CP_DEBUG2 (d, ("ioctl: getcable\n")); if (c->type != T_SERIAL) return EINVAL; s = splimp (); CP_LOCK (bd); *(int*)data = cp_get_cable (c); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETDIR: CP_DEBUG2 (d, ("ioctl: getdir\n")); if (c->type != T_E1 && c->type != T_DATA) return EINVAL; *(int*)data = c->dir; return 0; case SERIAL_SETDIR: CP_DEBUG2 (d, ("ioctl: setdir\n")); /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_dir (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETRLOOP: CP_DEBUG2 (d, ("ioctl: getrloop\n")); if (c->type != T_G703 && c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; *(int*)data = cp_get_rloop (c); return 0; case SERIAL_SETRLOOP: CP_DEBUG2 (d, ("ioctl: setloop\n")); if (c->type != T_E3 && c->type != T_T3 && c->type != T_STS1) return EINVAL; /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_rloop (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case SERIAL_GETCABLEN: CP_DEBUG2 (d, ("ioctl: getcablen\n")); if (c->type != T_T3 && c->type != T_STS1) return EINVAL; *(int*)data = c->cablen; return 0; case SERIAL_SETCABLEN: CP_DEBUG2 (d, ("ioctl: setloop\n")); if (c->type != T_T3 && c->type != T_STS1) return EINVAL; /* Only for superuser! */ error = priv_check (td, PRIV_DRIVER); if (error) return error; s = splimp (); CP_LOCK (bd); cp_set_cablen (c, *(int*)data); CP_UNLOCK (bd); splx (s); return 0; case TIOCSDTR: /* Set DTR */ s = splimp (); CP_LOCK (bd); cp_set_dtr (c, 1); CP_UNLOCK (bd); splx (s); return 0; case TIOCCDTR: /* Clear DTR */ s = splimp (); CP_LOCK (bd); cp_set_dtr (c, 0); CP_UNLOCK (bd); splx (s); return 0; case TIOCMSET: /* Set DTR/RTS */ s = splimp (); CP_LOCK (bd); cp_set_dtr (c, (*(int*)data & TIOCM_DTR) ? 1 : 0); cp_set_rts (c, (*(int*)data & TIOCM_RTS) ? 
1 : 0); CP_UNLOCK (bd); splx (s); return 0; case TIOCMBIS: /* Add DTR/RTS */ s = splimp (); CP_LOCK (bd); if (*(int*)data & TIOCM_DTR) cp_set_dtr (c, 1); if (*(int*)data & TIOCM_RTS) cp_set_rts (c, 1); CP_UNLOCK (bd); splx (s); return 0; case TIOCMBIC: /* Clear DTR/RTS */ s = splimp (); CP_LOCK (bd); if (*(int*)data & TIOCM_DTR) cp_set_dtr (c, 0); if (*(int*)data & TIOCM_RTS) cp_set_rts (c, 0); CP_UNLOCK (bd); splx (s); return 0; case TIOCMGET: /* Get modem status */ *(int*)data = cp_modem_status (c); return 0; } return ENOTTY; } #ifdef NETGRAPH static int ng_cp_constructor (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); CP_DEBUG (d, ("Constructor\n")); return EINVAL; } static int ng_cp_newhook (node_p node, hook_p hook, const char *name) { int s; drv_t *d = NG_NODE_PRIVATE (node); bdrv_t *bd = d->board->sys; CP_DEBUG (d, ("Newhook\n")); /* Attach debug hook */ if (strcmp (name, NG_CP_HOOK_DEBUG) == 0) { NG_HOOK_SET_PRIVATE (hook, NULL); d->debug_hook = hook; return 0; } /* Check for raw hook */ if (strcmp (name, NG_CP_HOOK_RAW) != 0) return EINVAL; NG_HOOK_SET_PRIVATE (hook, d); d->hook = hook; s = splimp (); CP_LOCK (bd); cp_up (d); CP_UNLOCK (bd); splx (s); return 0; } static char *format_timeslots (u_long s) { static char buf [100]; char *p = buf; int i; for (i=1; i<32; ++i) if ((s >> i) & 1) { int prev = (i > 1) & (s >> (i-1)); int next = (i < 31) & (s >> (i+1)); if (prev) { if (next) continue; *p++ = '-'; } else if (p > buf) *p++ = ','; if (i >= 10) *p++ = '0' + i / 10; *p++ = '0' + i % 10; } *p = 0; return buf; } static int print_modems (char *s, cp_chan_t *c, int need_header) { int status = cp_modem_status (c); int length = 0; if (need_header) length += sprintf (s + length, " LE DTR DSR RTS CTS CD\n"); length += sprintf (s + length, "%4s %4s %4s %4s %4s %4s\n", status & TIOCM_LE ? "On" : "-", status & TIOCM_DTR ? "On" : "-", status & TIOCM_DSR ? "On" : "-", status & TIOCM_RTS ? "On" : "-", status & TIOCM_CTS ? "On" : "-", status & TIOCM_CD ? "On" : "-"); return length; } static int print_stats (char *s, cp_chan_t *c, int need_header) { int length = 0; if (need_header) length += sprintf (s + length, " Rintr Tintr Mintr Ibytes Ipkts Ierrs Obytes Opkts Oerrs\n"); length += sprintf (s + length, "%7ld %7ld %7ld %8lu %7ld %7ld %8lu %7ld %7ld\n", c->rintr, c->tintr, 0l, (unsigned long) c->ibytes, c->ipkts, c->overrun + c->frame + c->crc, (unsigned long) c->obytes, c->opkts, c->underrun); return length; } static char *format_e1_status (u_char status) { static char buf [80]; if (status & E1_NOALARM) return "Ok"; buf[0] = 0; if (status & E1_LOS) strcat (buf, ",LOS"); if (status & E1_AIS) strcat (buf, ",AIS"); if (status & E1_LOF) strcat (buf, ",LOF"); if (status & E1_LOMF) strcat (buf, ",LOMF"); if (status & E1_FARLOF) strcat (buf, ",FARLOF"); if (status & E1_AIS16) strcat (buf, ",AIS16"); if (status & E1_FARLOMF) strcat (buf, ",FARLOMF"); if (status & E1_TSTREQ) strcat (buf, ",TSTREQ"); if (status & E1_TSTERR) strcat (buf, ",TSTERR"); if (buf[0] == ',') return buf+1; return "Unknown"; } static int print_frac (char *s, int leftalign, u_long numerator, u_long divider) { int n, length = 0; if (numerator < 1 || divider < 1) { length += sprintf (s+length, leftalign ? "/- " : " -"); return length; } n = (int) (0.5 + 1000.0 * numerator / divider); if (n < 1000) { length += sprintf (s+length, leftalign ? "/.%-3d" : " .%03d", n); return length; } *(s + length) = leftalign ? 
'/' : ' '; length ++; if (n >= 1000000) n = (n+500) / 1000 * 1000; else if (n >= 100000) n = (n+50) / 100 * 100; else if (n >= 10000) n = (n+5) / 10 * 10; switch (n) { case 1000: length += printf (s+length, ".999"); return length; case 10000: n = 9990; break; case 100000: n = 99900; break; case 1000000: n = 999000; break; } if (n < 10000) length += sprintf (s+length, "%d.%d", n/1000, n/10%100); else if (n < 100000) length += sprintf (s+length, "%d.%d", n/1000, n/100%10); else if (n < 1000000) length += sprintf (s+length, "%d.", n/1000); else length += sprintf (s+length, "%d", n/1000); return length; } static int print_e1_stats (char *s, cp_chan_t *c) { struct e1_counters total; u_long totsec; int length = 0; totsec = c->totsec + c->cursec; total.bpv = c->total.bpv + c->currnt.bpv; total.fse = c->total.fse + c->currnt.fse; total.crce = c->total.crce + c->currnt.crce; total.rcrce = c->total.rcrce + c->currnt.rcrce; total.uas = c->total.uas + c->currnt.uas; total.les = c->total.les + c->currnt.les; total.es = c->total.es + c->currnt.es; total.bes = c->total.bes + c->currnt.bes; total.ses = c->total.ses + c->currnt.ses; total.oofs = c->total.oofs + c->currnt.oofs; total.css = c->total.css + c->currnt.css; total.dm = c->total.dm + c->currnt.dm; length += sprintf (s + length, " Unav/Degr Bpv/Fsyn CRC/RCRC Err/Lerr Sev/Bur Oof/Slp Status\n"); /* Unavailable seconds, degraded minutes */ length += print_frac (s + length, 0, c->currnt.uas, c->cursec); length += print_frac (s + length, 1, 60 * c->currnt.dm, c->cursec); /* Bipolar violations, frame sync errors */ length += print_frac (s + length, 0, c->currnt.bpv, c->cursec); length += print_frac (s + length, 1, c->currnt.fse, c->cursec); /* CRC errors, remote CRC errors (E-bit) */ length += print_frac (s + length, 0, c->currnt.crce, c->cursec); length += print_frac (s + length, 1, c->currnt.rcrce, c->cursec); /* Errored seconds, line errored seconds */ length += print_frac (s + length, 0, c->currnt.es, c->cursec); length += print_frac (s + length, 1, c->currnt.les, c->cursec); /* Severely errored seconds, burst errored seconds */ length += print_frac (s + length, 0, c->currnt.ses, c->cursec); length += print_frac (s + length, 1, c->currnt.bes, c->cursec); /* Out of frame seconds, controlled slip seconds */ length += print_frac (s + length, 0, c->currnt.oofs, c->cursec); length += print_frac (s + length, 1, c->currnt.css, c->cursec); length += sprintf (s + length, " %s\n", format_e1_status (c->status)); /* Print total statistics. 
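 * The totals printed below are the accumulated counters plus the current
 * interval, each normalized by print_frac() over totsec (the accumulated
 * seconds plus the current interval's seconds).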
*/ length += print_frac (s + length, 0, total.uas, totsec); length += print_frac (s + length, 1, 60 * total.dm, totsec); length += print_frac (s + length, 0, total.bpv, totsec); length += print_frac (s + length, 1, total.fse, totsec); length += print_frac (s + length, 0, total.crce, totsec); length += print_frac (s + length, 1, total.rcrce, totsec); length += print_frac (s + length, 0, total.es, totsec); length += print_frac (s + length, 1, total.les, totsec); length += print_frac (s + length, 0, total.ses, totsec); length += print_frac (s + length, 1, total.bes, totsec); length += print_frac (s + length, 0, total.oofs, totsec); length += print_frac (s + length, 1, total.css, totsec); length += sprintf (s + length, " -- Total\n"); return length; } static int print_chan (char *s, cp_chan_t *c) { drv_t *d = c->sys; bdrv_t *bd = d->board->sys; int length = 0; length += sprintf (s + length, "cp%d", c->board->num * NCHAN + c->num); if (d->chan->debug) length += sprintf (s + length, " debug=%d", d->chan->debug); if (c->board->mux) { length += sprintf (s + length, " cfg=C"); } else { length += sprintf (s + length, " cfg=A"); } if (c->baud) length += sprintf (s + length, " %ld", c->baud); else length += sprintf (s + length, " extclock"); if (c->type == T_E1 || c->type == T_G703) switch (c->gsyn) { case GSYN_INT : length += sprintf (s + length, " syn=int"); break; case GSYN_RCV : length += sprintf (s + length, " syn=rcv"); break; case GSYN_RCV0 : length += sprintf (s + length, " syn=rcv0"); break; case GSYN_RCV1 : length += sprintf (s + length, " syn=rcv1"); break; case GSYN_RCV2 : length += sprintf (s + length, " syn=rcv2"); break; case GSYN_RCV3 : length += sprintf (s + length, " syn=rcv3"); break; } if (c->type == T_SERIAL) { length += sprintf (s + length, " dpll=%s", c->dpll ? "on" : "off"); length += sprintf (s + length, " nrzi=%s", c->nrzi ? "on" : "off"); length += sprintf (s + length, " invclk=%s", c->invtxc ? "on" : "off"); } if (c->type == T_E1) length += sprintf (s + length, " higain=%s", c->higain ? "on" : "off"); length += sprintf (s + length, " loop=%s", c->lloop ? "on" : "off"); if (c->type == T_E1) length += sprintf (s + length, " ts=%s", format_timeslots (c->ts)); if (c->type == T_G703) { int lq, x; x = splimp (); CP_LOCK (bd); lq = cp_get_lq (c); CP_UNLOCK (bd); splx (x); length += sprintf (s + length, " (level=-%.1fdB)", lq / 10.0); } length += sprintf (s + length, "\n"); return length; } static int ng_cp_rcvmsg (node_p node, item_p item, hook_p lasthook) { drv_t *d = NG_NODE_PRIVATE (node); struct ng_mesg *msg; struct ng_mesg *resp = NULL; int error = 0; CP_DEBUG (d, ("Rcvmsg\n")); NGI_GET_MSG (item, msg); switch (msg->header.typecookie) { default: error = EINVAL; break; case NGM_CP_COOKIE: printf ("Not implemented yet\n"); error = EINVAL; break; case NGM_GENERIC_COOKIE: switch (msg->header.cmd) { default: error = EINVAL; break; case NGM_TEXT_STATUS: { char *s; int l = 0; int dl = sizeof (struct ng_mesg) + 730; NG_MKRESPONSE (resp, msg, dl, M_NOWAIT); if (! 
resp) { error = ENOMEM; break; } s = (resp)->data; if (d) { l += print_chan (s + l, d->chan); l += print_stats (s + l, d->chan, 1); l += print_modems (s + l, d->chan, 1); l += print_e1_stats (s + l, d->chan); } else l += sprintf (s + l, "Error: node not connect to channel"); strncpy ((resp)->header.cmdstr, "status", NG_CMDSTRSIZ); } break; } break; } NG_RESPOND_MSG (error, node, item, resp); NG_FREE_MSG (msg); return error; } static int ng_cp_rcvdata (hook_p hook, item_p item) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE(hook)); struct mbuf *m; struct ng_tag_prio *ptag; bdrv_t *bd = d->board->sys; struct ifqueue *q; int s; CP_DEBUG2 (d, ("Rcvdata\n")); NGI_GET_M (item, m); NG_FREE_ITEM (item); if (! NG_HOOK_PRIVATE (hook) || ! d) { NG_FREE_M (m); return ENETDOWN; } /* Check for high priority data */ if ((ptag = (struct ng_tag_prio *)m_tag_locate(m, NGM_GENERIC_COOKIE, NG_TAG_PRIO, NULL)) != NULL && (ptag->priority > NG_PRIO_CUTOFF) ) q = &d->hi_queue; else q = &d->queue; s = splimp (); CP_LOCK (bd); IF_LOCK (q); if (_IF_QFULL (q)) { IF_UNLOCK (q); CP_UNLOCK (bd); splx (s); NG_FREE_M (m); return ENOBUFS; } _IF_ENQUEUE (q, m); IF_UNLOCK (q); cp_start (d); CP_UNLOCK (bd); splx (s); return 0; } static int ng_cp_rmnode (node_p node) { drv_t *d = NG_NODE_PRIVATE (node); CP_DEBUG (d, ("Rmnode\n")); if (d && d->running) { bdrv_t *bd = d->board->sys; int s = splimp (); CP_LOCK (bd); cp_down (d); CP_UNLOCK (bd); splx (s); } #ifdef KLD_MODULE if (node->nd_flags & NGF_REALLY_DIE) { NG_NODE_SET_PRIVATE (node, NULL); NG_NODE_UNREF (node); } NG_NODE_REVIVE(node); /* Persistant node */ #endif return 0; } static int ng_cp_connect (hook_p hook) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); if (d) { CP_DEBUG (d, ("Connect\n")); callout_reset (&d->timeout_handle, hz, cp_watchdog_timer, d); } return 0; } static int ng_cp_disconnect (hook_p hook) { drv_t *d = NG_NODE_PRIVATE (NG_HOOK_NODE (hook)); if (d) { CP_DEBUG (d, ("Disconnect\n")); if (NG_HOOK_PRIVATE (hook)) { bdrv_t *bd = d->board->sys; int s = splimp (); CP_LOCK (bd); cp_down (d); CP_UNLOCK (bd); splx (s); } /* If we were wait it than it reasserted now, just stop it. */ if (!callout_drain (&d->timeout_handle)) callout_stop (&d->timeout_handle); } return 0; } #endif static int cp_modevent (module_t mod, int type, void *unused) { static int load_count = 0; switch (type) { case MOD_LOAD: #ifdef NETGRAPH if (ng_newtype (&typestruct)) printf ("Failed to register ng_cp\n"); #endif ++load_count; callout_init (&timeout_handle, 1); callout_reset (&timeout_handle, hz*5, cp_timeout, 0); break; case MOD_UNLOAD: if (load_count == 1) { printf ("Removing device entry for Tau-PCI\n"); #ifdef NETGRAPH ng_rmtype (&typestruct); #endif } /* If we were wait it than it reasserted now, just stop it. * Actually we shouldn't get this condition. But code could be * changed in the future, so just be a litle paranoid. 
*/ if (!callout_drain (&timeout_handle)) callout_stop (&timeout_handle); --load_count; break; case MOD_SHUTDOWN: break; } return 0; } #ifdef NETGRAPH static struct ng_type typestruct = { .version = NG_ABI_VERSION, .name = NG_CP_NODE_TYPE, .constructor = ng_cp_constructor, .rcvmsg = ng_cp_rcvmsg, .shutdown = ng_cp_rmnode, .newhook = ng_cp_newhook, .connect = ng_cp_connect, .rcvdata = ng_cp_rcvdata, .disconnect = ng_cp_disconnect, }; #endif /*NETGRAPH*/ #ifdef NETGRAPH MODULE_DEPEND (ng_cp, netgraph, NG_ABI_VERSION, NG_ABI_VERSION, NG_ABI_VERSION); #else MODULE_DEPEND (cp, sppp, 1, 1, 1); #endif DRIVER_MODULE (cp, pci, cp_driver, cp_devclass, cp_modevent, NULL); MODULE_VERSION (cp, 1); Index: head/sys/dev/drm/drmP.h =================================================================== --- head/sys/dev/drm/drmP.h (revision 295879) +++ head/sys/dev/drm/drmP.h (revision 295880) @@ -1,1012 +1,1011 @@ /* drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*- * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com */ /*- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: * Rickard E. (Rik) Faith * Gareth Hughes * */ #include __FBSDID("$FreeBSD$"); #ifndef _DRM_P_H_ #define _DRM_P_H_ #if defined(_KERNEL) || defined(__KERNEL__) struct drm_device; struct drm_file; #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #if defined(__i386__) || defined(__amd64__) #include #endif #include #include #if _BYTE_ORDER == _BIG_ENDIAN #define __BIG_ENDIAN 1 #else #define __LITTLE_ENDIAN 1 #endif #include #include #include #include #include #include #include #include #include #include #include "dev/drm/drm.h" #include "dev/drm/drm_atomic.h" #include "dev/drm/drm_internal.h" #include "dev/drm/drm_linux_list.h" #include #ifdef DRM_DEBUG #undef DRM_DEBUG #define DRM_DEBUG_DEFAULT_ON 1 #endif /* DRM_DEBUG */ #if defined(DRM_LINUX) && DRM_LINUX && !defined(__amd64__) #include #include #include #include #else /* Either it was defined when it shouldn't be (FreeBSD amd64) or it isn't * supported on this OS yet. 
*/ #undef DRM_LINUX #define DRM_LINUX 0 #endif /* driver capabilities and requirements mask */ #define DRIVER_USE_AGP 0x1 #define DRIVER_REQUIRE_AGP 0x2 #define DRIVER_USE_MTRR 0x4 #define DRIVER_PCI_DMA 0x8 #define DRIVER_SG 0x10 #define DRIVER_HAVE_DMA 0x20 #define DRIVER_HAVE_IRQ 0x40 #define DRIVER_DMA_QUEUE 0x100 #define DRM_HASH_SIZE 16 /* Size of key hash table */ #define DRM_KERNEL_CONTEXT 0 /* Change drm_resctx if changed */ #define DRM_RESERVED_CONTEXTS 1 /* Change drm_resctx if changed */ MALLOC_DECLARE(DRM_MEM_DMA); MALLOC_DECLARE(DRM_MEM_SAREA); MALLOC_DECLARE(DRM_MEM_DRIVER); MALLOC_DECLARE(DRM_MEM_MAGIC); MALLOC_DECLARE(DRM_MEM_IOCTLS); MALLOC_DECLARE(DRM_MEM_MAPS); MALLOC_DECLARE(DRM_MEM_BUFS); MALLOC_DECLARE(DRM_MEM_SEGS); MALLOC_DECLARE(DRM_MEM_PAGES); MALLOC_DECLARE(DRM_MEM_FILES); MALLOC_DECLARE(DRM_MEM_QUEUES); MALLOC_DECLARE(DRM_MEM_CMDS); MALLOC_DECLARE(DRM_MEM_MAPPINGS); MALLOC_DECLARE(DRM_MEM_BUFLISTS); MALLOC_DECLARE(DRM_MEM_AGPLISTS); MALLOC_DECLARE(DRM_MEM_CTXBITMAP); MALLOC_DECLARE(DRM_MEM_SGLISTS); MALLOC_DECLARE(DRM_MEM_DRAWABLE); MALLOC_DECLARE(DRM_MEM_MM); MALLOC_DECLARE(DRM_MEM_HASHTAB); SYSCTL_DECL(_hw_drm); #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) /* Internal types and structures */ #define DRM_ARRAY_SIZE(x) (sizeof(x)/sizeof(x[0])) #define DRM_MIN(a,b) ((a)<(b)?(a):(b)) #define DRM_MAX(a,b) ((a)>(b)?(a):(b)) #define DRM_IF_VERSION(maj, min) (maj << 16 | min) #define __OS_HAS_AGP 1 #define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP) #define DRM_DEV_UID UID_ROOT #define DRM_DEV_GID GID_VIDEO #define wait_queue_head_t atomic_t #define DRM_WAKEUP(w) wakeup((void *)w) #define DRM_WAKEUP_INT(w) wakeup(w) #define DRM_INIT_WAITQUEUE(queue) do {(void)(queue);} while (0) #define DRM_CURPROC curthread #define DRM_STRUCTPROC struct thread #define DRM_SPINTYPE struct mtx #define DRM_SPININIT(l,name) mtx_init(l, name, NULL, MTX_DEF) #define DRM_SPINUNINIT(l) mtx_destroy(l) #define DRM_SPINLOCK(l) mtx_lock(l) #define DRM_SPINUNLOCK(u) mtx_unlock(u) #define DRM_SPINLOCK_IRQSAVE(l, irqflags) do { \ mtx_lock(l); \ (void)irqflags; \ } while (0) #define DRM_SPINUNLOCK_IRQRESTORE(u, irqflags) mtx_unlock(u) #define DRM_SPINLOCK_ASSERT(l) mtx_assert(l, MA_OWNED) #define DRM_CURRENTPID curthread->td_proc->p_pid #define DRM_LOCK() mtx_lock(&dev->dev_lock) #define DRM_UNLOCK() mtx_unlock(&dev->dev_lock) #define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS) #define DRM_IRQ_ARGS void *arg typedef void irqreturn_t; #define IRQ_HANDLED /* nothing */ #define IRQ_NONE /* nothing */ #define unlikely(x) __builtin_expect(!!(x), 0) #define container_of(ptr, type, member) ({ \ __typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) enum { DRM_IS_NOT_AGP, DRM_IS_AGP, DRM_MIGHT_BE_AGP }; #define DRM_AGP_MEM struct agp_memory_info #define drm_get_device_from_kdev(_kdev) (_kdev->si_drv1) #define PAGE_ALIGN(addr) round_page(addr) /* DRM_SUSER returns true if the user is superuser */ #define DRM_SUSER(p) (priv_check(p, PRIV_DRIVER) == 0) #define DRM_AGP_FIND_DEVICE() agp_find_device() #define DRM_MTRR_WC MDF_WRITECOMBINE #define jiffies ticks typedef vm_paddr_t dma_addr_t; typedef u_int64_t u64; typedef u_int32_t u32; typedef u_int16_t u16; typedef u_int8_t u8; /* DRM_READMEMORYBARRIER() prevents reordering of reads. * DRM_WRITEMEMORYBARRIER() prevents reordering of writes. * DRM_MEMORYBARRIER() prevents reordering of reads and writes. 
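 * On this port they expand to the kernel's rmb(), wmb() and mb()
 * primitives, as defined immediately below.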
*/ #define DRM_READMEMORYBARRIER() rmb() #define DRM_WRITEMEMORYBARRIER() wmb() #define DRM_MEMORYBARRIER() mb() #define DRM_READ8(map, offset) \ *(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) #define DRM_READ16(map, offset) \ le16toh(*(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset))) #define DRM_READ32(map, offset) \ le32toh(*(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset))) #define DRM_WRITE8(map, offset, val) \ *(volatile u_int8_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) = val #define DRM_WRITE16(map, offset, val) \ *(volatile u_int16_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) = htole16(val) #define DRM_WRITE32(map, offset, val) \ *(volatile u_int32_t *)(((vm_offset_t)(map)->virtual) + \ (vm_offset_t)(offset)) = htole32(val) #define DRM_VERIFYAREA_READ( uaddr, size ) \ (!useracc(__DECONST(caddr_t, uaddr), size, VM_PROT_READ)) #define DRM_COPY_TO_USER(user, kern, size) \ copyout(kern, user, size) #define DRM_COPY_FROM_USER(kern, user, size) \ copyin(user, kern, size) #define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \ copyin(arg2, arg1, arg3) #define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \ copyout(arg2, arg1, arg3) #define DRM_GET_USER_UNCHECKED(val, uaddr) \ ((val) = fuword32(uaddr), 0) #define cpu_to_le32(x) htole32(x) #define le32_to_cpu(x) le32toh(x) #define DRM_HZ hz #define DRM_UDELAY(udelay) DELAY(udelay) #define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */ #define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \ (_map) = (_dev)->context_sareas[_ctx]; \ } while(0) #define LOCK_TEST_WITH_RETURN(dev, file_priv) \ do { \ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \ dev->lock.file_priv != file_priv) { \ DRM_ERROR("%s called without lock held\n", \ __FUNCTION__); \ return EINVAL; \ } \ } while (0) /* Returns -errno to shared code */ #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ for ( ret = 0 ; !ret && !(condition) ; ) { \ DRM_UNLOCK(); \ mtx_lock(&dev->irq_lock); \ if (!(condition)) \ ret = -mtx_sleep(&(queue), &dev->irq_lock, \ PCATCH, "drmwtq", (timeout)); \ mtx_unlock(&dev->irq_lock); \ DRM_LOCK(); \ } #define DRM_ERROR(fmt, ...) \ printf("error: [" DRM_NAME ":pid%d:%s] *ERROR* " fmt, \ DRM_CURRENTPID, __func__ , ##__VA_ARGS__) #define DRM_INFO(fmt, ...) printf("info: [" DRM_NAME "] " fmt , ##__VA_ARGS__) #define DRM_DEBUG(fmt, ...) do { \ if (drm_debug_flag) \ printf("[" DRM_NAME ":pid%d:%s] " fmt, DRM_CURRENTPID, \ __func__ , ##__VA_ARGS__); \ } while (0) typedef struct drm_pci_id_list { int vendor; int device; long driver_private; char *name; } drm_pci_id_list_t; struct drm_msi_blacklist_entry { int vendor; int device; }; #define DRM_AUTH 0x1 #define DRM_MASTER 0x2 #define DRM_ROOT_ONLY 0x4 typedef struct drm_ioctl_desc { unsigned long cmd; int (*func)(struct drm_device *dev, void *data, struct drm_file *file_priv); int flags; } drm_ioctl_desc_t; /** * Creates a driver or general drm_ioctl_desc array entry for the given * ioctl, for use by drm_ioctl(). 
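 * A typical entry in a driver's ioctl table would look like
 * (illustrative sketch only):
 *	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0)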
*/ #define DRM_IOCTL_DEF(ioctl, func, flags) \ [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags} typedef struct drm_magic_entry { drm_magic_t magic; struct drm_file *priv; struct drm_magic_entry *next; } drm_magic_entry_t; typedef struct drm_magic_head { struct drm_magic_entry *head; struct drm_magic_entry *tail; } drm_magic_head_t; typedef struct drm_buf { int idx; /* Index into master buflist */ int total; /* Buffer size */ int order; /* log-base-2(total) */ int used; /* Amount of buffer in use (for DMA) */ unsigned long offset; /* Byte offset (used internally) */ void *address; /* Address of buffer */ unsigned long bus_address; /* Bus address of buffer */ struct drm_buf *next; /* Kernel-only: used for free list */ __volatile__ int pending; /* On hardware DMA queue */ struct drm_file *file_priv; /* Unique identifier of holding process */ int context; /* Kernel queue for this buffer */ enum { DRM_LIST_NONE = 0, DRM_LIST_FREE = 1, DRM_LIST_WAIT = 2, DRM_LIST_PEND = 3, DRM_LIST_PRIO = 4, DRM_LIST_RECLAIM = 5 } list; /* Which list we're on */ int dev_priv_size; /* Size of buffer private stoarge */ void *dev_private; /* Per-buffer private storage */ } drm_buf_t; typedef struct drm_freelist { int initialized; /* Freelist in use */ atomic_t count; /* Number of free buffers */ drm_buf_t *next; /* End pointer */ int low_mark; /* Low water mark */ int high_mark; /* High water mark */ } drm_freelist_t; typedef struct drm_dma_handle { void *vaddr; bus_addr_t busaddr; bus_dma_tag_t tag; bus_dmamap_t map; } drm_dma_handle_t; typedef struct drm_buf_entry { int buf_size; int buf_count; drm_buf_t *buflist; int seg_count; drm_dma_handle_t **seglist; int page_order; drm_freelist_t freelist; } drm_buf_entry_t; typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t; struct drm_file { TAILQ_ENTRY(drm_file) link; struct drm_device *dev; int authenticated; int master; pid_t pid; uid_t uid; drm_magic_t magic; unsigned long ioctl_count; void *driver_priv; }; typedef struct drm_lock_data { struct drm_hw_lock *hw_lock; /* Hardware lock */ struct drm_file *file_priv; /* Unique identifier of holding process (NULL is kernel)*/ int lock_queue; /* Queue of blocked processes */ unsigned long lock_time; /* Time of last lock in jiffies */ } drm_lock_data_t; /* This structure, in the struct drm_device, is always initialized while the * device * is open. dev->dma_lock protects the incrementing of dev->buf_use, which * when set marks that no further bufs may be allocated until device teardown * occurs (when the last open of the device has closed). The high/low * watermarks of bufs are only touched by the X Server, and thus not * concurrently accessed, so no locking is needed. 
*/ typedef struct drm_device_dma { drm_buf_entry_t bufs[DRM_MAX_ORDER+1]; int buf_count; drm_buf_t **buflist; /* Vector of pointers info bufs */ int seg_count; int page_count; unsigned long *pagelist; unsigned long byte_count; enum { _DRM_DMA_USE_AGP = 0x01, _DRM_DMA_USE_SG = 0x02 } flags; } drm_device_dma_t; typedef struct drm_agp_mem { void *handle; unsigned long bound; /* address */ int pages; struct drm_agp_mem *prev; struct drm_agp_mem *next; } drm_agp_mem_t; typedef struct drm_agp_head { device_t agpdev; struct agp_info info; const char *chipset; drm_agp_mem_t *memory; unsigned long mode; int enabled; int acquired; unsigned long base; int mtrr; int cant_use_aperture; unsigned long page_mask; } drm_agp_head_t; typedef struct drm_sg_mem { vm_offset_t vaddr; vm_paddr_t *busaddr; vm_pindex_t pages; } drm_sg_mem_t; #define DRM_MAP_HANDLE_BITS (sizeof(void *) == 4 ? 4 : 24) #define DRM_MAP_HANDLE_SHIFT (sizeof(void *) * 8 - DRM_MAP_HANDLE_BITS) typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t; typedef struct drm_local_map { unsigned long offset; /* Physical address (0 for SAREA) */ unsigned long size; /* Physical size (bytes) */ enum drm_map_type type; /* Type of memory mapped */ enum drm_map_flags flags; /* Flags */ void *handle; /* User-space: "Handle" to pass to mmap */ /* Kernel-space: kernel-virtual address */ int mtrr; /* Boolean: MTRR used */ /* Private data */ int rid; /* PCI resource ID for bus_space */ void *virtual; /* Kernel-space: kernel-virtual address */ struct resource *bsr; bus_space_tag_t bst; bus_space_handle_t bsh; drm_dma_handle_t *dmah; TAILQ_ENTRY(drm_local_map) link; } drm_local_map_t; struct drm_vblank_info { wait_queue_head_t queue; /* vblank wait queue */ atomic_t count; /* number of VBLANK interrupts */ /* (driver must alloc the right number of counters) */ atomic_t refcount; /* number of users of vblank interrupts */ u32 last; /* protected by dev->vbl_lock, used */ /* for wraparound handling */ int enabled; /* so we don't call enable more than */ /* once per disable */ int inmodeset; /* Display driver is setting mode */ }; /* location of GART table */ #define DRM_ATI_GART_MAIN 1 #define DRM_ATI_GART_FB 2 #define DRM_ATI_GART_PCI 1 #define DRM_ATI_GART_PCIE 2 #define DRM_ATI_GART_IGP 3 struct drm_ati_pcigart_info { int gart_table_location; int gart_reg_if; void *addr; dma_addr_t bus_addr; dma_addr_t table_mask; dma_addr_t member_mask; struct drm_dma_handle *table_handle; drm_local_map_t mapping; int table_size; struct drm_dma_handle *dmah; /* handle for ATI PCIGART table */ }; #ifndef DMA_BIT_MASK #define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : (1ULL<<(n)) - 1) #endif #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) struct drm_driver_info { int (*load)(struct drm_device *, unsigned long flags); int (*firstopen)(struct drm_device *); int (*open)(struct drm_device *, struct drm_file *); void (*preclose)(struct drm_device *, struct drm_file *file_priv); void (*postclose)(struct drm_device *, struct drm_file *); void (*lastclose)(struct drm_device *); int (*unload)(struct drm_device *); void (*reclaim_buffers_locked)(struct drm_device *, struct drm_file *file_priv); int (*dma_ioctl)(struct drm_device *dev, void *data, struct drm_file *file_priv); void (*dma_ready)(struct drm_device *); int (*dma_quiescent)(struct drm_device *); int (*dma_flush_block_and_flush)(struct drm_device *, int context, enum drm_lock_flags flags); int (*dma_flush_unblock)(struct drm_device *, int context, enum drm_lock_flags flags); int (*context_ctor)(struct drm_device *dev, int context); int (*context_dtor)(struct drm_device *dev, int context); int (*kernel_context_switch)(struct drm_device *dev, int old, int new); int (*kernel_context_switch_unlock)(struct drm_device *dev); void (*irq_preinstall)(struct drm_device *dev); int (*irq_postinstall)(struct drm_device *dev); void (*irq_uninstall)(struct drm_device *dev); void (*irq_handler)(DRM_IRQ_ARGS); u32 (*get_vblank_counter)(struct drm_device *dev, int crtc); int (*enable_vblank)(struct drm_device *dev, int crtc); void (*disable_vblank)(struct drm_device *dev, int crtc); drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */ /** * Called by \c drm_device_is_agp. Typically used to determine if a * card is really attached to AGP or not. * * \param dev DRM device handle * * \returns * One of three values is returned depending on whether or not the * card is absolutely \b not AGP (return of 0), absolutely \b is AGP * (return of 1), or may or may not be AGP (return of 2). */ int (*device_is_agp) (struct drm_device * dev); drm_ioctl_desc_t *ioctls; int max_ioctl; int buf_priv_size; int major; int minor; int patchlevel; const char *name; /* Simple driver name */ const char *desc; /* Longer driver name */ const char *date; /* Date of last major changes. */ u32 driver_features; }; /* Length for the array of resource pointers for drm_get_resource_*. */ #define DRM_MAX_PCI_RESOURCE 6 /** * DRM device functions structure */ struct drm_device { struct drm_driver_info *driver; drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */ u_int16_t pci_device; /* PCI device id */ u_int16_t pci_vendor; /* PCI vendor id */ char *unique; /* Unique identifier: e.g., busid */ int unique_len; /* Length of unique field */ device_t device; /* Device instance from newbus */ struct cdev *devnode; /* Device number for mknod */ int if_version; /* Highest interface version set */ int flags; /* Flags to open(2) */ /* Locks */ struct mtx vbl_lock; /* protects vblank operations */ struct mtx dma_lock; /* protects dev->dma */ struct mtx irq_lock; /* protects irq condition checks */ struct mtx dev_lock; /* protects everything else */ DRM_SPINTYPE drw_lock; /* Usage Counters */ int open_count; /* Outstanding files open */ int buf_use; /* Buffers in use -- cannot alloc */ /* Performance counters */ unsigned long counters; enum drm_stat_type types[15]; atomic_t counts[15]; /* Authentication */ drm_file_list_t files; drm_magic_head_t magiclist[DRM_HASH_SIZE]; /* Linked list of mappable regions. 
Protected by dev_lock */ drm_map_list_t maplist; struct unrhdr *map_unrhdr; drm_local_map_t **context_sareas; int max_context; drm_lock_data_t lock; /* Information on hardware lock */ /* DMA queues (contexts) */ drm_device_dma_t *dma; /* Optional pointer for DMA support */ /* Context support */ int irq; /* Interrupt used by board */ int irq_enabled; /* True if the irq handler is enabled */ int msi_enabled; /* MSI enabled */ int irqrid; /* Interrupt used by board */ struct resource *irqr; /* Resource for interrupt used by board */ void *irqh; /* Handle from bus_setup_intr */ /* Storage of resource pointers for drm_get_resource_* */ struct resource *pcir[DRM_MAX_PCI_RESOURCE]; int pcirid[DRM_MAX_PCI_RESOURCE]; int pci_domain; int pci_bus; int pci_slot; int pci_func; atomic_t context_flag; /* Context swapping flag */ int last_context; /* Last current context */ int vblank_disable_allowed; struct callout vblank_disable_timer; u32 max_vblank_count; /* size of vblank counter register */ struct drm_vblank_info *vblank; /* per crtc vblank info */ int num_crtcs; struct sigio *buf_sigio; /* Processes waiting for SIGIO */ /* Sysctl support */ struct drm_sysctl_info *sysctl; drm_agp_head_t *agp; drm_sg_mem_t *sg; /* Scatter gather memory */ atomic_t *ctx_bitmap; void *dev_private; unsigned int agp_buffer_token; drm_local_map_t *agp_buffer_map; struct unrhdr *drw_unrhdr; /* RB tree of drawable infos */ RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head; }; static __inline__ int drm_core_check_feature(struct drm_device *dev, int feature) { return ((dev->driver->driver_features & feature) ? 1 : 0); } #if __OS_HAS_AGP static inline int drm_core_has_AGP(struct drm_device *dev) { return drm_core_check_feature(dev, DRIVER_USE_AGP); } #else #define drm_core_has_AGP(dev) (0) #endif extern int drm_debug_flag; /* Device setup support (drm_drv.c) */ int drm_probe(device_t kdev, drm_pci_id_list_t *idlist); int drm_attach(device_t kdev, drm_pci_id_list_t *idlist); void drm_close(void *data); int drm_detach(device_t kdev); d_ioctl_t drm_ioctl; d_open_t drm_open; d_read_t drm_read; d_poll_t drm_poll; d_mmap_t drm_mmap; extern drm_local_map_t *drm_getsarea(struct drm_device *dev); /* File operations helpers (drm_fops.c) */ extern int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p, struct drm_device *dev); /* Memory management support (drm_memory.c) */ void drm_mem_init(void); void drm_mem_uninit(void); void *drm_ioremap_wc(struct drm_device *dev, drm_local_map_t *map); void *drm_ioremap(struct drm_device *dev, drm_local_map_t *map); void drm_ioremapfree(drm_local_map_t *map); int drm_mtrr_add(unsigned long offset, size_t size, int flags); int drm_mtrr_del(int handle, unsigned long offset, size_t size, int flags); int drm_context_switch(struct drm_device *dev, int old, int new); int drm_context_switch_complete(struct drm_device *dev, int new); int drm_ctxbitmap_init(struct drm_device *dev); void drm_ctxbitmap_cleanup(struct drm_device *dev); void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); int drm_ctxbitmap_next(struct drm_device *dev); /* Locking IOCTL support (drm_lock.c) */ int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context); int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); /* Buffer management support (drm_bufs.c) */ unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource); unsigned long drm_get_resource_len(struct 
drm_device *dev, unsigned int resource); void drm_rmmap(struct drm_device *dev, drm_local_map_t *map); int drm_order(unsigned long size); int drm_addmap(struct drm_device *dev, unsigned long offset, unsigned long size, enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr); int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request); int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request); int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request); /* DMA support (drm_dma.c) */ int drm_dma_setup(struct drm_device *dev); void drm_dma_takedown(struct drm_device *dev); void drm_free_buffer(struct drm_device *dev, drm_buf_t *buf); void drm_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv); #define drm_core_reclaim_buffers drm_reclaim_buffers /* IRQ support (drm_irq.c) */ int drm_irq_install(struct drm_device *dev); int drm_irq_uninstall(struct drm_device *dev); irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); void drm_driver_irq_preinstall(struct drm_device *dev); void drm_driver_irq_postinstall(struct drm_device *dev); void drm_driver_irq_uninstall(struct drm_device *dev); void drm_handle_vblank(struct drm_device *dev, int crtc); u32 drm_vblank_count(struct drm_device *dev, int crtc); int drm_vblank_get(struct drm_device *dev, int crtc); void drm_vblank_put(struct drm_device *dev, int crtc); void drm_vblank_cleanup(struct drm_device *dev); int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); int drm_vblank_init(struct drm_device *dev, int num_crtcs); int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv); /* AGP/PCI Express/GART support (drm_agpsupport.c) */ int drm_device_is_agp(struct drm_device *dev); int drm_device_is_pcie(struct drm_device *dev); drm_agp_head_t *drm_agp_init(void); int drm_agp_acquire(struct drm_device *dev); int drm_agp_release(struct drm_device *dev); int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info); int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); void *drm_agp_allocate_memory(size_t pages, u32 type); int drm_agp_free_memory(void *handle); int drm_agp_bind_memory(void *handle, off_t start); int drm_agp_unbind_memory(void *handle); int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); /* Scatter Gather Support (drm_scatter.c) */ void drm_sg_cleanup(drm_sg_mem_t *entry); int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); /* sysctl support (drm_sysctl.h) */ extern int drm_sysctl_init(struct drm_device *dev); extern int drm_sysctl_cleanup(struct drm_device *dev); /* ATI PCIGART support (ati_pcigart.c) */ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info); int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info); /* Locking IOCTL support (drm_drv.c) */ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Misc. 
IOCTL support (drm_ioctl.c) */ int drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_setunique(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getclient(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_noop(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Context IOCTL support (drm_context.c) */ int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_addctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_switchctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_newctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_rmctx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_setsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_getsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Drawable IOCTL support (drm_drawable.c) */ int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_update_draw(struct drm_device *dev, void *data, struct drm_file *file_priv); struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, int handle); /* Drawable support (drm_drawable.c) */ void drm_drawable_free_all(struct drm_device *dev); /* Authentication IOCTL support (drm_auth.c) */ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Buffer management support (drm_bufs.c) */ int drm_addmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_rmmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv); /* DMA support (drm_dma.c) */ int drm_dma(struct drm_device *dev, void *data, struct drm_file *file_priv); /* IRQ support (drm_irq.c) */ int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv); /* AGP/GART support (drm_agpsupport.c) */ int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_agp_release_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_agp_enable_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_agp_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_agp_free_ioctl(struct drm_device *dev, void *data, struct 
drm_file *file_priv); int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_agp_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Scatter Gather Support (drm_scatter.c) */ int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv); /* consistent PCI memory functions (drm_pci.c) */ drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align, dma_addr_t maxaddr); void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); /* Inline replacements for drm_alloc and friends */ static __inline__ void * drm_alloc(size_t size, struct malloc_type *area) { return malloc(size, area, M_NOWAIT); } static __inline__ void * drm_calloc(size_t nmemb, size_t size, struct malloc_type *area) { return malloc(size * nmemb, area, M_NOWAIT | M_ZERO); } static __inline__ void * drm_realloc(void *oldpt, size_t oldsize, size_t size, struct malloc_type *area) { return reallocf(oldpt, size, area, M_NOWAIT); } static __inline__ void drm_free(void *pt, size_t size, struct malloc_type *area) { free(pt, area); } /* Inline replacements for DRM_IOREMAP macros */ static __inline__ void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev) { map->virtual = drm_ioremap_wc(dev, map); } static __inline__ void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev) { map->virtual = drm_ioremap(dev, map); } static __inline__ void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev) { if ( map->virtual && map->size ) drm_ioremapfree(map); } static __inline__ struct drm_local_map * drm_core_findmap(struct drm_device *dev, unsigned long offset) { drm_local_map_t *map; DRM_SPINLOCK_ASSERT(&dev->dev_lock); TAILQ_FOREACH(map, &dev->maplist, link) { if (offset == (unsigned long)map->handle) return map; } return NULL; } static __inline__ void drm_core_dropmap(struct drm_map *map) { } #endif /* __KERNEL__ */ #endif /* _DRM_P_H_ */ Index: head/sys/dev/drm2/drmP.h =================================================================== --- head/sys/dev/drm2/drmP.h (revision 295879) +++ head/sys/dev/drm2/drmP.h (revision 295880) @@ -1,1807 +1,1806 @@ /** * \file drmP.h * Private header for Direct Rendering Manager * * \author Rickard E. (Rik) Faith * \author Gareth Hughes */ /* * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * Copyright (c) 2009-2010, Code Aurora Forum. * All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include __FBSDID("$FreeBSD$"); #ifndef _DRM_P_H_ #define _DRM_P_H_ #if defined(_KERNEL) || defined(__KERNEL__) #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #if defined(__i386__) || defined(__amd64__) #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE))) #define __OS_HAS_MTRR (defined(CONFIG_MTRR)) struct drm_file; struct drm_device; #include #include #include "opt_compat.h" #include "opt_drm.h" #include "opt_syscons.h" #ifdef DRM_DEBUG #undef DRM_DEBUG #define DRM_DEBUG_DEFAULT_ON 1 #endif /* DRM_DEBUG */ #define DRM_DEBUGBITS_DEBUG 0x1 #define DRM_DEBUGBITS_KMS 0x2 #define DRM_DEBUGBITS_FAILED_IOCTL 0x4 #undef DRM_LINUX #define DRM_LINUX 0 /***********************************************************************/ /** \name DRM template customization defaults */ /*@{*/ /* driver capabilities and requirements mask */ #define DRIVER_USE_AGP 0x1 #define DRIVER_REQUIRE_AGP 0x2 #define DRIVER_USE_MTRR 0x4 #define DRIVER_PCI_DMA 0x8 #define DRIVER_SG 0x10 #define DRIVER_HAVE_DMA 0x20 #define DRIVER_HAVE_IRQ 0x40 #define DRIVER_IRQ_SHARED 0x80 #define DRIVER_IRQ_VBL 0x100 #define DRIVER_DMA_QUEUE 0x200 #define DRIVER_FB_DMA 0x400 #define DRIVER_IRQ_VBL2 0x800 #define DRIVER_GEM 0x1000 #define DRIVER_MODESET 0x2000 #define DRIVER_PRIME 0x4000 #define DRIVER_BUS_PCI 0x1 #define DRIVER_BUS_PLATFORM 0x2 #define DRIVER_BUS_USB 0x3 /***********************************************************************/ /** \name Begin the DRM... */ /*@{*/ #define DRM_DEBUG_CODE 2 /**< Include debugging code if > 1, then also include looping detection. */ #define DRM_MAGIC_HASH_ORDER 4 /**< Size of key hash table. Must be power of 2. */ #define DRM_KERNEL_CONTEXT 0 /**< Change drm_resctx if changed */ #define DRM_RESERVED_CONTEXTS 1 /**< Change drm_resctx if changed */ #define DRM_LOOPING_LIMIT 5000000 #define DRM_TIME_SLICE (HZ/20) /**< Time slice for GLXContexts */ #define DRM_LOCK_SLICE 1 /**< Time slice for lock, in jiffies */ #define DRM_FLAG_DEBUG 0x01 #define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8) #define DRM_MAP_HASH_OFFSET 0x10000000 /*@}*/ /***********************************************************************/ /** \name Macros to make printk easier */ /*@{*/ /** * Error output. * * \param fmt printf() like format string. * \param arg arguments */ #define DRM_ERROR(fmt, ...) \ printf("error: [" DRM_NAME ":pid%d:%s] *ERROR* " fmt, \ DRM_CURRENTPID, __func__ , ##__VA_ARGS__) #define DRM_WARNING(fmt, ...) printf("warning: [" DRM_NAME "] " fmt , ##__VA_ARGS__) #define DRM_INFO(fmt, ...) printf("info: [" DRM_NAME "] " fmt , ##__VA_ARGS__) /** * Debug output. * * \param fmt printf() like format string. * \param arg arguments */ #define DRM_DEBUG(fmt, ...) 
do { \ if ((drm_debug & DRM_DEBUGBITS_DEBUG) != 0) \ printf("[" DRM_NAME ":pid%d:%s] " fmt, DRM_CURRENTPID, \ __func__ , ##__VA_ARGS__); \ } while (0) #define DRM_DEBUG_DRIVER(fmt, ...) do { \ if ((drm_debug & DRM_DEBUGBITS_KMS) != 0) \ printf("[" DRM_NAME ":KMS:pid%d:%s] " fmt, DRM_CURRENTPID,\ __func__ , ##__VA_ARGS__); \ } while (0) #define DRM_DEBUG_KMS(fmt, ...) do { \ if ((drm_debug & DRM_DEBUGBITS_KMS) != 0) \ printf("[" DRM_NAME ":KMS:pid%d:%s] " fmt, DRM_CURRENTPID,\ __func__ , ##__VA_ARGS__); \ } while (0) #define DRM_LOG(fmt, ...) do { \ if ((drm_debug & DRM_DEBUGBITS_KMS) != 0) \ printf("[" DRM_NAME "]:pid%d:%s]" fmt, DRM_CURRENTPID, \ __func__ , ##__VA_ARGS__); \ } while (0) #define DRM_LOG_KMS(fmt, ...) do { \ if ((drm_debug & DRM_DEBUGBITS_KMS) != 0) \ printf("[" DRM_NAME "]:KMS:pid%d:%s]" fmt, DRM_CURRENTPID,\ __func__ , ##__VA_ARGS__); \ } while (0) #define DRM_LOG_MODE(fmt, ...) do { \ if ((drm_debug & DRM_DEBUGBITS_KMS) != 0) \ printf("[" DRM_NAME "]:pid%d:%s]" fmt, DRM_CURRENTPID, \ __func__ , ##__VA_ARGS__); \ } while (0) #define DRM_LOG_DRIVER(fmt, ...) do { \ if ((drm_debug & DRM_DEBUGBITS_KMS) != 0) \ printf("[" DRM_NAME "]:KMS:pid%d:%s]" fmt, DRM_CURRENTPID,\ __func__ , ##__VA_ARGS__); \ } while (0) /*@}*/ /***********************************************************************/ /** \name Internal types and structures */ /*@{*/ #define DRM_ARRAY_SIZE(x) ARRAY_SIZE(x) #define DRM_LEFTCOUNT(x) (((x)->rp + (x)->count - (x)->wp) % ((x)->count + 1)) #define DRM_BUFCOUNT(x) ((x)->count - DRM_LEFTCOUNT(x)) #define DRM_IF_VERSION(maj, min) (maj << 16 | min) /** * Test that the hardware lock is held by the caller, returning otherwise. * * \param dev DRM device. * \param filp file pointer of the caller. */ #define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \ do { \ if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \ _file_priv->master->lock.file_priv != _file_priv) { \ DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\ _file_priv->master->lock.file_priv, _file_priv); \ return -EINVAL; \ } \ } while (0) /** * Ioctl function type. * * \param inode device inode. * \param file_priv DRM file private pointer. * \param cmd command. * \param arg argument. */ typedef int drm_ioctl_t(struct drm_device *dev, void *data, struct drm_file *file_priv); #define DRM_IOCTL_NR(n) ((n) & 0xff) #define DRM_MAJOR 226 #define DRM_AUTH 0x1 #define DRM_MASTER 0x2 #define DRM_ROOT_ONLY 0x4 #define DRM_CONTROL_ALLOW 0x8 #define DRM_UNLOCKED 0x10 struct drm_ioctl_desc { unsigned long cmd; int flags; drm_ioctl_t *func; unsigned int cmd_drv; }; /** * Creates a driver or general drm_ioctl_desc array entry for the given * ioctl, for use by drm_ioctl(). */ #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0} #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \ [DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl} struct drm_magic_entry { struct list_head head; struct drm_hash_item hash_item; struct drm_file *priv; }; /** * DMA buffer. 
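For illustration of drm_ioctl_t and the DRM_IOCTL_DEF_DRV() table macro above, a minimal sketch; the foo_* names and the DRM_FOO_GETPARAM / DRM_IOCTL_FOO_GETPARAM numbers are hypothetical and would come from a driver's own ioctl header, not from this file:

static int
foo_getparam_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
    /* 'data' is the ioctl argument block handed in by drm_ioctl(). */
    return (0);
}

static struct drm_ioctl_desc foo_ioctls[] = {
    /* assumes DRM_FOO_GETPARAM and DRM_IOCTL_FOO_GETPARAM exist */
    DRM_IOCTL_DEF_DRV(FOO_GETPARAM, foo_getparam_ioctl, DRM_AUTH),
};
/* hooked up via: driver.ioctls = foo_ioctls;
 *                driver.num_ioctls = DRM_ARRAY_SIZE(foo_ioctls); */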
*/ struct drm_buf { int idx; /**< Index into master buflist */ int total; /**< Buffer size */ int order; /**< log-base-2(total) */ int used; /**< Amount of buffer in use (for DMA) */ unsigned long offset; /**< Byte offset (used internally) */ void *address; /**< Address of buffer */ unsigned long bus_address; /**< Bus address of buffer */ struct drm_buf *next; /**< Kernel-only: used for free list */ __volatile__ int waiting; /**< On kernel DMA queue */ __volatile__ int pending; /**< On hardware DMA queue */ struct drm_file *file_priv; /**< Private of holding file descr */ int context; /**< Kernel queue for this buffer */ int while_locked; /**< Dispatch this buffer while locked */ enum { DRM_LIST_NONE = 0, DRM_LIST_FREE = 1, DRM_LIST_WAIT = 2, DRM_LIST_PEND = 3, DRM_LIST_PRIO = 4, DRM_LIST_RECLAIM = 5 } list; /**< Which list we're on */ int dev_priv_size; /**< Size of buffer private storage */ void *dev_private; /**< Per-buffer private storage */ }; struct drm_freelist { int initialized; /**< Freelist in use */ atomic_t count; /**< Number of free buffers */ struct drm_buf *next; /**< End pointer */ #ifdef FREEBSD_NOTYET wait_queue_head_t waiting; /**< Processes waiting on free bufs */ #endif /* defined(FREEBSD_NOTYET) */ int low_mark; /**< Low water mark */ int high_mark; /**< High water mark */ #ifdef FREEBSD_NOTYET atomic_t wfh; /**< If waiting for high mark */ spinlock_t lock; #endif /* defined(FREEBSD_NOTYET) */ }; typedef struct drm_dma_handle { void *vaddr; bus_addr_t busaddr; bus_dma_tag_t tag; bus_dmamap_t map; } drm_dma_handle_t; /** * Buffer entry. There is one of this for each buffer size order. */ struct drm_buf_entry { int buf_size; /**< size */ int buf_count; /**< number of buffers */ struct drm_buf *buflist; /**< buffer list */ int seg_count; int page_order; struct drm_dma_handle **seglist; struct drm_freelist freelist; }; /* Event queued up for userspace to read */ struct drm_pending_event { struct drm_event *event; struct list_head link; struct drm_file *file_priv; pid_t pid; /* pid of requester, no guarantee it's valid by the time we deliver the event, for tracing only */ void (*destroy)(struct drm_pending_event *event); }; /* initial implementaton using a linked list - todo hashtab */ struct drm_prime_file_private { struct list_head head; struct mtx lock; }; struct drm_file { int authenticated; pid_t pid; uid_t uid; drm_magic_t magic; unsigned long ioctl_count; struct list_head lhead; struct drm_minor *minor; unsigned long lock_count; void *driver_priv; struct drm_gem_names object_names; int is_master; /* this file private is a master for a minor */ struct drm_master *master; /* master this node is currently associated with N.B. not always minor->master */ struct list_head fbs; struct selinfo event_poll; struct list_head event_list; int event_space; struct drm_prime_file_private prime; }; /** * Lock data. */ struct drm_lock_data { struct drm_hw_lock *hw_lock; /**< Hardware lock */ /** Private of lock holder's file (NULL=kernel) */ struct drm_file *file_priv; wait_queue_head_t lock_queue; /**< Queue of blocked processes */ unsigned long lock_time; /**< Time of last lock in jiffies */ struct mtx spinlock; uint32_t kernel_waiters; uint32_t user_waiters; int idle_has_lock; }; /** * DMA data. 
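For illustration, drm_buf.order above is the log2 size order that drm_order(), declared further down in this header, computes; a standalone sketch under the assumption that the order is rounded up to the next power of two:

#include <stdio.h>

/* Smallest 'order' such that (1UL << order) >= size. */
static int
example_order(unsigned long size)
{
    unsigned long tmp;
    int order;

    for (order = 0, tmp = size >> 1; tmp != 0; tmp >>= 1)
        order++;
    if (size & (size - 1))      /* not a power of two: round up */
        order++;
    return (order);
}

int
main(void)
{
    /* prints "12 13 0" */
    printf("%d %d %d\n", example_order(4096), example_order(4097),
        example_order(1));
    return (0);
}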
*/ struct drm_device_dma { struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ int buf_count; /**< total number of buffers */ struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ int seg_count; int page_count; /**< number of pages */ unsigned long *pagelist; /**< page list */ unsigned long byte_count; enum { _DRM_DMA_USE_AGP = 0x01, _DRM_DMA_USE_SG = 0x02, _DRM_DMA_USE_FB = 0x04, _DRM_DMA_USE_PCI_RO = 0x08 } flags; }; /** * AGP memory entry. Stored as a doubly linked list. */ struct drm_agp_mem { unsigned long handle; /**< handle */ DRM_AGP_MEM *memory; unsigned long bound; /**< address */ int pages; struct list_head head; }; /** * AGP data. * * \sa drm_agp_init() and drm_device::agp. */ struct drm_agp_head { DRM_AGP_KERN agp_info; /**< AGP device information */ struct list_head memory; unsigned long mode; /**< AGP mode */ device_t bridge; int enabled; /**< whether the AGP bus as been enabled */ int acquired; /**< whether the AGP device has been acquired */ unsigned long base; int agp_mtrr; int cant_use_aperture; }; /** * Scatter-gather memory. */ struct drm_sg_mem { vm_offset_t vaddr; vm_paddr_t *busaddr; vm_pindex_t pages; }; struct drm_sigdata { int context; struct drm_hw_lock *lock; }; /** * Kernel side of a mapping */ #define DRM_MAP_HANDLE_BITS (sizeof(void *) == 4 ? 4 : 24) #define DRM_MAP_HANDLE_SHIFT (sizeof(void *) * 8 - DRM_MAP_HANDLE_BITS) struct drm_local_map { resource_size_t offset; /**< Requested physical address (0 for SAREA)*/ unsigned long size; /**< Requested physical size (bytes) */ enum drm_map_type type; /**< Type of memory to map */ enum drm_map_flags flags; /**< Flags */ void *handle; /**< User-space: "Handle" to pass to mmap() */ /**< Kernel-space: kernel-virtual address */ int mtrr; /**< MTRR slot used */ /* Private data */ drm_dma_handle_t *dmah; }; typedef struct drm_local_map drm_local_map_t; /** * Mappings list */ struct drm_map_list { struct list_head head; /**< list head */ struct drm_hash_item hash; struct drm_local_map *map; /**< mapping */ uint64_t user_token; struct drm_master *master; struct drm_mm_node *file_offset_node; /**< fake offset */ }; /** * Context handle list */ struct drm_ctx_list { struct list_head head; /**< list head */ drm_context_t handle; /**< context handle */ struct drm_file *tag; /**< associated fd private data */ }; /* location of GART table */ #define DRM_ATI_GART_MAIN 1 #define DRM_ATI_GART_FB 2 #define DRM_ATI_GART_PCI 1 #define DRM_ATI_GART_PCIE 2 #define DRM_ATI_GART_IGP 3 struct drm_ati_pcigart_info { int gart_table_location; int gart_reg_if; void *addr; dma_addr_t bus_addr; dma_addr_t table_mask; struct drm_dma_handle *table_handle; struct drm_local_map mapping; int table_size; struct drm_dma_handle *dmah; /* handle for ATI PCIGART table FIXME */ }; /** * GEM specific mm private for tracking GEM objects */ struct drm_gem_mm { struct unrhdr *idxunr; struct drm_open_hash offset_hash; /**< User token hash table for maps */ }; /** * This structure defines the drm_mm memory object, which will be used by the * DRM for its buffer objects. */ struct drm_gem_object { /** Reference count of this object */ u_int refcount; /** Handle count of this object. 
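For illustration, the DRM_MAP_HANDLE_BITS / DRM_MAP_HANDLE_SHIFT pair above reserves the top 4 bits of a 32-bit pointer and the top 24 bits of a 64-bit pointer for map handles; a standalone check of that arithmetic:

#include <stdio.h>

#define DRM_MAP_HANDLE_BITS     (sizeof(void *) == 4 ? 4 : 24)
#define DRM_MAP_HANDLE_SHIFT    (sizeof(void *) * 8 - DRM_MAP_HANDLE_BITS)

int
main(void)
{
    /* LP64: bits == 24, shift == 40; ILP32: bits == 4, shift == 28 */
    printf("handle bits %zu, shift %zu\n",
        (size_t)DRM_MAP_HANDLE_BITS, (size_t)DRM_MAP_HANDLE_SHIFT);
    return (0);
}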
Each handle also holds a reference */ atomic_t handle_count; /* number of handles on this object */ /** Related drm device */ struct drm_device *dev; /** File representing the shmem storage: filp in Linux parlance */ vm_object_t vm_obj; /* Mapping info for this object */ bool on_map; struct drm_hash_item map_list; /** * Size of the object, in bytes. Immutable over the object's * lifetime. */ size_t size; /** * Global name for this object, starts at 1. 0 means unnamed. * Access is covered by the object_name_lock in the related drm_device */ int name; /** * Memory domains. These monitor which caches contain read/write data * related to the object. When transitioning from one set of domains * to another, the driver is called to ensure that caches are suitably * flushed and invalidated */ uint32_t read_domains; uint32_t write_domain; /** * While validating an exec operation, the * new read/write domain values are computed here. * They will be transferred to the above values * at the point that any cache flushing occurs */ uint32_t pending_read_domains; uint32_t pending_write_domain; void *driver_private; #ifdef FREEBSD_NOTYET /* dma buf exported from this GEM object */ struct dma_buf *export_dma_buf; /* dma buf attachment backing this object */ struct dma_buf_attachment *import_attach; #endif /* FREEBSD_NOTYET */ }; #include /* per-master structure */ struct drm_master { u_int refcount; /* refcount for this master */ struct list_head head; /**< each minor contains a list of masters */ struct drm_minor *minor; /**< link back to minor we are a master for */ char *unique; /**< Unique identifier: e.g., busid */ int unique_len; /**< Length of unique field */ int unique_size; /**< amount allocated */ int blocked; /**< Blocked due to VC switch? */ /** \name Authentication */ /*@{ */ struct drm_open_hash magiclist; struct list_head magicfree; /*@} */ struct drm_lock_data lock; /**< Information on hardware lock */ void *driver_priv; /**< Private structure for driver to use */ }; /* Size of ringbuffer for vblank timestamps. Just double-buffer * in initial implementation. */ #define DRM_VBLANKTIME_RBSIZE 2 /* Flags and return codes for get_vblank_timestamp() driver function. */ #define DRM_CALLED_FROM_VBLIRQ 1 #define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0) #define DRM_VBLANKTIME_INVBL (1 << 1) /* get_scanout_position() return flags */ #define DRM_SCANOUTPOS_VALID (1 << 0) #define DRM_SCANOUTPOS_INVBL (1 << 1) #define DRM_SCANOUTPOS_ACCURATE (1 << 2) struct drm_bus { int bus_type; int (*get_irq)(struct drm_device *dev); void (*free_irq)(struct drm_device *dev); const char *(*get_name)(struct drm_device *dev); int (*set_busid)(struct drm_device *dev, struct drm_master *master); int (*set_unique)(struct drm_device *dev, struct drm_master *master, struct drm_unique *unique); int (*irq_by_busid)(struct drm_device *dev, struct drm_irq_busid *p); /* hooks that are for PCI */ int (*agp_init)(struct drm_device *dev); }; /** * DRM driver structure. This structure represent the common code for * a family of cards. 
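For illustration, a hedged sketch of the usual lifetime pattern for the drm_gem_object defined above: allocate, publish a userspace handle, then drop the allocation reference so the handle keeps the object alive. The drm_gem_* helpers are declared later in this header; foo_gem_create() itself is hypothetical:

static int
foo_gem_create(struct drm_device *dev, struct drm_file *file_priv,
    size_t size, u32 *handlep)
{
    struct drm_gem_object *obj;
    int error;

    obj = drm_gem_object_alloc(dev, size);      /* refcount == 1 */
    if (obj == NULL)
        return (-ENOMEM);
    error = drm_gem_handle_create(file_priv, obj, handlep);
    /* The handle (if created) holds its own reference; drop ours. */
    drm_gem_object_unreference_unlocked(obj);
    return (error);
}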
There will one drm_device for each card present * in this family */ struct drm_driver { int (*load) (struct drm_device *, unsigned long flags); int (*firstopen) (struct drm_device *); int (*open) (struct drm_device *, struct drm_file *); void (*preclose) (struct drm_device *, struct drm_file *file_priv); void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); int (*unload) (struct drm_device *); int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); int (*dma_quiescent) (struct drm_device *); int (*context_dtor) (struct drm_device *dev, int context); /** * get_vblank_counter - get raw hardware vblank counter * @dev: DRM device * @crtc: counter to fetch * * Driver callback for fetching a raw hardware vblank counter for @crtc. * If a device doesn't have a hardware counter, the driver can simply * return the value of drm_vblank_count. The DRM core will account for * missed vblank events while interrupts where disabled based on system * timestamps. * * Wraparound handling and loss of events due to modesetting is dealt * with in the DRM core code. * * RETURNS * Raw vblank counter value. */ u32 (*get_vblank_counter) (struct drm_device *dev, int crtc); /** * enable_vblank - enable vblank interrupt events * @dev: DRM device * @crtc: which irq to enable * * Enable vblank interrupts for @crtc. If the device doesn't have * a hardware vblank counter, this routine should be a no-op, since * interrupts will have to stay on to keep the count accurate. * * RETURNS * Zero on success, appropriate errno if the given @crtc's vblank * interrupt cannot be enabled. */ int (*enable_vblank) (struct drm_device *dev, int crtc); /** * disable_vblank - disable vblank interrupt events * @dev: DRM device * @crtc: which irq to enable * * Disable vblank interrupts for @crtc. If the device doesn't have * a hardware vblank counter, this routine should be a no-op, since * interrupts will have to stay on to keep the count accurate. */ void (*disable_vblank) (struct drm_device *dev, int crtc); /** * Called by \c drm_device_is_agp. Typically used to determine if a * card is really attached to AGP or not. * * \param dev DRM device handle * * \returns * One of three values is returned depending on whether or not the * card is absolutely \b not AGP (return of 0), absolutely \b is AGP * (return of 1), or may or may not be AGP (return of 2). */ int (*device_is_agp) (struct drm_device *dev); /** * Called by vblank timestamping code. * * Return the current display scanout position from a crtc. * * \param dev DRM device. * \param crtc Id of the crtc to query. * \param *vpos Target location for current vertical scanout position. * \param *hpos Target location for current horizontal scanout position. * * Returns vpos as a positive number while in active scanout area. * Returns vpos as a negative number inside vblank, counting the number * of scanlines to go until end of vblank, e.g., -1 means "one scanline * until start of active scanout / end of vblank." * * \return Flags, or'ed together as follows: * * DRM_SCANOUTPOS_VALID = Query successful. * DRM_SCANOUTPOS_INVBL = Inside vblank. * DRM_SCANOUTPOS_ACCURATE = Returned position is accurate. A lack of * this flag means that returned position may be offset by a constant * but unknown small number of scanlines wrt. real scanout position. * */ int (*get_scanout_position) (struct drm_device *dev, int crtc, int *vpos, int *hpos); /** * Called by \c drm_get_last_vbltimestamp. 
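For illustration of the get_scanout_position() contract documented above, a hedged caller-side sketch (the foo_ prefix is hypothetical):

static int
foo_crtc_in_vblank(struct drm_device *dev, int crtc)
{
    int vpos, hpos, flags;

    if (dev->driver->get_scanout_position == NULL)
        return (0);
    flags = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);
    if ((flags & DRM_SCANOUTPOS_VALID) == 0)
        return (0);
    /* Inside vblank, vpos is negative: scanlines left until vblank ends. */
    return ((flags & DRM_SCANOUTPOS_INVBL) != 0);
}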
Should return a precise * timestamp when the most recent VBLANK interval ended or will end. * * Specifically, the timestamp in @vblank_time should correspond as * closely as possible to the time when the first video scanline of * the video frame after the end of VBLANK will start scanning out, * the time immediately after end of the VBLANK interval. If the * @crtc is currently inside VBLANK, this will be a time in the future. * If the @crtc is currently scanning out a frame, this will be the * past start time of the current scanout. This is meant to adhere * to the OpenML OML_sync_control extension specification. * * \param dev dev DRM device handle. * \param crtc crtc for which timestamp should be returned. * \param *max_error Maximum allowable timestamp error in nanoseconds. * Implementation should strive to provide timestamp * with an error of at most *max_error nanoseconds. * Returns true upper bound on error for timestamp. * \param *vblank_time Target location for returned vblank timestamp. * \param flags 0 = Defaults, no special treatment needed. * \param DRM_CALLED_FROM_VBLIRQ = Function is called from vblank * irq handler. Some drivers need to apply some workarounds * for gpu-specific vblank irq quirks if flag is set. * * \returns * Zero if timestamping isn't supported in current display mode or a * negative number on failure. A positive status code on success, * which describes how the vblank_time timestamp was computed. */ int (*get_vblank_timestamp) (struct drm_device *dev, int crtc, int *max_error, struct timeval *vblank_time, unsigned flags); /* these have to be filled in */ irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); void (*irq_preinstall) (struct drm_device *dev); int (*irq_postinstall) (struct drm_device *dev); void (*irq_uninstall) (struct drm_device *dev); void (*set_version) (struct drm_device *dev, struct drm_set_version *sv); /* Master routines */ int (*master_create)(struct drm_device *dev, struct drm_master *master); void (*master_destroy)(struct drm_device *dev, struct drm_master *master); /** * master_set is called whenever the minor master is set. * master_drop is called whenever the minor master is dropped. */ int (*master_set)(struct drm_device *dev, struct drm_file *file_priv, bool from_open); void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv, bool from_release); /** * Driver-specific constructor for drm_gem_objects, to set up * obj->driver_private. * * Returns 0 on success. 
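For illustration of the get_vblank_timestamp() hook documented above, a common shape is to delegate to drm_calc_vbltimestamp_from_scanoutpos(), declared further down in this header; foo_find_crtc() is a hypothetical CRTC lookup helper:

static int
foo_get_vblank_timestamp(struct drm_device *dev, int crtc, int *max_error,
    struct timeval *vblank_time, unsigned flags)
{
    struct drm_crtc *refcrtc;

    refcrtc = foo_find_crtc(dev, crtc);         /* hypothetical helper */
    if (refcrtc == NULL)
        return (-EINVAL);
    return (drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
        vblank_time, flags, refcrtc));
}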
*/ int (*gem_init_object) (struct drm_gem_object *obj); void (*gem_free_object) (struct drm_gem_object *obj); int (*gem_open_object) (struct drm_gem_object *, struct drm_file *); void (*gem_close_object) (struct drm_gem_object *, struct drm_file *); #ifdef FREEBSD_NOTYET /* prime: */ /* export handle -> fd (see drm_gem_prime_handle_to_fd() helper) */ int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); /* import fd -> handle (see drm_gem_prime_fd_to_handle() helper) */ int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); /* export GEM -> dmabuf */ struct dma_buf * (*gem_prime_export)(struct drm_device *dev, struct drm_gem_object *obj, int flags); /* import dmabuf -> GEM */ struct drm_gem_object * (*gem_prime_import)(struct drm_device *dev, struct dma_buf *dma_buf); #endif /* defined(FREEBSD_NOTYET) */ /* dumb alloc support */ int (*dumb_create)(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); int (*dumb_map_offset)(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset); int (*dumb_destroy)(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle); /* Driver private ops for this object */ struct cdev_pager_ops *gem_pager_ops; int (*sysctl_init)(struct drm_device *dev, struct sysctl_ctx_list *ctx, struct sysctl_oid *top); void (*sysctl_cleanup)(struct drm_device *dev); int major; int minor; int patchlevel; char *name; char *desc; char *date; u32 driver_features; int dev_priv_size; struct drm_ioctl_desc *ioctls; int num_ioctls; struct drm_bus *bus; #ifdef COMPAT_FREEBSD32 struct drm_ioctl_desc *compat_ioctls; int *num_compat_ioctls; #endif int buf_priv_size; }; #define DRM_MINOR_UNASSIGNED 0 #define DRM_MINOR_LEGACY 1 #define DRM_MINOR_CONTROL 2 #define DRM_MINOR_RENDER 3 /** * DRM minor structure. This structure represents a drm minor number. */ struct drm_minor { int index; /**< Minor device number */ int type; /**< Control or render */ struct cdev *device; /**< Device number for mknod */ device_t kdev; /**< OS device */ struct drm_device *dev; struct drm_master *master; /* currently active master for this node */ struct list_head master_list; struct drm_mode_group mode_group; struct sigio *buf_sigio; /* Processes waiting for SIGIO */ }; /* mode specified on the command line */ struct drm_cmdline_mode { bool specified; bool refresh_specified; bool bpp_specified; int xres, yres; int bpp; int refresh; bool rb; bool interlace; bool cvt; bool margins; enum drm_connector_force force; }; struct drm_pending_vblank_event { struct drm_pending_event base; int pipe; struct drm_event_vblank event; }; /** * DRM device structure. This structure represent a complete card that * may contain multiple heads. 
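For illustration of struct drm_pending_vblank_event above, a hedged sketch of handing a queued page-flip event back to the core once the flip has completed; locking of dev->event_lock is elided and foo_finish_flip() is hypothetical:

static void
foo_finish_flip(struct drm_device *dev, int crtc,
    struct drm_pending_vblank_event *e)
{
    if (e != NULL)
        drm_send_vblank_event(dev, crtc, e);
    /* pairs with a drm_vblank_get() taken when the flip was queued */
    drm_vblank_put(dev, crtc);
}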
*/ struct drm_device { int if_version; /**< Highest interface version set */ /** \name Locks */ /*@{ */ struct mtx count_lock; /**< For inuse, drm_device::open_count, drm_device::buf_use */ struct sx dev_struct_lock; /**< For others */ /*@} */ /** \name Usage Counters */ /*@{ */ int open_count; /**< Outstanding files open */ atomic_t ioctl_count; /**< Outstanding IOCTLs pending */ atomic_t vma_count; /**< Outstanding vma areas open */ int buf_use; /**< Buffers in use -- cannot alloc */ atomic_t buf_alloc; /**< Buffer allocation in progress */ /*@} */ /** \name Performance counters */ /*@{ */ unsigned long counters; enum drm_stat_type types[15]; atomic_t counts[15]; /*@} */ struct list_head filelist; /** \name Memory management */ /*@{ */ struct list_head maplist; /**< Linked list of regions */ int map_count; /**< Number of mappable regions */ struct drm_open_hash map_hash; /**< User token hash table for maps */ /** \name Context handle management */ /*@{ */ struct list_head ctxlist; /**< Linked list of context handles */ int ctx_count; /**< Number of context handles */ struct mtx ctxlist_mutex; /**< For ctxlist */ drm_local_map_t **context_sareas; int max_context; unsigned long *ctx_bitmap; /*@} */ /** \name DMA support */ /*@{ */ struct drm_device_dma *dma; /**< Optional pointer for DMA support */ /*@} */ /** \name Context support */ /*@{ */ int irq_enabled; /**< True if irq handler is enabled */ atomic_t context_flag; /**< Context swapping flag */ atomic_t interrupt_flag; /**< Interruption handler flag */ atomic_t dma_flag; /**< DMA dispatch flag */ wait_queue_head_t context_wait; /**< Processes waiting on ctx switch */ int last_checked; /**< Last context checked for DMA */ int last_context; /**< Last current context */ unsigned long last_switch; /**< jiffies at last context switch */ /*@} */ /** \name VBLANK IRQ support */ /*@{ */ /* * At load time, disabling the vblank interrupt won't be allowed since * old clients may not call the modeset ioctl and therefore misbehave. * Once the modeset ioctl *has* been called though, we can safely * disable them when unused. 
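For illustration of the vblank bookkeeping kept in the fields that follow, a driver's interrupt path typically calls drm_handle_vblank(), declared further down, for each CRTC whose vblank fired; foo_crtc_vblank_fired() is a hypothetical hardware status check:

static void
foo_vblank_intr(struct drm_device *dev)
{
    int crtc;

    for (crtc = 0; crtc < (int)dev->num_crtcs; crtc++) {
        if (foo_crtc_vblank_fired(dev, crtc))   /* hypothetical */
            (void)drm_handle_vblank(dev, crtc);
    }
}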
*/ int vblank_disable_allowed; atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ struct timeval *_vblank_time; /**< timestamp of current vblank_count (drivers must alloc right number of fields) */ struct mtx vblank_time_lock; /**< Protects vblank count and time updates during vblank enable/disable */ struct mtx vbl_lock; atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */ u32 *last_vblank; /* protected by dev->vbl_lock, used */ /* for wraparound handling */ int *vblank_enabled; /* so we don't call enable more than once per disable */ int *vblank_inmodeset; /* Display driver is setting mode */ u32 *last_vblank_wait; /* Last vblank seqno waited per CRTC */ struct callout vblank_disable_callout; u32 max_vblank_count; /**< size of vblank counter register */ /** * List of events */ struct list_head vblank_event_list; struct mtx event_lock; /*@} */ struct drm_agp_head *agp; /**< AGP data */ device_t dev; /* Device instance from newbus */ uint16_t pci_device; /* PCI device id */ uint16_t pci_vendor; /* PCI vendor id */ uint16_t pci_subdevice; /* PCI subsystem device id */ uint16_t pci_subvendor; /* PCI subsystem vendor id */ struct drm_sg_mem *sg; /**< Scatter gather memory */ unsigned int num_crtcs; /**< Number of CRTCs on this device */ void *dev_private; /**< device private data */ void *mm_private; struct drm_sigdata sigdata; /**< For block_all_signals */ sigset_t sigmask; struct drm_driver *driver; struct drm_local_map *agp_buffer_map; unsigned int agp_buffer_token; struct drm_minor *control; /**< Control node for card */ struct drm_minor *primary; /**< render type primary screen head */ struct drm_mode_config mode_config; /**< Current mode config */ /** \name GEM information */ /*@{ */ struct sx object_name_lock; struct drm_gem_names object_names; /*@} */ int switch_power_state; atomic_t unplugged; /* device has been unplugged or gone away */ /* Locks */ struct mtx dma_lock; /* protects dev->dma */ struct mtx irq_lock; /* protects irq condition checks */ /* Context support */ int irq; /* Interrupt used by board */ int msi_enabled; /* MSI enabled */ int irqrid; /* Interrupt used by board */ struct resource *irqr; /* Resource for interrupt used by board */ void *irqh; /* Handle from bus_setup_intr */ /* Storage of resource pointers for drm_get_resource_* */ #define DRM_MAX_PCI_RESOURCE 6 struct resource *pcir[DRM_MAX_PCI_RESOURCE]; int pcirid[DRM_MAX_PCI_RESOURCE]; struct mtx pcir_lock; int pci_domain; int pci_bus; int pci_slot; int pci_func; /* Sysctl support */ struct drm_sysctl_info *sysctl; int sysctl_node_idx; void *drm_ttm_bdev; void *sysctl_private; char busid_str[128]; int modesetting; drm_pci_id_list_t *id_entry; /* PCI ID, name, and chipset private */ }; #define DRM_SWITCH_POWER_ON 0 #define DRM_SWITCH_POWER_OFF 1 #define DRM_SWITCH_POWER_CHANGING 2 static __inline__ int drm_core_check_feature(struct drm_device *dev, int feature) { return ((dev->driver->driver_features & feature) ? 
1 : 0); } static inline int drm_dev_to_irq(struct drm_device *dev) { return dev->driver->bus->get_irq(dev); } #if __OS_HAS_AGP static inline int drm_core_has_AGP(struct drm_device *dev) { return drm_core_check_feature(dev, DRIVER_USE_AGP); } #else #define drm_core_has_AGP(dev) (0) #endif #if __OS_HAS_MTRR static inline int drm_core_has_MTRR(struct drm_device *dev) { return drm_core_check_feature(dev, DRIVER_USE_MTRR); } #define DRM_MTRR_WC MDF_WRITECOMBINE int drm_mtrr_add(unsigned long offset, unsigned long size, unsigned int flags); int drm_mtrr_del(int handle, unsigned long offset, unsigned long size, unsigned int flags); #else #define drm_core_has_MTRR(dev) (0) #define DRM_MTRR_WC 0 static inline int drm_mtrr_add(unsigned long offset, unsigned long size, unsigned int flags) { return 0; } static inline int drm_mtrr_del(int handle, unsigned long offset, unsigned long size, unsigned int flags) { return 0; } #endif /******************************************************************/ /** \name Internal function definitions */ /*@{*/ /* Driver support (drm_drv.h) */ d_ioctl_t drm_ioctl; extern int drm_lastclose(struct drm_device *dev); /* Device support (drm_fops.h) */ extern struct sx drm_global_mutex; d_open_t drm_open; d_read_t drm_read; extern void drm_release(void *data); /* Mapping support (drm_vm.h) */ d_mmap_t drm_mmap; int drm_mmap_single(struct cdev *kdev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **obj_res, int nprot); d_poll_t drm_poll; /* Memory management support (drm_memory.h) */ extern void drm_free_agp(DRM_AGP_MEM * handle, int pages); extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); #ifdef FREEBSD_NOTYET extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, struct page **pages, unsigned long num_pages, uint32_t gtt_offset, uint32_t type); #endif /* FREEBSD_NOTYET */ extern int drm_unbind_agp(DRM_AGP_MEM * handle); /* Misc. 
IOCTL support (drm_ioctl.h) */ extern int drm_irq_by_busid(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_getunique(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_setunique(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_getmap(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_getclient(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_getstats(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_noop(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Context IOCTL support (drm_context.h) */ extern int drm_resctx(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_addctx(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_switchctx(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_newctx(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_rmctx(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_ctxbitmap_init(struct drm_device *dev); extern void drm_ctxbitmap_cleanup(struct drm_device *dev); extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); extern int drm_setsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_getsareactx(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Authentication IOCTL support (drm_auth.h) */ extern int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_authmagic(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic); /* Cache management (drm_cache.c) */ void drm_clflush_pages(vm_page_t *pages, unsigned long num_pages); void drm_clflush_virt_range(char *addr, unsigned long length); /* Locking IOCTL support (drm_lock.h) */ extern int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); extern void drm_idlelock_take(struct drm_lock_data *lock_data); extern void drm_idlelock_release(struct drm_lock_data *lock_data); /* * These are exported to drivers so that they can implement fencing using * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. 
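For illustration of the fencing idea described above (idlelock plus DMA quiescence), a hedged sketch; whether a driver also needs to hold the hardware lock depends on the hardware:

static int
foo_fence_idle(struct drm_device *dev)
{
    struct drm_lock_data *ld;
    int error = 0;

    if (dev->primary == NULL || dev->primary->master == NULL)
        return (-EINVAL);
    ld = &dev->primary->master->lock;
    drm_idlelock_take(ld);
    if (dev->driver->dma_quiescent != NULL)
        error = dev->driver->dma_quiescent(dev);
    drm_idlelock_release(ld);
    return (error);
}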
*/ extern int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv); /* Buffer management support (drm_bufs.h) */ extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); extern int drm_addmap(struct drm_device *dev, resource_size_t offset, unsigned int size, enum drm_map_type type, enum drm_map_flags flags, struct drm_local_map **map_ptr); extern int drm_addmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_rmmap(struct drm_device *dev, struct drm_local_map *map); extern int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map); extern int drm_rmmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_order(unsigned long size); /* DMA support (drm_dma.h) */ extern int drm_dma_setup(struct drm_device *dev); extern void drm_dma_takedown(struct drm_device *dev); extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); extern void drm_core_reclaim_buffers(struct drm_device *dev, struct drm_file *filp); /* IRQ support (drm_irq.h) */ extern int drm_control(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_irq_install(struct drm_device *dev); extern int drm_irq_uninstall(struct drm_device *dev); extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp); extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); extern u32 drm_vblank_count(struct drm_device *dev, int crtc); extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc, struct timeval *vblanktime); extern void drm_send_vblank_event(struct drm_device *dev, int crtc, struct drm_pending_vblank_event *e); extern bool drm_handle_vblank(struct drm_device *dev, int crtc); extern int drm_vblank_get(struct drm_device *dev, int crtc); extern void drm_vblank_put(struct drm_device *dev, int crtc); extern void drm_vblank_off(struct drm_device *dev, int crtc); extern void drm_vblank_cleanup(struct drm_device *dev); extern u32 drm_get_last_vbltimestamp(struct drm_device *dev, int crtc, struct timeval *tvblank, unsigned flags); extern int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, int *max_error, struct timeval *vblank_time, unsigned flags, struct drm_crtc *refcrtc); extern void drm_calc_timestamping_constants(struct drm_crtc *crtc); extern bool drm_mode_parse_command_line_for_connector(const char *mode_option, struct drm_connector *connector, struct drm_cmdline_mode *mode); extern struct drm_display_mode * drm_mode_create_from_cmdline_mode(struct drm_device *dev, struct drm_cmdline_mode *cmd); /* Modesetting support */ extern void drm_vblank_pre_modeset(struct drm_device *dev, int crtc); extern void drm_vblank_post_modeset(struct drm_device *dev, int crtc); extern int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv); /* AGP/GART support (drm_agpsupport.h) */ extern struct 
drm_agp_head *drm_agp_init(struct drm_device *dev); extern int drm_agp_acquire(struct drm_device *dev); extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_agp_release(struct drm_device *dev); extern int drm_agp_release_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode); extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); extern int drm_agp_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); extern int drm_agp_free_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); /* Stub support (drm_stub.h) */ extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_dropmaster_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); struct drm_master *drm_master_create(struct drm_minor *minor); extern struct drm_master *drm_master_get(struct drm_master *master); extern void drm_master_put(struct drm_master **master); extern void drm_put_dev(struct drm_device *dev); extern int drm_put_minor(struct drm_minor **minor); extern void drm_unplug_dev(struct drm_device *dev); extern unsigned int drm_debug; extern unsigned int drm_notyet; extern unsigned int drm_vblank_offdelay; extern unsigned int drm_timestamp_precision; extern unsigned int drm_timestamp_monotonic; extern struct drm_local_map *drm_getsarea(struct drm_device *dev); #ifdef FREEBSD_NOTYET extern int drm_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv, uint32_t handle, uint32_t flags, int *prime_fd); extern int drm_gem_prime_fd_to_handle(struct drm_device *dev, struct drm_file *file_priv, int prime_fd, uint32_t *handle); extern int drm_prime_handle_to_fd_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_prime_fd_to_handle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, vm_page_t *pages, dma_addr_t *addrs, int max_pages); extern struct sg_table *drm_prime_pages_to_sg(vm_page_t *pages, int nr_pages); extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg); void drm_prime_init_file_private(struct drm_prime_file_private *prime_fpriv); void drm_prime_destroy_file_private(struct drm_prime_file_private *prime_fpriv); int drm_prime_add_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t handle); int drm_prime_lookup_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf, uint32_t *handle); void drm_prime_remove_imported_buf_handle(struct drm_prime_file_private *prime_fpriv, struct dma_buf *dma_buf); int 
drm_prime_add_dma_buf(struct drm_device *dev, struct drm_gem_object *obj); int drm_prime_lookup_obj(struct drm_device *dev, struct dma_buf *buf, struct drm_gem_object **obj); #endif /* FREEBSD_NOTYET */ /* Scatter Gather Support (drm_scatter.h) */ extern void drm_sg_cleanup(struct drm_sg_mem * entry); extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); extern int drm_sg_free(struct drm_device *dev, void *data, struct drm_file *file_priv); /* ATI PCIGART support (ati_pcigart.h) */ extern int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info * gart_info); extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info * gart_info); extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align, dma_addr_t maxaddr); extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah); /* Graphics Execution Manager library functions (drm_gem.c) */ int drm_gem_init(struct drm_device *dev); void drm_gem_destroy(struct drm_device *dev); void drm_gem_object_release(struct drm_gem_object *obj); void drm_gem_object_free(struct drm_gem_object *obj); struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, size_t size); int drm_gem_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); int drm_gem_private_object_init(struct drm_device *dev, struct drm_gem_object *obj, size_t size); void drm_gem_object_handle_free(struct drm_gem_object *obj); int drm_gem_mmap_single(struct drm_device *dev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **obj_res, int nprot); void drm_gem_pager_dtr(void *obj); #include static inline void drm_gem_object_reference(struct drm_gem_object *obj) { KASSERT(obj->refcount > 0, ("Dangling obj %p", obj)); refcount_acquire(&obj->refcount); } static inline void drm_gem_object_unreference(struct drm_gem_object *obj) { if (obj == NULL) return; if (refcount_release(&obj->refcount)) drm_gem_object_free(obj); } static inline void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj) { if (obj != NULL) { struct drm_device *dev = obj->dev; DRM_LOCK(dev); drm_gem_object_unreference(obj); DRM_UNLOCK(dev); } } int drm_gem_handle_create(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep); int drm_gem_handle_delete(struct drm_file *filp, u32 handle); static inline void drm_gem_object_handle_reference(struct drm_gem_object *obj) { drm_gem_object_reference(obj); atomic_inc(&obj->handle_count); } static inline void drm_gem_object_handle_unreference(struct drm_gem_object *obj) { if (obj == NULL) return; if (atomic_read(&obj->handle_count) == 0) return; /* * Must bump handle count first as this may be the last * ref, in which case the object would disappear before we * checked for a name */ if (atomic_dec_and_test(&obj->handle_count)) drm_gem_object_handle_free(obj); drm_gem_object_unreference(obj); } static inline void drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj) { if (obj == NULL) return; if (atomic_read(&obj->handle_count) == 0) return; /* * Must bump handle count first as this may be the last * ref, in which case the object would disappear before we * checked for a name */ if (atomic_dec_and_test(&obj->handle_count)) drm_gem_object_handle_free(obj); drm_gem_object_unreference_unlocked(obj); } void 
drm_gem_free_mmap_offset(struct drm_gem_object *obj); int drm_gem_create_mmap_offset(struct drm_gem_object *obj); struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, u32 handle); int drm_gem_close_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_gem_flink_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int drm_gem_open_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); extern void drm_core_ioremap(struct drm_local_map *map, struct drm_device *dev); extern void drm_core_ioremap_wc(struct drm_local_map *map, struct drm_device *dev); extern void drm_core_ioremapfree(struct drm_local_map *map, struct drm_device *dev); static __inline__ struct drm_local_map *drm_core_findmap(struct drm_device *dev, unsigned int token) { struct drm_map_list *_entry; list_for_each_entry(_entry, &dev->maplist, head) if (_entry->user_token == token) return _entry->map; return NULL; } static __inline__ void drm_core_dropmap(struct drm_local_map *map) { } extern int drm_fill_in_dev(struct drm_device *dev, struct drm_driver *driver); extern void drm_cancel_fill_in_dev(struct drm_device *dev); int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int type); /*@}*/ /* PCI section */ int drm_pci_device_is_agp(struct drm_device *dev); int drm_pci_device_is_pcie(struct drm_device *dev); extern int drm_get_pci_dev(device_t kdev, struct drm_device *dev, struct drm_driver *driver); #define DRM_PCIE_SPEED_25 1 #define DRM_PCIE_SPEED_50 2 #define DRM_PCIE_SPEED_80 4 extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask); #define drm_can_sleep() (DRM_HZ & 1) /* FreeBSD specific -- should be moved to drm_os_freebsd.h */ #define DRM_GEM_MAPPING_MASK (3ULL << 62) #define DRM_GEM_MAPPING_KEY (2ULL << 62) /* Non-canonical address form */ #define DRM_GEM_MAX_IDX 0x3fffff #define DRM_GEM_MAPPING_IDX(o) (((o) >> 40) & DRM_GEM_MAX_IDX) #define DRM_GEM_MAPPING_OFF(i) (((uint64_t)(i)) << 40) #define DRM_GEM_MAPPING_MAPOFF(o) \ ((o) & ~(DRM_GEM_MAPPING_OFF(DRM_GEM_MAX_IDX) | DRM_GEM_MAPPING_KEY)) SYSCTL_DECL(_hw_drm); #define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP) #define DRM_DEV_UID UID_ROOT #define DRM_DEV_GID GID_VIDEO #define DRM_WAKEUP(w) wakeup((void *)w) #define DRM_WAKEUP_INT(w) wakeup(w) #define DRM_INIT_WAITQUEUE(queue) do {(void)(queue);} while (0) #define DRM_CURPROC curthread #define DRM_STRUCTPROC struct thread #define DRM_SPINTYPE struct mtx #define DRM_SPININIT(l,name) mtx_init(l, name, NULL, MTX_DEF) #define DRM_SPINUNINIT(l) mtx_destroy(l) #define DRM_SPINLOCK(l) mtx_lock(l) #define DRM_SPINUNLOCK(u) mtx_unlock(u) #define DRM_SPINLOCK_IRQSAVE(l, irqflags) do { \ mtx_lock(l); \ (void)irqflags; \ } while (0) #define DRM_SPINUNLOCK_IRQRESTORE(u, irqflags) mtx_unlock(u) #define DRM_SPINLOCK_ASSERT(l) mtx_assert(l, MA_OWNED) #define DRM_LOCK_SLEEP(dev, chan, flags, msg, timeout) \ (sx_sleep((chan), &(dev)->dev_struct_lock, (flags), (msg), (timeout))) #if defined(INVARIANTS) #define DRM_LOCK_ASSERT(dev) sx_assert(&(dev)->dev_struct_lock, SA_XLOCKED) #define DRM_UNLOCK_ASSERT(dev) sx_assert(&(dev)->dev_struct_lock, SA_UNLOCKED) #else #define DRM_LOCK_ASSERT(d) #define DRM_UNLOCK_ASSERT(d) #endif #define DRM_SYSCTL_HANDLER_ARGS (SYSCTL_HANDLER_ARGS) enum { DRM_IS_NOT_AGP, DRM_IS_AGP, DRM_MIGHT_BE_AGP }; #define 
DRM_VERIFYAREA_READ( uaddr, size ) \ (!useracc(__DECONST(caddr_t, uaddr), size, VM_PROT_READ)) #define DRM_COPY_TO_USER(user, kern, size) \ copyout(kern, user, size) #define DRM_COPY_FROM_USER(kern, user, size) \ copyin(user, kern, size) #define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \ copyin(arg2, arg1, arg3) #define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \ copyout(arg2, arg1, arg3) #define DRM_GET_USER_UNCHECKED(val, uaddr) \ ((val) = fuword32(uaddr), 0) #define DRM_GET_PRIV_SAREA(_dev, _ctx, _map) do { \ (_map) = (_dev)->context_sareas[_ctx]; \ } while(0) /* Returns -errno to shared code */ #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ for ( ret = 0 ; !ret && !(condition) ; ) { \ DRM_UNLOCK(dev); \ mtx_lock(&dev->irq_lock); \ if (!(condition)) \ ret = -mtx_sleep(&(queue), &dev->irq_lock, \ PCATCH, "drmwtq", (timeout)); \ if (ret == -ERESTART) \ ret = -ERESTARTSYS; \ mtx_unlock(&dev->irq_lock); \ DRM_LOCK(dev); \ } #define dev_err(dev, fmt, ...) \ device_printf((dev), "error: " fmt, ## __VA_ARGS__) #define dev_warn(dev, fmt, ...) \ device_printf((dev), "warning: " fmt, ## __VA_ARGS__) #define dev_info(dev, fmt, ...) \ device_printf((dev), "info: " fmt, ## __VA_ARGS__) #define dev_dbg(dev, fmt, ...) do { \ if ((drm_debug& DRM_DEBUGBITS_KMS) != 0) { \ device_printf((dev), "debug: " fmt, ## __VA_ARGS__); \ } \ } while (0) struct drm_msi_blacklist_entry { int vendor; int device; }; struct drm_vblank_info { wait_queue_head_t queue; /* vblank wait queue */ atomic_t count; /* number of VBLANK interrupts */ /* (driver must alloc the right number of counters) */ atomic_t refcount; /* number of users of vblank interrupts */ u32 last; /* protected by dev->vbl_lock, used */ /* for wraparound handling */ int enabled; /* so we don't call enable more than */ /* once per disable */ int inmodeset; /* Display driver is setting mode */ }; #ifndef DMA_BIT_MASK #define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : (1ULL<<(n)) - 1) #endif #define upper_32_bits(n) ((u32)(((n) >> 16) >> 16)) enum dmi_field { DMI_NONE, DMI_BIOS_VENDOR, DMI_BIOS_VERSION, DMI_BIOS_DATE, DMI_SYS_VENDOR, DMI_PRODUCT_NAME, DMI_PRODUCT_VERSION, DMI_PRODUCT_SERIAL, DMI_PRODUCT_UUID, DMI_BOARD_VENDOR, DMI_BOARD_NAME, DMI_BOARD_VERSION, DMI_BOARD_SERIAL, DMI_BOARD_ASSET_TAG, DMI_CHASSIS_VENDOR, DMI_CHASSIS_TYPE, DMI_CHASSIS_VERSION, DMI_CHASSIS_SERIAL, DMI_CHASSIS_ASSET_TAG, DMI_STRING_MAX, }; struct dmi_strmatch { unsigned char slot; char substr[79]; }; struct dmi_system_id { int (*callback)(const struct dmi_system_id *); const char *ident; struct dmi_strmatch matches[4]; }; #define DMI_MATCH(a, b) {(a), (b)} bool dmi_check_system(const struct dmi_system_id *); /* Device setup support (drm_drv.c) */ int drm_probe_helper(device_t kdev, drm_pci_id_list_t *idlist); int drm_attach_helper(device_t kdev, drm_pci_id_list_t *idlist, struct drm_driver *driver); int drm_generic_detach(device_t kdev); void drm_event_wakeup(struct drm_pending_event *e); int drm_add_busid_modesetting(struct drm_device *dev, struct sysctl_ctx_list *ctx, struct sysctl_oid *top); /* Buffer management support (drm_bufs.c) */ unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource); unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource); /* IRQ support (drm_irq.c) */ irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); void drm_driver_irq_preinstall(struct drm_device *dev); void drm_driver_irq_postinstall(struct drm_device *dev); void drm_driver_irq_uninstall(struct drm_device *dev); /* sysctl support (drm_sysctl.h) */ extern int drm_sysctl_init(struct drm_device *dev); extern int drm_sysctl_cleanup(struct drm_device *dev); int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); /* consistent PCI memory functions (drm_pci.c) */ int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master); int drm_pci_set_unique(struct drm_device *dev, struct drm_master *master, struct drm_unique *u); int drm_pci_agp_init(struct drm_device *dev); int drm_pci_enable_msi(struct drm_device *dev); void drm_pci_disable_msi(struct drm_device *dev); struct ttm_bo_device; int ttm_bo_mmap_single(struct ttm_bo_device *bdev, vm_ooffset_t *offset, vm_size_t size, struct vm_object **obj_res, int nprot); struct ttm_buffer_object; void ttm_bo_release_mmap(struct ttm_buffer_object *bo); #endif /* __KERNEL__ */ #endif Index: head/sys/dev/fb/machfb.c =================================================================== --- head/sys/dev/fb/machfb.c (revision 295879) +++ head/sys/dev/fb/machfb.c (revision 295880) @@ -1,1580 +1,1579 @@ /*- * Copyright (c) 2002 Bang Jun-Young * Copyright (c) 2005 Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: NetBSD: machfb.c,v 1.23 2005/03/07 21:45:24 martin Exp */ #include __FBSDID("$FreeBSD$"); /* * Driver for ATI Mach64 graphics chips. Some code is derived from the * ATI Rage Pro and Derivatives Programmer's Guide. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include #include /* #define MACHFB_DEBUG */ #define MACHFB_DRIVER_NAME "machfb" #define MACH64_REG_OFF 0x7ffc00 #define MACH64_REG_SIZE 1024 struct machfb_softc { video_adapter_t sc_va; /* must be first */ phandle_t sc_node; uint16_t sc_chip_id; uint8_t sc_chip_rev; struct resource *sc_memres; struct resource *sc_vmemres; bus_space_tag_t sc_memt; bus_space_tag_t sc_regt; bus_space_tag_t sc_vmemt; bus_space_handle_t sc_memh; bus_space_handle_t sc_vmemh; bus_space_handle_t sc_regh; u_long sc_mem; u_long sc_vmem; u_int sc_height; u_int sc_width; u_int sc_depth; u_int sc_xmargin; u_int sc_ymargin; size_t sc_memsize; u_int sc_memtype; u_int sc_mem_freq; u_int sc_ramdac_freq; u_int sc_ref_freq; u_int sc_ref_div; u_int sc_mclk_post_div; u_int sc_mclk_fb_div; const u_char *sc_font; u_int sc_cbwidth; vm_offset_t sc_curoff; int sc_bg_cache; int sc_fg_cache; u_int sc_draw_cache; #define MACHFB_DRAW_CHAR (1 << 0) #define MACHFB_DRAW_FILLRECT (1 << 1) u_int sc_flags; #define MACHFB_CONSOLE (1 << 0) #define MACHFB_CUREN (1 << 1) #define MACHFB_DSP (1 << 2) #define MACHFB_SWAP (1 << 3) }; static const struct { uint16_t chip_id; const char *name; uint32_t ramdac_freq; } machfb_info[] = { { ATI_MACH64_CT, "ATI Mach64 CT", 135000 }, { ATI_RAGE_PRO_AGP, "ATI 3D Rage Pro (AGP)", 230000 }, { ATI_RAGE_PRO_AGP1X, "ATI 3D Rage Pro (AGP 1x)", 230000 }, { ATI_RAGE_PRO_PCI_B, "ATI 3D Rage Pro Turbo", 230000 }, { ATI_RAGE_XC_PCI66, "ATI Rage XL (PCI66)", 230000 }, { ATI_RAGE_XL_AGP, "ATI Rage XL (AGP)", 230000 }, { ATI_RAGE_XC_AGP, "ATI Rage XC (AGP)", 230000 }, { ATI_RAGE_XL_PCI66, "ATI Rage XL (PCI66)", 230000 }, { ATI_RAGE_PRO_PCI_P, "ATI 3D Rage Pro", 230000 }, { ATI_RAGE_PRO_PCI_L, "ATI 3D Rage Pro (limited 3D)", 230000 }, { ATI_RAGE_XL_PCI, "ATI Rage XL", 230000 }, { ATI_RAGE_XC_PCI, "ATI Rage XC", 230000 }, { ATI_RAGE_II, "ATI 3D Rage I/II", 135000 }, { ATI_RAGE_IIP, "ATI 3D Rage II+", 200000 }, { ATI_RAGE_IIC_PCI, "ATI 3D Rage IIC", 230000 }, { ATI_RAGE_IIC_AGP_B, "ATI 3D Rage IIC (AGP)", 230000 }, { ATI_RAGE_IIC_AGP_P, "ATI 3D Rage IIC (AGP)", 230000 }, { ATI_RAGE_LT_PRO_AGP, "ATI 3D Rage LT Pro (AGP 133MHz)", 230000 }, { ATI_RAGE_MOB_M3_PCI, "ATI Rage Mobility M3", 230000 }, { ATI_RAGE_MOB_M3_AGP, "ATI Rage Mobility M3 (AGP)", 230000 }, { ATI_RAGE_LT, "ATI 3D Rage LT", 230000 }, { ATI_RAGE_LT_PRO_PCI, "ATI 3D Rage LT Pro", 230000 }, { 
ATI_RAGE_MOBILITY, "ATI Rage Mobility", 230000 }, { ATI_RAGE_L_MOBILITY, "ATI Rage L Mobility", 230000 }, { ATI_RAGE_LT_PRO, "ATI 3D Rage LT Pro", 230000 }, { ATI_RAGE_LT_PRO2, "ATI 3D Rage LT Pro", 230000 }, { ATI_RAGE_MOB_M1_PCI, "ATI Rage Mobility M1 (PCI)", 230000 }, { ATI_RAGE_L_MOB_M1_PCI, "ATI Rage L Mobility (PCI)", 230000 }, { ATI_MACH64_VT, "ATI Mach64 VT", 170000 }, { ATI_MACH64_VTB, "ATI Mach64 VTB", 200000 }, { ATI_MACH64_VT4, "ATI Mach64 VT4", 230000 } }; static const struct machfb_cmap { uint8_t red; uint8_t green; uint8_t blue; } machfb_default_cmap[16] = { {0x00, 0x00, 0x00}, /* black */ {0x00, 0x00, 0xff}, /* blue */ {0x00, 0xff, 0x00}, /* green */ {0x00, 0xc0, 0xc0}, /* cyan */ {0xff, 0x00, 0x00}, /* red */ {0xc0, 0x00, 0xc0}, /* magenta */ {0xc0, 0xc0, 0x00}, /* brown */ {0xc0, 0xc0, 0xc0}, /* light grey */ {0x80, 0x80, 0x80}, /* dark grey */ {0x80, 0x80, 0xff}, /* light blue */ {0x80, 0xff, 0x80}, /* light green */ {0x80, 0xff, 0xff}, /* light cyan */ {0xff, 0x80, 0x80}, /* light red */ {0xff, 0x80, 0xff}, /* light magenta */ {0xff, 0xff, 0x80}, /* yellow */ {0xff, 0xff, 0xff} /* white */ }; #define MACHFB_CMAP_OFF 16 static const u_char machfb_mouse_pointer_bits[64][8] = { { 0x00, 0x00, }, /* ............ */ { 0x80, 0x00, }, /* *........... */ { 0xc0, 0x00, }, /* **.......... */ { 0xe0, 0x00, }, /* ***......... */ { 0xf0, 0x00, }, /* ****........ */ { 0xf8, 0x00, }, /* *****....... */ { 0xfc, 0x00, }, /* ******...... */ { 0xfe, 0x00, }, /* *******..... */ { 0xff, 0x00, }, /* ********.... */ { 0xff, 0x80, }, /* *********... */ { 0xfc, 0xc0, }, /* ******..**.. */ { 0xdc, 0x00, }, /* **.***...... */ { 0x8e, 0x00, }, /* *...***..... */ { 0x0e, 0x00, }, /* ....***..... */ { 0x07, 0x00, }, /* .....***.... */ { 0x04, 0x00, }, /* .....*...... */ { 0x00, 0x00, }, /* ............ */ { 0x00, 0x00, }, /* ............ */ { 0x00, 0x00, }, /* ............ */ { 0x00, 0x00, }, /* ............ */ { 0x00, 0x00, }, /* ............ */ { 0x00, 0x00, }, /* ............ */ }; /* * Lookup table to perform a bit-swap of the mouse pointer bits, * map set bits to CUR_CLR0 and unset bits to transparent. 
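For illustration, the lookup table that follows can be regenerated from the description above, reading a set bit as pixel code 0b00 (CUR_CLR0) and a clear bit as 0b10 (transparent), with the bit order within each nibble reversed; a standalone sketch (the 2-bit pixel encoding is inferred from the table itself):

#include <stdio.h>

int
main(void)
{
    for (int n = 0; n < 16; n++) {
        unsigned char out = 0;

        for (int b = 0; b < 4; b++) {
            int set = (n >> b) & 1;

            /* bit b of the nibble maps to pixel pair 3 - b (bit swap) */
            out |= (unsigned char)((set ? 0x0 : 0x2) << (2 * (3 - b)));
        }
        /* prints 0xaa, 0x2a, 0x8a, 0x0a, ... matching the table below */
        printf("0x%02x%s", out, n == 15 ? "\n" : ", ");
    }
    return (0);
}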
*/ static const u_char machfb_mouse_pointer_lut[] = { 0xaa, 0x2a, 0x8a, 0x0a, 0xa2, 0x22, 0x82, 0x02, 0xa8, 0x28, 0x88, 0x08, 0xa0, 0x20, 0x80, 0x00 }; static const char *const machfb_memtype_names[] = { "(N/A)", "DRAM", "EDO DRAM", "EDO DRAM", "SDRAM", "SGRAM", "WRAM", "(unknown type)" }; extern const struct gfb_font gallant12x22; static struct machfb_softc machfb_softc; static struct bus_space_tag machfb_bst_store[1]; static device_probe_t machfb_pci_probe; static device_attach_t machfb_pci_attach; static device_detach_t machfb_pci_detach; static device_method_t machfb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, machfb_pci_probe), DEVMETHOD(device_attach, machfb_pci_attach), DEVMETHOD(device_detach, machfb_pci_detach), { 0, 0 } }; static driver_t machfb_pci_driver = { MACHFB_DRIVER_NAME, machfb_methods, sizeof(struct machfb_softc), }; static devclass_t machfb_devclass; DRIVER_MODULE(machfb, pci, machfb_pci_driver, machfb_devclass, 0, 0); MODULE_DEPEND(machfb, pci, 1, 1, 1); static void machfb_cursor_enable(struct machfb_softc *, int); static int machfb_cursor_install(struct machfb_softc *); static int machfb_get_memsize(struct machfb_softc *); static void machfb_reset_engine(struct machfb_softc *); static void machfb_init_engine(struct machfb_softc *); #if 0 static void machfb_adjust_frame(struct machfb_softc *, int, int); #endif static void machfb_shutdown_final(void *); static void machfb_shutdown_reset(void *); static int machfb_configure(int); static vi_probe_t machfb_probe; static vi_init_t machfb_init; static vi_get_info_t machfb_get_info; static vi_query_mode_t machfb_query_mode; static vi_set_mode_t machfb_set_mode; static vi_save_font_t machfb_save_font; static vi_load_font_t machfb_load_font; static vi_show_font_t machfb_show_font; static vi_save_palette_t machfb_save_palette; static vi_load_palette_t machfb_load_palette; static vi_set_border_t machfb_set_border; static vi_save_state_t machfb_save_state; static vi_load_state_t machfb_load_state; static vi_set_win_org_t machfb_set_win_org; static vi_read_hw_cursor_t machfb_read_hw_cursor; static vi_set_hw_cursor_t machfb_set_hw_cursor; static vi_set_hw_cursor_shape_t machfb_set_hw_cursor_shape; static vi_blank_display_t machfb_blank_display; static vi_mmap_t machfb_mmap; static vi_ioctl_t machfb_ioctl; static vi_clear_t machfb_clear; static vi_fill_rect_t machfb_fill_rect; static vi_bitblt_t machfb_bitblt; static vi_diag_t machfb_diag; static vi_save_cursor_palette_t machfb_save_cursor_palette; static vi_load_cursor_palette_t machfb_load_cursor_palette; static vi_copy_t machfb_copy; static vi_putp_t machfb_putp; static vi_putc_t machfb_putc; static vi_puts_t machfb_puts; static vi_putm_t machfb_putm; static video_switch_t machfbvidsw = { .probe = machfb_probe, .init = machfb_init, .get_info = machfb_get_info, .query_mode = machfb_query_mode, .set_mode = machfb_set_mode, .save_font = machfb_save_font, .load_font = machfb_load_font, .show_font = machfb_show_font, .save_palette = machfb_save_palette, .load_palette = machfb_load_palette, .set_border = machfb_set_border, .save_state = machfb_save_state, .load_state = machfb_load_state, .set_win_org = machfb_set_win_org, .read_hw_cursor = machfb_read_hw_cursor, .set_hw_cursor = machfb_set_hw_cursor, .set_hw_cursor_shape = machfb_set_hw_cursor_shape, .blank_display = machfb_blank_display, .mmap = machfb_mmap, .ioctl = machfb_ioctl, .clear = machfb_clear, .fill_rect = machfb_fill_rect, .bitblt = machfb_bitblt, .diag = machfb_diag, .save_cursor_palette = 
machfb_save_cursor_palette, .load_cursor_palette = machfb_load_cursor_palette, .copy = machfb_copy, .putp = machfb_putp, .putc = machfb_putc, .puts = machfb_puts, .putm = machfb_putm }; VIDEO_DRIVER(machfb, machfbvidsw, machfb_configure); extern sc_rndr_sw_t txtrndrsw; RENDERER(machfb, 0, txtrndrsw, gfb_set); RENDERER_MODULE(machfb, gfb_set); /* * Inline functions for getting access to register aperture. */ static inline uint32_t regr(struct machfb_softc *, uint32_t); static inline uint8_t regrb(struct machfb_softc *, uint32_t); static inline void regw(struct machfb_softc *, uint32_t, uint32_t); static inline void regwb(struct machfb_softc *, uint32_t, uint8_t); static inline void regwb_pll(struct machfb_softc *, uint32_t, uint8_t); static inline uint32_t regr(struct machfb_softc *sc, uint32_t index) { return bus_space_read_4(sc->sc_regt, sc->sc_regh, index); } static inline uint8_t regrb(struct machfb_softc *sc, uint32_t index) { return bus_space_read_1(sc->sc_regt, sc->sc_regh, index); } static inline void regw(struct machfb_softc *sc, uint32_t index, uint32_t data) { bus_space_write_4(sc->sc_regt, sc->sc_regh, index, data); bus_space_barrier(sc->sc_regt, sc->sc_regh, index, 4, BUS_SPACE_BARRIER_WRITE); } static inline void regwb(struct machfb_softc *sc, uint32_t index, uint8_t data) { bus_space_write_1(sc->sc_regt, sc->sc_regh, index, data); bus_space_barrier(sc->sc_regt, sc->sc_regh, index, 1, BUS_SPACE_BARRIER_WRITE); } static inline void regwb_pll(struct machfb_softc *sc, uint32_t index, uint8_t data) { regwb(sc, CLOCK_CNTL + 1, (index << 2) | PLL_WR_EN); regwb(sc, CLOCK_CNTL + 2, data); regwb(sc, CLOCK_CNTL + 1, (index << 2) & ~PLL_WR_EN); } static inline void wait_for_fifo(struct machfb_softc *sc, uint8_t v) { while ((regr(sc, FIFO_STAT) & 0xffff) > (0x8000 >> v)) ; } static inline void wait_for_idle(struct machfb_softc *sc) { wait_for_fifo(sc, 16); while ((regr(sc, GUI_STAT) & 1) != 0) ; } /* * Inline functions for setting the background and foreground colors. */ static inline void machfb_setbg(struct machfb_softc *sc, int bg); static inline void machfb_setfg(struct machfb_softc *sc, int fg); static inline void machfb_setbg(struct machfb_softc *sc, int bg) { if (bg == sc->sc_bg_cache) return; sc->sc_bg_cache = bg; wait_for_fifo(sc, 1); regw(sc, DP_BKGD_CLR, bg + MACHFB_CMAP_OFF); } static inline void machfb_setfg(struct machfb_softc *sc, int fg) { if (fg == sc->sc_fg_cache) return; sc->sc_fg_cache = fg; wait_for_fifo(sc, 1); regw(sc, DP_FRGD_CLR, fg + MACHFB_CMAP_OFF); } /* * video driver interface */ static int machfb_configure(int flags) { struct machfb_softc *sc; phandle_t chosen, output; ihandle_t stdout; bus_addr_t addr; uint32_t id; int i, space; /* * For the high-level console probing return the number of * registered adapters. */ if (!(flags & VIO_PROBE_ONLY)) { for (i = 0; vid_find_adapter(MACHFB_DRIVER_NAME, i) >= 0; i++) ; return (i); } /* Low-level console probing and initialization. */ sc = &machfb_softc; if (sc->sc_va.va_flags & V_ADP_REGISTERED) goto found; if ((chosen = OF_finddevice("/chosen")) == -1) /* Quis contra nos? 
*/ return (0); if (OF_getprop(chosen, "stdout", &stdout, sizeof(stdout)) == -1) return (0); if ((output = OF_instance_to_package(stdout)) == -1) return (0); if ((OF_getprop(output, "vendor-id", &id, sizeof(id)) == -1) || id != ATI_VENDOR) return (0); if (OF_getprop(output, "device-id", &id, sizeof(id)) == -1) return (0); for (i = 0; i < sizeof(machfb_info) / sizeof(machfb_info[0]); i++) { if (id == machfb_info[i].chip_id) { sc->sc_flags = MACHFB_CONSOLE; sc->sc_node = output; sc->sc_chip_id = id; break; } } if (!(sc->sc_flags & MACHFB_CONSOLE)) return (0); if (OF_getprop(output, "revision-id", &sc->sc_chip_rev, sizeof(sc->sc_chip_rev)) == -1) return (0); if (OF_decode_addr(output, 0, &space, &addr) != 0) return (0); sc->sc_memt = &machfb_bst_store[0]; sc->sc_memh = sparc64_fake_bustag(space, addr, sc->sc_memt); sc->sc_regt = sc->sc_memt; bus_space_subregion(sc->sc_regt, sc->sc_memh, MACH64_REG_OFF, MACH64_REG_SIZE, &sc->sc_regh); if (machfb_init(0, &sc->sc_va, 0) < 0) return (0); found: /* Return number of found adapters. */ return (1); } static int machfb_probe(int unit, video_adapter_t **adpp, void *arg, int flags) { return (0); } static int machfb_init(int unit, video_adapter_t *adp, int flags) { struct machfb_softc *sc; phandle_t options; video_info_t *vi; char buf[32]; int i; uint8_t dac_mask, dac_rindex, dac_windex; sc = (struct machfb_softc *)adp; vi = &adp->va_info; if ((regr(sc, CONFIG_CHIP_ID) & 0xffff) != sc->sc_chip_id) return (ENXIO); sc->sc_ramdac_freq = 0; for (i = 0; i < sizeof(machfb_info) / sizeof(machfb_info[0]); i++) { if (sc->sc_chip_id == machfb_info[i].chip_id) { sc->sc_ramdac_freq = machfb_info[i].ramdac_freq; break; } } if (sc->sc_ramdac_freq == 0) return (ENXIO); if (sc->sc_chip_id == ATI_RAGE_II && sc->sc_chip_rev & 0x07) sc->sc_ramdac_freq = 170000; vid_init_struct(adp, MACHFB_DRIVER_NAME, -1, unit); if (OF_getprop(sc->sc_node, "height", &sc->sc_height, sizeof(sc->sc_height)) == -1) return (ENXIO); if (OF_getprop(sc->sc_node, "width", &sc->sc_width, sizeof(sc->sc_width)) == -1) return (ENXIO); if (OF_getprop(sc->sc_node, "depth", &sc->sc_depth, sizeof(sc->sc_depth)) == -1) return (ENXIO); if ((options = OF_finddevice("/options")) == -1) return (ENXIO); if (OF_getprop(options, "screen-#rows", buf, sizeof(buf)) == -1) return (ENXIO); vi->vi_height = strtol(buf, NULL, 10); if (OF_getprop(options, "screen-#columns", buf, sizeof(buf)) == -1) return (ENXIO); vi->vi_width = strtol(buf, NULL, 10); vi->vi_cwidth = gallant12x22.width; vi->vi_cheight = gallant12x22.height; vi->vi_flags = V_INFO_COLOR; vi->vi_mem_model = V_INFO_MM_OTHER; sc->sc_font = gallant12x22.data; sc->sc_cbwidth = howmany(vi->vi_cwidth, NBBY); /* width in bytes */ sc->sc_xmargin = (sc->sc_width - (vi->vi_width * vi->vi_cwidth)) / 2; sc->sc_ymargin = (sc->sc_height - (vi->vi_height * vi->vi_cheight)) / 2; if (sc->sc_chip_id != ATI_MACH64_CT && !((sc->sc_chip_id == ATI_MACH64_VT || sc->sc_chip_id == ATI_RAGE_II) && (sc->sc_chip_rev & 0x07) == 0)) sc->sc_flags |= MACHFB_DSP; sc->sc_memsize = machfb_get_memsize(sc); if (sc->sc_memsize == 8192) /* The last page is used as register aperture. 
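 * (sc_memsize is kept in KB, so the subtraction that follows trims 4 KB off
 * the top of an 8 MB aperture, i.e. the tail where the memory mapped
 * registers live.)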
*/ sc->sc_memsize -= 4; sc->sc_memtype = regr(sc, CONFIG_STAT0) & 0x07; if ((sc->sc_chip_id >= ATI_RAGE_XC_PCI66 && sc->sc_chip_id <= ATI_RAGE_XL_PCI66) || (sc->sc_chip_id >= ATI_RAGE_XL_PCI && sc->sc_chip_id <= ATI_RAGE_XC_PCI)) sc->sc_ref_freq = 29498; else sc->sc_ref_freq = 14318; regwb(sc, CLOCK_CNTL + 1, PLL_REF_DIV << 2); sc->sc_ref_div = regrb(sc, CLOCK_CNTL + 2); regwb(sc, CLOCK_CNTL + 1, MCLK_FB_DIV << 2); sc->sc_mclk_fb_div = regrb(sc, CLOCK_CNTL + 2); sc->sc_mem_freq = (2 * sc->sc_ref_freq * sc->sc_mclk_fb_div) / (sc->sc_ref_div * 2); sc->sc_mclk_post_div = (sc->sc_mclk_fb_div * 2 * sc->sc_ref_freq) / (sc->sc_mem_freq * sc->sc_ref_div); machfb_init_engine(sc); #if 0 machfb_adjust_frame(0, 0); #endif machfb_set_mode(adp, 0); /* * Install our 16-color color map. This is done only once and with * an offset of 16 on sparc64 as there the OBP driver expects white * to be at index 0 and black at 255 (some versions also use 1 - 8 * for color text support or the full palette for the boot banner * logo but no versions seems to use the ISO 6429-1983 color map). * Otherwise the colors are inverted when back in the OFW. */ dac_rindex = regrb(sc, DAC_RINDEX); dac_windex = regrb(sc, DAC_WINDEX); dac_mask = regrb(sc, DAC_MASK); regwb(sc, DAC_MASK, 0xff); regwb(sc, DAC_WINDEX, MACHFB_CMAP_OFF); for (i = 0; i < 16; i++) { regwb(sc, DAC_DATA, machfb_default_cmap[i].red); regwb(sc, DAC_DATA, machfb_default_cmap[i].green); regwb(sc, DAC_DATA, machfb_default_cmap[i].blue); } regwb(sc, DAC_MASK, dac_mask); regwb(sc, DAC_RINDEX, dac_rindex); regwb(sc, DAC_WINDEX, dac_windex); machfb_blank_display(adp, V_DISPLAY_ON); machfb_clear(adp); /* * Setting V_ADP_MODECHANGE serves as hack so machfb_set_mode() * (which will invalidate our caches) is called as a precaution * when the X server shuts down. 
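 * ("Caches" here means the sc_bg_cache, sc_fg_cache and sc_draw_cache
 * software state that machfb_set_mode() resets below; once an X server has
 * driven the drawing engine those cached values can no longer be trusted.)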
*/ adp->va_flags |= V_ADP_COLOR | V_ADP_MODECHANGE | V_ADP_PALETTE | V_ADP_BORDER | V_ADP_INITIALIZED; if (vid_register(adp) < 0) return (ENXIO); adp->va_flags |= V_ADP_REGISTERED; return (0); } static int machfb_get_info(video_adapter_t *adp, int mode, video_info_t *info) { bcopy(&adp->va_info, info, sizeof(*info)); return (0); } static int machfb_query_mode(video_adapter_t *adp, video_info_t *info) { return (ENODEV); } static int machfb_set_mode(video_adapter_t *adp, int mode) { struct machfb_softc *sc; sc = (struct machfb_softc *)adp; sc->sc_bg_cache = -1; sc->sc_fg_cache = -1; sc->sc_draw_cache = 0; return (0); } static int machfb_save_font(video_adapter_t *adp, int page, int size, int width, u_char *data, int c, int count) { return (ENODEV); } static int machfb_load_font(video_adapter_t *adp, int page, int size, int width, u_char *data, int c, int count) { return (ENODEV); } static int machfb_show_font(video_adapter_t *adp, int page) { return (ENODEV); } static int machfb_save_palette(video_adapter_t *adp, u_char *palette) { struct machfb_softc *sc; int i; uint8_t dac_mask, dac_rindex, dac_windex; sc = (struct machfb_softc *)adp; dac_rindex = regrb(sc, DAC_RINDEX); dac_windex = regrb(sc, DAC_WINDEX); dac_mask = regrb(sc, DAC_MASK); regwb(sc, DAC_MASK, 0xff); regwb(sc, DAC_RINDEX, 0x0); for (i = 0; i < 256 * 3; i++) palette[i] = regrb(sc, DAC_DATA); regwb(sc, DAC_MASK, dac_mask); regwb(sc, DAC_RINDEX, dac_rindex); regwb(sc, DAC_WINDEX, dac_windex); return (0); } static int machfb_load_palette(video_adapter_t *adp, u_char *palette) { struct machfb_softc *sc; int i; uint8_t dac_mask, dac_rindex, dac_windex; sc = (struct machfb_softc *)adp; dac_rindex = regrb(sc, DAC_RINDEX); dac_windex = regrb(sc, DAC_WINDEX); dac_mask = regrb(sc, DAC_MASK); regwb(sc, DAC_MASK, 0xff); regwb(sc, DAC_WINDEX, 0x0); for (i = 0; i < 256 * 3; i++) regwb(sc, DAC_DATA, palette[i]); regwb(sc, DAC_MASK, dac_mask); regwb(sc, DAC_RINDEX, dac_rindex); regwb(sc, DAC_WINDEX, dac_windex); return (0); } static int machfb_set_border(video_adapter_t *adp, int border) { struct machfb_softc *sc; sc = (struct machfb_softc *)adp; machfb_fill_rect(adp, border, 0, 0, sc->sc_width, sc->sc_ymargin); machfb_fill_rect(adp, border, 0, sc->sc_height - sc->sc_ymargin, sc->sc_width, sc->sc_ymargin); machfb_fill_rect(adp, border, 0, 0, sc->sc_xmargin, sc->sc_height); machfb_fill_rect(adp, border, sc->sc_width - sc->sc_xmargin, 0, sc->sc_xmargin, sc->sc_height); return (0); } static int machfb_save_state(video_adapter_t *adp, void *p, size_t size) { return (ENODEV); } static int machfb_load_state(video_adapter_t *adp, void *p) { return (ENODEV); } static int machfb_set_win_org(video_adapter_t *adp, off_t offset) { return (ENODEV); } static int machfb_read_hw_cursor(video_adapter_t *adp, int *col, int *row) { *col = 0; *row = 0; return (0); } static int machfb_set_hw_cursor(video_adapter_t *adp, int col, int row) { return (ENODEV); } static int machfb_set_hw_cursor_shape(video_adapter_t *adp, int base, int height, int celsize, int blink) { return (ENODEV); } static int machfb_blank_display(video_adapter_t *adp, int mode) { struct machfb_softc *sc; uint32_t crtc_gen_cntl; sc = (struct machfb_softc *)adp; crtc_gen_cntl = (regr(sc, CRTC_GEN_CNTL) | CRTC_EXT_DISP_EN | CRTC_EN) & ~(CRTC_HSYNC_DIS | CRTC_VSYNC_DIS | CRTC_DISPLAY_DIS); switch (mode) { case V_DISPLAY_ON: break; case V_DISPLAY_BLANK: crtc_gen_cntl |= CRTC_HSYNC_DIS | CRTC_VSYNC_DIS | CRTC_DISPLAY_DIS; break; case V_DISPLAY_STAND_BY: crtc_gen_cntl |= CRTC_HSYNC_DIS | 
CRTC_DISPLAY_DIS; break; case V_DISPLAY_SUSPEND: crtc_gen_cntl |= CRTC_VSYNC_DIS | CRTC_DISPLAY_DIS; break; } regw(sc, CRTC_GEN_CNTL, crtc_gen_cntl); return (0); } static int machfb_mmap(video_adapter_t *adp, vm_ooffset_t offset, vm_paddr_t *paddr, int prot, vm_memattr_t *memattr) { struct machfb_softc *sc; video_info_t *vi; sc = (struct machfb_softc *)adp; vi = &adp->va_info; /* BAR 2 - VGA memory */ if (sc->sc_vmem != 0 && offset >= sc->sc_vmem && offset < sc->sc_vmem + vi->vi_registers_size) { *paddr = vi->vi_registers + offset - sc->sc_vmem; return (0); } /* BAR 0 - framebuffer */ if (offset >= sc->sc_mem && offset < sc->sc_mem + vi->vi_buffer_size) { *paddr = vi->vi_buffer + offset - sc->sc_mem; return (0); } /* 'regular' framebuffer mmap()ing */ if (offset < adp->va_window_size) { *paddr = vi->vi_window + offset; return (0); } return (EINVAL); } static int machfb_ioctl(video_adapter_t *adp, u_long cmd, caddr_t data) { struct machfb_softc *sc; struct fbcursor *fbc; struct fbtype *fb; sc = (struct machfb_softc *)adp; switch (cmd) { case FBIOGTYPE: fb = (struct fbtype *)data; fb->fb_type = FBTYPE_PCIMISC; fb->fb_height = sc->sc_height; fb->fb_width = sc->sc_width; fb->fb_depth = sc->sc_depth; if (sc->sc_depth <= 1 || sc->sc_depth > 8) fb->fb_cmsize = 0; else fb->fb_cmsize = 1 << sc->sc_depth; fb->fb_size = adp->va_buffer_size; break; case FBIOSCURSOR: fbc = (struct fbcursor *)data; if (fbc->set & FB_CUR_SETCUR && fbc->enable == 0) { machfb_cursor_enable(sc, 0); sc->sc_flags &= ~MACHFB_CUREN; } else return (ENODEV); break; default: return (fb_commonioctl(adp, cmd, data)); } return (0); } static int machfb_clear(video_adapter_t *adp) { struct machfb_softc *sc; sc = (struct machfb_softc *)adp; machfb_fill_rect(adp, (SC_NORM_ATTR >> 4) & 0xf, 0, 0, sc->sc_width, sc->sc_height); return (0); } static int machfb_fill_rect(video_adapter_t *adp, int val, int x, int y, int cx, int cy) { struct machfb_softc *sc; sc = (struct machfb_softc *)adp; if (sc->sc_draw_cache != MACHFB_DRAW_FILLRECT) { wait_for_fifo(sc, 7); regw(sc, DP_WRITE_MASK, 0xff); regw(sc, DP_PIX_WIDTH, DST_8BPP | SRC_8BPP | HOST_8BPP); regw(sc, DP_SRC, FRGD_SRC_FRGD_CLR); regw(sc, DP_MIX, MIX_SRC << 16); regw(sc, CLR_CMP_CNTL, 0); /* no transparency */ regw(sc, SRC_CNTL, SRC_LINE_X_LEFT_TO_RIGHT); regw(sc, DST_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM); sc->sc_draw_cache = MACHFB_DRAW_FILLRECT; } machfb_setfg(sc, val); wait_for_fifo(sc, 4); regw(sc, SRC_Y_X, (x << 16) | y); regw(sc, SRC_WIDTH1, cx); regw(sc, DST_Y_X, (x << 16) | y); regw(sc, DST_HEIGHT_WIDTH, (cx << 16) | cy); return (0); } static int machfb_bitblt(video_adapter_t *adp, ...) 
{ return (ENODEV); } static int machfb_diag(video_adapter_t *adp, int level) { video_info_t info; fb_dump_adp_info(adp->va_name, adp, level); machfb_get_info(adp, 0, &info); fb_dump_mode_info(adp->va_name, adp, &info, level); return (0); } static int machfb_save_cursor_palette(video_adapter_t *adp, u_char *palette) { return (ENODEV); } static int machfb_load_cursor_palette(video_adapter_t *adp, u_char *palette) { return (ENODEV); } static int machfb_copy(video_adapter_t *adp, vm_offset_t src, vm_offset_t dst, int n) { return (ENODEV); } static int machfb_putp(video_adapter_t *adp, vm_offset_t off, uint32_t p, uint32_t a, int size, int bpp, int bit_ltor, int byte_ltor) { return (ENODEV); } static int machfb_putc(video_adapter_t *adp, vm_offset_t off, uint8_t c, uint8_t a) { struct machfb_softc *sc; const uint8_t *p; int i; sc = (struct machfb_softc *)adp; if (sc->sc_draw_cache != MACHFB_DRAW_CHAR) { wait_for_fifo(sc, 8); regw(sc, DP_WRITE_MASK, 0xff); /* XXX only good for 8 bit */ regw(sc, DP_PIX_WIDTH, DST_8BPP | SRC_1BPP | HOST_1BPP); regw(sc, DP_SRC, MONO_SRC_HOST | BKGD_SRC_BKGD_CLR | FRGD_SRC_FRGD_CLR); regw(sc, DP_MIX ,((MIX_SRC & 0xffff) << 16) | MIX_SRC); regw(sc, CLR_CMP_CNTL, 0); /* no transparency */ regw(sc, SRC_CNTL, SRC_LINE_X_LEFT_TO_RIGHT); regw(sc, DST_CNTL, DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT); regw(sc, HOST_CNTL, HOST_BYTE_ALIGN); sc->sc_draw_cache = MACHFB_DRAW_CHAR; } machfb_setbg(sc, (a >> 4) & 0xf); machfb_setfg(sc, a & 0xf); wait_for_fifo(sc, 4 + (adp->va_info.vi_cheight / sc->sc_cbwidth)); regw(sc, SRC_Y_X, 0); regw(sc, SRC_WIDTH1, adp->va_info.vi_cwidth); regw(sc, DST_Y_X, ((((off % adp->va_info.vi_width) * adp->va_info.vi_cwidth) + sc->sc_xmargin) << 16) | (((off / adp->va_info.vi_width) * adp->va_info.vi_cheight) + sc->sc_ymargin)); regw(sc, DST_HEIGHT_WIDTH, (adp->va_info.vi_cwidth << 16) | adp->va_info.vi_cheight); p = sc->sc_font + (c * adp->va_info.vi_cheight * sc->sc_cbwidth); for (i = 0; i < adp->va_info.vi_cheight * sc->sc_cbwidth; i += 4) regw(sc, HOST_DATA0 + i, (p[i + 3] << 24 | p[i + 2] << 16 | p[i + 1] << 8 | p[i])); return (0); } static int machfb_puts(video_adapter_t *adp, vm_offset_t off, uint16_t *s, int len) { struct machfb_softc *sc; int blanks, i, x1, x2, y1, y2; uint8_t a, c, color1, color2; sc = (struct machfb_softc *)adp; #define MACHFB_BLANK machfb_fill_rect(adp, color1, x1, y1, \ blanks * adp->va_info.vi_cwidth, \ adp->va_info.vi_cheight) blanks = color1 = x1 = y1 = 0; for (i = 0; i < len; i++) { /* * Accelerate continuous blanks by drawing a respective * rectangle instead. Drawing a rectangle of any size * takes about the same number of operations as drawing * a single character. */ c = s[i] & 0xff; a = (s[i] & 0xff00) >> 8; if (c == 0x00 || c == 0x20 || c == 0xdb || c == 0xff) { color2 = (a >> (c == 0xdb ? 
0 : 4) & 0xf); x2 = (((off + i) % adp->va_info.vi_width) * adp->va_info.vi_cwidth) + sc->sc_xmargin; y2 = (((off + i) / adp->va_info.vi_width) * adp->va_info.vi_cheight) + sc->sc_ymargin; if (blanks == 0) { color1 = color2; x1 = x2; y1 = y2; blanks++; } else if (color1 != color2 || y1 != y2) { MACHFB_BLANK; color1 = color2; x1 = x2; y1 = y2; blanks = 1; } else blanks++; } else { if (blanks != 0) { MACHFB_BLANK; blanks = 0; } vidd_putc(adp, off + i, c, a); } } if (blanks != 0) MACHFB_BLANK; #undef MACHFB_BLANK return (0); } static int machfb_putm(video_adapter_t *adp, int x, int y, uint8_t *pixel_image, uint32_t pixel_mask, int size, int width) { struct machfb_softc *sc; int error; sc = (struct machfb_softc *)adp; if ((!(sc->sc_flags & MACHFB_CUREN)) && (error = machfb_cursor_install(sc)) < 0) return (error); else { /* * The hardware cursor always must be disabled when * fiddling with its bits otherwise some artifacts * may appear on the screen. */ machfb_cursor_enable(sc, 0); } regw(sc, CUR_HORZ_VERT_OFF, 0); if ((regr(sc, GEN_TEST_CNTL) & CRTC_DBL_SCAN_EN) != 0) y <<= 1; regw(sc, CUR_HORZ_VERT_POSN, ((y + sc->sc_ymargin) << 16) | (x + sc->sc_xmargin)); machfb_cursor_enable(sc, 1); sc->sc_flags |= MACHFB_CUREN; return (0); } /* * PCI bus interface */ static int machfb_pci_probe(device_t dev) { int i; if (pci_get_class(dev) != PCIC_DISPLAY || pci_get_subclass(dev) != PCIS_DISPLAY_VGA) return (ENXIO); for (i = 0; i < sizeof(machfb_info) / sizeof(machfb_info[0]); i++) { if (pci_get_device(dev) == machfb_info[i].chip_id) { device_set_desc(dev, machfb_info[i].name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int machfb_pci_attach(device_t dev) { struct machfb_softc *sc; video_adapter_t *adp; video_switch_t *sw; video_info_t *vi; phandle_t node; int error, i, rid; uint32_t *p32, u32; uint8_t *p; node = ofw_bus_get_node(dev); if ((sc = (struct machfb_softc *)vid_get_adapter(vid_find_adapter( MACHFB_DRIVER_NAME, 0))) != NULL && sc->sc_node == node) { device_printf(dev, "console\n"); device_set_softc(dev, sc); } else { sc = device_get_softc(dev); sc->sc_node = node; sc->sc_chip_id = pci_get_device(dev); sc->sc_chip_rev = pci_get_revid(dev); } adp = &sc->sc_va; vi = &adp->va_info; rid = PCIR_BAR(0); if ((sc->sc_memres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE)) == NULL) { device_printf(dev, "cannot allocate memory resources\n"); return (ENXIO); } sc->sc_memt = rman_get_bustag(sc->sc_memres); sc->sc_memh = rman_get_bushandle(sc->sc_memres); sc->sc_mem = rman_get_start(sc->sc_memres); vi->vi_buffer = sc->sc_memh; vi->vi_buffer_size = rman_get_size(sc->sc_memres); if (OF_getprop(sc->sc_node, "address", &u32, sizeof(u32)) > 0 && vtophys(u32) == sc->sc_memh) adp->va_mem_base = u32; else { if (bus_space_map(sc->sc_memt, vi->vi_buffer, vi->vi_buffer_size, BUS_SPACE_MAP_LINEAR, &sc->sc_memh) != 0) { device_printf(dev, "cannot map memory resources\n"); error = ENXIO; goto fail_memres; } adp->va_mem_base = (vm_offset_t)rman_get_virtual(sc->sc_memres); } adp->va_mem_size = vi->vi_buffer_size; adp->va_buffer = adp->va_mem_base; adp->va_buffer_size = adp->va_mem_size; sc->sc_regt = sc->sc_memt; if (bus_space_subregion(sc->sc_regt, sc->sc_memh, MACH64_REG_OFF, MACH64_REG_SIZE, &sc->sc_regh) != 0) { device_printf(dev, "cannot allocate register resources\n"); error = ENXIO; goto fail_memmap; } /* * Depending on the firmware version the VGA I/O and/or memory * resources of the Mach64 chips come up disabled. 
These will be * enabled by pci(4) when activating the resource in question but * this doesn't necessarily mean that the resource is valid. * Invalid resources seem to have in common that they start at * address 0. We don't allocate the VGA memory in this case in * order to avoid warnings in apb(4) and crashes when using this * invalid resources. X.Org is aware of this and doesn't use the * VGA memory resource in this case (but demands it if it's valid). */ rid = PCIR_BAR(2); if (bus_get_resource_start(dev, SYS_RES_MEMORY, rid) != 0) { if ((sc->sc_vmemres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE)) == NULL) { device_printf(dev, "cannot allocate VGA memory resources\n"); error = ENXIO; goto fail_memmap; } sc->sc_vmemt = rman_get_bustag(sc->sc_vmemres); sc->sc_vmemh = rman_get_bushandle(sc->sc_vmemres); sc->sc_vmem = rman_get_start(sc->sc_vmemres); vi->vi_registers = sc->sc_vmemh; vi->vi_registers_size = rman_get_size(sc->sc_vmemres); if (bus_space_map(sc->sc_vmemt, vi->vi_registers, vi->vi_registers_size, BUS_SPACE_MAP_LINEAR, &sc->sc_vmemh) != 0) { device_printf(dev, "cannot map VGA memory resources\n"); error = ENXIO; goto fail_vmemres; } adp->va_registers = (vm_offset_t)rman_get_virtual(sc->sc_vmemres); adp->va_registers_size = vi->vi_registers_size; } if (!(sc->sc_flags & MACHFB_CONSOLE)) { if ((sw = vid_get_switch(MACHFB_DRIVER_NAME)) == NULL) { device_printf(dev, "cannot get video switch\n"); error = ENODEV; goto fail_vmemmap; } /* * During device configuration we don't necessarily probe * the adapter which is the console first so we can't use * the device unit number for the video adapter unit. The * worst case would be that we use the video adapter unit * 0 twice. As it doesn't really matter which unit number * the corresponding video adapter has just use the next * unused one. */ for (i = 0; i < devclass_get_maxunit(machfb_devclass); i++) if (vid_find_adapter(MACHFB_DRIVER_NAME, i) < 0) break; if ((error = sw->init(i, adp, 0)) != 0) { device_printf(dev, "cannot initialize adapter\n"); goto fail_vmemmap; } } /* * Test whether the aperture is byte swapped or not, set * va_window and va_window_size as appropriate. Note that * the aperture could be mapped either big or little endian * independently of the endianess of the host so this has * to be a runtime test. */ p32 = (uint32_t *)adp->va_buffer; u32 = *p32; p = (uint8_t *)adp->va_buffer; *p32 = 0x12345678; if (!(p[0] == 0x12 && p[1] == 0x34 && p[2] == 0x56 && p[3] == 0x78)) { adp->va_window = adp->va_buffer + 0x800000; adp->va_window_size = adp->va_buffer_size - 0x800000; vi->vi_window = vi->vi_buffer + 0x800000; vi->vi_window_size = vi->vi_buffer_size - 0x800000; sc->sc_flags |= MACHFB_SWAP; } else { adp->va_window = adp->va_buffer; adp->va_window_size = adp->va_buffer_size; vi->vi_window = vi->vi_buffer; vi->vi_window_size = vi->vi_buffer_size; } *p32 = u32; adp->va_window_gran = adp->va_window_size; device_printf(dev, "%d MB aperture at %p %sswapped\n", (u_int)(adp->va_window_size / (1024 * 1024)), (void *)adp->va_window, (sc->sc_flags & MACHFB_SWAP) ? "" : "not "); device_printf(dev, "%ld KB %s %d.%d MHz, maximum RAMDAC clock %d MHz, %sDSP\n", (u_long)sc->sc_memsize, machfb_memtype_names[sc->sc_memtype], sc->sc_mem_freq / 1000, sc->sc_mem_freq % 1000, sc->sc_ramdac_freq / 1000, (sc->sc_flags & MACHFB_DSP) ? 
"" : "no "); device_printf(dev, "resolution %dx%d at %d bpp\n", sc->sc_width, sc->sc_height, sc->sc_depth); /* * Allocate one page for the mouse pointer image at the end of * the little endian aperture, right before the memory mapped * registers that might also reside there. Must be done after * sc_memsize was set and possibly adjusted to account for the * memory mapped registers. */ sc->sc_curoff = (sc->sc_memsize * 1024) - PAGE_SIZE; sc->sc_memsize -= PAGE_SIZE / 1024; machfb_cursor_enable(sc, 0); /* Initialize with an all transparent image. */ memset((void *)(adp->va_buffer + sc->sc_curoff), 0xaa, PAGE_SIZE); /* * Register a handler that performs some cosmetic surgery like * turning off the mouse pointer on halt in preparation for * handing the screen over to the OFW. Register another handler * that turns off the CRTC when resetting, otherwise the OFW * boot command issued by cpu_reset() just doesn't work. */ EVENTHANDLER_REGISTER(shutdown_final, machfb_shutdown_final, sc, SHUTDOWN_PRI_DEFAULT); EVENTHANDLER_REGISTER(shutdown_reset, machfb_shutdown_reset, sc, SHUTDOWN_PRI_DEFAULT); return (0); fail_vmemmap: if (adp->va_registers != 0) bus_space_unmap(sc->sc_vmemt, sc->sc_vmemh, vi->vi_registers_size); fail_vmemres: if (sc->sc_vmemres != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_vmemres), sc->sc_vmemres); fail_memmap: bus_space_unmap(sc->sc_memt, sc->sc_memh, vi->vi_buffer_size); fail_memres: bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_memres), sc->sc_memres); return (error); } static int machfb_pci_detach(device_t dev) { return (EINVAL); } /* * internal functions */ static void machfb_cursor_enable(struct machfb_softc *sc, int onoff) { if (onoff) regw(sc, GEN_TEST_CNTL, regr(sc, GEN_TEST_CNTL) | HWCURSOR_ENABLE); else regw(sc, GEN_TEST_CNTL, regr(sc, GEN_TEST_CNTL) &~ HWCURSOR_ENABLE); } static int machfb_cursor_install(struct machfb_softc *sc) { uint16_t *p, v; uint8_t fg; int i, j; if (sc->sc_curoff == 0) return (ENODEV); machfb_cursor_enable(sc, 0); regw(sc, CUR_OFFSET, sc->sc_curoff >> 3); fg = SC_NORM_ATTR & 0xf; regw(sc, CUR_CLR0, machfb_default_cmap[fg].red << 24 | machfb_default_cmap[fg].green << 16 | machfb_default_cmap[fg].blue << 8); p = (uint16_t *)(sc->sc_va.va_buffer + sc->sc_curoff); for (i = 0; i < 64; i++) { for (j = 0; j < 8; j++) { v = machfb_mouse_pointer_lut[ machfb_mouse_pointer_bits[i][j] >> 4] << 8 | machfb_mouse_pointer_lut[ machfb_mouse_pointer_bits[i][j] & 0x0f]; if (sc->sc_flags & MACHFB_SWAP) *(p++) = bswap16(v); else *(p++) = v; } } return (0); } static int machfb_get_memsize(struct machfb_softc *sc) { int tmp, memsize; const int mem_tab[] = { 512, 1024, 2048, 4096, 6144, 8192, 12288, 16384 }; tmp = regr(sc, MEM_CNTL); #ifdef MACHFB_DEBUG printf("memcntl=0x%08x\n", tmp); #endif if (sc->sc_flags & MACHFB_DSP) { tmp &= 0x0000000f; if (tmp < 8) memsize = (tmp + 1) * 512; else if (tmp < 12) memsize = (tmp - 3) * 1024; else memsize = (tmp - 7) * 2048; } else memsize = mem_tab[tmp & 0x07]; return (memsize); } static void machfb_reset_engine(struct machfb_softc *sc) { /* Reset engine.*/ regw(sc, GEN_TEST_CNTL, regr(sc, GEN_TEST_CNTL) & ~GUI_ENGINE_ENABLE); /* Enable engine. */ regw(sc, GEN_TEST_CNTL, regr(sc, GEN_TEST_CNTL) | GUI_ENGINE_ENABLE); /* * Ensure engine is not locked up by clearing any FIFO or * host errors. 
*/ regw(sc, BUS_CNTL, regr(sc, BUS_CNTL) | BUS_HOST_ERR_ACK | BUS_FIFO_ERR_ACK); } static void machfb_init_engine(struct machfb_softc *sc) { uint32_t pitch_value; pitch_value = sc->sc_width; if (sc->sc_depth == 24) pitch_value *= 3; machfb_reset_engine(sc); wait_for_fifo(sc, 14); regw(sc, CONTEXT_MASK, 0xffffffff); regw(sc, DST_OFF_PITCH, (pitch_value / 8) << 22); regw(sc, DST_Y_X, 0); regw(sc, DST_HEIGHT, 0); regw(sc, DST_BRES_ERR, 0); regw(sc, DST_BRES_INC, 0); regw(sc, DST_BRES_DEC, 0); regw(sc, DST_CNTL, DST_LAST_PEL | DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM); regw(sc, SRC_OFF_PITCH, (pitch_value / 8) << 22); regw(sc, SRC_Y_X, 0); regw(sc, SRC_HEIGHT1_WIDTH1, 1); regw(sc, SRC_Y_X_START, 0); regw(sc, SRC_HEIGHT2_WIDTH2, 1); regw(sc, SRC_CNTL, SRC_LINE_X_LEFT_TO_RIGHT); wait_for_fifo(sc, 13); regw(sc, HOST_CNTL, 0); regw(sc, PAT_REG0, 0); regw(sc, PAT_REG1, 0); regw(sc, PAT_CNTL, 0); regw(sc, SC_LEFT, 0); regw(sc, SC_TOP, 0); regw(sc, SC_BOTTOM, sc->sc_height - 1); regw(sc, SC_RIGHT, pitch_value - 1); regw(sc, DP_BKGD_CLR, 0); regw(sc, DP_FRGD_CLR, 0xffffffff); regw(sc, DP_WRITE_MASK, 0xffffffff); regw(sc, DP_MIX, (MIX_SRC << 16) | MIX_DST); regw(sc, DP_SRC, FRGD_SRC_FRGD_CLR); wait_for_fifo(sc, 3); regw(sc, CLR_CMP_CLR, 0); regw(sc, CLR_CMP_MASK, 0xffffffff); regw(sc, CLR_CMP_CNTL, 0); wait_for_fifo(sc, 2); switch (sc->sc_depth) { case 8: regw(sc, DP_PIX_WIDTH, HOST_8BPP | SRC_8BPP | DST_8BPP); regw(sc, DP_CHAIN_MASK, DP_CHAIN_8BPP); regw(sc, DAC_CNTL, regr(sc, DAC_CNTL) | DAC_8BIT_EN); break; #if 0 case 32: regw(sc, DP_PIX_WIDTH, HOST_32BPP | SRC_32BPP | DST_32BPP); regw(sc, DP_CHAIN_MASK, DP_CHAIN_32BPP); regw(sc, DAC_CNTL, regr(sc, DAC_CNTL) | DAC_8BIT_EN); break; #endif } wait_for_fifo(sc, 2); regw(sc, CRTC_INT_CNTL, regr(sc, CRTC_INT_CNTL) & ~0x20); regw(sc, GUI_TRAJ_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM); wait_for_idle(sc); } #if 0 static void machfb_adjust_frame(struct machfb_softc *sc, int x, int y) { int offset; offset = ((x + y * sc->sc_width) * (sc->sc_depth >> 3)) >> 3; regw(sc, CRTC_OFF_PITCH, (regr(sc, CRTC_OFF_PITCH) & 0xfff00000) | offset); } #endif static void machfb_shutdown_final(void *v) { struct machfb_softc *sc = v; machfb_cursor_enable(sc, 0); /* * In case this is the console set the cursor of the stdout * instance to the start of the last line so OFW output ends * up beneath what FreeBSD left on the screen. */ if (sc->sc_flags & MACHFB_CONSOLE) { OF_interpret("stdout @ is my-self 0 to column#", 0); OF_interpret("stdout @ is my-self #lines 1 - to line#", 0); } } static void machfb_shutdown_reset(void *v) { struct machfb_softc *sc = v; machfb_blank_display(&sc->sc_va, V_DISPLAY_STAND_BY); } Index: head/sys/dev/isci/isci_oem_parameters.c =================================================================== --- head/sys/dev/isci/isci_oem_parameters.c (revision 295879) +++ head/sys/dev/isci/isci_oem_parameters.c (revision 295880) @@ -1,176 +1,175 @@ /*- * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include struct pcir_header { uint32_t signature; uint16_t vendor_id; uint16_t device_id; uint16_t reserved; uint16_t struct_length; uint8_t struct_revision; uint8_t cc_interface; uint8_t cc_subclass; uint8_t cc_baseclass; uint16_t image_length; uint16_t code_revision; uint8_t code_type; uint8_t indicator; uint16_t reserved1; }; struct rom_header { uint8_t signature_byte[2]; uint8_t rom_length; uint8_t jmp_code; uint16_t entry_address; uint8_t reserved[0x12]; uint16_t pcir_pointer; uint16_t pnp_pointer; }; struct oem_parameters_table { uint8_t signature[4]; /* "$OEM" */ struct revision { uint16_t major:8; /* bits [7:0] */ uint16_t minor:8; /* bits [8:15] */ } revision; uint16_t length; uint8_t checksum; uint8_t reserved1; uint16_t reserved2; uint8_t data[1]; }; void isci_get_oem_parameters(struct isci_softc *isci) { uint32_t OROM_PHYSICAL_ADDRESS_START = 0xC0000; uint32_t OROM_SEARCH_LENGTH = 0x30000; uint16_t OROM_SIGNATURE = 0xAA55; uint32_t OROM_SIZE = 512; uint8_t *orom_start = (uint8_t *)BIOS_PADDRTOVADDR(OROM_PHYSICAL_ADDRESS_START); uint32_t offset = 0; while (offset < OROM_SEARCH_LENGTH) { /* Look for the OROM signature at the beginning of every * 512-byte block in the OROM region */ if (*(uint16_t*)(orom_start + offset) == OROM_SIGNATURE) { uint32_t *rom; struct rom_header *rom_header; struct pcir_header *pcir_header; uint16_t vendor_id = isci->pci_common_header.vendor_id; uint16_t device_id = isci->pci_common_header.device_id; rom = (uint32_t *)(orom_start + offset); rom_header = (struct rom_header *)rom; pcir_header = (struct pcir_header *) ((uint8_t*)rom + rom_header->pcir_pointer); /* OROM signature was found. Now check if the PCI * device and vendor IDs match. */ if (pcir_header->vendor_id == vendor_id && pcir_header->device_id == device_id) { /* OROM for this PCI device was found. Search * this 512-byte block for the $OEM string, * which will mark the beginning of the OEM * parameter block. */ uint8_t oem_sig[4] = {'$', 'O', 'E', 'M'}; int dword_index; for (dword_index = 0; dword_index < OROM_SIZE/sizeof(uint32_t); dword_index++) if (rom[dword_index] == *(uint32_t *)oem_sig) { /* $OEM signature string was found. Now copy the OEM parameter block * into the struct ISCI_CONTROLLER objects. After the controllers are * constructed, we will pass this OEM parameter data to the SCI core * controller. 
*/ struct oem_parameters_table *oem = (struct oem_parameters_table *)&rom[dword_index]; SCI_BIOS_OEM_PARAM_BLOCK_T *oem_data = (SCI_BIOS_OEM_PARAM_BLOCK_T *)oem->data; int index; isci->oem_parameters_found = TRUE; isci_log_message(1, "ISCI", "oem_data->header.num_elements = %d\n", oem_data->header.num_elements); for (index = 0; index < oem_data->header.num_elements; index++) { memcpy(&isci->controllers[index].oem_parameters.sds1, &oem_data->controller_element[index], sizeof(SCIC_SDS_OEM_PARAMETERS_T)); isci_log_message(1, "ISCI", "OEM Parameter Data for controller %d\n", index); for (int i = 0; i < sizeof(SCIC_SDS_OEM_PARAMETERS_T); i++) { uint8_t val = ((uint8_t *)&oem_data->controller_element[index])[i]; isci_log_message(1, "ISCI", "%02x ", val); } isci_log_message(1, "ISCI", "\n"); isci->controllers[index].oem_parameters_version = oem_data->header.version; } } /* No need to continue searching for another * OROM that matches this PCI device, so return * immediately. */ return; } } offset += OROM_SIZE; } } Index: head/sys/dev/ntb/if_ntb/if_ntb.c =================================================================== --- head/sys/dev/ntb/if_ntb/if_ntb.c (revision 295879) +++ head/sys/dev/ntb/if_ntb/if_ntb.c (revision 295880) @@ -1,1711 +1,1710 @@ /*- * Copyright (C) 2013 Intel Corporation * Copyright (C) 2015 EMC Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include "../ntb_hw/ntb_hw.h" /* * The Non-Transparent Bridge (NTB) is a device on some Intel processors that * allows you to connect two systems using a PCI-e link. * * This module contains a protocol for sending and receiving messages, and * exposes that protocol through a simulated ethernet device called ntb. * * NOTE: Much of the code in this module is shared with Linux. Any patches may * be picked up and redistributed in Linux with a dual GPL/BSD license. 
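 *
 * A rough sketch of the data path as implemented below (a reader's summary,
 * not a normative description): each transport queue pair owns a slice of an
 * NTB memory window; the transmit side copies a frame plus a small
 * ntb_payload_header into the peer's slice and rings a doorbell, and the
 * receive side walks its local ring of completed frames and hands them to
 * the ntb ifnet as mbufs.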
*/ #define QP_SETSIZE 64 BITSET_DEFINE(_qpset, QP_SETSIZE); #define test_bit(pos, addr) BIT_ISSET(QP_SETSIZE, (pos), (addr)) #define set_bit(pos, addr) BIT_SET(QP_SETSIZE, (pos), (addr)) #define clear_bit(pos, addr) BIT_CLR(QP_SETSIZE, (pos), (addr)) #define ffs_bit(addr) BIT_FFS(QP_SETSIZE, (addr)) #define KTR_NTB KTR_SPARE3 #define NTB_TRANSPORT_VERSION 4 #define NTB_RX_MAX_PKTS 64 #define NTB_RXQ_SIZE 300 enum ntb_link_event { NTB_LINK_DOWN = 0, NTB_LINK_UP, }; static SYSCTL_NODE(_hw, OID_AUTO, if_ntb, CTLFLAG_RW, 0, "if_ntb"); static unsigned g_if_ntb_debug_level; SYSCTL_UINT(_hw_if_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_if_ntb_debug_level, 0, "if_ntb log level -- higher is more verbose"); #define ntb_printf(lvl, ...) do { \ if ((lvl) <= g_if_ntb_debug_level) { \ if_printf(nt->ifp, __VA_ARGS__); \ } \ } while (0) static unsigned transport_mtu = IP_MAXPACKET + ETHER_HDR_LEN + ETHER_CRC_LEN; static uint64_t max_mw_size; SYSCTL_UQUAD(_hw_if_ntb, OID_AUTO, max_mw_size, CTLFLAG_RDTUN, &max_mw_size, 0, "If enabled (non-zero), limit the size of large memory windows. " "Both sides of the NTB MUST set the same value here."); static unsigned max_num_clients; SYSCTL_UINT(_hw_if_ntb, OID_AUTO, max_num_clients, CTLFLAG_RDTUN, &max_num_clients, 0, "Maximum number of NTB transport clients. " "0 (default) - use all available NTB memory windows; " "positive integer N - Limit to N memory windows."); static unsigned enable_xeon_watchdog; SYSCTL_UINT(_hw_if_ntb, OID_AUTO, enable_xeon_watchdog, CTLFLAG_RDTUN, &enable_xeon_watchdog, 0, "If non-zero, write a register every second to " "keep a watchdog from tearing down the NTB link"); STAILQ_HEAD(ntb_queue_list, ntb_queue_entry); typedef uint32_t ntb_q_idx_t; struct ntb_queue_entry { /* ntb_queue list reference */ STAILQ_ENTRY(ntb_queue_entry) entry; /* info on data to be transferred */ void *cb_data; void *buf; uint32_t len; uint32_t flags; struct ntb_transport_qp *qp; struct ntb_payload_header *x_hdr; ntb_q_idx_t index; }; struct ntb_rx_info { ntb_q_idx_t entry; }; struct ntb_transport_qp { struct ntb_transport_ctx *transport; struct ntb_softc *ntb; void *cb_data; bool client_ready; volatile bool link_is_up; uint8_t qp_num; /* Only 64 QPs are allowed. 
0-63 */ struct ntb_rx_info *rx_info; struct ntb_rx_info *remote_rx_info; void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); struct ntb_queue_list tx_free_q; struct mtx ntb_tx_free_q_lock; caddr_t tx_mw; bus_addr_t tx_mw_phys; ntb_q_idx_t tx_index; ntb_q_idx_t tx_max_entry; uint64_t tx_max_frame; void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); struct ntb_queue_list rx_post_q; struct ntb_queue_list rx_pend_q; /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */ struct mtx ntb_rx_q_lock; struct task rx_completion_task; struct task rxc_db_work; caddr_t rx_buff; ntb_q_idx_t rx_index; ntb_q_idx_t rx_max_entry; uint64_t rx_max_frame; void (*event_handler)(void *data, enum ntb_link_event status); struct callout link_work; struct callout queue_full; struct callout rx_full; uint64_t last_rx_no_buf; /* Stats */ uint64_t rx_bytes; uint64_t rx_pkts; uint64_t rx_ring_empty; uint64_t rx_err_no_buf; uint64_t rx_err_oflow; uint64_t rx_err_ver; uint64_t tx_bytes; uint64_t tx_pkts; uint64_t tx_ring_full; uint64_t tx_err_no_buf; }; struct ntb_queue_handlers { void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); void (*event_handler)(void *data, enum ntb_link_event status); }; struct ntb_transport_mw { vm_paddr_t phys_addr; size_t phys_size; size_t xlat_align; size_t xlat_align_size; bus_addr_t addr_limit; /* Tx buff is off vbase / phys_addr */ caddr_t vbase; size_t xlat_size; size_t buff_size; /* Rx buff is off virt_addr / dma_addr */ caddr_t virt_addr; bus_addr_t dma_addr; }; struct ntb_transport_ctx { struct ntb_softc *ntb; struct ifnet *ifp; struct ntb_transport_mw mw_vec[NTB_MAX_NUM_MW]; struct ntb_transport_qp *qp_vec; struct _qpset qp_bitmap; struct _qpset qp_bitmap_free; unsigned mw_count; unsigned qp_count; volatile bool link_is_up; struct callout link_work; struct callout link_watchdog; struct task link_cleanup; uint64_t bufsize; u_char eaddr[ETHER_ADDR_LEN]; struct mtx tx_lock; struct mtx rx_lock; /* The hardcoded single queuepair in ntb_setup_interface() */ struct ntb_transport_qp *qp; }; static struct ntb_transport_ctx net_softc; enum { IF_NTB_DESC_DONE_FLAG = 1 << 0, IF_NTB_LINK_DOWN_FLAG = 1 << 1, }; struct ntb_payload_header { ntb_q_idx_t ver; uint32_t len; uint32_t flags; }; enum { /* * The order of this enum is part of the if_ntb remote protocol. Do * not reorder without bumping protocol version (and it's probably best * to keep the protocol in lock-step with the Linux NTB driver. */ IF_NTB_VERSION = 0, IF_NTB_QP_LINKS, IF_NTB_NUM_QPS, IF_NTB_NUM_MWS, /* * N.B.: transport_link_work assumes MW1 enums = MW0 + 2. */ IF_NTB_MW0_SZ_HIGH, IF_NTB_MW0_SZ_LOW, IF_NTB_MW1_SZ_HIGH, IF_NTB_MW1_SZ_LOW, IF_NTB_MAX_SPAD, /* * Some NTB-using hardware have a watchdog to work around NTB hangs; if * a register or doorbell isn't written every few seconds, the link is * torn down. Write an otherwise unused register every few seconds to * work around this watchdog. 
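 * (xeon_link_watchdog_hb() below implements this: with the
 * hw.if_ntb.enable_xeon_watchdog tunable set to a non-zero value it
 * rewrites this scratchpad from a callout once per second.)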
*/ IF_NTB_WATCHDOG_SPAD = 15 }; CTASSERT(IF_NTB_WATCHDOG_SPAD < XEON_SPAD_COUNT && IF_NTB_WATCHDOG_SPAD < ATOM_SPAD_COUNT); #define QP_TO_MW(nt, qp) ((qp) % nt->mw_count) #define NTB_QP_DEF_NUM_ENTRIES 100 #define NTB_LINK_DOWN_TIMEOUT 10 static int ntb_handle_module_events(struct module *m, int what, void *arg); static int ntb_setup_interface(void); static int ntb_teardown_interface(void); static void ntb_net_init(void *arg); static int ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void ntb_start(struct ifnet *ifp); static void ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); static void ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len); static void ntb_net_event_handler(void *data, enum ntb_link_event status); static int ntb_transport_probe(struct ntb_softc *ntb); static void ntb_transport_free(struct ntb_transport_ctx *); static void ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num); static void ntb_transport_free_queue(struct ntb_transport_qp *qp); static struct ntb_transport_qp *ntb_transport_create_queue(void *data, struct ntb_softc *pdev, const struct ntb_queue_handlers *handlers); static void ntb_transport_link_up(struct ntb_transport_qp *qp); static int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len); static int ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry); static void ntb_memcpy_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset); static void ntb_qp_full(void *arg); static void ntb_transport_rxc_db(void *arg, int pending); static int ntb_process_rxc(struct ntb_transport_qp *qp); static void ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset); static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data); static void ntb_complete_rxc(void *arg, int pending); static void ntb_transport_doorbell_callback(void *data, uint32_t vector); static void ntb_transport_event_callback(void *data); static void ntb_transport_link_work(void *arg); static int ntb_set_mw(struct ntb_transport_ctx *, int num_mw, size_t size); static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw); static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num); static void ntb_qp_link_work(void *arg); static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt); static void ntb_transport_link_cleanup_work(void *, int); static void ntb_qp_link_down(struct ntb_transport_qp *qp); static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp); static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp); static void ntb_transport_link_down(struct ntb_transport_qp *qp); static void ntb_send_link_down(struct ntb_transport_qp *qp); static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, struct ntb_queue_list *list); static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list); static struct ntb_queue_entry *ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from, struct ntb_queue_list *to); static void create_random_local_eui48(u_char *eaddr); static unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp); static void xeon_link_watchdog_hb(void *); static const struct ntb_ctx_ops ntb_transport_ops = { .link_event = ntb_transport_event_callback, .db_event = ntb_transport_doorbell_callback, }; MALLOC_DEFINE(M_NTB_IF, "if_ntb", "ntb network driver"); static 
inline void iowrite32(uint32_t val, void *addr) { bus_space_write_4(X86_BUS_SPACE_MEM, 0/* HACK */, (uintptr_t)addr, val); } /* Module setup and teardown */ static int ntb_handle_module_events(struct module *m, int what, void *arg) { int err = 0; switch (what) { case MOD_LOAD: err = ntb_setup_interface(); break; case MOD_UNLOAD: err = ntb_teardown_interface(); break; default: err = EOPNOTSUPP; break; } return (err); } static moduledata_t if_ntb_mod = { "if_ntb", ntb_handle_module_events, NULL }; DECLARE_MODULE(if_ntb, if_ntb_mod, SI_SUB_KLD, SI_ORDER_ANY); MODULE_DEPEND(if_ntb, ntb_hw, 1, 1, 1); static int ntb_setup_interface(void) { struct ifnet *ifp; struct ntb_queue_handlers handlers = { ntb_net_rx_handler, ntb_net_tx_handler, ntb_net_event_handler }; int rc; net_softc.ntb = devclass_get_softc(devclass_find("ntb_hw"), 0); if (net_softc.ntb == NULL) { printf("ntb: Cannot find devclass\n"); return (ENXIO); } ifp = net_softc.ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { ntb_transport_free(&net_softc); printf("ntb: Cannot allocate ifnet structure\n"); return (ENOMEM); } if_initname(ifp, "ntb", 0); rc = ntb_transport_probe(net_softc.ntb); if (rc != 0) { printf("ntb: Cannot init transport: %d\n", rc); if_free(net_softc.ifp); return (rc); } net_softc.qp = ntb_transport_create_queue(ifp, net_softc.ntb, &handlers); ifp->if_init = ntb_net_init; ifp->if_softc = &net_softc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX; ifp->if_ioctl = ntb_ioctl; ifp->if_start = ntb_start; IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN); ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN; IFQ_SET_READY(&ifp->if_snd); create_random_local_eui48(net_softc.eaddr); ether_ifattach(ifp, net_softc.eaddr); ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU; ifp->if_capenable = ifp->if_capabilities; ifp->if_mtu = ntb_transport_max_size(net_softc.qp) - ETHER_HDR_LEN - ETHER_CRC_LEN; ntb_transport_link_up(net_softc.qp); net_softc.bufsize = ntb_transport_max_size(net_softc.qp) + sizeof(struct ether_header); return (0); } static int ntb_teardown_interface(void) { if (net_softc.qp != NULL) { ntb_transport_link_down(net_softc.qp); ntb_transport_free_queue(net_softc.qp); ntb_transport_free(&net_softc); } if (net_softc.ifp != NULL) { ether_ifdetach(net_softc.ifp); if_free(net_softc.ifp); net_softc.ifp = NULL; } return (0); } /* Network device interface */ static void ntb_net_init(void *arg) { struct ntb_transport_ctx *ntb_softc = arg; struct ifnet *ifp = ntb_softc->ifp; ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_flags |= IFF_UP; if_link_state_change(ifp, LINK_STATE_UP); } static int ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ntb_transport_ctx *nt = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int error = 0; switch (command) { case SIOCSIFMTU: { if (ifr->ifr_mtu > ntb_transport_max_size(nt->qp) - ETHER_HDR_LEN - ETHER_CRC_LEN) { error = EINVAL; break; } ifp->if_mtu = ifr->ifr_mtu; break; } default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void ntb_start(struct ifnet *ifp) { struct mbuf *m_head; struct ntb_transport_ctx *nt = ifp->if_softc; int rc; mtx_lock(&nt->tx_lock); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; CTR0(KTR_NTB, "TX: ntb_start"); while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); CTR1(KTR_NTB, "TX: start mbuf %p", m_head); rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head, m_length(m_head, NULL)); if (rc != 0) { CTR1(KTR_NTB, "TX: could not tx mbuf %p. 
Returning to snd q", m_head); if (rc == EAGAIN) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); callout_reset(&nt->qp->queue_full, hz / 1000, ntb_qp_full, ifp); } break; } } mtx_unlock(&nt->tx_lock); } /* Network Device Callbacks */ static void ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len) { m_freem(data); CTR1(KTR_NTB, "TX: tx_handler freeing mbuf %p", data); } static void ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data, int len) { struct mbuf *m = data; struct ifnet *ifp = qp_data; CTR0(KTR_NTB, "RX: rx handler"); (*ifp->if_input)(ifp, m); } static void ntb_net_event_handler(void *data, enum ntb_link_event status) { struct ifnet *ifp; ifp = data; (void)ifp; /* XXX The Linux driver munges with the carrier status here. */ switch (status) { case NTB_LINK_DOWN: break; case NTB_LINK_UP: break; default: panic("Bogus ntb_link_event %u\n", status); } } /* Transport Init and teardown */ static void xeon_link_watchdog_hb(void *arg) { struct ntb_transport_ctx *nt; nt = arg; ntb_spad_write(nt->ntb, IF_NTB_WATCHDOG_SPAD, 0); callout_reset(&nt->link_watchdog, 1 * hz, xeon_link_watchdog_hb, nt); } static int ntb_transport_probe(struct ntb_softc *ntb) { struct ntb_transport_ctx *nt = &net_softc; struct ntb_transport_mw *mw; uint64_t qp_bitmap; int rc; unsigned i; nt->mw_count = ntb_mw_count(ntb); for (i = 0; i < nt->mw_count; i++) { mw = &nt->mw_vec[i]; rc = ntb_mw_get_range(ntb, i, &mw->phys_addr, &mw->vbase, &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size, &mw->addr_limit); if (rc != 0) goto err; mw->buff_size = 0; mw->xlat_size = 0; mw->virt_addr = NULL; mw->dma_addr = 0; } qp_bitmap = ntb_db_valid_mask(ntb); nt->qp_count = flsll(qp_bitmap); KASSERT(nt->qp_count != 0, ("bogus db bitmap")); nt->qp_count -= 1; if (max_num_clients != 0 && max_num_clients < nt->qp_count) nt->qp_count = max_num_clients; else if (nt->mw_count < nt->qp_count) nt->qp_count = nt->mw_count; KASSERT(nt->qp_count <= QP_SETSIZE, ("invalid qp_count")); mtx_init(&nt->tx_lock, "ntb transport tx", NULL, MTX_DEF); mtx_init(&nt->rx_lock, "ntb transport rx", NULL, MTX_DEF); nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_IF, M_WAITOK | M_ZERO); for (i = 0; i < nt->qp_count; i++) { set_bit(i, &nt->qp_bitmap); set_bit(i, &nt->qp_bitmap_free); ntb_transport_init_queue(nt, i); } callout_init(&nt->link_work, 0); callout_init(&nt->link_watchdog, 0); TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt); rc = ntb_set_ctx(ntb, nt, &ntb_transport_ops); if (rc != 0) goto err; nt->link_is_up = false; ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); ntb_link_event(ntb); callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); if (enable_xeon_watchdog != 0) callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt); return (0); err: free(nt->qp_vec, M_NTB_IF); nt->qp_vec = NULL; return (rc); } static void ntb_transport_free(struct ntb_transport_ctx *nt) { struct ntb_softc *ntb = nt->ntb; struct _qpset qp_bitmap_alloc; uint8_t i; ntb_transport_link_cleanup(nt); taskqueue_drain(taskqueue_swi, &nt->link_cleanup); callout_drain(&nt->link_work); callout_drain(&nt->link_watchdog); BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &qp_bitmap_alloc); BIT_NAND(QP_SETSIZE, &qp_bitmap_alloc, &nt->qp_bitmap_free); /* Verify that all the QPs are freed */ for (i = 0; i < nt->qp_count; i++) if (test_bit(i, &qp_bitmap_alloc)) ntb_transport_free_queue(&nt->qp_vec[i]); ntb_link_disable(ntb); ntb_clear_ctx(ntb); for (i = 0; i < 
nt->mw_count; i++) ntb_free_mw(nt, i); free(nt->qp_vec, M_NTB_IF); } static void ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num) { struct ntb_transport_mw *mw; struct ntb_transport_qp *qp; vm_paddr_t mw_base; uint64_t mw_size, qp_offset; size_t tx_size; unsigned num_qps_mw, mw_num, mw_count; mw_count = nt->mw_count; mw_num = QP_TO_MW(nt, qp_num); mw = &nt->mw_vec[mw_num]; qp = &nt->qp_vec[qp_num]; qp->qp_num = qp_num; qp->transport = nt; qp->ntb = nt->ntb; qp->client_ready = false; qp->event_handler = NULL; ntb_qp_link_down_reset(qp); if (nt->qp_count % mw_count && mw_num + 1 < nt->qp_count / mw_count) num_qps_mw = nt->qp_count / mw_count + 1; else num_qps_mw = nt->qp_count / mw_count; mw_base = mw->phys_addr; mw_size = mw->phys_size; tx_size = mw_size / num_qps_mw; qp_offset = tx_size * (qp_num / mw_count); qp->tx_mw = mw->vbase + qp_offset; KASSERT(qp->tx_mw != NULL, ("uh oh?")); /* XXX Assumes that a vm_paddr_t is equivalent to bus_addr_t */ qp->tx_mw_phys = mw_base + qp_offset; KASSERT(qp->tx_mw_phys != 0, ("uh oh?")); tx_size -= sizeof(struct ntb_rx_info); qp->rx_info = (void *)(qp->tx_mw + tx_size); /* Due to house-keeping, there must be at least 2 buffs */ qp->tx_max_frame = qmin(tx_size / 2, transport_mtu + sizeof(struct ntb_payload_header)); qp->tx_max_entry = tx_size / qp->tx_max_frame; callout_init(&qp->link_work, 0); callout_init(&qp->queue_full, 1); callout_init(&qp->rx_full, 1); mtx_init(&qp->ntb_rx_q_lock, "ntb rx q", NULL, MTX_SPIN); mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN); TASK_INIT(&qp->rx_completion_task, 0, ntb_complete_rxc, qp); TASK_INIT(&qp->rxc_db_work, 0, ntb_transport_rxc_db, qp); STAILQ_INIT(&qp->rx_post_q); STAILQ_INIT(&qp->rx_pend_q); STAILQ_INIT(&qp->tx_free_q); callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); } static void ntb_transport_free_queue(struct ntb_transport_qp *qp) { struct ntb_queue_entry *entry; if (qp == NULL) return; callout_drain(&qp->link_work); ntb_db_set_mask(qp->ntb, 1ull << qp->qp_num); taskqueue_drain(taskqueue_swi, &qp->rxc_db_work); taskqueue_drain(taskqueue_swi, &qp->rx_completion_task); qp->cb_data = NULL; qp->rx_handler = NULL; qp->tx_handler = NULL; qp->event_handler = NULL; while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) free(entry, M_NTB_IF); while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) free(entry, M_NTB_IF); while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) free(entry, M_NTB_IF); set_bit(qp->qp_num, &qp->transport->qp_bitmap_free); } /** * ntb_transport_create_queue - Create a new NTB transport layer queue * @rx_handler: receive callback function * @tx_handler: transmit callback function * @event_handler: event callback function * * Create a new NTB transport layer queue and provide the queue with a callback * routine for both transmit and receive. The receive callback routine will be * used to pass up data when the transport has received it on the queue. The * transmit callback routine will be called when the transport has completed the * transmission of the data on the queue and the data is ready to be freed. * * RETURNS: pointer to newly created ntb_queue, NULL on error. 
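 * For illustration, the single in-file consumer (ntb_setup_interface())
 * calls this roughly as:
 *
 *	struct ntb_queue_handlers handlers = { ntb_net_rx_handler,
 *	    ntb_net_tx_handler, ntb_net_event_handler };
 *	net_softc.qp = ntb_transport_create_queue(ifp, net_softc.ntb,
 *	    &handlers);
 *
 * with the first argument being the opaque per-queue data that is later
 * passed back to the handlers.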
*/ static struct ntb_transport_qp * ntb_transport_create_queue(void *data, struct ntb_softc *ntb, const struct ntb_queue_handlers *handlers) { struct ntb_queue_entry *entry; struct ntb_transport_qp *qp; struct ntb_transport_ctx *nt; unsigned int free_queue; int i; nt = ntb_get_ctx(ntb, NULL); KASSERT(nt != NULL, ("bogus")); free_queue = ffs_bit(&nt->qp_bitmap); if (free_queue == 0) return (NULL); /* decrement free_queue to make it zero based */ free_queue--; qp = &nt->qp_vec[free_queue]; clear_bit(qp->qp_num, &nt->qp_bitmap_free); qp->cb_data = data; qp->rx_handler = handlers->rx_handler; qp->tx_handler = handlers->tx_handler; qp->event_handler = handlers->event_handler; for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { entry = malloc(sizeof(*entry), M_NTB_IF, M_WAITOK | M_ZERO); entry->cb_data = nt->ifp; entry->buf = NULL; entry->len = transport_mtu; ntb_list_add(&qp->ntb_rx_q_lock, entry, &qp->rx_pend_q); } for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) { entry = malloc(sizeof(*entry), M_NTB_IF, M_WAITOK | M_ZERO); ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); } ntb_db_clear(ntb, 1ull << qp->qp_num); ntb_db_clear_mask(ntb, 1ull << qp->qp_num); return (qp); } /** * ntb_transport_link_up - Notify NTB transport of client readiness to use queue * @qp: NTB transport layer queue to be enabled * * Notify NTB transport layer of client readiness to use queue */ static void ntb_transport_link_up(struct ntb_transport_qp *qp) { struct ntb_transport_ctx *nt; if (qp == NULL) return; qp->client_ready = true; nt = qp->transport; ntb_printf(2, "qp client ready\n"); if (qp->transport->link_is_up) callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); } /* Transport Tx */ /** * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry * @qp: NTB transport layer queue the entry is to be enqueued on * @cb: per buffer pointer for callback function to use * @data: pointer to data buffer that will be sent * @len: length of the data buffer * * Enqueue a new transmit buffer onto the transport queue from which a NTB * payload will be transmitted. This assumes that a lock is being held to * serialize access to the qp. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ static int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data, unsigned int len) { struct ntb_queue_entry *entry; int rc; if (qp == NULL || !qp->link_is_up || len == 0) { CTR0(KTR_NTB, "TX: link not up"); return (EINVAL); } entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); if (entry == NULL) { CTR0(KTR_NTB, "TX: could not get entry from tx_free_q"); qp->tx_err_no_buf++; return (EBUSY); } CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry); entry->cb_data = cb; entry->buf = data; entry->len = len; entry->flags = 0; rc = ntb_process_tx(qp, entry); if (rc != 0) { ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); CTR1(KTR_NTB, "TX: process_tx failed. 
Returning entry %p to tx_free_q", entry); } return (rc); } static int ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry) { void *offset; offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index; CTR3(KTR_NTB, "TX: process_tx: tx_pkts=%lu, tx_index=%u, remote entry=%u", qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry); if (qp->tx_index == qp->remote_rx_info->entry) { CTR0(KTR_NTB, "TX: ring full"); qp->tx_ring_full++; return (EAGAIN); } if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) { if (qp->tx_handler != NULL) qp->tx_handler(qp, qp->cb_data, entry->buf, EIO); else m_freem(entry->buf); entry->buf = NULL; ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); CTR1(KTR_NTB, "TX: frame too big. returning entry %p to tx_free_q", entry); return (0); } CTR2(KTR_NTB, "TX: copying entry %p to offset %p", entry, offset); ntb_memcpy_tx(qp, entry, offset); qp->tx_index++; qp->tx_index %= qp->tx_max_entry; qp->tx_pkts++; return (0); } static void ntb_memcpy_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset) { struct ntb_payload_header *hdr; /* This piece is from Linux' ntb_async_tx() */ hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame - sizeof(struct ntb_payload_header)); entry->x_hdr = hdr; iowrite32(entry->len, &hdr->len); iowrite32(qp->tx_pkts, &hdr->ver); /* This piece is ntb_memcpy_tx() */ CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset); if (entry->buf != NULL) { m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset); /* * Ensure that the data is fully copied before setting the * flags */ wmb(); } /* The rest is ntb_tx_copy_callback() */ iowrite32(entry->flags | IF_NTB_DESC_DONE_FLAG, &hdr->flags); CTR1(KTR_NTB, "TX: hdr %p set DESC_DONE", hdr); ntb_peer_db_set(qp->ntb, 1ull << qp->qp_num); /* * The entry length can only be zero if the packet is intended to be a * "link down" or similar. Since no payload is being sent in these * cases, there is nothing to add to the completion queue. */ if (entry->len > 0) { qp->tx_bytes += entry->len; if (qp->tx_handler) qp->tx_handler(qp, qp->cb_data, entry->buf, entry->len); else m_freem(entry->buf); entry->buf = NULL; } CTR3(KTR_NTB, "TX: entry %p sent. hdr->ver = %u, hdr->flags = 0x%x, Returning " "to tx_free_q", entry, hdr->ver, hdr->flags); ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q); } static void ntb_qp_full(void *arg) { CTR0(KTR_NTB, "TX: qp_full callout"); ntb_start(arg); } /* Transport Rx */ static void ntb_transport_rxc_db(void *arg, int pending __unused) { struct ntb_transport_qp *qp = arg; ntb_q_idx_t i; int rc; /* * Limit the number of packets processed in a single interrupt to * provide fairness to others */ CTR0(KTR_NTB, "RX: transport_rx"); mtx_lock(&qp->transport->rx_lock); for (i = 0; i < qp->rx_max_entry; i++) { rc = ntb_process_rxc(qp); if (rc != 0) { CTR0(KTR_NTB, "RX: process_rxc failed"); break; } } mtx_unlock(&qp->transport->rx_lock); if (i == qp->rx_max_entry) taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work); else if ((ntb_db_read(qp->ntb) & (1ull << qp->qp_num)) != 0) { /* If db is set, clear it and read it back to commit clear. */ ntb_db_clear(qp->ntb, 1ull << qp->qp_num); (void)ntb_db_read(qp->ntb); /* * An interrupt may have arrived between finishing * ntb_process_rxc and clearing the doorbell bit: there might * be some more work to do. 
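 * Re-enqueueing the rxc task here closes that window: any frame that
 * landed after the final ntb_process_rxc() poll is picked up on the
 * next pass instead of waiting for another doorbell.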
*/ taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work); } } static int ntb_process_rxc(struct ntb_transport_qp *qp) { struct ntb_payload_header *hdr; struct ntb_queue_entry *entry; caddr_t offset; offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; hdr = (void *)(offset + qp->rx_max_frame - sizeof(struct ntb_payload_header)); CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index); if ((hdr->flags & IF_NTB_DESC_DONE_FLAG) == 0) { CTR0(KTR_NTB, "RX: hdr not done"); qp->rx_ring_empty++; return (EAGAIN); } if ((hdr->flags & IF_NTB_LINK_DOWN_FLAG) != 0) { CTR0(KTR_NTB, "RX: link down"); ntb_qp_link_down(qp); hdr->flags = 0; return (EAGAIN); } if (hdr->ver != (uint32_t)qp->rx_pkts) { CTR2(KTR_NTB,"RX: ver != rx_pkts (%x != %lx). " "Returning entry to rx_pend_q", hdr->ver, qp->rx_pkts); qp->rx_err_ver++; return (EIO); } entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q); if (entry == NULL) { qp->rx_err_no_buf++; CTR0(KTR_NTB, "RX: No entries in rx_pend_q"); return (EAGAIN); } callout_stop(&qp->rx_full); CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry); entry->x_hdr = hdr; entry->index = qp->rx_index; if (hdr->len > entry->len) { CTR2(KTR_NTB, "RX: len too long. Wanted %ju got %ju", (uintmax_t)hdr->len, (uintmax_t)entry->len); qp->rx_err_oflow++; entry->len = -EIO; entry->flags |= IF_NTB_DESC_DONE_FLAG; taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task); } else { qp->rx_bytes += hdr->len; qp->rx_pkts++; CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts); entry->len = hdr->len; ntb_memcpy_rx(qp, entry, offset); } qp->rx_index++; qp->rx_index %= qp->rx_max_entry; return (0); } static void ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry, void *offset) { struct ifnet *ifp = entry->cb_data; unsigned int len = entry->len; struct mbuf *m; CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset); m = m_devget(offset, len, 0, ifp, NULL); m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID; entry->buf = (void *)m; /* Ensure that the data is globally visible before clearing the flag */ wmb(); CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p.", entry, m); ntb_rx_copy_callback(qp, entry); } static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data) { struct ntb_queue_entry *entry; entry = data; entry->flags |= IF_NTB_DESC_DONE_FLAG; taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task); } static void ntb_complete_rxc(void *arg, int pending) { struct ntb_transport_qp *qp = arg; struct ntb_queue_entry *entry; struct mbuf *m; unsigned len; CTR0(KTR_NTB, "RX: rx_completion_task"); mtx_lock_spin(&qp->ntb_rx_q_lock); while (!STAILQ_EMPTY(&qp->rx_post_q)) { entry = STAILQ_FIRST(&qp->rx_post_q); if ((entry->flags & IF_NTB_DESC_DONE_FLAG) == 0) break; entry->x_hdr->flags = 0; iowrite32(entry->index, &qp->rx_info->entry); STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry); len = entry->len; m = entry->buf; /* * Re-initialize queue_entry for reuse; rx_handler takes * ownership of the mbuf. 
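 * The entry itself is recycled onto rx_pend_q below with its length
 * reset to transport_mtu, so it can describe the next incoming frame.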
*/ entry->buf = NULL; entry->len = transport_mtu; entry->cb_data = qp->transport->ifp; STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry); mtx_unlock_spin(&qp->ntb_rx_q_lock); CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m); if (qp->rx_handler != NULL && qp->client_ready) qp->rx_handler(qp, qp->cb_data, m, len); else m_freem(m); mtx_lock_spin(&qp->ntb_rx_q_lock); } mtx_unlock_spin(&qp->ntb_rx_q_lock); } static void ntb_transport_doorbell_callback(void *data, uint32_t vector) { struct ntb_transport_ctx *nt = data; struct ntb_transport_qp *qp; struct _qpset db_bits; uint64_t vec_mask; unsigned qp_num; BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &db_bits); BIT_NAND(QP_SETSIZE, &db_bits, &nt->qp_bitmap_free); vec_mask = ntb_db_vector_mask(nt->ntb, vector); while (vec_mask != 0) { qp_num = ffsll(vec_mask) - 1; if (test_bit(qp_num, &db_bits)) { qp = &nt->qp_vec[qp_num]; taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work); } vec_mask &= ~(1ull << qp_num); } } /* Link Event handler */ static void ntb_transport_event_callback(void *data) { struct ntb_transport_ctx *nt = data; if (ntb_link_is_up(nt->ntb, NULL, NULL)) { ntb_printf(1, "HW link up\n"); callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt); } else { ntb_printf(1, "HW link down\n"); taskqueue_enqueue(taskqueue_swi, &nt->link_cleanup); } } /* Link bring up */ static void ntb_transport_link_work(void *arg) { struct ntb_transport_ctx *nt = arg; struct ntb_softc *ntb = nt->ntb; struct ntb_transport_qp *qp; uint64_t val64, size; uint32_t val; unsigned i; int rc; /* send the local info, in the opposite order of the way we read it */ for (i = 0; i < nt->mw_count; i++) { size = nt->mw_vec[i].phys_size; if (max_mw_size != 0 && size > max_mw_size) size = max_mw_size; ntb_peer_spad_write(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), size >> 32); ntb_peer_spad_write(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), size); } ntb_peer_spad_write(ntb, IF_NTB_NUM_MWS, nt->mw_count); ntb_peer_spad_write(ntb, IF_NTB_NUM_QPS, nt->qp_count); ntb_peer_spad_write(ntb, IF_NTB_VERSION, NTB_TRANSPORT_VERSION); /* Query the remote side for its info */ val = 0; ntb_spad_read(ntb, IF_NTB_VERSION, &val); if (val != NTB_TRANSPORT_VERSION) goto out; ntb_spad_read(ntb, IF_NTB_NUM_QPS, &val); if (val != nt->qp_count) goto out; ntb_spad_read(ntb, IF_NTB_NUM_MWS, &val); if (val != nt->mw_count) goto out; for (i = 0; i < nt->mw_count; i++) { ntb_spad_read(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2), &val); val64 = (uint64_t)val << 32; ntb_spad_read(ntb, IF_NTB_MW0_SZ_LOW + (i * 2), &val); val64 |= val; rc = ntb_set_mw(nt, i, val64); if (rc != 0) goto free_mws; } nt->link_is_up = true; ntb_printf(1, "transport link up\n"); for (i = 0; i < nt->qp_count; i++) { qp = &nt->qp_vec[i]; ntb_transport_setup_qp_mw(nt, i); if (qp->client_ready) callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp); } return; free_mws: for (i = 0; i < nt->mw_count; i++) ntb_free_mw(nt, i); out: if (ntb_link_is_up(ntb, NULL, NULL)) callout_reset(&nt->link_work, NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt); } static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, size_t size) { struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; size_t xlat_size, buff_size; int rc; if (size == 0) return (EINVAL); xlat_size = roundup(size, mw->xlat_align_size); buff_size = xlat_size; /* No need to re-setup */ if (mw->xlat_size == xlat_size) return (0); if (mw->buff_size != 0) ntb_free_mw(nt, num_mw); /* Alloc memory for receiving data. 
Must be aligned */ mw->xlat_size = xlat_size; mw->buff_size = buff_size; mw->virt_addr = contigmalloc(mw->buff_size, M_NTB_IF, M_ZERO, 0, mw->addr_limit, mw->xlat_align, 0); if (mw->virt_addr == NULL) { ntb_printf(0, "Unable to allocate MW buffer of size %zu/%zu\n", mw->buff_size, mw->xlat_size); mw->xlat_size = 0; mw->buff_size = 0; return (ENOMEM); } /* TODO: replace with bus_space_* functions */ mw->dma_addr = vtophys(mw->virt_addr); /* * Ensure that the allocation from contigmalloc is aligned as * requested. XXX: This may not be needed -- brought in for parity * with the Linux driver. */ if (mw->dma_addr % mw->xlat_align != 0) { ntb_printf(0, "DMA memory 0x%jx not aligned to BAR size 0x%zx\n", (uintmax_t)mw->dma_addr, size); ntb_free_mw(nt, num_mw); return (ENOMEM); } /* Notify HW the memory location of the receive buffer */ rc = ntb_mw_set_trans(nt->ntb, num_mw, mw->dma_addr, mw->xlat_size); if (rc) { ntb_printf(0, "Unable to set mw%d translation\n", num_mw); ntb_free_mw(nt, num_mw); return (rc); } return (0); } static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) { struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; if (mw->virt_addr == NULL) return; ntb_mw_clear_trans(nt->ntb, num_mw); contigfree(mw->virt_addr, mw->xlat_size, M_NTB_IF); mw->xlat_size = 0; mw->buff_size = 0; mw->virt_addr = NULL; } static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num) { struct ntb_transport_qp *qp = &nt->qp_vec[qp_num]; struct ntb_transport_mw *mw; void *offset; ntb_q_idx_t i; size_t rx_size; unsigned num_qps_mw, mw_num, mw_count; mw_count = nt->mw_count; mw_num = QP_TO_MW(nt, qp_num); mw = &nt->mw_vec[mw_num]; if (mw->virt_addr == NULL) return (ENOMEM); if (nt->qp_count % mw_count && mw_num + 1 < nt->qp_count / mw_count) num_qps_mw = nt->qp_count / mw_count + 1; else num_qps_mw = nt->qp_count / mw_count; rx_size = mw->xlat_size / num_qps_mw; qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count); rx_size -= sizeof(struct ntb_rx_info); qp->remote_rx_info = (void*)(qp->rx_buff + rx_size); /* Due to house-keeping, there must be at least 2 buffs */ qp->rx_max_frame = qmin(rx_size / 2, transport_mtu + sizeof(struct ntb_payload_header)); qp->rx_max_entry = rx_size / qp->rx_max_frame; qp->rx_index = 0; qp->remote_rx_info->entry = qp->rx_max_entry - 1; /* Set up the hdr offsets with 0s */ for (i = 0; i < qp->rx_max_entry; i++) { offset = (void *)(qp->rx_buff + qp->rx_max_frame * (i + 1) - sizeof(struct ntb_payload_header)); memset(offset, 0, sizeof(struct ntb_payload_header)); } qp->rx_pkts = 0; qp->tx_pkts = 0; qp->tx_index = 0; return (0); } static void ntb_qp_link_work(void *arg) { struct ntb_transport_qp *qp = arg; struct ntb_softc *ntb = qp->ntb; struct ntb_transport_ctx *nt = qp->transport; uint32_t val, dummy; ntb_spad_read(ntb, IF_NTB_QP_LINKS, &val); ntb_peer_spad_write(ntb, IF_NTB_QP_LINKS, val | (1ull << qp->qp_num)); /* query remote spad for qp ready bits */ ntb_peer_spad_read(ntb, IF_NTB_QP_LINKS, &dummy); /* See if the remote side is up */ if ((val & (1ull << qp->qp_num)) != 0) { ntb_printf(2, "qp link up\n"); qp->link_is_up = true; if (qp->event_handler != NULL) qp->event_handler(qp->cb_data, NTB_LINK_UP); taskqueue_enqueue(taskqueue_swi, &qp->rxc_db_work); } else if (nt->link_is_up) callout_reset(&qp->link_work, NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp); } /* Link down event*/ static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt) { struct ntb_transport_qp *qp; struct _qpset qp_bitmap_alloc; unsigned i; 
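	/*
	 * Compute the set of queue pairs currently handed out to clients:
	 * start from all valid QPs and mask off the ones still marked free.
	 */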
BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &qp_bitmap_alloc); BIT_NAND(QP_SETSIZE, &qp_bitmap_alloc, &nt->qp_bitmap_free); /* Pass along the info to any clients */ for (i = 0; i < nt->qp_count; i++) if (test_bit(i, &qp_bitmap_alloc)) { qp = &nt->qp_vec[i]; ntb_qp_link_cleanup(qp); callout_drain(&qp->link_work); } if (!nt->link_is_up) callout_drain(&nt->link_work); /* * The scratchpad registers keep the values if the remote side * goes down, blast them now to give them a sane value the next * time they are accessed */ for (i = 0; i < IF_NTB_MAX_SPAD; i++) ntb_spad_write(nt->ntb, i, 0); } static void ntb_transport_link_cleanup_work(void *arg, int pending __unused) { ntb_transport_link_cleanup(arg); } static void ntb_qp_link_down(struct ntb_transport_qp *qp) { ntb_qp_link_cleanup(qp); } static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp) { qp->link_is_up = false; qp->tx_index = qp->rx_index = 0; qp->tx_bytes = qp->rx_bytes = 0; qp->tx_pkts = qp->rx_pkts = 0; qp->rx_ring_empty = 0; qp->tx_ring_full = 0; qp->rx_err_no_buf = qp->tx_err_no_buf = 0; qp->rx_err_oflow = qp->rx_err_ver = 0; } static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp) { struct ntb_transport_ctx *nt = qp->transport; callout_drain(&qp->link_work); ntb_qp_link_down_reset(qp); if (qp->event_handler != NULL) qp->event_handler(qp->cb_data, NTB_LINK_DOWN); if (nt->link_is_up) callout_reset(&qp->link_work, NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp); } /* Link commanded down */ /** * ntb_transport_link_down - Notify NTB transport to no longer enqueue data * @qp: NTB transport layer queue to be disabled * * Notify NTB transport layer of client's desire to no longer receive data on * transport queue specified. It is the client's responsibility to ensure all * entries on queue are purged or otherwise handled appropriately. 
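 *
 * Clearing this side's bit in the IF_NTB_QP_LINKS scratchpad tells the peer
 * the queue is going away; if the queue link is still up, a final zero-length
 * frame flagged IF_NTB_LINK_DOWN_FLAG is pushed through the ring as well.
 * A typical consumer would call this before tearing the queue down with
 * ntb_transport_free_queue().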
*/ static void ntb_transport_link_down(struct ntb_transport_qp *qp) { uint32_t val; if (qp == NULL) return; qp->client_ready = false; ntb_spad_read(qp->ntb, IF_NTB_QP_LINKS, &val); ntb_peer_spad_write(qp->ntb, IF_NTB_QP_LINKS, val & ~(1 << qp->qp_num)); if (qp->link_is_up) ntb_send_link_down(qp); else callout_drain(&qp->link_work); } static void ntb_send_link_down(struct ntb_transport_qp *qp) { struct ntb_queue_entry *entry; int i, rc; if (!qp->link_is_up) return; for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) { entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q); if (entry != NULL) break; pause("NTB Wait for link down", hz / 10); } if (entry == NULL) return; entry->cb_data = NULL; entry->buf = NULL; entry->len = 0; entry->flags = IF_NTB_LINK_DOWN_FLAG; mtx_lock(&qp->transport->tx_lock); rc = ntb_process_tx(qp, entry); if (rc != 0) printf("ntb: Failed to send link down\n"); mtx_unlock(&qp->transport->tx_lock); ntb_qp_link_down_reset(qp); } /* List Management */ static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry, struct ntb_queue_list *list) { mtx_lock_spin(lock); STAILQ_INSERT_TAIL(list, entry, entry); mtx_unlock_spin(lock); } static struct ntb_queue_entry * ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list) { struct ntb_queue_entry *entry; mtx_lock_spin(lock); if (STAILQ_EMPTY(list)) { entry = NULL; goto out; } entry = STAILQ_FIRST(list); STAILQ_REMOVE_HEAD(list, entry); out: mtx_unlock_spin(lock); return (entry); } static struct ntb_queue_entry * ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from, struct ntb_queue_list *to) { struct ntb_queue_entry *entry; mtx_lock_spin(lock); if (STAILQ_EMPTY(from)) { entry = NULL; goto out; } entry = STAILQ_FIRST(from); STAILQ_REMOVE_HEAD(from, entry); STAILQ_INSERT_TAIL(to, entry, entry); out: mtx_unlock_spin(lock); return (entry); } /* Helper functions */ /* TODO: This too should really be part of the kernel */ #define EUI48_MULTICAST 1 << 0 #define EUI48_LOCALLY_ADMINISTERED 1 << 1 static void create_random_local_eui48(u_char *eaddr) { static uint8_t counter = 0; uint32_t seed = ticks; eaddr[0] = EUI48_LOCALLY_ADMINISTERED; memcpy(&eaddr[1], &seed, sizeof(uint32_t)); eaddr[5] = counter++; } /** * ntb_transport_max_size - Query the max payload size of a qp * @qp: NTB transport layer queue to be queried * * Query the maximum payload size permissible on the given qp * * RETURNS: the max payload size of a qp */ static unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) { if (qp == NULL) return (0); return (qp->tx_max_frame - sizeof(struct ntb_payload_header)); } Index: head/sys/dev/ntb/ntb_hw/ntb_hw.c =================================================================== --- head/sys/dev/ntb/ntb_hw/ntb_hw.c (revision 295879) +++ head/sys/dev/ntb/ntb_hw/ntb_hw.c (revision 295880) @@ -1,3300 +1,3299 @@ /*- * Copyright (C) 2013 Intel Corporation * Copyright (C) 2015 EMC Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include "ntb_regs.h" #include "ntb_hw.h" /* * The Non-Transparent Bridge (NTB) is a device on some Intel processors that * allows you to connect two systems using a PCI-e link. * * This module contains the hardware abstraction layer for the NTB. It allows * you to send and receive interrupts, map the memory windows and send and * receive messages in the scratch-pad registers. * * NOTE: Much of the code in this module is shared with Linux. Any patches may * be picked up and redistributed in Linux with a dual GPL/BSD license. */ #define MAX_MSIX_INTERRUPTS MAX(XEON_DB_COUNT, ATOM_DB_COUNT) #define NTB_HB_TIMEOUT 1 /* second */ #define ATOM_LINK_RECOVERY_TIME 500 /* ms */ #define BAR_HIGH_MASK (~((1ull << 12) - 1)) #define DEVICE2SOFTC(dev) ((struct ntb_softc *) device_get_softc(dev)) #define NTB_MSIX_VER_GUARD 0xaabbccdd #define NTB_MSIX_RECEIVED 0xe0f0e0f0 #define ONE_MB (1024u * 1024) /* * PCI constants could be somewhere more generic, but aren't defined/used in * pci.c. */ #define PCI_MSIX_ENTRY_SIZE 16 #define PCI_MSIX_ENTRY_LOWER_ADDR 0 #define PCI_MSIX_ENTRY_UPPER_ADDR 4 #define PCI_MSIX_ENTRY_DATA 8 enum ntb_device_type { NTB_XEON, NTB_ATOM }; /* ntb_conn_type are hardware numbers, cannot change. 
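 * The values are compared directly against the connection-type field of the
 * PPD register (see ntb_detect_xeon() and ntb_detect_atom()), so renumbering
 * the enum would break device detection.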
*/ enum ntb_conn_type { NTB_CONN_TRANSPARENT = 0, NTB_CONN_B2B = 1, NTB_CONN_RP = 2, }; enum ntb_b2b_direction { NTB_DEV_USD = 0, NTB_DEV_DSD = 1, }; enum ntb_bar { NTB_CONFIG_BAR = 0, NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3, NTB_MAX_BARS }; enum { NTB_MSIX_GUARD = 0, NTB_MSIX_DATA0, NTB_MSIX_DATA1, NTB_MSIX_DATA2, NTB_MSIX_OFS0, NTB_MSIX_OFS1, NTB_MSIX_OFS2, NTB_MSIX_DONE, NTB_MAX_MSIX_SPAD }; /* Device features and workarounds */ #define HAS_FEATURE(feature) \ ((ntb->features & (feature)) != 0) struct ntb_hw_info { uint32_t device_id; const char *desc; enum ntb_device_type type; uint32_t features; }; struct ntb_pci_bar_info { bus_space_tag_t pci_bus_tag; bus_space_handle_t pci_bus_handle; int pci_resource_id; struct resource *pci_resource; vm_paddr_t pbase; caddr_t vbase; vm_size_t size; vm_memattr_t map_mode; /* Configuration register offsets */ uint32_t psz_off; uint32_t ssz_off; uint32_t pbarxlat_off; }; struct ntb_int_info { struct resource *res; int rid; void *tag; }; struct ntb_vec { struct ntb_softc *ntb; uint32_t num; unsigned masked; }; struct ntb_reg { uint32_t ntb_ctl; uint32_t lnk_sta; uint8_t db_size; unsigned mw_bar[NTB_MAX_BARS]; }; struct ntb_alt_reg { uint32_t db_bell; uint32_t db_mask; uint32_t spad; }; struct ntb_xlat_reg { uint32_t bar0_base; uint32_t bar2_base; uint32_t bar4_base; uint32_t bar5_base; uint32_t bar2_xlat; uint32_t bar4_xlat; uint32_t bar5_xlat; uint32_t bar2_limit; uint32_t bar4_limit; uint32_t bar5_limit; }; struct ntb_b2b_addr { uint64_t bar0_addr; uint64_t bar2_addr64; uint64_t bar4_addr64; uint64_t bar4_addr32; uint64_t bar5_addr32; }; struct ntb_msix_data { uint32_t nmd_ofs; uint32_t nmd_data; }; struct ntb_softc { device_t device; enum ntb_device_type type; uint32_t features; struct ntb_pci_bar_info bar_info[NTB_MAX_BARS]; struct ntb_int_info int_info[MAX_MSIX_INTERRUPTS]; uint32_t allocated_interrupts; struct ntb_msix_data peer_msix_data[XEON_NONLINK_DB_MSIX_BITS]; struct ntb_msix_data msix_data[XEON_NONLINK_DB_MSIX_BITS]; bool peer_msix_good; bool peer_msix_done; struct ntb_pci_bar_info *peer_lapic_bar; struct callout peer_msix_work; struct callout heartbeat_timer; struct callout lr_timer; void *ntb_ctx; const struct ntb_ctx_ops *ctx_ops; struct ntb_vec *msix_vec; #define CTX_LOCK(sc) mtx_lock(&(sc)->ctx_lock) #define CTX_UNLOCK(sc) mtx_unlock(&(sc)->ctx_lock) #define CTX_ASSERT(sc,f) mtx_assert(&(sc)->ctx_lock, (f)) struct mtx ctx_lock; uint32_t ppd; enum ntb_conn_type conn_type; enum ntb_b2b_direction dev_type; /* Offset of peer bar0 in B2B BAR */ uint64_t b2b_off; /* Memory window used to access peer bar0 */ #define B2B_MW_DISABLED UINT8_MAX uint8_t b2b_mw_idx; uint8_t msix_mw_idx; uint8_t mw_count; uint8_t spad_count; uint8_t db_count; uint8_t db_vec_count; uint8_t db_vec_shift; /* Protects local db_mask. 
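 * A spin mutex is used because the mask is touched both from regular context
 * (e.g. ntb_db_set_mask()/ntb_db_clear_mask()) and from the interrupt path.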
*/ #define DB_MASK_LOCK(sc) mtx_lock_spin(&(sc)->db_mask_lock) #define DB_MASK_UNLOCK(sc) mtx_unlock_spin(&(sc)->db_mask_lock) #define DB_MASK_ASSERT(sc,f) mtx_assert(&(sc)->db_mask_lock, (f)) struct mtx db_mask_lock; volatile uint32_t ntb_ctl; volatile uint32_t lnk_sta; uint64_t db_valid_mask; uint64_t db_link_mask; uint64_t db_mask; int last_ts; /* ticks @ last irq */ const struct ntb_reg *reg; const struct ntb_alt_reg *self_reg; const struct ntb_alt_reg *peer_reg; const struct ntb_xlat_reg *xlat_reg; }; #ifdef __i386__ static __inline uint64_t bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t offset) { return (bus_space_read_4(tag, handle, offset) | ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32); } static __inline void bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t offset, uint64_t val) { bus_space_write_4(tag, handle, offset, val); bus_space_write_4(tag, handle, offset + 4, val >> 32); } #endif #define ntb_bar_read(SIZE, bar, offset) \ bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \ ntb->bar_info[(bar)].pci_bus_handle, (offset)) #define ntb_bar_write(SIZE, bar, offset, val) \ bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \ ntb->bar_info[(bar)].pci_bus_handle, (offset), (val)) #define ntb_reg_read(SIZE, offset) ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset) #define ntb_reg_write(SIZE, offset, val) \ ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val) #define ntb_mw_read(SIZE, offset) \ ntb_bar_read(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), offset) #define ntb_mw_write(SIZE, offset, val) \ ntb_bar_write(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \ offset, val) static int ntb_probe(device_t device); static int ntb_attach(device_t device); static int ntb_detach(device_t device); static unsigned ntb_user_mw_to_idx(struct ntb_softc *, unsigned uidx); static inline enum ntb_bar ntb_mw_to_bar(struct ntb_softc *, unsigned mw); static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar); static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar, uint32_t *base, uint32_t *xlat, uint32_t *lmt); static int ntb_map_pci_bars(struct ntb_softc *ntb); static int ntb_mw_set_wc_internal(struct ntb_softc *, unsigned idx, vm_memattr_t); static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *, const char *); static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar); static int map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar); static void ntb_unmap_pci_bar(struct ntb_softc *ntb); static int ntb_remap_msix(device_t, uint32_t desired, uint32_t avail); static int ntb_init_isr(struct ntb_softc *ntb); static int ntb_setup_legacy_interrupt(struct ntb_softc *ntb); static int ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors); static void ntb_teardown_interrupts(struct ntb_softc *ntb); static inline uint64_t ntb_vec_mask(struct ntb_softc *, uint64_t db_vector); static void ntb_interrupt(struct ntb_softc *, uint32_t vec); static void ndev_vec_isr(void *arg); static void ndev_irq_isr(void *arg); static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff); static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t); static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t); static int ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors); static void ntb_free_msix_vec(struct ntb_softc *ntb); static void ntb_get_msix_info(struct ntb_softc *ntb, uint32_t num_vectors); static void 
ntb_exchange_msix(void *); static struct ntb_hw_info *ntb_get_device_info(uint32_t device_id); static void ntb_detect_max_mw(struct ntb_softc *ntb); static int ntb_detect_xeon(struct ntb_softc *ntb); static int ntb_detect_atom(struct ntb_softc *ntb); static int ntb_xeon_init_dev(struct ntb_softc *ntb); static int ntb_atom_init_dev(struct ntb_softc *ntb); static void ntb_teardown_xeon(struct ntb_softc *ntb); static void configure_atom_secondary_side_bars(struct ntb_softc *ntb); static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx, enum ntb_bar regbar); static void xeon_set_sbar_base_and_limit(struct ntb_softc *, uint64_t base_addr, enum ntb_bar idx, enum ntb_bar regbar); static void xeon_set_pbar_xlat(struct ntb_softc *, uint64_t base_addr, enum ntb_bar idx); static int xeon_setup_b2b_mw(struct ntb_softc *, const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr); static int xeon_setup_msix_bar(struct ntb_softc *); static inline bool link_is_up(struct ntb_softc *ntb); static inline bool _xeon_link_is_up(struct ntb_softc *ntb); static inline bool atom_link_is_err(struct ntb_softc *ntb); static inline enum ntb_speed ntb_link_sta_speed(struct ntb_softc *); static inline enum ntb_width ntb_link_sta_width(struct ntb_softc *); static void atom_link_hb(void *arg); static void ntb_db_event(struct ntb_softc *ntb, uint32_t vec); static void recover_atom_link(void *arg); static bool ntb_poll_link(struct ntb_softc *ntb); static void save_bar_parameters(struct ntb_pci_bar_info *bar); static void ntb_sysctl_init(struct ntb_softc *); static int sysctl_handle_features(SYSCTL_HANDLER_ARGS); static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS); static int sysctl_handle_register(SYSCTL_HANDLER_ARGS); static unsigned g_ntb_hw_debug_level; SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose"); #define ntb_printf(lvl, ...) do { \ if ((lvl) <= g_ntb_hw_debug_level) { \ device_printf(ntb->device, __VA_ARGS__); \ } \ } while (0) #define _NTB_PAT_UC 0 #define _NTB_PAT_WC 1 #define _NTB_PAT_WT 4 #define _NTB_PAT_WP 5 #define _NTB_PAT_WB 6 #define _NTB_PAT_UCM 7 static unsigned g_ntb_mw_pat = _NTB_PAT_UC; SYSCTL_UINT(_hw_ntb, OID_AUTO, default_mw_pat, CTLFLAG_RDTUN, &g_ntb_mw_pat, 0, "Configure the default memory window cache flags (PAT): " "UC: " __XSTRING(_NTB_PAT_UC) ", " "WC: " __XSTRING(_NTB_PAT_WC) ", " "WT: " __XSTRING(_NTB_PAT_WT) ", " "WP: " __XSTRING(_NTB_PAT_WP) ", " "WB: " __XSTRING(_NTB_PAT_WB) ", " "UC-: " __XSTRING(_NTB_PAT_UCM)); static inline vm_memattr_t ntb_pat_flags(void) { switch (g_ntb_mw_pat) { case _NTB_PAT_WC: return (VM_MEMATTR_WRITE_COMBINING); case _NTB_PAT_WT: return (VM_MEMATTR_WRITE_THROUGH); case _NTB_PAT_WP: return (VM_MEMATTR_WRITE_PROTECTED); case _NTB_PAT_WB: return (VM_MEMATTR_WRITE_BACK); case _NTB_PAT_UCM: return (VM_MEMATTR_WEAK_UNCACHEABLE); case _NTB_PAT_UC: /* FALLTHROUGH */ default: return (VM_MEMATTR_UNCACHEABLE); } } /* * Well, this obviously doesn't belong here, but it doesn't seem to exist * anywhere better yet. 
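 * It simply maps vm_memattr_t values to printable names for diagnostics.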
*/ static inline const char * ntb_vm_memattr_to_str(vm_memattr_t pat) { switch (pat) { case VM_MEMATTR_WRITE_COMBINING: return ("WRITE_COMBINING"); case VM_MEMATTR_WRITE_THROUGH: return ("WRITE_THROUGH"); case VM_MEMATTR_WRITE_PROTECTED: return ("WRITE_PROTECTED"); case VM_MEMATTR_WRITE_BACK: return ("WRITE_BACK"); case VM_MEMATTR_WEAK_UNCACHEABLE: return ("UNCACHED"); case VM_MEMATTR_UNCACHEABLE: return ("UNCACHEABLE"); default: return ("UNKNOWN"); } } static int g_ntb_msix_idx = 0; SYSCTL_INT(_hw_ntb, OID_AUTO, msix_mw_idx, CTLFLAG_RDTUN, &g_ntb_msix_idx, 0, "Use this memory window to access the peer MSIX message complex on " "certain Xeon-based NTB systems, as a workaround for a hardware errata. " "Like b2b_mw_idx, negative values index from the last available memory " "window. (Applies on Xeon platforms with SB01BASE_LOCKUP errata.)"); static int g_ntb_mw_idx = -1; SYSCTL_INT(_hw_ntb, OID_AUTO, b2b_mw_idx, CTLFLAG_RDTUN, &g_ntb_mw_idx, 0, "Use this memory window to access the peer NTB registers. A " "non-negative value starts from the first MW index; a negative value " "starts from the last MW index. The default is -1, i.e., the last " "available memory window. Both sides of the NTB MUST set the same " "value here! (Applies on Xeon platforms with SDOORBELL_LOCKUP errata.)"); static struct ntb_hw_info pci_ids[] = { /* XXX: PS/SS IDs left out until they are supported. */ { 0x0C4E8086, "BWD Atom Processor S1200 Non-Transparent Bridge B2B", NTB_ATOM, 0 }, { 0x37258086, "JSF Xeon C35xx/C55xx Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 }, { 0x3C0D8086, "SNB Xeon E5/Core i7 Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 }, { 0x0E0D8086, "IVT Xeon E5 V2 Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP | NTB_BAR_SIZE_4K }, { 0x2F0D8086, "HSX Xeon E5 V3 Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP }, { 0x6F0D8086, "BDX Xeon E5 V4 Non-Transparent Bridge B2B", NTB_XEON, NTB_SDOORBELL_LOCKUP | NTB_B2BDOORBELL_BIT14 | NTB_SB01BASE_LOCKUP }, { 0x00000000, NULL, NTB_ATOM, 0 } }; static const struct ntb_reg atom_reg = { .ntb_ctl = ATOM_NTBCNTL_OFFSET, .lnk_sta = ATOM_LINK_STATUS_OFFSET, .db_size = sizeof(uint64_t), .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2 }, }; static const struct ntb_alt_reg atom_pri_reg = { .db_bell = ATOM_PDOORBELL_OFFSET, .db_mask = ATOM_PDBMSK_OFFSET, .spad = ATOM_SPAD_OFFSET, }; static const struct ntb_alt_reg atom_b2b_reg = { .db_bell = ATOM_B2B_DOORBELL_OFFSET, .spad = ATOM_B2B_SPAD_OFFSET, }; static const struct ntb_xlat_reg atom_sec_xlat = { #if 0 /* "FIXME" says the Linux driver. 
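 * The secondary base/limit offsets below are compiled out, matching the
 * Linux driver; only the translation registers are programmed for Atom.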
*/ .bar0_base = ATOM_SBAR0BASE_OFFSET, .bar2_base = ATOM_SBAR2BASE_OFFSET, .bar4_base = ATOM_SBAR4BASE_OFFSET, .bar2_limit = ATOM_SBAR2LMT_OFFSET, .bar4_limit = ATOM_SBAR4LMT_OFFSET, #endif .bar2_xlat = ATOM_SBAR2XLAT_OFFSET, .bar4_xlat = ATOM_SBAR4XLAT_OFFSET, }; static const struct ntb_reg xeon_reg = { .ntb_ctl = XEON_NTBCNTL_OFFSET, .lnk_sta = XEON_LINK_STATUS_OFFSET, .db_size = sizeof(uint16_t), .mw_bar = { NTB_B2B_BAR_1, NTB_B2B_BAR_2, NTB_B2B_BAR_3 }, }; static const struct ntb_alt_reg xeon_pri_reg = { .db_bell = XEON_PDOORBELL_OFFSET, .db_mask = XEON_PDBMSK_OFFSET, .spad = XEON_SPAD_OFFSET, }; static const struct ntb_alt_reg xeon_b2b_reg = { .db_bell = XEON_B2B_DOORBELL_OFFSET, .spad = XEON_B2B_SPAD_OFFSET, }; static const struct ntb_xlat_reg xeon_sec_xlat = { .bar0_base = XEON_SBAR0BASE_OFFSET, .bar2_base = XEON_SBAR2BASE_OFFSET, .bar4_base = XEON_SBAR4BASE_OFFSET, .bar5_base = XEON_SBAR5BASE_OFFSET, .bar2_limit = XEON_SBAR2LMT_OFFSET, .bar4_limit = XEON_SBAR4LMT_OFFSET, .bar5_limit = XEON_SBAR5LMT_OFFSET, .bar2_xlat = XEON_SBAR2XLAT_OFFSET, .bar4_xlat = XEON_SBAR4XLAT_OFFSET, .bar5_xlat = XEON_SBAR5XLAT_OFFSET, }; static struct ntb_b2b_addr xeon_b2b_usd_addr = { .bar0_addr = XEON_B2B_BAR0_ADDR, .bar2_addr64 = XEON_B2B_BAR2_ADDR64, .bar4_addr64 = XEON_B2B_BAR4_ADDR64, .bar4_addr32 = XEON_B2B_BAR4_ADDR32, .bar5_addr32 = XEON_B2B_BAR5_ADDR32, }; static struct ntb_b2b_addr xeon_b2b_dsd_addr = { .bar0_addr = XEON_B2B_BAR0_ADDR, .bar2_addr64 = XEON_B2B_BAR2_ADDR64, .bar4_addr64 = XEON_B2B_BAR4_ADDR64, .bar4_addr32 = XEON_B2B_BAR4_ADDR32, .bar5_addr32 = XEON_B2B_BAR5_ADDR32, }; SYSCTL_NODE(_hw_ntb, OID_AUTO, xeon_b2b, CTLFLAG_RW, 0, "B2B MW segment overrides -- MUST be the same on both sides"); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar2_addr64, CTLFLAG_RDTUN, &xeon_b2b_usd_addr.bar2_addr64, 0, "If using B2B topology on Xeon " "hardware, use this 64-bit address on the bus between the NTB devices for " "the window at BAR2, on the upstream side of the link. MUST be the same " "address on both sides."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr64, CTLFLAG_RDTUN, &xeon_b2b_usd_addr.bar4_addr64, 0, "See usd_bar2_addr64, but BAR4."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar4_addr32, CTLFLAG_RDTUN, &xeon_b2b_usd_addr.bar4_addr32, 0, "See usd_bar2_addr64, but BAR4 " "(split-BAR mode)."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, usd_bar5_addr32, CTLFLAG_RDTUN, &xeon_b2b_usd_addr.bar5_addr32, 0, "See usd_bar2_addr64, but BAR5 " "(split-BAR mode)."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar2_addr64, CTLFLAG_RDTUN, &xeon_b2b_dsd_addr.bar2_addr64, 0, "If using B2B topology on Xeon " "hardware, use this 64-bit address on the bus between the NTB devices for " "the window at BAR2, on the downstream side of the link. 
MUST be the same" " address on both sides."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr64, CTLFLAG_RDTUN, &xeon_b2b_dsd_addr.bar4_addr64, 0, "See dsd_bar2_addr64, but BAR4."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar4_addr32, CTLFLAG_RDTUN, &xeon_b2b_dsd_addr.bar4_addr32, 0, "See dsd_bar2_addr64, but BAR4 " "(split-BAR mode)."); SYSCTL_UQUAD(_hw_ntb_xeon_b2b, OID_AUTO, dsd_bar5_addr32, CTLFLAG_RDTUN, &xeon_b2b_dsd_addr.bar5_addr32, 0, "See dsd_bar2_addr64, but BAR5 " "(split-BAR mode)."); /* * OS <-> Driver interface structures */ MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations"); static device_method_t ntb_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ntb_probe), DEVMETHOD(device_attach, ntb_attach), DEVMETHOD(device_detach, ntb_detach), DEVMETHOD_END }; static driver_t ntb_pci_driver = { "ntb_hw", ntb_pci_methods, sizeof(struct ntb_softc), }; static devclass_t ntb_devclass; DRIVER_MODULE(ntb_hw, pci, ntb_pci_driver, ntb_devclass, NULL, NULL); MODULE_VERSION(ntb_hw, 1); SYSCTL_NODE(_hw, OID_AUTO, ntb, CTLFLAG_RW, 0, "NTB sysctls"); /* * OS <-> Driver linkage functions */ static int ntb_probe(device_t device) { struct ntb_hw_info *p; p = ntb_get_device_info(pci_get_devid(device)); if (p == NULL) return (ENXIO); device_set_desc(device, p->desc); return (0); } static int ntb_attach(device_t device) { struct ntb_softc *ntb; struct ntb_hw_info *p; int error; ntb = DEVICE2SOFTC(device); p = ntb_get_device_info(pci_get_devid(device)); ntb->device = device; ntb->type = p->type; ntb->features = p->features; ntb->b2b_mw_idx = B2B_MW_DISABLED; ntb->msix_mw_idx = B2B_MW_DISABLED; /* Heartbeat timer for NTB_ATOM since there is no link interrupt */ callout_init(&ntb->heartbeat_timer, 1); callout_init(&ntb->lr_timer, 1); callout_init(&ntb->peer_msix_work, 1); mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN); mtx_init(&ntb->ctx_lock, "ntb ctx", NULL, MTX_DEF); if (ntb->type == NTB_ATOM) error = ntb_detect_atom(ntb); else error = ntb_detect_xeon(ntb); if (error != 0) goto out; ntb_detect_max_mw(ntb); pci_enable_busmaster(ntb->device); error = ntb_map_pci_bars(ntb); if (error != 0) goto out; if (ntb->type == NTB_ATOM) error = ntb_atom_init_dev(ntb); else error = ntb_xeon_init_dev(ntb); if (error != 0) goto out; ntb_spad_clear(ntb); ntb_poll_link(ntb); ntb_sysctl_init(ntb); out: if (error != 0) ntb_detach(device); return (error); } static int ntb_detach(device_t device) { struct ntb_softc *ntb; ntb = DEVICE2SOFTC(device); if (ntb->self_reg != NULL) { DB_MASK_LOCK(ntb); db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_valid_mask); DB_MASK_UNLOCK(ntb); } callout_drain(&ntb->heartbeat_timer); callout_drain(&ntb->lr_timer); callout_drain(&ntb->peer_msix_work); pci_disable_busmaster(ntb->device); if (ntb->type == NTB_XEON) ntb_teardown_xeon(ntb); ntb_teardown_interrupts(ntb); mtx_destroy(&ntb->db_mask_lock); mtx_destroy(&ntb->ctx_lock); ntb_unmap_pci_bar(ntb); return (0); } /* * Driver internal routines */ static inline enum ntb_bar ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw) { KASSERT(mw < ntb->mw_count, ("%s: mw:%u > count:%u", __func__, mw, (unsigned)ntb->mw_count)); KASSERT(ntb->reg->mw_bar[mw] != 0, ("invalid mw")); return (ntb->reg->mw_bar[mw]); } static inline bool bar_is_64bit(struct ntb_softc *ntb, enum ntb_bar bar) { /* XXX This assertion could be stronger. 
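 * As written it only checks that the BAR index is within NTB_MAX_BARS.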
*/ KASSERT(bar < NTB_MAX_BARS, ("bogus bar")); return (bar < NTB_B2B_BAR_2 || !HAS_FEATURE(NTB_SPLIT_BAR)); } static inline void bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base, uint32_t *xlat, uint32_t *lmt) { uint32_t basev, lmtv, xlatv; switch (bar) { case NTB_B2B_BAR_1: basev = ntb->xlat_reg->bar2_base; lmtv = ntb->xlat_reg->bar2_limit; xlatv = ntb->xlat_reg->bar2_xlat; break; case NTB_B2B_BAR_2: basev = ntb->xlat_reg->bar4_base; lmtv = ntb->xlat_reg->bar4_limit; xlatv = ntb->xlat_reg->bar4_xlat; break; case NTB_B2B_BAR_3: basev = ntb->xlat_reg->bar5_base; lmtv = ntb->xlat_reg->bar5_limit; xlatv = ntb->xlat_reg->bar5_xlat; break; default: KASSERT(bar >= NTB_B2B_BAR_1 && bar < NTB_MAX_BARS, ("bad bar")); basev = lmtv = xlatv = 0; break; } if (base != NULL) *base = basev; if (xlat != NULL) *xlat = xlatv; if (lmt != NULL) *lmt = lmtv; } static int ntb_map_pci_bars(struct ntb_softc *ntb) { int rc; ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0); rc = map_mmr_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]); if (rc != 0) goto out; ntb->bar_info[NTB_B2B_BAR_1].pci_resource_id = PCIR_BAR(2); rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_1]); if (rc != 0) goto out; ntb->bar_info[NTB_B2B_BAR_1].psz_off = XEON_PBAR23SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_1].ssz_off = XEON_SBAR23SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off = XEON_PBAR2XLAT_OFFSET; ntb->bar_info[NTB_B2B_BAR_2].pci_resource_id = PCIR_BAR(4); rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_2]); if (rc != 0) goto out; ntb->bar_info[NTB_B2B_BAR_2].psz_off = XEON_PBAR4SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_2].ssz_off = XEON_SBAR4SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off = XEON_PBAR4XLAT_OFFSET; if (!HAS_FEATURE(NTB_SPLIT_BAR)) goto out; ntb->bar_info[NTB_B2B_BAR_3].pci_resource_id = PCIR_BAR(5); rc = map_memory_window_bar(ntb, &ntb->bar_info[NTB_B2B_BAR_3]); ntb->bar_info[NTB_B2B_BAR_3].psz_off = XEON_PBAR5SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_3].ssz_off = XEON_SBAR5SZ_OFFSET; ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off = XEON_PBAR5XLAT_OFFSET; out: if (rc != 0) device_printf(ntb->device, "unable to allocate pci resource\n"); return (rc); } static void print_map_success(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar, const char *kind) { device_printf(ntb->device, "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n", PCI_RID2BAR(bar->pci_resource_id), bar->vbase, (char *)bar->vbase + bar->size - 1, (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), (uintmax_t)bar->size, kind); } static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar) { bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, &bar->pci_resource_id, RF_ACTIVE); if (bar->pci_resource == NULL) return (ENXIO); save_bar_parameters(bar); bar->map_mode = VM_MEMATTR_UNCACHEABLE; print_map_success(ntb, bar, "mmr"); return (0); } static int map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar) { int rc; vm_memattr_t mapmode; uint8_t bar_size_bits = 0; bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY, &bar->pci_resource_id, RF_ACTIVE); if (bar->pci_resource == NULL) return (ENXIO); save_bar_parameters(bar); /* * Ivytown NTB BAR sizes are misreported by the hardware due to a * hardware issue. To work around this, query the size it should be * configured to by the device and modify the resource to correspond to * this new size. 
The BIOS on systems with this problem is required to * provide enough address space to allow the driver to make this change * safely. * * Ideally I could have just specified the size when I allocated the * resource like: * bus_alloc_resource(ntb->device, * SYS_RES_MEMORY, &bar->pci_resource_id, 0ul, ~0ul, * 1ul << bar_size_bits, RF_ACTIVE); * but the PCI driver does not honor the size in this call, so we have * to modify it after the fact. */ if (HAS_FEATURE(NTB_BAR_SIZE_4K)) { if (bar->pci_resource_id == PCIR_BAR(2)) bar_size_bits = pci_read_config(ntb->device, XEON_PBAR23SZ_OFFSET, 1); else bar_size_bits = pci_read_config(ntb->device, XEON_PBAR45SZ_OFFSET, 1); rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY, bar->pci_resource, bar->pbase, bar->pbase + (1ul << bar_size_bits) - 1); if (rc != 0) { device_printf(ntb->device, "unable to resize bar\n"); return (rc); } save_bar_parameters(bar); } bar->map_mode = VM_MEMATTR_UNCACHEABLE; print_map_success(ntb, bar, "mw"); /* * Optionally, mark MW BARs as anything other than UC to improve * performance. */ mapmode = ntb_pat_flags(); if (mapmode == bar->map_mode) return (0); rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mapmode); if (rc == 0) { bar->map_mode = mapmode; device_printf(ntb->device, "Marked BAR%d v:[%p-%p] p:[%p-%p] as " "%s.\n", PCI_RID2BAR(bar->pci_resource_id), bar->vbase, (char *)bar->vbase + bar->size - 1, (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), ntb_vm_memattr_to_str(mapmode)); } else device_printf(ntb->device, "Unable to mark BAR%d v:[%p-%p] p:[%p-%p] as " "%s: %d\n", PCI_RID2BAR(bar->pci_resource_id), bar->vbase, (char *)bar->vbase + bar->size - 1, (void *)bar->pbase, (void *)(bar->pbase + bar->size - 1), ntb_vm_memattr_to_str(mapmode), rc); /* Proceed anyway */ return (0); } static void ntb_unmap_pci_bar(struct ntb_softc *ntb) { struct ntb_pci_bar_info *current_bar; int i; for (i = 0; i < NTB_MAX_BARS; i++) { current_bar = &ntb->bar_info[i]; if (current_bar->pci_resource != NULL) bus_release_resource(ntb->device, SYS_RES_MEMORY, current_bar->pci_resource_id, current_bar->pci_resource); } } static int ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors) { uint32_t i; int rc; for (i = 0; i < num_vectors; i++) { ntb->int_info[i].rid = i + 1; ntb->int_info[i].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, &ntb->int_info[i].rid, RF_ACTIVE); if (ntb->int_info[i].res == NULL) { device_printf(ntb->device, "bus_alloc_resource failed\n"); return (ENOMEM); } ntb->int_info[i].tag = NULL; ntb->allocated_interrupts++; rc = bus_setup_intr(ntb->device, ntb->int_info[i].res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_vec_isr, &ntb->msix_vec[i], &ntb->int_info[i].tag); if (rc != 0) { device_printf(ntb->device, "bus_setup_intr failed\n"); return (ENXIO); } } return (0); } /* * The Linux NTB driver drops from MSI-X to legacy INTx if a unique vector * cannot be allocated for each MSI-X message. JHB seems to think remapping * should be okay. This tunable should enable us to test that hypothesis * when someone gets their hands on some Xeon hardware. */ static int ntb_force_remap_mode; SYSCTL_INT(_hw_ntb, OID_AUTO, force_remap_mode, CTLFLAG_RDTUN, &ntb_force_remap_mode, 0, "If enabled, force MSI-X messages to be remapped" " to a smaller number of ithreads, even if the desired number are " "available"); /* * In case it is NOT ok, give consumers an abort button. 
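 * Setting the hw.ntb.prefer_intx_to_remap tunable below makes the remap
 * attempt fail, so the driver falls back to a single legacy INTx vector.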
*/ static int ntb_prefer_intx; SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN, &ntb_prefer_intx, 0, "If enabled, prefer to use legacy INTx mode rather " "than remapping MSI-X messages over available slots (match Linux driver " "behavior)"); /* * Remap the desired number of MSI-X messages to available ithreads in a simple * round-robin fashion. */ static int ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail) { u_int *vectors; uint32_t i; int rc; if (ntb_prefer_intx != 0) return (ENXIO); vectors = malloc(desired * sizeof(*vectors), M_NTB, M_ZERO | M_WAITOK); for (i = 0; i < desired; i++) vectors[i] = (i % avail) + 1; rc = pci_remap_msix(dev, desired, vectors); free(vectors, M_NTB); return (rc); } static int ntb_init_isr(struct ntb_softc *ntb) { uint32_t desired_vectors, num_vectors; int rc; ntb->allocated_interrupts = 0; ntb->last_ts = ticks; /* * Mask all doorbell interrupts. (Except link events!) */ DB_MASK_LOCK(ntb); ntb->db_mask = ntb->db_valid_mask; db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask); DB_MASK_UNLOCK(ntb); num_vectors = desired_vectors = MIN(pci_msix_count(ntb->device), ntb->db_count); if (desired_vectors >= 1) { rc = pci_alloc_msix(ntb->device, &num_vectors); if (ntb_force_remap_mode != 0 && rc == 0 && num_vectors == desired_vectors) num_vectors--; if (rc == 0 && num_vectors < desired_vectors) { rc = ntb_remap_msix(ntb->device, desired_vectors, num_vectors); if (rc == 0) num_vectors = desired_vectors; else pci_release_msi(ntb->device); } if (rc != 0) num_vectors = 1; } else num_vectors = 1; if (ntb->type == NTB_XEON && num_vectors < ntb->db_vec_count) { if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) { device_printf(ntb->device, "Errata workaround does not support MSI or INTX\n"); return (EINVAL); } ntb->db_vec_count = 1; ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT; rc = ntb_setup_legacy_interrupt(ntb); } else { ntb_create_msix_vec(ntb, num_vectors); rc = ntb_setup_msix(ntb, num_vectors); if (rc == 0 && HAS_FEATURE(NTB_SB01BASE_LOCKUP)) ntb_get_msix_info(ntb, num_vectors); } if (rc != 0) { device_printf(ntb->device, "Error allocating interrupts: %d\n", rc); ntb_free_msix_vec(ntb); } return (rc); } static int ntb_setup_legacy_interrupt(struct ntb_softc *ntb) { int rc; ntb->int_info[0].rid = 0; ntb->int_info[0].res = bus_alloc_resource_any(ntb->device, SYS_RES_IRQ, &ntb->int_info[0].rid, RF_SHAREABLE|RF_ACTIVE); if (ntb->int_info[0].res == NULL) { device_printf(ntb->device, "bus_alloc_resource failed\n"); return (ENOMEM); } ntb->int_info[0].tag = NULL; ntb->allocated_interrupts = 1; rc = bus_setup_intr(ntb->device, ntb->int_info[0].res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, ndev_irq_isr, ntb, &ntb->int_info[0].tag); if (rc != 0) { device_printf(ntb->device, "bus_setup_intr failed\n"); return (ENXIO); } return (0); } static void ntb_teardown_interrupts(struct ntb_softc *ntb) { struct ntb_int_info *current_int; int i; for (i = 0; i < ntb->allocated_interrupts; i++) { current_int = &ntb->int_info[i]; if (current_int->tag != NULL) bus_teardown_intr(ntb->device, current_int->res, current_int->tag); if (current_int->res != NULL) bus_release_resource(ntb->device, SYS_RES_IRQ, rman_get_rid(current_int->res), current_int->res); } ntb_free_msix_vec(ntb); pci_release_msi(ntb->device); } /* * Doorbell register and mask are 64-bit on Atom, 16-bit on Xeon. Abstract it * out to make code clearer. 
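 * db_ioread() and db_iowrite() below pick 8-byte or 2-byte register
 * accesses based on ntb->type.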
*/ static inline uint64_t db_ioread(struct ntb_softc *ntb, uint64_t regoff) { if (ntb->type == NTB_ATOM) return (ntb_reg_read(8, regoff)); KASSERT(ntb->type == NTB_XEON, ("bad ntb type")); return (ntb_reg_read(2, regoff)); } static inline void db_iowrite(struct ntb_softc *ntb, uint64_t regoff, uint64_t val) { KASSERT((val & ~ntb->db_valid_mask) == 0, ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__, (uintmax_t)(val & ~ntb->db_valid_mask), (uintmax_t)ntb->db_valid_mask)); if (regoff == ntb->self_reg->db_mask) DB_MASK_ASSERT(ntb, MA_OWNED); db_iowrite_raw(ntb, regoff, val); } static inline void db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val) { if (ntb->type == NTB_ATOM) { ntb_reg_write(8, regoff, val); return; } KASSERT(ntb->type == NTB_XEON, ("bad ntb type")); ntb_reg_write(2, regoff, (uint16_t)val); } void ntb_db_set_mask(struct ntb_softc *ntb, uint64_t bits) { if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) return; DB_MASK_LOCK(ntb); ntb->db_mask |= bits; db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask); DB_MASK_UNLOCK(ntb); } void ntb_db_clear_mask(struct ntb_softc *ntb, uint64_t bits) { KASSERT((bits & ~ntb->db_valid_mask) == 0, ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__, (uintmax_t)(bits & ~ntb->db_valid_mask), (uintmax_t)ntb->db_valid_mask)); if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) return; DB_MASK_LOCK(ntb); ntb->db_mask &= ~bits; db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask); DB_MASK_UNLOCK(ntb); } uint64_t ntb_db_read(struct ntb_softc *ntb) { if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) { uint64_t res; unsigned i; res = 0; for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) { if (ntb->msix_vec[i].masked != 0) res |= ntb_db_vector_mask(ntb, i); } return (res); } return (db_ioread(ntb, ntb->self_reg->db_bell)); } void ntb_db_clear(struct ntb_softc *ntb, uint64_t bits) { KASSERT((bits & ~ntb->db_valid_mask) == 0, ("%s: Invalid bits 0x%jx (valid: 0x%jx)", __func__, (uintmax_t)(bits & ~ntb->db_valid_mask), (uintmax_t)ntb->db_valid_mask)); if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) { unsigned i; for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) { if ((bits & ntb_db_vector_mask(ntb, i)) != 0) { DB_MASK_LOCK(ntb); if (ntb->msix_vec[i].masked != 0) { /* XXX These need a public API. */ #if 0 pci_unmask_msix(ntb->device, i); #endif ntb->msix_vec[i].masked = 0; } DB_MASK_UNLOCK(ntb); } } return; } db_iowrite(ntb, ntb->self_reg->db_bell, bits); } static inline uint64_t ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector) { uint64_t shift, mask; shift = ntb->db_vec_shift; mask = (1ull << shift) - 1; return (mask << (shift * db_vector)); } static void ntb_interrupt(struct ntb_softc *ntb, uint32_t vec) { uint64_t vec_mask; ntb->last_ts = ticks; vec_mask = ntb_vec_mask(ntb, vec); if ((vec_mask & ntb->db_link_mask) != 0) { if (ntb_poll_link(ntb)) ntb_link_event(ntb); } if (HAS_FEATURE(NTB_SB01BASE_LOCKUP) && (vec_mask & ntb->db_link_mask) == 0) { DB_MASK_LOCK(ntb); if (ntb->msix_vec[vec].masked == 0) { /* XXX These need a public API. */ #if 0 pci_mask_msix(ntb->device, vec); #endif ntb->msix_vec[vec].masked = 1; } DB_MASK_UNLOCK(ntb); } if ((vec_mask & ntb->db_valid_mask) != 0) ntb_db_event(ntb, vec); } static void ndev_vec_isr(void *arg) { struct ntb_vec *nvec = arg; ntb_interrupt(nvec->ntb, nvec->num); } static void ndev_irq_isr(void *arg) { /* If we couldn't set up MSI-X, we only have the one vector. 
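 * The shared INTx handler therefore always dispatches vector 0.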
*/ ntb_interrupt(arg, 0); } static int ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors) { uint32_t i; ntb->msix_vec = malloc(num_vectors * sizeof(*ntb->msix_vec), M_NTB, M_ZERO | M_WAITOK); for (i = 0; i < num_vectors; i++) { ntb->msix_vec[i].num = i; ntb->msix_vec[i].ntb = ntb; } return (0); } static void ntb_free_msix_vec(struct ntb_softc *ntb) { if (ntb->msix_vec == NULL) return; free(ntb->msix_vec, M_NTB); ntb->msix_vec = NULL; } static void ntb_get_msix_info(struct ntb_softc *ntb, uint32_t num_vectors) { struct pci_devinfo *dinfo; struct pcicfg_msix *msix; uint32_t laddr, data, i, offset; dinfo = device_get_ivars(ntb->device); msix = &dinfo->cfg.msix; laddr = data = 0; for (i = 0; i < num_vectors; i++) { offset = msix->msix_table_offset + i * PCI_MSIX_ENTRY_SIZE; laddr = bus_read_4(msix->msix_table_res, offset + PCI_MSIX_ENTRY_LOWER_ADDR); ntb_printf(2, "local lower MSIX addr(%u): 0x%x\n", i, laddr); KASSERT((laddr & MSI_INTEL_ADDR_BASE) == MSI_INTEL_ADDR_BASE, ("local MSIX addr 0x%x not in MSI base 0x%x", laddr, MSI_INTEL_ADDR_BASE)); ntb->msix_data[i].nmd_ofs = laddr & ~MSI_INTEL_ADDR_BASE; data = bus_read_4(msix->msix_table_res, offset + PCI_MSIX_ENTRY_DATA); ntb_printf(2, "local MSIX data(%u): 0x%x\n", i, data); ntb->msix_data[i].nmd_data = data; } } static struct ntb_hw_info * ntb_get_device_info(uint32_t device_id) { struct ntb_hw_info *ep = pci_ids; while (ep->device_id) { if (ep->device_id == device_id) return (ep); ++ep; } return (NULL); } static void ntb_teardown_xeon(struct ntb_softc *ntb) { if (ntb->reg != NULL) ntb_link_disable(ntb); } static void ntb_detect_max_mw(struct ntb_softc *ntb) { if (ntb->type == NTB_ATOM) { ntb->mw_count = ATOM_MW_COUNT; return; } if (HAS_FEATURE(NTB_SPLIT_BAR)) ntb->mw_count = XEON_HSX_SPLIT_MW_COUNT; else ntb->mw_count = XEON_SNB_MW_COUNT; } static int ntb_detect_xeon(struct ntb_softc *ntb) { uint8_t ppd, conn_type; ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 1); ntb->ppd = ppd; if ((ppd & XEON_PPD_DEV_TYPE) != 0) ntb->dev_type = NTB_DEV_DSD; else ntb->dev_type = NTB_DEV_USD; if ((ppd & XEON_PPD_SPLIT_BAR) != 0) ntb->features |= NTB_SPLIT_BAR; /* * SDOORBELL errata workaround gets in the way of SB01BASE_LOCKUP * errata workaround; only do one at a time. 
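 * SB01BASE_LOCKUP takes precedence: when that feature bit is set, the
 * SDOORBELL_LOCKUP flag is cleared below so only the MSI-X memory-window
 * workaround is applied.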
*/ if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) ntb->features &= ~NTB_SDOORBELL_LOCKUP; conn_type = ppd & XEON_PPD_CONN_TYPE; switch (conn_type) { case NTB_CONN_B2B: ntb->conn_type = conn_type; break; case NTB_CONN_RP: case NTB_CONN_TRANSPARENT: default: device_printf(ntb->device, "Unsupported connection type: %u\n", (unsigned)conn_type); return (ENXIO); } return (0); } static int ntb_detect_atom(struct ntb_softc *ntb) { uint32_t ppd, conn_type; ppd = pci_read_config(ntb->device, NTB_PPD_OFFSET, 4); ntb->ppd = ppd; if ((ppd & ATOM_PPD_DEV_TYPE) != 0) ntb->dev_type = NTB_DEV_DSD; else ntb->dev_type = NTB_DEV_USD; conn_type = (ppd & ATOM_PPD_CONN_TYPE) >> 8; switch (conn_type) { case NTB_CONN_B2B: ntb->conn_type = conn_type; break; default: device_printf(ntb->device, "Unsupported NTB configuration\n"); return (ENXIO); } return (0); } static int ntb_xeon_init_dev(struct ntb_softc *ntb) { int rc; ntb->spad_count = XEON_SPAD_COUNT; ntb->db_count = XEON_DB_COUNT; ntb->db_link_mask = XEON_DB_LINK_BIT; ntb->db_vec_count = XEON_DB_MSIX_VECTOR_COUNT; ntb->db_vec_shift = XEON_DB_MSIX_VECTOR_SHIFT; if (ntb->conn_type != NTB_CONN_B2B) { device_printf(ntb->device, "Connection type %d not supported\n", ntb->conn_type); return (ENXIO); } ntb->reg = &xeon_reg; ntb->self_reg = &xeon_pri_reg; ntb->peer_reg = &xeon_b2b_reg; ntb->xlat_reg = &xeon_sec_xlat; if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) { ntb->msix_mw_idx = (ntb->mw_count + g_ntb_msix_idx) % ntb->mw_count; ntb_printf(2, "Setting up MSIX mw idx %d means %u\n", g_ntb_msix_idx, ntb->msix_mw_idx); rc = ntb_mw_set_wc_internal(ntb, ntb->msix_mw_idx, VM_MEMATTR_UNCACHEABLE); KASSERT(rc == 0, ("shouldn't fail")); } else if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) { /* * There is a Xeon hardware errata related to writes to SDOORBELL or * B2BDOORBELL in conjunction with inbound access to NTB MMIO space, * which may hang the system. To workaround this, use a memory * window to access the interrupt and scratch pad registers on the * remote system. */ ntb->b2b_mw_idx = (ntb->mw_count + g_ntb_mw_idx) % ntb->mw_count; ntb_printf(2, "Setting up b2b mw idx %d means %u\n", g_ntb_mw_idx, ntb->b2b_mw_idx); rc = ntb_mw_set_wc_internal(ntb, ntb->b2b_mw_idx, VM_MEMATTR_UNCACHEABLE); KASSERT(rc == 0, ("shouldn't fail")); } else if (HAS_FEATURE(NTB_B2BDOORBELL_BIT14)) /* * HW Errata on bit 14 of b2bdoorbell register. Writes will not be * mirrored to the remote system. Shrink the number of bits by one, * since bit 14 is the last bit. * * On REGS_THRU_MW errata mode, we don't use the b2bdoorbell register * anyway. Nor for non-B2B connection types. */ ntb->db_count = XEON_DB_COUNT - 1; ntb->db_valid_mask = (1ull << ntb->db_count) - 1; if (ntb->dev_type == NTB_DEV_USD) rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_dsd_addr, &xeon_b2b_usd_addr); else rc = xeon_setup_b2b_mw(ntb, &xeon_b2b_usd_addr, &xeon_b2b_dsd_addr); if (rc != 0) return (rc); /* Enable Bus Master and Memory Space on the secondary side */ ntb_reg_write(2, XEON_SPCICMD_OFFSET, PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); /* * Mask all doorbell interrupts. 
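 * The bits stay masked until a consumer unmasks the ones it cares about via
 * ntb_db_clear_mask() (e.g. when a transport queue is created).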
*/ DB_MASK_LOCK(ntb); ntb->db_mask = ntb->db_valid_mask; db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask); DB_MASK_UNLOCK(ntb); rc = xeon_setup_msix_bar(ntb); if (rc != 0) return (rc); rc = ntb_init_isr(ntb); return (rc); } static int ntb_atom_init_dev(struct ntb_softc *ntb) { int error; KASSERT(ntb->conn_type == NTB_CONN_B2B, ("Unsupported NTB configuration (%d)\n", ntb->conn_type)); ntb->spad_count = ATOM_SPAD_COUNT; ntb->db_count = ATOM_DB_COUNT; ntb->db_vec_count = ATOM_DB_MSIX_VECTOR_COUNT; ntb->db_vec_shift = ATOM_DB_MSIX_VECTOR_SHIFT; ntb->db_valid_mask = (1ull << ntb->db_count) - 1; ntb->reg = &atom_reg; ntb->self_reg = &atom_pri_reg; ntb->peer_reg = &atom_b2b_reg; ntb->xlat_reg = &atom_sec_xlat; /* * FIXME - MSI-X bug on early Atom HW, remove once internal issue is * resolved. Mask transaction layer internal parity errors. */ pci_write_config(ntb->device, 0xFC, 0x4, 4); configure_atom_secondary_side_bars(ntb); /* Enable Bus Master and Memory Space on the secondary side */ ntb_reg_write(2, ATOM_SPCICMD_OFFSET, PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); error = ntb_init_isr(ntb); if (error != 0) return (error); /* Initiate PCI-E link training */ ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb); return (0); } /* XXX: Linux driver doesn't seem to do any of this for Atom. */ static void configure_atom_secondary_side_bars(struct ntb_softc *ntb) { if (ntb->dev_type == NTB_DEV_USD) { ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, XEON_B2B_BAR2_ADDR64); ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, XEON_B2B_BAR4_ADDR64); ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64); ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64); } else { ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET, XEON_B2B_BAR2_ADDR64); ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET, XEON_B2B_BAR4_ADDR64); ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64); ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64); } } /* * When working around Xeon SDOORBELL errata by remapping remote registers in a * MW, limit the B2B MW to half a MW. By sharing a MW, half the shared MW * remains for use by a higher layer. * * Will only be used if working around SDOORBELL errata and the BIOS-configured * MW size is sufficiently large. */ static unsigned int ntb_b2b_mw_share; SYSCTL_UINT(_hw_ntb, OID_AUTO, b2b_mw_share, CTLFLAG_RDTUN, &ntb_b2b_mw_share, 0, "If enabled (non-zero), prefer to share half of the B2B peer register " "MW with higher level consumers. 
Both sides of the NTB MUST set the same " "value here."); static void xeon_reset_sbar_size(struct ntb_softc *ntb, enum ntb_bar idx, enum ntb_bar regbar) { struct ntb_pci_bar_info *bar; uint8_t bar_sz; if (!HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_3) return; bar = &ntb->bar_info[idx]; bar_sz = pci_read_config(ntb->device, bar->psz_off, 1); if (idx == regbar) { if (ntb->b2b_off != 0) bar_sz--; else bar_sz = 0; } else if (HAS_FEATURE(NTB_SB01BASE_LOCKUP) && ntb_mw_to_bar(ntb, ntb->msix_mw_idx) == idx) { /* Restrict LAPIC BAR to 1MB */ pci_write_config(ntb->device, bar->psz_off, 20, 1); pci_write_config(ntb->device, bar->ssz_off, 20, 1); bar_sz = pci_read_config(ntb->device, bar->psz_off, 1); bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1); (void)bar_sz; return; } pci_write_config(ntb->device, bar->ssz_off, bar_sz, 1); bar_sz = pci_read_config(ntb->device, bar->ssz_off, 1); (void)bar_sz; } static void xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr, enum ntb_bar idx, enum ntb_bar regbar) { uint64_t reg_val, lmt_addr; uint32_t base_reg, lmt_reg; bar_get_xlat_params(ntb, idx, &base_reg, NULL, &lmt_reg); if (idx == regbar) bar_addr += ntb->b2b_off; lmt_addr = bar_addr; if (HAS_FEATURE(NTB_SB01BASE_LOCKUP) && ntb_mw_to_bar(ntb, ntb->msix_mw_idx) == idx) lmt_addr += ONE_MB; /* * Set limit registers first to avoid an errata where setting the base * registers locks the limit registers. */ if (!bar_is_64bit(ntb, idx)) { ntb_reg_write(4, lmt_reg, lmt_addr); reg_val = ntb_reg_read(4, lmt_reg); (void)reg_val; ntb_reg_write(4, base_reg, bar_addr); reg_val = ntb_reg_read(4, base_reg); (void)reg_val; } else { ntb_reg_write(8, lmt_reg, lmt_addr); reg_val = ntb_reg_read(8, lmt_reg); (void)reg_val; ntb_reg_write(8, base_reg, bar_addr); reg_val = ntb_reg_read(8, base_reg); (void)reg_val; } } static void xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx) { struct ntb_pci_bar_info *bar; bar = &ntb->bar_info[idx]; if (HAS_FEATURE(NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) { ntb_reg_write(4, bar->pbarxlat_off, base_addr); base_addr = ntb_reg_read(4, bar->pbarxlat_off); } else { ntb_reg_write(8, bar->pbarxlat_off, base_addr); base_addr = ntb_reg_read(8, bar->pbarxlat_off); } (void)base_addr; } static int xeon_setup_msix_bar(struct ntb_softc *ntb) { struct ntb_pci_bar_info *lapic_bar; enum ntb_bar bar_num; int rc; if (!HAS_FEATURE(NTB_SB01BASE_LOCKUP)) return (0); bar_num = ntb_mw_to_bar(ntb, ntb->msix_mw_idx); lapic_bar = &ntb->bar_info[bar_num]; /* Restrict LAPIC BAR to 1MB */ if (lapic_bar->size > ONE_MB) { rc = bus_adjust_resource(ntb->device, SYS_RES_MEMORY, lapic_bar->pci_resource, lapic_bar->pbase, lapic_bar->pbase + ONE_MB - 1); if (rc == 0) lapic_bar->size = ONE_MB; else { ntb_printf(0, "Failed to shrink LAPIC BAR resource to " "1 MB: %d\n", rc); /* Ignore error */ } } ntb->peer_lapic_bar = lapic_bar; return (0); } static int xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr, const struct ntb_b2b_addr *peer_addr) { struct ntb_pci_bar_info *b2b_bar; vm_size_t bar_size; uint64_t bar_addr; enum ntb_bar b2b_bar_num, i; if (ntb->b2b_mw_idx == B2B_MW_DISABLED) { b2b_bar = NULL; b2b_bar_num = NTB_CONFIG_BAR; ntb->b2b_off = 0; } else { b2b_bar_num = ntb_mw_to_bar(ntb, ntb->b2b_mw_idx); KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS, ("invalid b2b mw bar")); b2b_bar = &ntb->bar_info[b2b_bar_num]; bar_size = b2b_bar->size; if (ntb_b2b_mw_share != 0 && (bar_size >> 1) >= XEON_B2B_MIN_SIZE) ntb->b2b_off = bar_size >> 1; else if 
(bar_size >= XEON_B2B_MIN_SIZE) { ntb->b2b_off = 0; } else { device_printf(ntb->device, "B2B bar size is too small!\n"); return (EIO); } } /* * Reset the secondary bar sizes to match the primary bar sizes. * (Except, disable or halve the size of the B2B secondary bar.) */ for (i = NTB_B2B_BAR_1; i < NTB_MAX_BARS; i++) xeon_reset_sbar_size(ntb, i, b2b_bar_num); bar_addr = 0; if (b2b_bar_num == NTB_CONFIG_BAR) bar_addr = addr->bar0_addr; else if (b2b_bar_num == NTB_B2B_BAR_1) bar_addr = addr->bar2_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR)) bar_addr = addr->bar4_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2) bar_addr = addr->bar4_addr32; else if (b2b_bar_num == NTB_B2B_BAR_3) bar_addr = addr->bar5_addr32; else KASSERT(false, ("invalid bar")); ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr); /* * Other SBARs are normally hit by the PBAR xlat, except for the b2b * register BAR. The B2B BAR is either disabled above or configured * half-size. It starts at PBAR xlat + offset. * * Also set up incoming BAR limits == base (zero length window). */ xeon_set_sbar_base_and_limit(ntb, addr->bar2_addr64, NTB_B2B_BAR_1, b2b_bar_num); if (HAS_FEATURE(NTB_SPLIT_BAR)) { xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr32, NTB_B2B_BAR_2, b2b_bar_num); xeon_set_sbar_base_and_limit(ntb, addr->bar5_addr32, NTB_B2B_BAR_3, b2b_bar_num); } else xeon_set_sbar_base_and_limit(ntb, addr->bar4_addr64, NTB_B2B_BAR_2, b2b_bar_num); /* Zero incoming translation addrs */ ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0); ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0); if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) { size_t size, xlatoffset; switch (ntb_mw_to_bar(ntb, ntb->msix_mw_idx)) { case NTB_B2B_BAR_1: size = 8; xlatoffset = XEON_SBAR2XLAT_OFFSET; break; case NTB_B2B_BAR_2: xlatoffset = XEON_SBAR4XLAT_OFFSET; if (HAS_FEATURE(NTB_SPLIT_BAR)) size = 4; else size = 8; break; case NTB_B2B_BAR_3: xlatoffset = XEON_SBAR5XLAT_OFFSET; size = 4; break; default: KASSERT(false, ("Bogus msix mw idx: %u", ntb->msix_mw_idx)); return (EINVAL); } /* * We point the chosen MSIX MW BAR xlat to remote LAPIC for * workaround */ if (size == 4) ntb_reg_write(4, xlatoffset, MSI_INTEL_ADDR_BASE); else ntb_reg_write(8, xlatoffset, MSI_INTEL_ADDR_BASE); } (void)ntb_reg_read(8, XEON_SBAR2XLAT_OFFSET); (void)ntb_reg_read(8, XEON_SBAR4XLAT_OFFSET); /* Zero outgoing translation limits (whole bar size windows) */ ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0); ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0); /* Set outgoing translation offsets */ xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1); if (HAS_FEATURE(NTB_SPLIT_BAR)) { xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr32, NTB_B2B_BAR_2); xeon_set_pbar_xlat(ntb, peer_addr->bar5_addr32, NTB_B2B_BAR_3); } else xeon_set_pbar_xlat(ntb, peer_addr->bar4_addr64, NTB_B2B_BAR_2); /* Set the translation offset for B2B registers */ bar_addr = 0; if (b2b_bar_num == NTB_CONFIG_BAR) bar_addr = peer_addr->bar0_addr; else if (b2b_bar_num == NTB_B2B_BAR_1) bar_addr = peer_addr->bar2_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2 && !HAS_FEATURE(NTB_SPLIT_BAR)) bar_addr = peer_addr->bar4_addr64; else if (b2b_bar_num == NTB_B2B_BAR_2) bar_addr = peer_addr->bar4_addr32; else if (b2b_bar_num == NTB_B2B_BAR_3) bar_addr = peer_addr->bar5_addr32; else KASSERT(false, ("invalid bar")); /* * B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits * at a time. 
*/ ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff); ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32); return (0); } static inline bool _xeon_link_is_up(struct ntb_softc *ntb) { if (ntb->conn_type == NTB_CONN_TRANSPARENT) return (true); return ((ntb->lnk_sta & NTB_LINK_STATUS_ACTIVE) != 0); } static inline bool link_is_up(struct ntb_softc *ntb) { if (ntb->type == NTB_XEON) return (_xeon_link_is_up(ntb) && (ntb->peer_msix_good || !HAS_FEATURE(NTB_SB01BASE_LOCKUP))); KASSERT(ntb->type == NTB_ATOM, ("ntb type")); return ((ntb->ntb_ctl & ATOM_CNTL_LINK_DOWN) == 0); } static inline bool atom_link_is_err(struct ntb_softc *ntb) { uint32_t status; KASSERT(ntb->type == NTB_ATOM, ("ntb type")); status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET); if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0) return (true); status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET); return ((status & ATOM_IBIST_ERR_OFLOW) != 0); } /* Atom does not have link status interrupt, poll on that platform */ static void atom_link_hb(void *arg) { struct ntb_softc *ntb = arg; sbintime_t timo, poll_ts; timo = NTB_HB_TIMEOUT * hz; poll_ts = ntb->last_ts + timo; /* * Delay polling the link status if an interrupt was received, unless * the cached link status says the link is down. */ if ((sbintime_t)ticks - poll_ts < 0 && link_is_up(ntb)) { timo = poll_ts - ticks; goto out; } if (ntb_poll_link(ntb)) ntb_link_event(ntb); if (!link_is_up(ntb) && atom_link_is_err(ntb)) { /* Link is down with error, proceed with recovery */ callout_reset(&ntb->lr_timer, 0, recover_atom_link, ntb); return; } out: callout_reset(&ntb->heartbeat_timer, timo, atom_link_hb, ntb); } static void atom_perform_link_restart(struct ntb_softc *ntb) { uint32_t status; /* Driver resets the NTB ModPhy lanes - magic! */ ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0); ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40); ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60); ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60); /* Driver waits 100ms to allow the NTB ModPhy to settle */ pause("ModPhy", hz / 10); /* Clear AER Errors, write to clear */ status = ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET); status &= PCIM_AER_COR_REPLAY_ROLLOVER; ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status); /* Clear unexpected electrical idle event in LTSSM, write to clear */ status = ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET); status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI; ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status); /* Clear DeSkew Buffer error, write to clear */ status = ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET); status |= ATOM_DESKEWSTS_DBERR; ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status); status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET); status &= ATOM_IBIST_ERR_OFLOW; ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status); /* Releases the NTB state machine to allow the link to retrain */ status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET); status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT; ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status); } /* * ntb_set_ctx() - associate a driver context with an ntb device * @ntb: NTB device context * @ctx: Driver context * @ctx_ops: Driver context operations * * Associate a driver context and operations with a ntb device. The context is * provided by the client driver, and the driver may associate a different * context with each ntb device. * * Return: Zero if the context is associated, otherwise an error number. 
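 *
 * A minimal usage sketch from a hypothetical client driver (my_softc,
 * my_link_event and my_db_event are illustrative names, not part of this
 * driver):
 *
 *      static const struct ntb_ctx_ops my_ctx_ops = {
 *              .link_event = my_link_event,
 *              .db_event = my_db_event,
 *      };
 *      ...
 *      error = ntb_set_ctx(ntb, my_softc, &my_ctx_ops);
 *      if (error != 0)
 *              return (error);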
*/ int ntb_set_ctx(struct ntb_softc *ntb, void *ctx, const struct ntb_ctx_ops *ops) { if (ctx == NULL || ops == NULL) return (EINVAL); if (ntb->ctx_ops != NULL) return (EINVAL); CTX_LOCK(ntb); if (ntb->ctx_ops != NULL) { CTX_UNLOCK(ntb); return (EINVAL); } ntb->ntb_ctx = ctx; ntb->ctx_ops = ops; CTX_UNLOCK(ntb); return (0); } /* * It is expected that this will only be used from contexts where the ctx_lock * is not needed to protect ntb_ctx lifetime. */ void * ntb_get_ctx(struct ntb_softc *ntb, const struct ntb_ctx_ops **ops) { KASSERT(ntb->ntb_ctx != NULL && ntb->ctx_ops != NULL, ("bogus")); if (ops != NULL) *ops = ntb->ctx_ops; return (ntb->ntb_ctx); } /* * ntb_clear_ctx() - disassociate any driver context from an ntb device * @ntb: NTB device context * * Clear any association that may exist between a driver context and the ntb * device. */ void ntb_clear_ctx(struct ntb_softc *ntb) { CTX_LOCK(ntb); ntb->ntb_ctx = NULL; ntb->ctx_ops = NULL; CTX_UNLOCK(ntb); } /* * ntb_link_event() - notify driver context of a change in link status * @ntb: NTB device context * * Notify the driver context that the link status may have changed. The driver * should call ntb_link_is_up() to get the current status. */ void ntb_link_event(struct ntb_softc *ntb) { CTX_LOCK(ntb); if (ntb->ctx_ops != NULL && ntb->ctx_ops->link_event != NULL) ntb->ctx_ops->link_event(ntb->ntb_ctx); CTX_UNLOCK(ntb); } /* * ntb_db_event() - notify driver context of a doorbell event * @ntb: NTB device context * @vector: Interrupt vector number * * Notify the driver context of a doorbell event. If hardware supports * multiple interrupt vectors for doorbells, the vector number indicates which * vector received the interrupt. The vector number is relative to the first * vector used for doorbells, starting at zero, and must be less than * ntb_db_vector_count(). The driver may call ntb_db_read() to check which * doorbell bits need service, and ntb_db_vector_mask() to determine which of * those bits are associated with the vector number. */ static void ntb_db_event(struct ntb_softc *ntb, uint32_t vec) { CTX_LOCK(ntb); if (ntb->ctx_ops != NULL && ntb->ctx_ops->db_event != NULL) ntb->ctx_ops->db_event(ntb->ntb_ctx, vec); CTX_UNLOCK(ntb); } /* * ntb_link_enable() - enable the link on the secondary side of the ntb * @ntb: NTB device context * @max_speed: The maximum link speed expressed as PCIe generation number[0] * @max_width: The maximum link width expressed as the number of PCIe lanes[0] * * Enable the link on the secondary side of the ntb. This can only be done * from the primary side of the ntb in primary or b2b topology. The ntb device * should train the link to its maximum speed and width, or the requested speed * and width, whichever is smaller, if supported. * * Return: Zero on success, otherwise an error number. * * [0]: Only NTB_SPEED_AUTO and NTB_WIDTH_AUTO are valid inputs; other speed * and width input will be ignored. 
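 *
 * Typical call from a client driver once its context is registered (a
 * sketch; error handling elided):
 *
 *      error = ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);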
*/ int ntb_link_enable(struct ntb_softc *ntb, enum ntb_speed s __unused, enum ntb_width w __unused) { uint32_t cntl; if (ntb->type == NTB_ATOM) { pci_write_config(ntb->device, NTB_PPD_OFFSET, ntb->ppd | ATOM_PPD_INIT_LINK, 4); return (0); } if (ntb->conn_type == NTB_CONN_TRANSPARENT) { ntb_link_event(ntb); return (0); } cntl = ntb_reg_read(4, ntb->reg->ntb_ctl); cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK); cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP; cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP; if (HAS_FEATURE(NTB_SPLIT_BAR)) cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP; ntb_reg_write(4, ntb->reg->ntb_ctl, cntl); return (0); } /* * ntb_link_disable() - disable the link on the secondary side of the ntb * @ntb: NTB device context * * Disable the link on the secondary side of the ntb. This can only be done * from the primary side of the ntb in primary or b2b topology. The ntb device * should disable the link. Returning from this call must indicate that a * barrier has passed, though with no more writes may pass in either direction * across the link, except if this call returns an error number. * * Return: Zero on success, otherwise an error number. */ int ntb_link_disable(struct ntb_softc *ntb) { uint32_t cntl; if (ntb->conn_type == NTB_CONN_TRANSPARENT) { ntb_link_event(ntb); return (0); } cntl = ntb_reg_read(4, ntb->reg->ntb_ctl); cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP); cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP); if (HAS_FEATURE(NTB_SPLIT_BAR)) cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP); cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK; ntb_reg_write(4, ntb->reg->ntb_ctl, cntl); return (0); } static void recover_atom_link(void *arg) { struct ntb_softc *ntb = arg; unsigned speed, width, oldspeed, oldwidth; uint32_t status32; atom_perform_link_restart(ntb); /* * There is a potential race between the 2 NTB devices recovering at * the same time. If the times are the same, the link will not recover * and the driver will be stuck in this loop forever. Add a random * interval to the recovery time to prevent this race. */ status32 = arc4random() % ATOM_LINK_RECOVERY_TIME; pause("Link", (ATOM_LINK_RECOVERY_TIME + status32) * hz / 1000); if (atom_link_is_err(ntb)) goto retry; status32 = ntb_reg_read(4, ntb->reg->ntb_ctl); if ((status32 & ATOM_CNTL_LINK_DOWN) != 0) goto out; status32 = ntb_reg_read(4, ntb->reg->lnk_sta); width = NTB_LNK_STA_WIDTH(status32); speed = status32 & NTB_LINK_SPEED_MASK; oldwidth = NTB_LNK_STA_WIDTH(ntb->lnk_sta); oldspeed = ntb->lnk_sta & NTB_LINK_SPEED_MASK; if (oldwidth != width || oldspeed != speed) goto retry; out: callout_reset(&ntb->heartbeat_timer, NTB_HB_TIMEOUT * hz, atom_link_hb, ntb); return; retry: callout_reset(&ntb->lr_timer, NTB_HB_TIMEOUT * hz, recover_atom_link, ntb); } /* * Polls the HW link status register(s); returns true if something has changed. 
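 *
 * Atom: refresh the cached NTB CTL and LNK STA values from MMIO.  Xeon: write
 * the link doorbell bit back to the local doorbell register and re-read
 * LNK STA from PCI config space; under the SB01BASE_LOCKUP workaround a fresh
 * link-up also kicks off the MSI-X data exchange.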
*/ static bool ntb_poll_link(struct ntb_softc *ntb) { uint32_t ntb_cntl; uint16_t reg_val; if (ntb->type == NTB_ATOM) { ntb_cntl = ntb_reg_read(4, ntb->reg->ntb_ctl); if (ntb_cntl == ntb->ntb_ctl) return (false); ntb->ntb_ctl = ntb_cntl; ntb->lnk_sta = ntb_reg_read(4, ntb->reg->lnk_sta); } else { db_iowrite_raw(ntb, ntb->self_reg->db_bell, ntb->db_link_mask); reg_val = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2); if (reg_val == ntb->lnk_sta) return (false); ntb->lnk_sta = reg_val; if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) { if (_xeon_link_is_up(ntb)) { if (!ntb->peer_msix_good) { callout_reset(&ntb->peer_msix_work, 0, ntb_exchange_msix, ntb); return (false); } } else { ntb->peer_msix_good = false; ntb->peer_msix_done = false; } } } return (true); } static inline enum ntb_speed ntb_link_sta_speed(struct ntb_softc *ntb) { if (!link_is_up(ntb)) return (NTB_SPEED_NONE); return (ntb->lnk_sta & NTB_LINK_SPEED_MASK); } static inline enum ntb_width ntb_link_sta_width(struct ntb_softc *ntb) { if (!link_is_up(ntb)) return (NTB_WIDTH_NONE); return (NTB_LNK_STA_WIDTH(ntb->lnk_sta)); } SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW, 0, "Driver state, statistics, and HW registers"); #define NTB_REGSZ_MASK (3ul << 30) #define NTB_REG_64 (1ul << 30) #define NTB_REG_32 (2ul << 30) #define NTB_REG_16 (3ul << 30) #define NTB_REG_8 (0ul << 30) #define NTB_DB_READ (1ul << 29) #define NTB_PCI_REG (1ul << 28) #define NTB_REGFLAGS_MASK (NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG) static void ntb_sysctl_init(struct ntb_softc *ntb) { struct sysctl_oid_list *tree_par, *regpar, *statpar, *errpar; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree, *tmptree; ctx = device_get_sysctl_ctx(ntb->device); tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device)), OID_AUTO, "debug_info", CTLFLAG_RD, NULL, "Driver state, statistics, and HW registers"); tree_par = SYSCTL_CHILDREN(tree); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "conn_type", CTLFLAG_RD, &ntb->conn_type, 0, "0 - Transparent; 1 - B2B; 2 - Root Port"); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "dev_type", CTLFLAG_RD, &ntb->dev_type, 0, "0 - USD; 1 - DSD"); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ppd", CTLFLAG_RD, &ntb->ppd, 0, "Raw PPD register (cached)"); if (ntb->b2b_mw_idx != B2B_MW_DISABLED) { SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "b2b_idx", CTLFLAG_RD, &ntb->b2b_mw_idx, 0, "Index of the MW used for B2B remote register access"); SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "b2b_off", CTLFLAG_RD, &ntb->b2b_off, "If non-zero, offset of B2B register region in shared MW"); } SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "features", CTLFLAG_RD | CTLTYPE_STRING, ntb, 0, sysctl_handle_features, "A", "Features/errata of this NTB device"); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "ntb_ctl", CTLFLAG_RD, __DEVOLATILE(uint32_t *, &ntb->ntb_ctl), 0, "NTB CTL register (cached)"); SYSCTL_ADD_UINT(ctx, tree_par, OID_AUTO, "lnk_sta", CTLFLAG_RD, __DEVOLATILE(uint32_t *, &ntb->lnk_sta), 0, "LNK STA register (cached)"); SYSCTL_ADD_PROC(ctx, tree_par, OID_AUTO, "link_status", CTLFLAG_RD | CTLTYPE_STRING, ntb, 0, sysctl_handle_link_status, "A", "Link status"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "mw_count", CTLFLAG_RD, &ntb->mw_count, 0, "MW count"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "spad_count", CTLFLAG_RD, &ntb->spad_count, 0, "Scratchpad count"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_count", CTLFLAG_RD, &ntb->db_count, 0, "Doorbell count"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_count", CTLFLAG_RD, &ntb->db_vec_count, 0, 
"Doorbell vector count"); SYSCTL_ADD_U8(ctx, tree_par, OID_AUTO, "db_vec_shift", CTLFLAG_RD, &ntb->db_vec_shift, 0, "Doorbell vector shift"); SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_valid_mask", CTLFLAG_RD, &ntb->db_valid_mask, "Doorbell valid mask"); SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_link_mask", CTLFLAG_RD, &ntb->db_link_mask, "Doorbell link mask"); SYSCTL_ADD_UQUAD(ctx, tree_par, OID_AUTO, "db_mask", CTLFLAG_RD, &ntb->db_mask, "Doorbell mask (cached)"); tmptree = SYSCTL_ADD_NODE(ctx, tree_par, OID_AUTO, "registers", CTLFLAG_RD, NULL, "Raw HW registers (big-endian)"); regpar = SYSCTL_CHILDREN(tmptree); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ntbcntl", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->reg->ntb_ctl, sysctl_handle_register, "IU", "NTB Control register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcap", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 0x19c, sysctl_handle_register, "IU", "NTB Link Capabilities"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnkcon", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | 0x1a0, sysctl_handle_register, "IU", "NTB Link Control register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_mask", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_mask, sysctl_handle_register, "QU", "Doorbell mask register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "db_bell", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | NTB_DB_READ | ntb->self_reg->db_bell, sysctl_handle_register, "QU", "Doorbell register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat23", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar2_xlat, sysctl_handle_register, "QU", "Incoming XLAT23 register"); if (HAS_FEATURE(NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat4", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->xlat_reg->bar4_xlat, sysctl_handle_register, "IU", "Incoming XLAT4 register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat5", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->xlat_reg->bar5_xlat, sysctl_handle_register, "IU", "Incoming XLAT5 register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_xlat45", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar4_xlat, sysctl_handle_register, "QU", "Incoming XLAT45 register"); } SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt23", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar2_limit, sysctl_handle_register, "QU", "Incoming LMT23 register"); if (HAS_FEATURE(NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt4", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->xlat_reg->bar4_limit, sysctl_handle_register, "IU", "Incoming LMT4 register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt5", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->xlat_reg->bar5_limit, sysctl_handle_register, "IU", "Incoming LMT5 register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "incoming_lmt45", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar4_limit, sysctl_handle_register, "QU", "Incoming LMT45 register"); } if (ntb->type == NTB_ATOM) return; tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_stats", CTLFLAG_RD, NULL, "Xeon HW statistics"); statpar = SYSCTL_CHILDREN(tmptree); SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "upstream_mem_miss", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_16 | XEON_USMEMMISS_OFFSET, sysctl_handle_register, "SU", "Upstream Memory Miss"); tmptree = SYSCTL_ADD_NODE(ctx, regpar, OID_AUTO, "xeon_hw_err", CTLFLAG_RD, NULL, "Xeon HW 
errors"); errpar = SYSCTL_CHILDREN(tmptree); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "ppd", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | NTB_PPD_OFFSET, sysctl_handle_register, "CU", "PPD"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar23_sz", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_PBAR23SZ_OFFSET, sysctl_handle_register, "CU", "PBAR23 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar4_sz", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_PBAR4SZ_OFFSET, sysctl_handle_register, "CU", "PBAR4 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "pbar5_sz", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_PBAR5SZ_OFFSET, sysctl_handle_register, "CU", "PBAR5 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_sz", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_SBAR23SZ_OFFSET, sysctl_handle_register, "CU", "SBAR23 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_sz", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_SBAR4SZ_OFFSET, sysctl_handle_register, "CU", "SBAR4 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_sz", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_8 | NTB_PCI_REG | XEON_SBAR5SZ_OFFSET, sysctl_handle_register, "CU", "SBAR5 SZ (log2)"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "devsts", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_16 | NTB_PCI_REG | XEON_DEVSTS_OFFSET, sysctl_handle_register, "SU", "DEVSTS"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "lnksts", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_16 | NTB_PCI_REG | XEON_LINK_STATUS_OFFSET, sysctl_handle_register, "SU", "LNKSTS"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "slnksts", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_16 | NTB_PCI_REG | XEON_SLINK_STATUS_OFFSET, sysctl_handle_register, "SU", "SLNKSTS"); SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "uncerrsts", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | NTB_PCI_REG | XEON_UNCERRSTS_OFFSET, sysctl_handle_register, "IU", "UNCERRSTS"); SYSCTL_ADD_PROC(ctx, errpar, OID_AUTO, "corerrsts", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | NTB_PCI_REG | XEON_CORERRSTS_OFFSET, sysctl_handle_register, "IU", "CORERRSTS"); if (ntb->conn_type != NTB_CONN_B2B) return; SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat23", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_1].pbarxlat_off, sysctl_handle_register, "QU", "Outgoing XLAT23 register"); if (HAS_FEATURE(NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat4", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off, sysctl_handle_register, "IU", "Outgoing XLAT4 register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat5", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->bar_info[NTB_B2B_BAR_3].pbarxlat_off, sysctl_handle_register, "IU", "Outgoing XLAT5 register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_xlat45", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->bar_info[NTB_B2B_BAR_2].pbarxlat_off, sysctl_handle_register, "QU", "Outgoing XLAT45 register"); } SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt23", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | XEON_PBAR2LMT_OFFSET, sysctl_handle_register, "QU", "Outgoing LMT23 register"); if (HAS_FEATURE(NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt4", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | XEON_PBAR4LMT_OFFSET, sysctl_handle_register, "IU", "Outgoing LMT4 register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, 
"outgoing_lmt5", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | XEON_PBAR5LMT_OFFSET, sysctl_handle_register, "IU", "Outgoing LMT5 register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "outgoing_lmt45", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | XEON_PBAR4LMT_OFFSET, sysctl_handle_register, "QU", "Outgoing LMT45 register"); } SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar01_base", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar0_base, sysctl_handle_register, "QU", "Secondary BAR01 base register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar23_base", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar2_base, sysctl_handle_register, "QU", "Secondary BAR23 base register"); if (HAS_FEATURE(NTB_SPLIT_BAR)) { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar4_base", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->xlat_reg->bar4_base, sysctl_handle_register, "IU", "Secondary BAR4 base register"); SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar5_base", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_32 | ntb->xlat_reg->bar5_base, sysctl_handle_register, "IU", "Secondary BAR5 base register"); } else { SYSCTL_ADD_PROC(ctx, regpar, OID_AUTO, "sbar45_base", CTLFLAG_RD | CTLTYPE_OPAQUE, ntb, NTB_REG_64 | ntb->xlat_reg->bar4_base, sysctl_handle_register, "QU", "Secondary BAR45 base register"); } } static int sysctl_handle_features(SYSCTL_HANDLER_ARGS) { struct ntb_softc *ntb; struct sbuf sb; int error; error = 0; ntb = arg1; sbuf_new_for_sysctl(&sb, NULL, 256, req); sbuf_printf(&sb, "%b", ntb->features, NTB_FEATURES_STR); error = sbuf_finish(&sb); sbuf_delete(&sb); if (error || !req->newptr) return (error); return (EINVAL); } static int sysctl_handle_link_status(SYSCTL_HANDLER_ARGS) { struct ntb_softc *ntb; struct sbuf sb; enum ntb_speed speed; enum ntb_width width; int error; error = 0; ntb = arg1; sbuf_new_for_sysctl(&sb, NULL, 32, req); if (ntb_link_is_up(ntb, &speed, &width)) sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u", (unsigned)speed, (unsigned)width); else sbuf_printf(&sb, "down"); error = sbuf_finish(&sb); sbuf_delete(&sb); if (error || !req->newptr) return (error); return (EINVAL); } static int sysctl_handle_register(SYSCTL_HANDLER_ARGS) { struct ntb_softc *ntb; const void *outp; uintptr_t sz; uint64_t umv; char be[sizeof(umv)]; size_t outsz; uint32_t reg; bool db, pci; int error; ntb = arg1; reg = arg2 & ~NTB_REGFLAGS_MASK; sz = arg2 & NTB_REGSZ_MASK; db = (arg2 & NTB_DB_READ) != 0; pci = (arg2 & NTB_PCI_REG) != 0; KASSERT(!(db && pci), ("bogus")); if (db) { KASSERT(sz == NTB_REG_64, ("bogus")); umv = db_ioread(ntb, reg); outsz = sizeof(uint64_t); } else { switch (sz) { case NTB_REG_64: if (pci) umv = pci_read_config(ntb->device, reg, 8); else umv = ntb_reg_read(8, reg); outsz = sizeof(uint64_t); break; case NTB_REG_32: if (pci) umv = pci_read_config(ntb->device, reg, 4); else umv = ntb_reg_read(4, reg); outsz = sizeof(uint32_t); break; case NTB_REG_16: if (pci) umv = pci_read_config(ntb->device, reg, 2); else umv = ntb_reg_read(2, reg); outsz = sizeof(uint16_t); break; case NTB_REG_8: if (pci) umv = pci_read_config(ntb->device, reg, 1); else umv = ntb_reg_read(1, reg); outsz = sizeof(uint8_t); break; default: panic("bogus"); break; } } /* Encode bigendian so that sysctl -x is legible. 
*/ be64enc(be, umv); outp = ((char *)be) + sizeof(umv) - outsz; error = SYSCTL_OUT(req, outp, outsz); if (error || !req->newptr) return (error); return (EINVAL); } static unsigned ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx) { if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 && uidx >= ntb->b2b_mw_idx) || (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx)) uidx++; if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 && uidx >= ntb->b2b_mw_idx) && (ntb->msix_mw_idx != B2B_MW_DISABLED && uidx >= ntb->msix_mw_idx)) uidx++; return (uidx); } static void ntb_exchange_msix(void *ctx) { struct ntb_softc *ntb; uint32_t val; unsigned i; ntb = ctx; if (ntb->peer_msix_done) goto msix_done; for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) { ntb_peer_spad_write(ntb, NTB_MSIX_DATA0 + i, ntb->msix_data[i].nmd_data); ntb_peer_spad_write(ntb, NTB_MSIX_OFS0 + i, ntb->msix_data[i].nmd_ofs); } ntb_peer_spad_write(ntb, NTB_MSIX_GUARD, NTB_MSIX_VER_GUARD); ntb_spad_read(ntb, NTB_MSIX_GUARD, &val); if (val != NTB_MSIX_VER_GUARD) goto reschedule; for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) { ntb_spad_read(ntb, NTB_MSIX_DATA0 + i, &val); ntb->peer_msix_data[i].nmd_data = val; ntb_spad_read(ntb, NTB_MSIX_OFS0 + i, &val); ntb->peer_msix_data[i].nmd_ofs = val; } ntb->peer_msix_done = true; msix_done: ntb_peer_spad_write(ntb, NTB_MSIX_DONE, NTB_MSIX_RECEIVED); ntb_spad_read(ntb, NTB_MSIX_DONE, &val); if (val != NTB_MSIX_RECEIVED) goto reschedule; ntb->peer_msix_good = true; ntb_poll_link(ntb); ntb_link_event(ntb); return; reschedule: ntb->lnk_sta = pci_read_config(ntb->device, ntb->reg->lnk_sta, 2); if (_xeon_link_is_up(ntb)) callout_reset(&ntb->peer_msix_work, hz / 100, ntb_exchange_msix, ntb); else ntb_spad_clear(ntb); } /* * Public API to the rest of the OS */ /** * ntb_get_max_spads() - get the total scratch regs usable * @ntb: pointer to ntb_softc instance * * This function returns the max 32bit scratchpad registers usable by the * upper layer. * * RETURNS: total number of scratch pad registers available */ uint8_t ntb_get_max_spads(struct ntb_softc *ntb) { return (ntb->spad_count); } /* * ntb_mw_count() - Get the number of memory windows available for KPI * consumers. * * (Excludes any MW wholly reserved for register access.) */ uint8_t ntb_mw_count(struct ntb_softc *ntb) { uint8_t res; res = ntb->mw_count; if (ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0) res--; if (ntb->msix_mw_idx != B2B_MW_DISABLED) res--; return (res); } /** * ntb_spad_write() - write to the secondary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to the scratchpad register, 0 based * @val: the data value to put into the register * * This function allows writing of a 32bit value to the indexed scratchpad * register. The register resides on the secondary (external) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val) { if (idx >= ntb->spad_count) return (EINVAL); ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val); return (0); } /* * Zeros the local scratchpad. 
*/ void ntb_spad_clear(struct ntb_softc *ntb) { unsigned i; for (i = 0; i < ntb->spad_count; i++) ntb_spad_write(ntb, i, 0); } /** * ntb_spad_read() - read from the primary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to scratchpad register, 0 based * @val: pointer to 32bit integer for storing the register value * * This function allows reading of the 32bit scratchpad register on * the primary (internal) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val) { if (idx >= ntb->spad_count) return (EINVAL); *val = ntb_reg_read(4, ntb->self_reg->spad + idx * 4); return (0); } /** * ntb_peer_spad_write() - write to the secondary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to the scratchpad register, 0 based * @val: the data value to put into the register * * This function allows writing of a 32bit value to the indexed scratchpad * register. The register resides on the secondary (external) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_peer_spad_write(struct ntb_softc *ntb, unsigned int idx, uint32_t val) { if (idx >= ntb->spad_count) return (EINVAL); if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val); else ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val); return (0); } /** * ntb_peer_spad_read() - read from the primary scratchpad register * @ntb: pointer to ntb_softc instance * @idx: index to scratchpad register, 0 based * @val: pointer to 32bit integer for storing the register value * * This function allows reading of the 32bit scratchpad register on * the primary (internal) side. * * RETURNS: An appropriate ERRNO error value on error, or zero for success. */ int ntb_peer_spad_read(struct ntb_softc *ntb, unsigned int idx, uint32_t *val) { if (idx >= ntb->spad_count) return (EINVAL); if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) *val = ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4); else *val = ntb_reg_read(4, ntb->peer_reg->spad + idx * 4); return (0); } /* * ntb_mw_get_range() - get the range of a memory window * @ntb: NTB device context * @idx: Memory window number * @base: OUT - the base address for mapping the memory window * @size: OUT - the size for mapping the memory window * @align: OUT - the base alignment for translating the memory window * @align_size: OUT - the size alignment for translating the memory window * * Get the range of a memory window. NULL may be given for any output * parameter if the value is not needed. The base and size may be used for * mapping the memory window, to access the peer memory. The alignment and * size may be used for translating the memory window, for the peer to access * memory on the local system. * * Return: Zero on success, otherwise an error number. 
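 *
 * A minimal consumer sketch pairing this with ntb_mw_set_trans(); dma_addr is
 * an illustrative, suitably aligned DMA address and error handling is elided:
 *
 *      vm_paddr_t base;
 *      caddr_t vbase;
 *      size_t size, align, align_size;
 *      bus_addr_t plimit;
 *
 *      error = ntb_mw_get_range(ntb, 0, &base, &vbase, &size, &align,
 *          &align_size, &plimit);
 *      if (error == 0)
 *              error = ntb_mw_set_trans(ntb, 0, dma_addr, size);
 *
 * (dma_addr must satisfy the alignment and plimit constraints documented for
 * ntb_mw_set_trans().)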
*/ int ntb_mw_get_range(struct ntb_softc *ntb, unsigned mw_idx, vm_paddr_t *base, caddr_t *vbase, size_t *size, size_t *align, size_t *align_size, bus_addr_t *plimit) { struct ntb_pci_bar_info *bar; bus_addr_t limit; size_t bar_b2b_off; enum ntb_bar bar_num; if (mw_idx >= ntb_mw_count(ntb)) return (EINVAL); mw_idx = ntb_user_mw_to_idx(ntb, mw_idx); bar_num = ntb_mw_to_bar(ntb, mw_idx); bar = &ntb->bar_info[bar_num]; bar_b2b_off = 0; if (mw_idx == ntb->b2b_mw_idx) { KASSERT(ntb->b2b_off != 0, ("user shouldn't get non-shared b2b mw")); bar_b2b_off = ntb->b2b_off; } if (bar_is_64bit(ntb, bar_num)) limit = BUS_SPACE_MAXADDR; else limit = BUS_SPACE_MAXADDR_32BIT; if (base != NULL) *base = bar->pbase + bar_b2b_off; if (vbase != NULL) *vbase = bar->vbase + bar_b2b_off; if (size != NULL) *size = bar->size - bar_b2b_off; if (align != NULL) *align = bar->size; if (align_size != NULL) *align_size = 1; if (plimit != NULL) *plimit = limit; return (0); } /* * ntb_mw_set_trans() - set the translation of a memory window * @ntb: NTB device context * @idx: Memory window number * @addr: The dma address local memory to expose to the peer * @size: The size of the local memory to expose to the peer * * Set the translation of a memory window. The peer may access local memory * through the window starting at the address, up to the size. The address * must be aligned to the alignment specified by ntb_mw_get_range(). The size * must be aligned to the size alignment specified by ntb_mw_get_range(). The * address must be below the plimit specified by ntb_mw_get_range() (i.e. for * 32-bit BARs). * * Return: Zero on success, otherwise an error number. */ int ntb_mw_set_trans(struct ntb_softc *ntb, unsigned idx, bus_addr_t addr, size_t size) { struct ntb_pci_bar_info *bar; uint64_t base, limit, reg_val; size_t bar_size, mw_size; uint32_t base_reg, xlat_reg, limit_reg; enum ntb_bar bar_num; if (idx >= ntb_mw_count(ntb)) return (EINVAL); idx = ntb_user_mw_to_idx(ntb, idx); bar_num = ntb_mw_to_bar(ntb, idx); bar = &ntb->bar_info[bar_num]; bar_size = bar->size; if (idx == ntb->b2b_mw_idx) mw_size = bar_size - ntb->b2b_off; else mw_size = bar_size; /* Hardware requires that addr is aligned to bar size */ if ((addr & (bar_size - 1)) != 0) return (EINVAL); if (size > mw_size) return (EINVAL); bar_get_xlat_params(ntb, bar_num, &base_reg, &xlat_reg, &limit_reg); limit = 0; if (bar_is_64bit(ntb, bar_num)) { base = ntb_reg_read(8, base_reg) & BAR_HIGH_MASK; if (limit_reg != 0 && size != mw_size) limit = base + size; /* Set and verify translation address */ ntb_reg_write(8, xlat_reg, addr); reg_val = ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK; if (reg_val != addr) { ntb_reg_write(8, xlat_reg, 0); return (EIO); } /* Set and verify the limit */ ntb_reg_write(8, limit_reg, limit); reg_val = ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK; if (reg_val != limit) { ntb_reg_write(8, limit_reg, base); ntb_reg_write(8, xlat_reg, 0); return (EIO); } } else { /* Configure 32-bit (split) BAR MW */ if ((addr & UINT32_MAX) != addr) return (ERANGE); if (((addr + size) & UINT32_MAX) != (addr + size)) return (ERANGE); base = ntb_reg_read(4, base_reg) & BAR_HIGH_MASK; if (limit_reg != 0 && size != mw_size) limit = base + size; /* Set and verify translation address */ ntb_reg_write(4, xlat_reg, addr); reg_val = ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK; if (reg_val != addr) { ntb_reg_write(4, xlat_reg, 0); return (EIO); } /* Set and verify the limit */ ntb_reg_write(4, limit_reg, limit); reg_val = ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK; if (reg_val != 
limit) {
                        ntb_reg_write(4, limit_reg, base);
                        ntb_reg_write(4, xlat_reg, 0);
                        return (EIO);
                }
        }
        return (0);
}

/*
 * ntb_mw_clear_trans() - clear the translation of a memory window
 * @ntb:	NTB device context
 * @idx:	Memory window number
 *
 * Clear the translation of a memory window.  The peer may no longer access
 * local memory through the window.
 *
 * Return: Zero on success, otherwise an error number.
 */
int
ntb_mw_clear_trans(struct ntb_softc *ntb, unsigned mw_idx)
{

        return (ntb_mw_set_trans(ntb, mw_idx, 0, 0));
}

/*
 * ntb_mw_get_wc - Get the write-combine status of a memory window
 *
 * Returns:  Zero on success, setting *mode; otherwise an error number (e.g.
 * if idx is an invalid memory window).
 *
 * Mode is a VM_MEMATTR_* type.
 */
int
ntb_mw_get_wc(struct ntb_softc *ntb, unsigned idx, vm_memattr_t *mode)
{
        struct ntb_pci_bar_info *bar;

        if (idx >= ntb_mw_count(ntb))
                return (EINVAL);
        idx = ntb_user_mw_to_idx(ntb, idx);

        bar = &ntb->bar_info[ntb_mw_to_bar(ntb, idx)];
        *mode = bar->map_mode;
        return (0);
}

/*
 * ntb_mw_set_wc - Set the write-combine status of a memory window
 *
 * If 'mode' matches the current status, this does nothing and succeeds.  Mode
 * is a VM_MEMATTR_* type.
 *
 * Returns:  Zero on success, setting the caching attribute on the virtual
 * mapping of the BAR; otherwise an error number (e.g. if idx is an invalid
 * memory window, or if changing the caching attribute fails).
 */
int
ntb_mw_set_wc(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode)
{

        if (idx >= ntb_mw_count(ntb))
                return (EINVAL);
        idx = ntb_user_mw_to_idx(ntb, idx);

        return (ntb_mw_set_wc_internal(ntb, idx, mode));
}

static int
ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode)
{
        struct ntb_pci_bar_info *bar;
        int rc;

        bar = &ntb->bar_info[ntb_mw_to_bar(ntb, idx)];
        if (bar->map_mode == mode)
                return (0);

        rc = pmap_change_attr((vm_offset_t)bar->vbase, bar->size, mode);
        if (rc == 0)
                bar->map_mode = mode;

        return (rc);
}

/**
 * ntb_peer_db_set() - Set the doorbell on the secondary/external side
 * @ntb: pointer to ntb_softc instance
 * @bit: doorbell bits to ring
 *
 * This function allows triggering of a doorbell on the secondary/external
 * side that will initiate an interrupt on the remote host
 */
void
ntb_peer_db_set(struct ntb_softc *ntb, uint64_t bit)
{

        if (HAS_FEATURE(NTB_SB01BASE_LOCKUP)) {
                struct ntb_pci_bar_info *lapic;
                unsigned i;

                lapic = ntb->peer_lapic_bar;

                for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
                        if ((bit & ntb_db_vector_mask(ntb, i)) != 0)
                                bus_space_write_4(lapic->pci_bus_tag,
                                    lapic->pci_bus_handle,
                                    ntb->peer_msix_data[i].nmd_ofs,
                                    ntb->peer_msix_data[i].nmd_data);
                }
                return;
        }

        if (HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) {
                ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bit);
                return;
        }

        db_iowrite(ntb, ntb->peer_reg->db_bell, bit);
}

/*
 * ntb_get_peer_db_addr() - Return the address of the remote doorbell register,
 * as well as the size of the register (via *sz_out).
 *
 * This function allows a caller using I/OAT DMA to chain the remote doorbell
 * ring to its memory window write.
 *
 * Note that writing the peer doorbell via a memory window will *not* generate
 * an interrupt on the remote host; that must be done separately.
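 *
 * The caller is expected to issue a write of *sz_out bytes (the doorbell
 * register width) to the returned bus address, e.g. as a chained I/OAT
 * descriptor following its memory window copy.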
*/ bus_addr_t ntb_get_peer_db_addr(struct ntb_softc *ntb, vm_size_t *sz_out) { struct ntb_pci_bar_info *bar; uint64_t regoff; KASSERT(sz_out != NULL, ("must be non-NULL")); if (!HAS_FEATURE(NTB_SDOORBELL_LOCKUP)) { bar = &ntb->bar_info[NTB_CONFIG_BAR]; regoff = ntb->peer_reg->db_bell; } else { KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED, ("invalid b2b idx")); bar = &ntb->bar_info[ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)]; regoff = XEON_PDOORBELL_OFFSET; } KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("uh oh")); *sz_out = ntb->reg->db_size; /* HACK: Specific to current x86 bus implementation. */ return ((uint64_t)bar->pci_bus_handle + regoff); } /* * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb * @ntb: NTB device context * * Hardware may support different number or arrangement of doorbell bits. * * Return: A mask of doorbell bits supported by the ntb. */ uint64_t ntb_db_valid_mask(struct ntb_softc *ntb) { return (ntb->db_valid_mask); } /* * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector * @ntb: NTB device context * @vector: Doorbell vector number * * Each interrupt vector may have a different number or arrangement of bits. * * Return: A mask of doorbell bits serviced by a vector. */ uint64_t ntb_db_vector_mask(struct ntb_softc *ntb, uint32_t vector) { if (vector > ntb->db_vec_count) return (0); return (ntb->db_valid_mask & ntb_vec_mask(ntb, vector)); } /** * ntb_link_is_up() - get the current ntb link state * @ntb: NTB device context * @speed: OUT - The link speed expressed as PCIe generation number * @width: OUT - The link width expressed as the number of PCIe lanes * * RETURNS: true or false based on the hardware link state */ bool ntb_link_is_up(struct ntb_softc *ntb, enum ntb_speed *speed, enum ntb_width *width) { if (speed != NULL) *speed = ntb_link_sta_speed(ntb); if (width != NULL) *width = ntb_link_sta_width(ntb); return (link_is_up(ntb)); } static void save_bar_parameters(struct ntb_pci_bar_info *bar) { bar->pci_bus_tag = rman_get_bustag(bar->pci_resource); bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource); bar->pbase = rman_get_start(bar->pci_resource); bar->size = rman_get_size(bar->pci_resource); bar->vbase = rman_get_virtual(bar->pci_resource); } device_t ntb_get_device(struct ntb_softc *ntb) { return (ntb->device); } /* Export HW-specific errata information. */ bool ntb_has_feature(struct ntb_softc *ntb, uint32_t feature) { return (HAS_FEATURE(feature)); } Index: head/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c =================================================================== --- head/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c (revision 295879) +++ head/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c (revision 295880) @@ -1,6657 +1,6656 @@ /******************************************************************************* ** *Copyright (c) 2014 PMC-Sierra, Inc. All rights reserved. * *Redistribution and use in source and binary forms, with or without modification, are permitted provided *that the following conditions are met: *1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. *2. Redistributions in binary form must reproduce the above copyright notice, *this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
* *THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, * *INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE *ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS *OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, *WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF *THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE ** *******************************************************************************/ #include __FBSDID("$FreeBSD$"); #include #define MAJOR_REVISION 1 #define MINOR_REVISION 3 #define BUILD_REVISION 10800 #include // defines used in kernel.h #include #include #include #include #include // types used in module initialization #include // cdevsw struct #include // uio struct #include #include #include // structs, prototypes for pci bus stuff #include #include #include #include // 1. for vtophys #include // 2. for vtophys -#include // 3. for vtophys (yes, three) #include // For pci_get macros #include #include #include #include #include #include #include #include #include #include #include #include #include // #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE( M_PMC_MCCB, "CCB List", "CCB List for PMCS driver" ); MALLOC_DEFINE( M_PMC_MSTL, "STLock malloc", "allocated in agtiapi_attach as memory for lock use" ); MALLOC_DEFINE( M_PMC_MDVT, "ag_device_t malloc", "allocated in agtiapi_attach as mem for ag_device_t pDevList" ); MALLOC_DEFINE( M_PMC_MPRT, "ag_portal_data_t malloc", "allocated in agtiapi_attach as mem for *pPortalData" ); MALLOC_DEFINE( M_PMC_MDEV, "tiDeviceHandle_t * malloc", "allocated in agtiapi_GetDevHandle as local mem for **agDev" ); MALLOC_DEFINE( M_PMC_MFLG, "lDevFlags * malloc", "allocated in agtiapi_GetDevHandle as local mem for * flags" ); #ifdef LINUX_PERBI_SUPPORT MALLOC_DEFINE( M_PMC_MSLR, "ag_slr_map_t malloc", "mem allocated in agtiapi_attach for pSLRList" ); MALLOC_DEFINE( M_PMC_MTGT, "ag_tgt_map_t malloc", "mem allocated in agtiapi_attach for pWWNList" ); #endif MALLOC_DEFINE(TEMP,"tempbuff","buffer for payload"); MALLOC_DEFINE(TEMP2, "tempbuff", "buffer for agtiapi_getdevlist"); STATIC U32 agtiapi_intx_mode = 0; STATIC U08 ag_Perbi = 0; STATIC U32 agtiapi_polling_mode = 0; STATIC U32 ag_card_good = 0; // * total card initialized STATIC U32 ag_option_flag = 0; // * adjustable parameter flag STATIC U32 agtiapi_1st_time = 1; STATIC U32 ag_timeout_secs = 10; //Made timeout equivalent to linux U32 gTiDebugLevel = 1; S32 ag_encryption_enable = 0; atomic_t outstanding_encrypted_io_count; #define cache_line_size() CACHE_LINE_SIZE #define PMCoffsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #define CPU_TO_LE32(dst, src) \ dst.lower = htole32(LOW_32_BITS(src)); \ dst.upper = htole32(HIGH_32_BITS(src)) #define CMND_TO_CHANNEL( ccb ) ( ccb->ccb_h.path_id ) #define CMND_TO_TARGET( ccb ) ( ccb->ccb_h.target_id ) #define CMND_TO_LUN( ccb ) ( ccb->ccb_h.target_lun ) STATIC U08 agtiapi_AddrModes[AGTIAPI_MAX_CHANNEL_NUM + 1] = { AGTIAPI_PERIPHERAL }; #ifdef LINUX_PERBI_SUPPORT // Holding area for target-WWN mapping assignments on the boot line static ag_mapping_t 
*agMappingList = NULL; // modified by agtiapi_Setup() #endif // * For Debugging Purpose #ifdef AGTIAPI_DEBUG #define AGTIAPI_WWN(name, len) wwnprintk(name, len) #else #define AGTIAPI_WWN(name, len) #endif #define AGTIAPI_WWNPRINTK(name, len, format, a...) \ AGTIAPI_PRINTK(format "name ", a); \ AGTIAPI_WWN((unsigned char*)name, len); #define AGTIAPI_ERR_WWNPRINTK(name, len, format, a...) \ printk(KERN_DEBUG format "name ", ## a); \ wwnprintk((unsigned char*)name, len); #define AGTIAPI_CPY_DEV_INFO(root, dev, pDev) \ tiINIGetDeviceInfo(root, dev, &pDev->devInfo); \ wwncpy(pDev); #ifdef AGTIAPI_LOCAL_LOCK #define AG_CARD_LOCAL_LOCK(lock) ,(lock) #define AG_SPIN_LOCK_IRQ(lock, flags) #define AG_SPIN_UNLOCK_IRQ(lock, flags) #define AG_SPIN_LOCK(lock) #define AG_SPIN_UNLOCK(lock) #define AG_GLOBAL_ARG(arg) #define AG_PERF_SPINLOCK(lock) #define AG_PERF_SPINLOCK_IRQ(lock, flags) #define AG_LOCAL_LOCK(lock) if (lock) \ mtx_lock(lock) #define AG_LOCAL_UNLOCK(lock) if (lock) \ mtx_unlock(lock) #define AG_LOCAL_FLAGS(_flags) unsigned long _flags = 0 #endif #define AG_GET_DONE_PCCB(pccb, pmcsc) \ { \ AG_LOCAL_LOCK(&pmcsc->doneLock); \ pccb = pmcsc->ccbDoneHead; \ if (pccb != NULL) \ { \ pmcsc->ccbDoneHead = NULL; \ pmcsc->ccbDoneTail = NULL; \ AG_LOCAL_UNLOCK(&pmcsc->doneLock); \ agtiapi_Done(pmcsc, pccb); \ } \ else \ AG_LOCAL_UNLOCK(&pmcsc->doneLock); \ } #define AG_GET_DONE_SMP_PCCB(pccb, pmcsc) \ { \ AG_LOCAL_LOCK(&pmcsc->doneSMPLock); \ pccb = pmcsc->smpDoneHead; \ if (pccb != NULL) \ { \ pmcsc->smpDoneHead = NULL; \ pmcsc->smpDoneTail = NULL; \ AG_LOCAL_UNLOCK(&pmcsc->doneSMPLock); \ agtiapi_SMPDone(pmcsc, pccb); \ } \ else \ AG_LOCAL_UNLOCK(&pmcsc->doneSMPLock); \ } #ifdef AGTIAPI_DUMP_IO_DEBUG #define AG_IO_DUMPCCB(pccb) agtiapi_DumpCCB(pccb) #else #define AG_IO_DUMPCCB(pccb) #endif #define SCHED_DELAY_JIFFIES 4 /* in seconds */ #ifdef HOTPLUG_SUPPORT #define AG_HOTPLUG_LOCK_INIT(lock) mxt_init(lock) #define AG_LIST_LOCK(lock) mtx_lock(lock) #define AG_LIST_UNLOCK(lock) mtx_unlock(lock) #else #define AG_HOTPLUG_LOCK_INIT(lock) #define AG_LIST_LOCK(lock) #define AG_LIST_UNLOCK(lock) #endif STATIC void agtiapi_CheckIOTimeout(void *data); static ag_card_info_t agCardInfoList[ AGTIAPI_MAX_CARDS ]; // card info list static void agtiapi_cam_action( struct cam_sim *, union ccb * ); static void agtiapi_cam_poll( struct cam_sim * ); // Function prototypes static d_open_t agtiapi_open; static d_close_t agtiapi_close; static d_read_t agtiapi_read; static d_write_t agtiapi_write; static d_ioctl_t agtiapi_CharIoctl; static void agtiapi_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg); void agtiapi_adjust_queue_depth(struct cam_path *path, bit32 QueueDepth); // Character device entry points static struct cdevsw agtiapi_cdevsw = { .d_version = D_VERSION, .d_open = agtiapi_open, .d_close = agtiapi_close, .d_read = agtiapi_read, .d_write = agtiapi_write, .d_ioctl = agtiapi_CharIoctl, .d_name = "pmspcv", }; U32 maxTargets = 0; U32 ag_portal_count = 0; // In the cdevsw routines, we find our softc by using the si_drv1 member // of struct cdev. We set this variable to point to our softc in our // attach routine when we create the /dev entry. int agtiapi_open( struct cdev *dev, int oflags, int devtype, struct thread *td ) { struct agtiapi_softc *sc; /* Look up our softc. */ sc = dev->si_drv1; AGTIAPI_PRINTK("agtiapi_open\n"); AGTIAPI_PRINTK("Opened successfully. 
sc->my_dev %p\n", sc->my_dev); return( 0 ); } int agtiapi_close( struct cdev *dev, int fflag, int devtype, struct thread *td ) { struct agtiapi_softc *sc; // Look up our softc sc = dev->si_drv1; AGTIAPI_PRINTK("agtiapi_close\n"); AGTIAPI_PRINTK("Closed. sc->my_dev %p\n", sc->my_dev); return( 0 ); } int agtiapi_read( struct cdev *dev, struct uio *uio, int ioflag ) { struct agtiapi_softc *sc; // Look up our softc sc = dev->si_drv1; AGTIAPI_PRINTK( "agtiapi_read\n" ); AGTIAPI_PRINTK( "Asked to read %lu bytes. sc->my_dev %p\n", uio->uio_resid, sc->my_dev ); return( 0 ); } int agtiapi_write( struct cdev *dev, struct uio *uio, int ioflag ) { struct agtiapi_softc *sc; // Look up our softc sc = dev->si_drv1; AGTIAPI_PRINTK( "agtiapi_write\n" ); AGTIAPI_PRINTK( "Asked to write %lu bytes. sc->my_dev %p\n", uio->uio_resid, sc->my_dev ); return( 0 ); } int agtiapi_getdevlist( struct agtiapi_softc *pCard, tiIOCTLPayload_t *agIOCTLPayload ) { tdDeviceListPayload_t *pIoctlPayload = (tdDeviceListPayload_t *) agIOCTLPayload->FunctionSpecificArea; tdDeviceInfoIOCTL_t *pDeviceInfo = NULL; bit8 *pDeviceInfoOrg; tdsaDeviceData_t *pDeviceData = NULL; tiDeviceHandle_t **devList = NULL; tiDeviceHandle_t **devHandleArray = NULL; tiDeviceHandle_t *pDeviceHandle = NULL; bit32 x, memNeeded1; bit32 count, total; bit32 MaxDeviceCount; bit32 ret_val=IOCTL_CALL_INVALID_CODE; ag_portal_data_t *pPortalData; bit8 *pDeviceHandleList = NULL; AGTIAPI_PRINTK( "agtiapi_getdevlist: Enter\n" ); pDeviceInfoOrg = pIoctlPayload -> pDeviceInfo; MaxDeviceCount = pCard->devDiscover; if (MaxDeviceCount > pIoctlPayload->deviceLength ) { AGTIAPI_PRINTK( "agtiapi_getdevlist: MaxDeviceCount: %d > Requested device length: %d\n", MaxDeviceCount, pIoctlPayload->deviceLength ); MaxDeviceCount = pIoctlPayload->deviceLength; ret_val = IOCTL_CALL_FAIL; } AGTIAPI_PRINTK( "agtiapi_getdevlist: MaxDeviceCount: %d > Requested device length: %d\n", MaxDeviceCount, pIoctlPayload->deviceLength ); memNeeded1 = AG_ALIGNSIZE( MaxDeviceCount * sizeof(tiDeviceHandle_t *), sizeof(void *) ); AGTIAPI_PRINTK("agtiapi_getdevlist: portCount %d\n", pCard->portCount); devList = malloc(memNeeded1, TEMP2, M_WAITOK); if (devList == NULL) { AGTIAPI_PRINTK("agtiapi_getdevlist: failed to allocate memory\n"); ret_val = IOCTL_CALL_FAIL; agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; return ret_val; } osti_memset(devList, 0, memNeeded1); pPortalData = &pCard->pPortalData[0]; pDeviceHandleList = (bit8*)devList; for (total = x = 0; x < pCard->portCount; x++, pPortalData++) { count = tiINIGetDeviceHandlesForWinIOCTL(&pCard->tiRoot, &pPortalData->portalInfo.tiPortalContext, ( tiDeviceHandle_t **)pDeviceHandleList ,MaxDeviceCount ); if (count == DISCOVERY_IN_PROGRESS) { AGTIAPI_PRINTK( "agtiapi_getdevlist: DISCOVERY_IN_PROGRESS on " "portal %d\n", x ); free(devList, TEMP2); ret_val = IOCTL_CALL_FAIL; agIOCTLPayload->Status = IOCTL_ERR_STATUS_INTERNAL_ERROR; return ret_val; } total += count; pDeviceHandleList+= count*sizeof(tiDeviceHandle_t *); MaxDeviceCount-= count; } if (total > pIoctlPayload->deviceLength) { total = pIoctlPayload->deviceLength; } // dump device information from device handle list count = 0; devHandleArray = devList; for (x = 0; x < pCard->devDiscover; x++) { pDeviceHandle = (tiDeviceHandle_t*)devHandleArray[x]; if (devList[x] != agNULL) { pDeviceData = devList [x]->tdData; pDeviceInfo = (tdDeviceInfoIOCTL_t*)(pDeviceInfoOrg + sizeof(tdDeviceInfoIOCTL_t) * count); if (pDeviceData != agNULL && pDeviceInfo != agNULL) { osti_memcpy( 
&pDeviceInfo->sasAddressHi, pDeviceData->agDeviceInfo.sasAddressHi, sizeof(bit32) ); osti_memcpy( &pDeviceInfo->sasAddressLo, pDeviceData->agDeviceInfo.sasAddressLo, sizeof(bit32) ); #if 0 pDeviceInfo->sasAddressHi = DMA_BEBIT32_TO_BIT32( pDeviceInfo->sasAddressHi ); pDeviceInfo->sasAddressLo = DMA_BEBIT32_TO_BIT32( pDeviceInfo->sasAddressLo ); #endif pDeviceInfo->deviceType = ( pDeviceData->agDeviceInfo.devType_S_Rate & 0x30 ) >> 4; pDeviceInfo->linkRate = pDeviceData->agDeviceInfo.devType_S_Rate & 0x0F; pDeviceInfo->phyId = pDeviceData->phyID; pDeviceInfo->ishost = pDeviceData->target_ssp_stp_smp; pDeviceInfo->DeviceHandle= (unsigned long)pDeviceHandle; if(pDeviceInfo->deviceType == 0x02) { bit8 *sasAddressHi; bit8 *sasAddressLo; tiIniGetDirectSataSasAddr(&pCard->tiRoot, pDeviceData->phyID, &sasAddressHi, &sasAddressLo); pDeviceInfo->sasAddressHi = DMA_BEBIT32_TO_BIT32(*(bit32*)sasAddressHi); pDeviceInfo->sasAddressLo = DMA_BEBIT32_TO_BIT32(*(bit32*)sasAddressLo) + pDeviceData->phyID + 16; } else { pDeviceInfo->sasAddressHi = DMA_BEBIT32_TO_BIT32( pDeviceInfo->sasAddressHi ); pDeviceInfo->sasAddressLo = DMA_BEBIT32_TO_BIT32( pDeviceInfo->sasAddressLo ); } AGTIAPI_PRINTK( "agtiapi_getdevlist: devicetype %x\n", pDeviceInfo->deviceType ); AGTIAPI_PRINTK( "agtiapi_getdevlist: linkrate %x\n", pDeviceInfo->linkRate ); AGTIAPI_PRINTK( "agtiapi_getdevlist: phyID %x\n", pDeviceInfo->phyId ); AGTIAPI_PRINTK( "agtiapi_getdevlist: addresshi %x\n", pDeviceInfo->sasAddressHi ); AGTIAPI_PRINTK( "agtiapi_getdevlist: addresslo %x\n", pDeviceInfo->sasAddressHi ); } else { AGTIAPI_PRINTK( "agtiapi_getdevlist: pDeviceData %p or pDeviceInfo " "%p is NULL %d\n", pDeviceData, pDeviceInfo, x ); } count++; } } pIoctlPayload->realDeviceCount = count; AGTIAPI_PRINTK( "agtiapi_getdevlist: Exit RealDeviceCount = %d\n", count ); if (devList) { free(devList, TEMP2); } if(ret_val != IOCTL_CALL_FAIL) { ret_val = IOCTL_CALL_SUCCESS; } agIOCTLPayload->Status = IOCTL_ERR_STATUS_OK; return ret_val; } /****************************************************************************** agtiapi_getCardInfo() Purpose: This function retrives the Card information Parameters: Return: A number - error 0 - HBA has been detected Note: ******************************************************************************/ int agtiapi_getCardInfo ( struct agtiapi_softc *pCard, U32_64 size, void *buffer ) { CardInfo_t *pCardInfo; pCardInfo = (CardInfo_t *)buffer; pCardInfo->deviceId = pci_get_device(pCard->my_dev); pCardInfo->vendorId =pci_get_vendor(pCard->my_dev) ; memcpy( pCardInfo->pciMemBaseSpc, pCard->pCardInfo->pciMemBaseSpc, ((sizeof(U32_64))*PCI_NUMBER_BARS) ); pCardInfo->deviceNum = pci_get_slot(pCard->my_dev); pCardInfo->pciMemBase = pCard->pCardInfo->pciMemBase; pCardInfo->pciIOAddrLow = pCard->pCardInfo->pciIOAddrLow; pCardInfo->pciIOAddrUp = pCard->pCardInfo->pciIOAddrUp; pCardInfo->busNum =pci_get_bus(pCard->my_dev); return 0; } void agtiapi_adjust_queue_depth(struct cam_path *path, bit32 QueueDepth) { struct ccb_relsim crs; xpt_setup_ccb(&crs.ccb_h, path, 5); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = CAM_DEV_QFREEZE; crs.release_flags = RELSIM_ADJUST_OPENINGS; crs.openings = QueueDepth; xpt_action((union ccb *)&crs); if(crs.ccb_h.status != CAM_REQ_CMP) { printf("XPT_REL_SIMQ failed\n"); } } static void agtiapi_async(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg) { struct agtiapi_softc *pmsc; U32 TID; ag_device_t *targ; pmsc = (struct agtiapi_softc*)callback_arg; switch (code) { case AC_FOUND_DEVICE: 
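/* CAM reported a new device (AC_FOUND_DEVICE): translate the CAM target id
   into our pDevList index and re-apply that target's queue depth on the
   freshly created path via agtiapi_adjust_queue_depth(). */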
{ struct ccb_getdev *cgd; cgd = (struct ccb_getdev *)arg; if (cgd == NULL) { break; } TID = cgd->ccb_h.target_id; if (TID >= 0 && TID < maxTargets){ if (pmsc != NULL){ TID = INDEX(pmsc, TID); targ = &pmsc->pDevList[TID]; agtiapi_adjust_queue_depth(path, targ->qdepth); } } break; } default: break; } } /****************************************************************************** agtiapi_CharIoctl() Purpose: This function handles the ioctl from application layer Parameters: Return: A number - error 0 - HBA has been detected Note: ******************************************************************************/ static int agtiapi_CharIoctl( struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td ) { struct sema mx; datatosend *load; // structure defined in lxcommon.h tiIOCTLPayload_t *pIoctlPayload; struct agtiapi_softc *pCard; pCard=dev->si_drv1; void *param1 = NULL; void *param2 = NULL; void *param3 = NULL; U32 status = 0; U32 retValue; int err = 0; int error = 0; tdDeviceListPayload_t *pDeviceList = NULL; unsigned long flags; switch (cmd) { case AGTIAPI_IOCTL: load=(datatosend*)data; pIoctlPayload = malloc(load->datasize,TEMP,M_WAITOK); AGTIAPI_PRINTK( "agtiapi_CharIoctl: old load->datasize = %d\n", load->datasize ); //Copy payload to kernel buffer, on success it returns 0 err = copyin(load->data,pIoctlPayload,load->datasize); if (err) { status = IOCTL_CALL_FAIL; return status; } sema_init(&mx,0,"sem"); pCard->pIoctlSem =&mx; pCard->up_count = pCard->down_count = 0; if ( pIoctlPayload->MajorFunction == IOCTL_MJ_GET_DEVICE_LIST ) { retValue = agtiapi_getdevlist(pCard, pIoctlPayload); if (retValue == 0) { pIoctlPayload->Status = IOCTL_CALL_SUCCESS; status = IOCTL_CALL_SUCCESS; } else { pIoctlPayload->Status = IOCTL_CALL_FAIL; status = IOCTL_CALL_FAIL; } //update new device length pDeviceList = (tdDeviceListPayload_t*)pIoctlPayload->FunctionSpecificArea; load->datasize =load->datasize - sizeof(tdDeviceInfoIOCTL_t) * (pDeviceList->deviceLength - pDeviceList->realDeviceCount); AGTIAPI_PRINTK( "agtiapi_CharIoctl: new load->datasize = %d\n", load->datasize ); } else if (pIoctlPayload->MajorFunction == IOCTL_MN_GET_CARD_INFO) { retValue = agtiapi_getCardInfo( pCard, pIoctlPayload->Length, (pIoctlPayload->FunctionSpecificArea) ); if (retValue == 0) { pIoctlPayload->Status = IOCTL_CALL_SUCCESS; status = IOCTL_CALL_SUCCESS; } else { pIoctlPayload->Status = IOCTL_CALL_FAIL; status = IOCTL_CALL_FAIL; } } else if ( pIoctlPayload->MajorFunction == IOCTL_MJ_CHECK_DPMC_EVENT ) { if ( pCard->flags & AGTIAPI_PORT_PANIC ) { strcpy ( pIoctlPayload->FunctionSpecificArea, "DPMC LEAN\n" ); } else { strcpy ( pIoctlPayload->FunctionSpecificArea, "do not dpmc lean\n" ); } pIoctlPayload->Status = IOCTL_CALL_SUCCESS; status = IOCTL_CALL_SUCCESS; } else if (pIoctlPayload->MajorFunction == IOCTL_MJ_CHECK_FATAL_ERROR ) { AGTIAPI_PRINTK("agtiapi_CharIoctl: IOCTL_MJ_CHECK_FATAL_ERROR call received for card %d\n", pCard->cardNo); //read port status to see if there is a fatal event if(pCard->flags & AGTIAPI_PORT_PANIC) { printf("agtiapi_CharIoctl: Port Panic Status For Card %d is True\n",pCard->cardNo); pIoctlPayload->Status = IOCTL_MJ_FATAL_ERR_CHK_SEND_TRUE; } else { AGTIAPI_PRINTK("agtiapi_CharIoctl: Port Panic Status For Card %d is False\n",pCard->cardNo); pIoctlPayload->Status = IOCTL_MJ_FATAL_ERR_CHK_SEND_FALSE; } status = IOCTL_CALL_SUCCESS; } else if (pIoctlPayload->MajorFunction == IOCTL_MJ_FATAL_ERROR_DUMP_COMPLETE) { AGTIAPI_PRINTK("agtiapi_CharIoctl: IOCTL_MJ_FATAL_ERROR_DUMP_COMPLETE call 
received for card %d\n", pCard->cardNo); //set flags bit status to be a soft reset pCard->flags |= AGTIAPI_SOFT_RESET; //trigger soft reset for the card retValue = agtiapi_ResetCard (pCard, &flags); if(retValue == AGTIAPI_SUCCESS) { //clear port panic status pCard->flags &= ~AGTIAPI_PORT_PANIC; pIoctlPayload->Status = IOCTL_MJ_FATAL_ERROR_SOFT_RESET_TRIG; status = IOCTL_CALL_SUCCESS; } else { pIoctlPayload->Status = IOCTL_CALL_FAIL; status = IOCTL_CALL_FAIL; } } else { status = tiCOMMgntIOCTL( &pCard->tiRoot, pIoctlPayload, pCard, param2, param3 ); if (status == IOCTL_CALL_PENDING) { ostiIOCTLWaitForSignal(&pCard->tiRoot,NULL, NULL, NULL); status = IOCTL_CALL_SUCCESS; } } pCard->pIoctlSem = NULL; err = 0; //copy kernel buffer to userland buffer err=copyout(pIoctlPayload,load->data,load->datasize); if (err) { status = IOCTL_CALL_FAIL; return status; } free(pIoctlPayload,TEMP); pIoctlPayload=NULL; break; default: error = ENOTTY; break; } return(status); } /****************************************************************************** agtiapi_probe() Purpose: This function initialize and registere all detected HBAs. The first function being called in driver after agtiapi_probe() Parameters: device_t dev (IN) - device pointer Return: A number - error 0 - HBA has been detected Note: ******************************************************************************/ static int agtiapi_probe( device_t dev ) { int retVal; int thisCard; ag_card_info_t *thisCardInst; thisCard = device_get_unit( dev ); if ( thisCard >= AGTIAPI_MAX_CARDS ) { device_printf( dev, "Too many PMC-Sierra cards detected ERROR!\n" ); return (ENXIO); // maybe change to different return value? } thisCardInst = &agCardInfoList[ thisCard ]; retVal = agtiapi_ProbeCard( dev, thisCardInst, thisCard ); if ( retVal ) return (ENXIO); // maybe change to different return value? return( BUS_PROBE_DEFAULT ); // successful probe } /****************************************************************************** agtiapi_attach() Purpose: This function initialize and registere all detected HBAs. The first function being called in driver after agtiapi_probe() Parameters: device_t dev (IN) - device pointer Return: A number - error 0 - HBA has been detected Note: ******************************************************************************/ static int agtiapi_attach( device_t devx ) { // keeping get_unit call to once int thisCard = device_get_unit( devx ); struct agtiapi_softc *pmsc; ag_card_info_t *thisCardInst = &agCardInfoList[ thisCard ]; ag_resource_info_t *pRscInfo; int idx; int lenRecv; char buffer [256], *pLastUsedChar; union ccb *ccb; int bus, tid, lun; struct ccb_setasync csa; AGTIAPI_PRINTK("agtiapi_attach: start dev %p thisCard %d\n", devx, thisCard); // AGTIAPI_PRINTK( "agtiapi_attach: entry pointer values A %p / %p\n", // thisCardInst->pPCIDev, thisCardInst ); AGTIAPI_PRINTK( "agtiapi_attach: deviceID: 0x%x\n", pci_get_devid( devx ) ); TUNABLE_INT_FETCH( "DPMC_TIMEOUT_SECS", &ag_timeout_secs ); TUNABLE_INT_FETCH( "DPMC_TIDEBUG_LEVEL", &gTiDebugLevel ); // printf( "agtiapi_attach: debugLevel %d, timeout %d\n", // gTiDebugLevel, ag_timeout_secs ); if ( ag_timeout_secs < 1 ) { ag_timeout_secs = 1; // set minimum timeout value of 1 second } ag_timeout_secs = (ag_timeout_secs * 1000); // convert to millisecond notation // Look up our softc and initialize its fields. 
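// The remainder of attach pulls NumberOfPortals and MaxTargets from the
// transport parameters, creates the character device node and the CAM
// SIM/bus, wires up the MSI-X vectors, and then runs agtiapi_InitCardSW()
// and agtiapi_InitCardHW().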
pmsc = device_get_softc( devx ); pmsc->my_dev = devx; /* Get NumberOfPortals */ if ((ostiGetTransportParam( &pmsc->tiRoot, "Global", "CardDefault", agNULL, agNULL, agNULL, agNULL, "NumberOfPortals", buffer, 255, &lenRecv ) == tiSuccess) && (lenRecv != 0)) { if (osti_strncmp(buffer, "0x", 2) == 0) { ag_portal_count = osti_strtoul (buffer, &pLastUsedChar, 0); } else { ag_portal_count = osti_strtoul (buffer, &pLastUsedChar, 10); } if (ag_portal_count > AGTIAPI_MAX_PORTALS) ag_portal_count = AGTIAPI_MAX_PORTALS; } else { ag_portal_count = AGTIAPI_MAX_PORTALS; } AGTIAPI_PRINTK( "agtiapi_attach: ag_portal_count=%d\n", ag_portal_count ); // initialize hostdata structure pmsc->flags |= AGTIAPI_INIT_TIME | AGTIAPI_SCSI_REGISTERED | AGTIAPI_INITIATOR; pmsc->cardNo = thisCard; pmsc->ccbTotal = 0; pmsc->portCount = ag_portal_count; pmsc->pCardInfo = thisCardInst; pmsc->tiRoot.osData = pmsc; pmsc->pCardInfo->pCard = (void *)pmsc; pmsc->VidDid = ( pci_get_vendor(devx) << 16 ) | pci_get_device( devx ); pmsc->SimQFrozen = agFALSE; pmsc->devq_flag = agFALSE; pRscInfo = &thisCardInst->tiRscInfo; osti_memset(buffer, 0, 256); lenRecv = 0; /* Get MaxTargets */ if ((ostiGetTransportParam( &pmsc->tiRoot, "Global", "InitiatorParms", agNULL, agNULL, agNULL, agNULL, "MaxTargets", buffer, sizeof(buffer), &lenRecv ) == tiSuccess) && (lenRecv != 0)) { if (osti_strncmp(buffer, "0x", 2) == 0) { maxTargets = osti_strtoul (buffer, &pLastUsedChar, 0); AGTIAPI_PRINTK( "agtiapi_attach: maxTargets = osti_strtoul 0 \n" ); } else { maxTargets = osti_strtoul (buffer, &pLastUsedChar, 10); AGTIAPI_PRINTK( "agtiapi_attach: maxTargets = osti_strtoul 10\n" ); } } else { if(Is_ADP8H(pmsc)) maxTargets = AGTIAPI_MAX_DEVICE_8H; else if(Is_ADP7H(pmsc)) maxTargets = AGTIAPI_MAX_DEVICE_7H; else maxTargets = AGTIAPI_MAX_DEVICE; } if (maxTargets > AGTIAPI_HW_LIMIT_DEVICE) { AGTIAPI_PRINTK( "agtiapi_attach: maxTargets: %d > AGTIAPI_HW_LIMIT_DEVICE: %d\n", maxTargets, AGTIAPI_HW_LIMIT_DEVICE ); AGTIAPI_PRINTK( "agtiapi_attach: change maxTargets = AGTIAPI_HW_LIMIT_DEVICE\n" ); maxTargets = AGTIAPI_HW_LIMIT_DEVICE; } pmsc->devDiscover = maxTargets ; #ifdef HIALEAH_ENCRYPTION ag_encryption_enable = 1; if(ag_encryption_enable && pci_get_device(pmsc->pCardInfo->pPCIDev) == PCI_DEVICE_ID_HIALEAH_HBA_SPCVE) { pmsc->encrypt = 1; pRscInfo->tiLoLevelResource.loLevelOption.encryption = agTRUE; printf("agtiapi_attach: Encryption Enabled\n" ); } #endif // ## for now, skip calls to ostiGetTransportParam(...) // ## for now, skip references to DIF & EDC // Create a /dev entry for this device. The kernel will assign us // a major number automatically. We use the unit number of this // device as the minor number and name the character device // "agtiapi". 
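// The node appears as /dev/spcv<unit>, and si_drv1 is pointed back at the
// softc so the cdevsw entry points above can recover it.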
pmsc->my_cdev = make_dev( &agtiapi_cdevsw, thisCard, UID_ROOT, GID_WHEEL, 0600, "spcv%u", thisCard ); pmsc->my_cdev->si_drv1 = pmsc; mtx_init( &thisCardInst->pmIOLock, "pmc SAS I/O lock", NULL, MTX_DEF|MTX_RECURSE ); struct cam_devq *devq; /* set the maximum number of pending IOs */ devq = cam_simq_alloc( AGTIAPI_MAX_CAM_Q_DEPTH ); if (devq == NULL) { AGTIAPI_PRINTK("agtiapi_attach: cam_simq_alloc is NULL\n" ); return( EIO ); } struct cam_sim *lsim; lsim = cam_sim_alloc( agtiapi_cam_action, agtiapi_cam_poll, "pmspcbsd", pmsc, thisCard, &thisCardInst->pmIOLock, 1, // queued per target AGTIAPI_MAX_CAM_Q_DEPTH, // max tag depth devq ); if ( lsim == NULL ) { cam_simq_free( devq ); AGTIAPI_PRINTK("agtiapi_attach: cam_sim_alloc is NULL\n" ); return( EIO ); } pmsc->dev_scan = agFALSE; //one cam sim per scsi bus mtx_lock( &thisCardInst->pmIOLock ); if ( xpt_bus_register( lsim, devx, 0 ) != CAM_SUCCESS ) { // bus 0 cam_sim_free( lsim, TRUE ); mtx_unlock( &thisCardInst->pmIOLock ); AGTIAPI_PRINTK("agtiapi_attach: xpt_bus_register fails\n" ); return( EIO ); } pmsc->sim = lsim; bus = cam_sim_path(pmsc->sim); tid = CAM_TARGET_WILDCARD; lun = CAM_LUN_WILDCARD; ccb = xpt_alloc_ccb_nowait(); if (ccb == agNULL) { mtx_unlock( &thisCardInst->pmIOLock ); cam_sim_free( lsim, TRUE ); cam_simq_free( devq ); return ( EIO ); } if (xpt_create_path(&ccb->ccb_h.path, agNULL, bus, tid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mtx_unlock( &thisCardInst->pmIOLock ); cam_sim_free( lsim, TRUE ); cam_simq_free( devq ); xpt_free_ccb(ccb); return( EIO ); } pmsc->path = ccb->ccb_h.path; xpt_setup_ccb(&csa.ccb_h, pmsc->path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE; csa.callback = agtiapi_async; csa.callback_arg = pmsc; xpt_action((union ccb *)&csa); if (csa.ccb_h.status != CAM_REQ_CMP) { AGTIAPI_PRINTK("agtiapi_attach: Unable to register AC_FOUND_DEVICE\n" ); } lsim->devq = devq; mtx_unlock( &thisCardInst->pmIOLock ); // get TD and lower layer memory requirements tiCOMGetResource( &pmsc->tiRoot, &pRscInfo->tiLoLevelResource, &pRscInfo->tiInitiatorResource, NULL, &pRscInfo->tiSharedMem ); agtiapi_ScopeDMARes( thisCardInst ); AGTIAPI_PRINTK( "agtiapi_attach: size from the call agtiapi_ScopeDMARes" " 0x%x \n", pmsc->typhn ); // initialize card information and get resource ready if( agtiapi_InitResource( thisCardInst ) == AGTIAPI_FAIL ) { AGTIAPI_PRINTK( "agtiapi_attach: Card %d initialize resource ERROR\n", thisCard ); } // begin: allocate and initialize card portal info resource ag_portal_data_t *pPortalData; if (pmsc->portCount == 0) { pmsc->pPortalData = NULL; } else { pmsc->pPortalData = (ag_portal_data_t *) malloc( sizeof(ag_portal_data_t) * pmsc->portCount, M_PMC_MPRT, M_ZERO | M_WAITOK ); if (pmsc->pPortalData == NULL) { AGTIAPI_PRINTK( "agtiapi_attach: Portal memory allocation ERROR\n" ); } } pPortalData = pmsc->pPortalData; for( idx = 0; idx < pmsc->portCount; idx++ ) { pPortalData->pCard = pmsc; pPortalData->portalInfo.portID = idx; pPortalData->portalInfo.tiPortalContext.osData = (void *)pPortalData; pPortalData++; } // end: allocate and initialize card portal info resource // begin: enable msix // setup msix // map to interrupt handler int error = 0; int mesgs = MAX_MSIX_NUM_VECTOR; int i, cnt; void (*intrHandler[MAX_MSIX_NUM_ISR])(void *arg) = { agtiapi_IntrHandler0, agtiapi_IntrHandler1, agtiapi_IntrHandler2, agtiapi_IntrHandler3, agtiapi_IntrHandler4, agtiapi_IntrHandler5, agtiapi_IntrHandler6, agtiapi_IntrHandler7, agtiapi_IntrHandler8, agtiapi_IntrHandler9, agtiapi_IntrHandler10, 
agtiapi_IntrHandler11, agtiapi_IntrHandler12, agtiapi_IntrHandler13, agtiapi_IntrHandler14, agtiapi_IntrHandler15 }; cnt = pci_msix_count(devx); AGTIAPI_PRINTK("supported MSIX %d\n", cnt); //this should be 64 mesgs = MIN(mesgs, cnt); error = pci_alloc_msix(devx, &mesgs); if (error != 0) { printf( "pci_alloc_msix error %d\n", error ); AGTIAPI_PRINTK("error %d\n", error); return( EIO ); } for(i=0; i < mesgs; i++) { pmsc->rscID[i] = i + 1; pmsc->irq[i] = bus_alloc_resource_any( devx, SYS_RES_IRQ, &pmsc->rscID[i], RF_ACTIVE ); if( pmsc->irq[i] == NULL ) { printf( "RES_IRQ went terribly bad at %d\n", i ); return( EIO ); } if ( (error = bus_setup_intr( devx, pmsc->irq[i], INTR_TYPE_CAM | INTR_MPSAFE, NULL, intrHandler[i], pmsc, &pmsc->intrcookie[i] ) ) != 0 ) { device_printf( devx, "Failed to register handler" ); return( EIO ); } } pmsc->flags |= AGTIAPI_IRQ_REQUESTED; pmsc->pCardInfo->maxInterruptVectors = MAX_MSIX_NUM_VECTOR; // end: enable msix int ret = 0; ret = agtiapi_InitCardSW(pmsc); if (ret == AGTIAPI_FAIL || ret == AGTIAPI_UNKNOWN) { AGTIAPI_PRINTK( "agtiapi_attach: agtiapi_InitCardSW failure %d\n", ret ); return( EIO ); } pmsc->ccbFreeList = NULL; pmsc->ccbChainList = NULL; pmsc->ccbAllocList = NULL; pmsc->flags |= ( AGTIAPI_INSTALLED ); ret = agtiapi_alloc_requests( pmsc ); if( ret != 0 ) { AGTIAPI_PRINTK( "agtiapi_attach: agtiapi_alloc_requests failure %d\n", ret ); return( EIO ); } ret = agtiapi_alloc_ostimem( pmsc ); if (ret != AGTIAPI_SUCCESS) { AGTIAPI_PRINTK( "agtiapi_attach: agtiapi_alloc_ostimem failure %d\n", ret ); return( EIO ); } ret = agtiapi_InitCardHW( pmsc ); if (ret != 0) { AGTIAPI_PRINTK( "agtiapi_attach: agtiapi_InitCardHW failure %d\n", ret ); return( EIO ); } #ifdef HIALEAH_ENCRYPTION if(pmsc->encrypt) { if((agtiapi_SetupEncryption(pmsc)) < 0) AGTIAPI_PRINTK("SetupEncryption returned less than 0\n"); } #endif pmsc->flags &= ~AGTIAPI_INIT_TIME; return( 0 ); } /****************************************************************************** agtiapi_InitCardSW() Purpose: Host Bus Adapter Initialization Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: TBD, need chip register information ******************************************************************************/ STATIC agBOOLEAN agtiapi_InitCardSW( struct agtiapi_softc *pmsc ) { ag_card_info_t *thisCardInst = pmsc->pCardInfo; ag_resource_info_t *pRscInfo = &thisCardInst->tiRscInfo; int initSWIdx; // begin: agtiapi_InitCardSW() // now init some essential locks n agtiapi_InitCardSW mtx_init( &pmsc->sendLock, "local q send lock", NULL, MTX_DEF ); mtx_init( &pmsc->doneLock, "local q done lock", NULL, MTX_DEF ); mtx_init( &pmsc->sendSMPLock, "local q send lock", NULL, MTX_DEF ); mtx_init( &pmsc->doneSMPLock, "local q done lock", NULL, MTX_DEF ); mtx_init( &pmsc->ccbLock, "ccb list lock", NULL, MTX_DEF ); mtx_init( &pmsc->devListLock, "hotP devListLock", NULL, MTX_DEF ); mtx_init( &pmsc->memLock, "dynamic memory lock", NULL, MTX_DEF ); mtx_init( &pmsc->freezeLock, "sim freeze lock", NULL, MTX_DEF | MTX_RECURSE); // initialize lower layer resources //## if (pCard->flags & AGTIAPI_INIT_TIME) { #ifdef HIALEAH_ENCRYPTION /* Enable encryption if chip supports it */ if (pci_get_device(pmsc->pCardInfo->pPCIDev) == PCI_DEVICE_ID_HIALEAH_HBA_SPCVE) pmsc->encrypt = 1; if (pmsc->encrypt) pRscInfo->tiLoLevelResource.loLevelOption.encryption = agTRUE; #endif pmsc->flags &= ~(AGTIAPI_PORT_INITIALIZED | AGTIAPI_SYS_INTR_ON); // For now, up to 16 
MSIX vectors are supported thisCardInst->tiRscInfo.tiLoLevelResource.loLevelOption. maxInterruptVectors = pmsc->pCardInfo->maxInterruptVectors; AGTIAPI_PRINTK( "agtiapi_InitCardSW: maxInterruptVectors set to %d", pmsc->pCardInfo->maxInterruptVectors ); thisCardInst->tiRscInfo.tiLoLevelResource.loLevelOption.max_MSI_InterruptVectors = 0; thisCardInst->tiRscInfo.tiLoLevelResource.loLevelOption.flag = 0; pRscInfo->tiLoLevelResource.loLevelOption.maxNumOSLocks = 0; AGTIAPI_PRINTK( "agtiapi_InitCardSW: tiCOMInit root %p, dev %p, pmsc %p\n", &pmsc->tiRoot, pmsc->my_dev, pmsc ); if( tiCOMInit( &pmsc->tiRoot, &thisCardInst->tiRscInfo.tiLoLevelResource, &thisCardInst->tiRscInfo.tiInitiatorResource, NULL, &thisCardInst->tiRscInfo.tiSharedMem ) != tiSuccess ) { AGTIAPI_PRINTK( "agtiapi_InitCardSW: tiCOMInit ERROR\n" ); return AGTIAPI_FAIL; } int maxLocks; maxLocks = pRscInfo->tiLoLevelResource.loLevelOption.numOfQueuesPerPort; pmsc->STLock = malloc( ( maxLocks * sizeof(struct mtx) ), M_PMC_MSTL, M_ZERO | M_WAITOK ); for( initSWIdx = 0; initSWIdx < maxLocks; initSWIdx++ ) { // init all indexes mtx_init( &pmsc->STLock[initSWIdx], "LL & TD lock", NULL, MTX_DEF ); } if( tiCOMPortInit( &pmsc->tiRoot, agFALSE ) != tiSuccess ) { printf( "agtiapi_InitCardSW: tiCOMPortInit ERROR -- AGTIAPI_FAIL\n" ); return AGTIAPI_FAIL; } AGTIAPI_PRINTK( "agtiapi_InitCardSW: tiCOMPortInit" " root %p, dev %p, pmsc %p\n", &pmsc->tiRoot, pmsc->my_dev, pmsc ); pmsc->flags |= AGTIAPI_PORT_INITIALIZED; pmsc->freezeSim = agFALSE; #ifdef HIALEAH_ENCRYPTION atomic_set(&outstanding_encrypted_io_count, 0); /*fix below*/ /*if(pmsc->encrypt && (pmsc->flags & AGTIAPI_INIT_TIME)) if((agtiapi_SetupEncryptionPools(pmsc)) != 0) printf("SetupEncryptionPools failed\n"); */ #endif return AGTIAPI_SUCCESS; // end: agtiapi_InitCardSW() } /****************************************************************************** agtiapi_InitCardHW() Purpose: Host Bus Adapter Initialization Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: TBD, need chip register information ******************************************************************************/ STATIC agBOOLEAN agtiapi_InitCardHW( struct agtiapi_softc *pmsc ) { U32 numVal; U32 count; U32 loop; // begin: agtiapi_InitCardHW() ag_portal_info_t *pPortalInfo = NULL; ag_portal_data_t *pPortalData; // ISR is registered, enable chip interrupt. 
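// After interrupts are live, this routine allocates the per-target pDevList
// (plus the PERBI mapping lists when that support is compiled in) sized by
// devDiscover, then starts every configured portal with tiCOMPortStart(),
// retrying each one up to AGTIAPI_LOOP_MAX times before returning
// AGTIAPI_FAIL.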
tiCOMSystemInterruptsActive( &pmsc->tiRoot, agTRUE ); pmsc->flags |= AGTIAPI_SYS_INTR_ON; numVal = sizeof(ag_device_t) * pmsc->devDiscover; pmsc->pDevList = (ag_device_t *)malloc( numVal, M_PMC_MDVT, M_ZERO | M_WAITOK ); if( !pmsc->pDevList ) { AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d DevList ERROR\n", numVal ); panic( "agtiapi_InitCardHW\n" ); return AGTIAPI_FAIL; } #ifdef LINUX_PERBI_SUPPORT numVal = sizeof(ag_slr_map_t) * pmsc->devDiscover; pmsc->pSLRList = (ag_slr_map_t *)malloc( numVal, M_PMC_MSLR, M_ZERO | M_WAITOK ); if( !pmsc->pSLRList ) { AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d SLRList ERROR\n", numVal ); panic( "agtiapi_InitCardHW SLRL\n" ); return AGTIAPI_FAIL; } numVal = sizeof(ag_tgt_map_t) * pmsc->devDiscover; pmsc->pWWNList = (ag_tgt_map_t *)malloc( numVal, M_PMC_MTGT, M_ZERO | M_WAITOK ); if( !pmsc->pWWNList ) { AGTIAPI_PRINTK( "agtiapi_InitCardHW: kmalloc %d WWNList ERROR\n", numVal ); panic( "agtiapi_InitCardHW WWNL\n" ); return AGTIAPI_FAIL; } // Get the WWN_to_target_ID mappings from the // holding area which contains the input of the // system configuration file. if( ag_Perbi ) agtiapi_GetWWNMappings( pmsc, agMappingList ); else { agtiapi_GetWWNMappings( pmsc, 0 ); if( agMappingList ) printf( "agtiapi_InitCardHW: WWN PERBI disabled WARN\n" ); } #endif //agtiapi_DelaySec(5); DELAY( 500000 ); pmsc->tgtCount = 0; pmsc->flags &= ~AGTIAPI_CB_DONE; pPortalData = pmsc->pPortalData; //start port for (count = 0; count < pmsc->portCount; count++) { AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags ); pPortalInfo = &pPortalData->portalInfo; pPortalInfo->portStatus &= ~( AGTIAPI_PORT_START | AGTIAPI_PORT_DISC_READY | AGTIAPI_DISC_DONE | AGTIAPI_DISC_COMPLETE ); for (loop = 0; loop < AGTIAPI_LOOP_MAX; loop++) { AGTIAPI_PRINTK( "tiCOMPortStart entry data %p / %d / %p\n", &pmsc->tiRoot, pPortalInfo->portID, &pPortalInfo->tiPortalContext ); if( tiCOMPortStart( &pmsc->tiRoot, pPortalInfo->portID, &pPortalInfo->tiPortalContext, 0 ) != tiSuccess ) { AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); agtiapi_DelayMSec( AGTIAPI_EXTRA_DELAY ); AG_SPIN_LOCK_IRQ(agtiapi_host_lock, flags); AGTIAPI_PRINTK( "tiCOMPortStart failed -- no loop, portalData %p\n", pPortalData ); } else { AGTIAPI_PRINTK( "tiCOMPortStart success no loop, portalData %p\n", pPortalData ); break; } } // end of for loop /* release lock */ AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); if( loop >= AGTIAPI_LOOP_MAX ) { return AGTIAPI_FAIL; } tiCOMGetPortInfo( &pmsc->tiRoot, &pPortalInfo->tiPortalContext, &pPortalInfo->tiPortInfo ); pPortalData++; } /* discover target device */ #ifndef HOTPLUG_SUPPORT agtiapi_DiscoverTgt( pCard ); #endif pmsc->flags |= AGTIAPI_INSTALLED; if( pmsc->flags & AGTIAPI_INIT_TIME ) { agtiapi_TITimer( (void *)pmsc ); pmsc->flags |= AGTIAPI_TIMER_ON; } return 0; } /****************************************************************************** agtiapi_IntrHandlerx_() Purpose: Interrupt service routine. 
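  Common body shared by agtiapi_IntrHandler0() through agtiapi_IntrHandler15():
  it runs tiCOMInterruptHandler() for the given vector and, unless the
  deferred (DPC) path is compiled in, drains the completed SCSI and SMP CCB
  lists inline.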
Parameters: void arg (IN) Pointer to the HBA data structure bit32 idx (IN) Vector index ******************************************************************************/ void agtiapi_IntrHandlerx_( void *arg, int index ) { struct agtiapi_softc *pCard; int rv; pCard = (struct agtiapi_softc *)arg; #ifndef AGTIAPI_DPC ccb_t *pccb; #endif AG_LOCAL_LOCK(&(pCard->pCardInfo->pmIOLock)); AG_PERF_SPINLOCK(agtiapi_host_lock); if (pCard->flags & AGTIAPI_SHUT_DOWN) goto ext; rv = tiCOMInterruptHandler(&pCard->tiRoot, index); if (rv == agFALSE) { /* not our irq */ AG_SPIN_UNLOCK(agtiapi_host_lock); AG_LOCAL_UNLOCK(&(pCard->pCardInfo->pmIOLock)); return; } #ifdef AGTIAPI_DPC tasklet_hi_schedule(&pCard->tasklet_dpc[idx]); #else /* consume all completed entries, 100 is random number to be big enough */ tiCOMDelayedInterruptHandler(&pCard->tiRoot, index, 100, tiInterruptContext); AG_GET_DONE_PCCB(pccb, pCard); AG_GET_DONE_SMP_PCCB(pccb, pCard); #endif ext: AG_SPIN_UNLOCK(agtiapi_host_lock); AG_LOCAL_UNLOCK(&(pCard->pCardInfo->pmIOLock)); return; } /****************************************************************************** agtiapi_IntrHandler0() Purpose: Interrupt service routine for interrupt vector index 0. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler0( void *arg ) { agtiapi_IntrHandlerx_( arg, 0 ); return; } /****************************************************************************** agtiapi_IntrHandler1() Purpose: Interrupt service routine for interrupt vector index 1. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler1( void *arg ) { agtiapi_IntrHandlerx_( arg, 1 ); return; } /****************************************************************************** agtiapi_IntrHandler2() Purpose: Interrupt service routine for interrupt vector index 2. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler2( void *arg ) { agtiapi_IntrHandlerx_( arg, 2 ); return; } /****************************************************************************** agtiapi_IntrHandler3() Purpose: Interrupt service routine for interrupt vector index 3. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler3( void *arg ) { agtiapi_IntrHandlerx_( arg, 3 ); return; } /****************************************************************************** agtiapi_IntrHandler4() Purpose: Interrupt service routine for interrupt vector index 4. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler4( void *arg ) { agtiapi_IntrHandlerx_( arg, 4 ); return; } /****************************************************************************** agtiapi_IntrHandler5() Purpose: Interrupt service routine for interrupt vector index 5. 
Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler5( void *arg ) { agtiapi_IntrHandlerx_( arg, 5 ); return; } /****************************************************************************** agtiapi_IntrHandler6() Purpose: Interrupt service routine for interrupt vector index 6. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler6( void *arg ) { agtiapi_IntrHandlerx_( arg, 6 ); return; } /****************************************************************************** agtiapi_IntrHandler7() Purpose: Interrupt service routine for interrupt vector index 7. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler7( void *arg ) { agtiapi_IntrHandlerx_( arg, 7 ); return; } /****************************************************************************** agtiapi_IntrHandler8() Purpose: Interrupt service routine for interrupt vector index 8. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler8( void *arg ) { agtiapi_IntrHandlerx_( arg, 8 ); return; } /****************************************************************************** agtiapi_IntrHandler9() Purpose: Interrupt service routine for interrupt vector index 9. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler9( void *arg ) { agtiapi_IntrHandlerx_( arg, 9 ); return; } /****************************************************************************** agtiapi_IntrHandler10() Purpose: Interrupt service routine for interrupt vector index 10. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler10( void *arg ) { agtiapi_IntrHandlerx_( arg, 10 ); return; } /****************************************************************************** agtiapi_IntrHandler11() Purpose: Interrupt service routine for interrupt vector index 11. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler11( void *arg ) { agtiapi_IntrHandlerx_( arg, 11 ); return; } /****************************************************************************** agtiapi_IntrHandler12() Purpose: Interrupt service routine for interrupt vector index 12. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler12( void *arg ) { agtiapi_IntrHandlerx_( arg, 12 ); return; } /****************************************************************************** agtiapi_IntrHandler13() Purpose: Interrupt service routine for interrupt vector index 13. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler13( void *arg ) { agtiapi_IntrHandlerx_( arg, 13 ); return; } /****************************************************************************** agtiapi_IntrHandler14() Purpose: Interrupt service routine for interrupt vector index 14. 
Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler14( void *arg ) { agtiapi_IntrHandlerx_( arg, 14 ); return; } /****************************************************************************** agtiapi_IntrHandler15() Purpose: Interrupt service routine for interrupt vector index 15. Parameters: void arg (IN) Pointer to the HBA data structure ******************************************************************************/ void agtiapi_IntrHandler15( void *arg ) { agtiapi_IntrHandlerx_( arg, 15 ); return; } static void agtiapi_SglMemoryCB( void *arg, bus_dma_segment_t *dm_segs, int nseg, int error ) { bus_addr_t *addr; AGTIAPI_PRINTK("agtiapi_SglMemoryCB: start\n"); if (error != 0) { AGTIAPI_PRINTK("agtiapi_SglMemoryCB: error %d\n", error); panic("agtiapi_SglMemoryCB: error %d\n", error); return; } addr = arg; *addr = dm_segs[0].ds_addr; return; } static void agtiapi_MemoryCB( void *arg, bus_dma_segment_t *dm_segs, int nseg, int error ) { bus_addr_t *addr; AGTIAPI_PRINTK("agtiapi_MemoryCB: start\n"); if (error != 0) { AGTIAPI_PRINTK("agtiapi_MemoryCB: error %d\n", error); panic("agtiapi_MemoryCB: error %d\n", error); return; } addr = arg; *addr = dm_segs[0].ds_addr; return; } /****************************************************************************** agtiapi_alloc_requests() Purpose: Allocates resources such as dma tag and timer Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: ******************************************************************************/ int agtiapi_alloc_requests( struct agtiapi_softc *pmcsc ) { int rsize, nsegs; U32 next_tick; nsegs = AGTIAPI_NSEGS; rsize = AGTIAPI_MAX_DMA_SEGS; // 128 AGTIAPI_PRINTK( "agtiapi_alloc_requests: MAXPHYS 0x%x PAGE_SIZE 0x%x \n", MAXPHYS, PAGE_SIZE ); AGTIAPI_PRINTK( "agtiapi_alloc_requests: nsegs %d rsize %d \n", nsegs, rsize ); // 32, 128 // This is for csio->data_ptr if( bus_dma_tag_create( agNULL, // parent 1, // alignment 0, // boundary BUS_SPACE_MAXADDR, // lowaddr BUS_SPACE_MAXADDR, // highaddr NULL, // filter NULL, // filterarg BUS_SPACE_MAXSIZE_32BIT, // maxsize nsegs, // nsegments BUS_SPACE_MAXSIZE_32BIT, // maxsegsize BUS_DMA_ALLOCNOW, // flags busdma_lock_mutex, // lockfunc &pmcsc->pCardInfo->pmIOLock, // lockarg &pmcsc->buffer_dmat ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_requests: Cannot alloc request DMA tag\n" ); return( ENOMEM ); } // This is for tiSgl_t of pccb in agtiapi_PrepCCBs() rsize = (sizeof(tiSgl_t) * AGTIAPI_NSEGS) * AGTIAPI_CCB_PER_DEVICE * maxTargets; AGTIAPI_PRINTK( "agtiapi_alloc_requests: rsize %d \n", rsize ); // 32, 128 if( bus_dma_tag_create( agNULL, // parent 32, // alignment 0, // boundary BUS_SPACE_MAXADDR_32BIT, // lowaddr BUS_SPACE_MAXADDR, // highaddr NULL, // filter NULL, // filterarg rsize, // maxsize 1, // nsegments rsize, // maxsegsize BUS_DMA_ALLOCNOW, // flags NULL, // lockfunc NULL, // lockarg &pmcsc->tisgl_dmat ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_requests: Cannot alloc request DMA tag\n" ); return( ENOMEM ); } if( bus_dmamem_alloc( pmcsc->tisgl_dmat, (void **)&pmcsc->tisgl_mem, BUS_DMA_NOWAIT, &pmcsc->tisgl_map ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_requests: Cannot allocate SGL memory\n" ); return( ENOMEM ); } bzero( pmcsc->tisgl_mem, rsize ); bus_dmamap_load( pmcsc->tisgl_dmat, pmcsc->tisgl_map, pmcsc->tisgl_mem, rsize, agtiapi_SglMemoryCB, &pmcsc->tisgl_busaddr, BUS_DMA_NOWAIT /* 0 */ ); mtx_init( 
&pmcsc->OS_timer_lock, "OS timer lock", NULL, MTX_DEF ); mtx_init( &pmcsc->IO_timer_lock, "IO timer lock", NULL, MTX_DEF ); mtx_init( &pmcsc->devRmTimerLock, "targ rm timer lock", NULL, MTX_DEF ); callout_init_mtx( &pmcsc->OS_timer, &pmcsc->OS_timer_lock, 0 ); callout_init_mtx( &pmcsc->IO_timer, &pmcsc->IO_timer_lock, 0 ); callout_init_mtx( &pmcsc->devRmTimer, &pmcsc->devRmTimerLock, 0); next_tick = pmcsc->pCardInfo->tiRscInfo.tiLoLevelResource. loLevelOption.usecsPerTick / USEC_PER_TICK; AGTIAPI_PRINTK( "agtiapi_alloc_requests: before callout_reset, " "next_tick 0x%x\n", next_tick ); callout_reset( &pmcsc->OS_timer, next_tick, agtiapi_TITimer, pmcsc ); return 0; } /****************************************************************************** agtiapi_alloc_ostimem() Purpose: Allocates memory used later in ostiAllocMemory Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to the HBA data structure Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail Note: This is a pre-allocation for ostiAllocMemory() "non-cacheable" function calls ******************************************************************************/ int agtiapi_alloc_ostimem( struct agtiapi_softc *pmcsc ) { int rsize, nomsize; nomsize = 4096; rsize = AGTIAPI_DYNAMIC_MAX * nomsize; // 8M AGTIAPI_PRINTK("agtiapi_alloc_ostimem: rsize %d \n", rsize); if( bus_dma_tag_create( agNULL, // parent 32, // alignment 0, // boundary BUS_SPACE_MAXADDR, // lowaddr BUS_SPACE_MAXADDR, // highaddr NULL, // filter NULL, // filterarg rsize, // maxsize (size) 1, // number of segments rsize, // maxsegsize 0, // flags NULL, // lockfunc NULL, // lockarg &pmcsc->osti_dmat ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_ostimem: Can't create no-cache mem tag\n" ); return AGTIAPI_FAIL; } if( bus_dmamem_alloc( pmcsc->osti_dmat, &pmcsc->osti_mem, BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_NOCACHE, &pmcsc->osti_mapp ) ) { AGTIAPI_PRINTK( "agtiapi_alloc_ostimem: Cannot allocate cache mem %d\n", rsize ); return AGTIAPI_FAIL; } bus_dmamap_load( pmcsc->osti_dmat, pmcsc->osti_mapp, pmcsc->osti_mem, rsize, agtiapi_MemoryCB, // try reuse of CB for same goal &pmcsc->osti_busaddr, BUS_DMA_NOWAIT ); // populate all the ag_dma_addr_t osti_busaddr/mem fields with addresses for // handy reference when driver is in motion int idx; ag_card_info_t *pCardInfo = pmcsc->pCardInfo; ag_dma_addr_t *pMem; for( idx = 0; idx < AGTIAPI_DYNAMIC_MAX; idx++ ) { pMem = &pCardInfo->dynamicMem[idx]; pMem->nocache_busaddr = pmcsc->osti_busaddr + ( idx * nomsize ); pMem->nocache_mem = (void*)((U64)pmcsc->osti_mem + ( idx * nomsize )); pCardInfo->freeDynamicMem[idx] = &pCardInfo->dynamicMem[idx]; } pCardInfo->topOfFreeDynamicMem = AGTIAPI_DYNAMIC_MAX; return AGTIAPI_SUCCESS; } /****************************************************************************** agtiapi_cam_action() Purpose: Parses CAM frames and triggers a corresponding action Parameters: struct cam_sim *sim (IN) Pointer to SIM data structure union ccb * ccb (IN) Pointer to CAM ccb data structure Return: Note: ******************************************************************************/ static void agtiapi_cam_action( struct cam_sim *sim, union ccb * ccb ) { struct agtiapi_softc *pmcsc; tiDeviceHandle_t *pDevHandle = NULL; // acts as flag as well tiDeviceInfo_t devInfo; int pathID, targetID, lunID; int lRetVal; U32 TID; U32 speed = 150000; pmcsc = cam_sim_softc( sim ); AGTIAPI_IO( "agtiapi_cam_action: start pmcs %p\n", pmcsc ); if (pmcsc == agNULL) { AGTIAPI_PRINTK( "agtiapi_cam_action: start pmcs is NULL\n" ); return; } mtx_assert( 
&(pmcsc->pCardInfo->pmIOLock), MA_OWNED ); AGTIAPI_IO( "agtiapi_cam_action: cardNO %d func_code 0x%x\n", pmcsc->cardNo, ccb->ccb_h.func_code ); pathID = xpt_path_path_id( ccb->ccb_h.path ); targetID = xpt_path_target_id( ccb->ccb_h.path ); lunID = xpt_path_lun_id( ccb->ccb_h.path ); AGTIAPI_IO( "agtiapi_cam_action: P 0x%x T 0x%x L 0x%x\n", pathID, targetID, lunID ); switch (ccb->ccb_h.func_code) { case XPT_PATH_INQ: { struct ccb_pathinq *cpi; /* See architecure book p180*/ cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE | PI_WIDE_16; cpi->target_sprt = 0; cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN; cpi->hba_eng_cnt = 0; cpi->max_target = maxTargets - 1; cpi->max_lun = AGTIAPI_MAX_LUN; cpi->maxio = 1024 *1024; /* Max supported I/O size, in bytes. */ cpi->initiator_id = 255; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "PMC", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); // rate is set when XPT_GET_TRAN_SETTINGS is processed cpi->base_transfer_speed = 150000; cpi->transport = XPORT_SAS; cpi->transport_version = 0; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_SPC3; cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts; struct ccb_trans_settings_sas *sas; struct ccb_trans_settings_scsi *scsi; if ( pmcsc->flags & AGTIAPI_SHUT_DOWN ) { return; } cts = &ccb->cts; sas = &ccb->cts.xport_specific.sas; scsi = &cts->proto_specific.scsi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_SPC3; cts->transport = XPORT_SAS; cts->transport_version = 0; sas->valid = CTS_SAS_VALID_SPEED; /* this sets the "MB/s transfers" */ if (pmcsc != NULL && targetID >= 0 && targetID < maxTargets) { if (pmcsc->pWWNList != NULL) { TID = INDEX(pmcsc, targetID); if (TID < maxTargets) { pDevHandle = pmcsc->pDevList[TID].pDevHandle; } } } if (pDevHandle) { tiINIGetDeviceInfo( &pmcsc->tiRoot, pDevHandle, &devInfo ); switch (devInfo.info.devType_S_Rate & 0xF) { case 0x8: speed = 150000; break; case 0x9: speed = 300000; break; case 0xA: speed = 600000; break; case 0xB: speed = 1200000; break; default: speed = 150000; break; } } sas->bitrate = speed; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: { lRetVal = agtiapi_eh_HostReset( pmcsc, ccb ); // usually works first time if ( SUCCESS == lRetVal ) { AGTIAPI_PRINTK( "agtiapi_cam_action: bus reset success.\n" ); } else { AGTIAPI_PRINTK( "agtiapi_cam_action: bus reset failed.\n" ); } ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_DEV: { ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_ABORT: { ccb->ccb_h.status = CAM_REQ_CMP; break; } #if __FreeBSD_version >= 900026 case XPT_SMP_IO: { agtiapi_QueueSMP( pmcsc, ccb ); return; } #endif /* __FreeBSD_version >= 900026 */ case XPT_SCSI_IO: { if(pmcsc->dev_scan == agFALSE) { ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; } if (pmcsc->flags & AGTIAPI_SHUT_DOWN) { AGTIAPI_PRINTK( "agtiapi_cam_action: shutdown, XPT_SCSI_IO 0x%x\n", XPT_SCSI_IO ); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } else { AGTIAPI_IO( "agtiapi_cam_action: Zero XPT_SCSI_IO 0x%x, doing IOs\n", XPT_SCSI_IO ); agtiapi_QueueCmnd_( pmcsc, ccb ); return; } } case XPT_CALC_GEOMETRY: { cam_calc_geometry(&ccb->ccg, 1); ccb->ccb_h.status = CAM_REQ_CMP; break; } default: { /* XPT_SET_TRAN_SETTINGS */ AGTIAPI_IO( "agtiapi_cam_action: default function code 0x%x\n", 
ccb->ccb_h.func_code ); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; } } /* switch */ xpt_done(ccb); } /****************************************************************************** agtiapi_GetCCB() Purpose: Get a ccb from free list or allocate a new one Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA structure Return: Pointer to a ccb structure, or NULL if not available Note: ******************************************************************************/ STATIC pccb_t agtiapi_GetCCB( struct agtiapi_softc *pmcsc ) { pccb_t pccb; AGTIAPI_IO( "agtiapi_GetCCB: start\n" ); AG_LOCAL_LOCK( &pmcsc->ccbLock ); /* get the ccb from the head of the free list */ if ((pccb = (pccb_t)pmcsc->ccbFreeList) != NULL) { pmcsc->ccbFreeList = (caddr_t *)pccb->pccbNext; pccb->pccbNext = NULL; pccb->flags = ACTIVE; pccb->startTime = 0; pmcsc->activeCCB++; AGTIAPI_IO( "agtiapi_GetCCB: re-allocated ccb %p\n", pccb ); } else { AGTIAPI_PRINTK( "agtiapi_GetCCB: kmalloc ERROR - no ccb allocated\n" ); } AG_LOCAL_UNLOCK( &pmcsc->ccbLock ); return pccb; } /****************************************************************************** agtiapi_QueueCmnd_() Purpose: Calls for sending CCB and excuting on HBA. Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure union ccb * ccb (IN) Pointer to CAM ccb data structure Return: 0 - Command is pending to execute 1 - Command returned without further process Note: ******************************************************************************/ int agtiapi_QueueCmnd_(struct agtiapi_softc *pmcsc, union ccb * ccb) { struct ccb_scsiio *csio = &ccb->csio; pccb_t pccb = agNULL; // call dequeue int status = tiSuccess; U32 Channel = CMND_TO_CHANNEL(ccb); U32 TID = CMND_TO_TARGET(ccb); U32 LUN = CMND_TO_LUN(ccb); AGTIAPI_IO( "agtiapi_QueueCmnd_: start\n" ); /* no support for CBD > 16 */ if (csio->cdb_len > 16) { AGTIAPI_PRINTK( "agtiapi_QueueCmnd_: unsupported CDB length %d\n", csio->cdb_len ); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQ_INVALID;//CAM_REQ_CMP; xpt_done(ccb); return tiError; } if (TID < 0 || TID >= maxTargets) { AGTIAPI_PRINTK("agtiapi_QueueCmnd_: INVALID TID ERROR\n"); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_DEV_NOT_THERE;//CAM_REQ_CMP; xpt_done(ccb); return tiError; } /* get a ccb */ if ((pccb = agtiapi_GetCCB(pmcsc)) == NULL) { ag_device_t *targ; AGTIAPI_PRINTK("agtiapi_QueueCmnd_: GetCCB ERROR\n"); if (pmcsc != NULL) { TID = INDEX(pmcsc, TID); targ = &pmcsc->pDevList[TID]; } if (targ != NULL) { agtiapi_adjust_queue_depth(ccb->ccb_h.path,targ->qdepth); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; xpt_done(ccb); return tiBusy; } pccb->pmcsc = pmcsc; /* initialize Command Control Block (CCB) */ pccb->targetId = TID; pccb->lun = LUN; pccb->channel = Channel; pccb->ccb = ccb; /* for struct scsi_cmnd */ pccb->senseLen = csio->sense_len; pccb->startTime = ticks; pccb->pSenseData = (caddr_t) &csio->sense_data; pccb->tiSuperScsiRequest.flags = 0; /* each channel is reserved for different addr modes */ pccb->addrMode = agtiapi_AddrModes[Channel]; status = agtiapi_PrepareSGList(pmcsc, pccb); if (status != tiSuccess) { AGTIAPI_PRINTK("agtiapi_QueueCmnd_: agtiapi_PrepareSGList failure\n"); agtiapi_FreeCCB(pmcsc, pccb); if (status == tiReject) { ccb->ccb_h.status = CAM_REQ_INVALID; } else { ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done( ccb ); return tiError; 
} return status; } /****************************************************************************** agtiapi_DumpCDB() Purpose: Prints out CDB Parameters: const char *ptitle (IN) A string to be printed ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: ******************************************************************************/ STATIC void agtiapi_DumpCDB(const char *ptitle, ccb_t *pccb) { union ccb *ccb; struct ccb_scsiio *csio; bit8 cdb[64]; int len; if (pccb == NULL) { printf( "agtiapi_DumpCDB: no pccb here \n" ); panic("agtiapi_DumpCDB: pccb is NULL. called from %s\n", ptitle); return; } ccb = pccb->ccb; if (ccb == NULL) { printf( "agtiapi_DumpCDB: no ccb here \n" ); panic( "agtiapi_DumpCDB: pccb %p ccb %p flags %d ccb NULL! " "called from %s\n", pccb, pccb->ccb, pccb->flags, ptitle ); return; } csio = &ccb->csio; if (csio == NULL) { printf( "agtiapi_DumpCDB: no csio here \n" ); panic( "agtiapi_DumpCDB: pccb%p ccb%p flags%d csio NULL! called from %s\n", pccb, pccb->ccb, pccb->flags, ptitle ); return; } len = MIN(64, csio->cdb_len); if (csio->ccb_h.flags & CAM_CDB_POINTER) { bcopy(csio->cdb_io.cdb_ptr, &cdb[0], len); } else { bcopy(csio->cdb_io.cdb_bytes, &cdb[0], len); } AGTIAPI_IO( "agtiapi_DumpCDB: pccb%p CDB0x%x csio->cdb_len %d" " len %d from %s\n", pccb, cdb[0], csio->cdb_len, len, ptitle ); return; } /****************************************************************************** agtiapi_DoSoftReset() Purpose: Do card reset Parameters: *data (IN) point to pmcsc (struct agtiapi_softc *) Return: Note: ******************************************************************************/ int agtiapi_DoSoftReset (struct agtiapi_softc *pmcsc) { int ret; unsigned long flags; pmcsc->flags |= AGTIAPI_SOFT_RESET; AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags ); ret = agtiapi_ResetCard( pmcsc, &flags ); AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); if( ret != AGTIAPI_SUCCESS ) return tiError; return SUCCESS; } /****************************************************************************** agtiapi_CheckIOTimeout() Purpose: Timeout function for SCSI IO or TM Parameters: *data (IN) point to pCard (ag_card_t *) Return: Note: ******************************************************************************/ STATIC void agtiapi_CheckIOTimeout(void *data) { U32 status = AGTIAPI_SUCCESS; ccb_t *pccb; struct agtiapi_softc *pmcsc; pccb_t pccb_curr; pccb_t pccb_next; pmcsc = (struct agtiapi_softc *)data; //AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: Enter\n"); //AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: Active CCB %d\n", pmcsc->activeCCB); pccb = (pccb_t)pmcsc->ccbChainList; /* if link is down, do nothing */ if ((pccb == NULL) || (pmcsc->activeCCB == 0)) { //AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: goto restart_timer\n"); goto restart_timer; } AG_SPIN_LOCK_IRQ(agtiapi_host_lock, flags); if (pmcsc->flags & AGTIAPI_SHUT_DOWN) goto ext; pccb_curr = pccb; /* Walk thorugh the IO Chain linked list to find the pending io */ /* Set the TM flag based on the pccb type, i.e SCSI IO or TM cmd */ while (pccb_curr != NULL) { /* start from 1st ccb in the chain */ pccb_next = pccb_curr->pccbChainNext; if( (pccb_curr->flags == 0) || (pccb_curr->tiIORequest.tdData == NULL) || (pccb_curr->startTime == 0) /* && (pccb->startTime == 0) */) { //AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: move to next element\n"); } else if ( ( (ticks-pccb_curr->startTime) >= ag_timeout_secs ) && !(pccb_curr->flags & TIMEDOUT) ) { AGTIAPI_PRINTK( "agtiapi_CheckIOTimeout: pccb %p timed out, call TM " "function -- flags=%x 
startTime=%ld tdData = %p\n", pccb_curr, pccb_curr->flags, pccb->startTime, pccb_curr->tiIORequest.tdData ); pccb_curr->flags |= TIMEDOUT; status = agtiapi_StartTM(pmcsc, pccb_curr); if (status == AGTIAPI_SUCCESS) { AGTIAPI_PRINTK( "agtiapi_CheckIOTimeout: TM Request sent with " "success\n" ); goto restart_timer; } else { #ifdef AGTIAPI_LOCAL_RESET /* abort request did not go through */ AGTIAPI_PRINTK("agtiapi_CheckIOTimeout: Abort request failed\n"); /* TODO: call Soft reset here */ AGTIAPI_PRINTK( "agtiapi_CheckIOTimeout:in agtiapi_CheckIOTimeout() " "abort request did not go thru ==> soft reset#7, then " "restart timer\n" ); agtiapi_DoSoftReset (pmcsc); goto restart_timer; #endif } } pccb_curr = pccb_next; } restart_timer: callout_reset(&pmcsc->IO_timer, 1*hz, agtiapi_CheckIOTimeout, pmcsc); ext: AG_SPIN_UNLOCK_IRQ(agtiapi_host_lock, flags); return; } /****************************************************************************** agtiapi_StartTM() Purpose: DDI calls for aborting outstanding IO command Parameters: struct scsi_cmnd *pccb (IN) Pointer to the command to be aborted unsigned long flags (IN/out) spinlock flags used in locking from calling layers Return: AGTIAPI_SUCCESS - success AGTIAPI_FAIL - fail ******************************************************************************/ int agtiapi_StartTM(struct agtiapi_softc *pCard, ccb_t *pccb) { ccb_t *pTMccb = NULL; U32 status = AGTIAPI_SUCCESS; ag_device_t *pDevice = NULL; U32 TMstatus = tiSuccess; AGTIAPI_PRINTK( "agtiapi_StartTM: pccb %p, pccb->flags %x\n", pccb, pccb->flags ); if (pccb == NULL) { AGTIAPI_PRINTK("agtiapi_StartTM: %p not found\n",pccb); status = AGTIAPI_SUCCESS; goto ext; } if (!pccb->tiIORequest.tdData) { /* should not be the case */ AGTIAPI_PRINTK("agtiapi_StartTM: ccb %p flag 0x%x tid %d no tdData " "ERROR\n", pccb, pccb->flags, pccb->targetId); status = AGTIAPI_FAIL; } else { /* If timedout CCB is TM_ABORT_TASK command, issue LocalAbort first to clear pending TM_ABORT_TASK */ /* Else Device State will not be put back to Operational, (refer FW) */ if (pccb->flags & TASK_MANAGEMENT) { if (tiINIIOAbort(&pCard->tiRoot, &pccb->tiIORequest) != tiSuccess) { AGTIAPI_PRINTK( "agtiapi_StartTM: LocalAbort Request for Abort_TASK " "TM failed\n" ); /* TODO: call Soft reset here */ AGTIAPI_PRINTK( "agtiapi_StartTM: in agtiapi_StartTM() abort " "tiINIIOAbort() failed ==> soft reset#8\n" ); agtiapi_DoSoftReset( pCard ); } else { AGTIAPI_PRINTK( "agtiapi_StartTM: LocalAbort for Abort_TASK TM " "Request sent\n" ); status = AGTIAPI_SUCCESS; } } else { /* get a ccb */ if ((pTMccb = agtiapi_GetCCB(pCard)) == NULL) { AGTIAPI_PRINTK("agtiapi_StartTM: TM resource unavailable!\n"); status = AGTIAPI_FAIL; goto ext; } pTMccb->pmcsc = pCard; pTMccb->targetId = pccb->targetId; pTMccb->devHandle = pccb->devHandle; if (pTMccb->targetId >= pCard->devDiscover) { AGTIAPI_PRINTK("agtiapi_StartTM: Incorrect dev Id in TM!\n"); status = AGTIAPI_FAIL; goto ext; } if (pTMccb->targetId < 0 || pTMccb->targetId >= maxTargets) { return AGTIAPI_FAIL; } if (INDEX(pCard, pTMccb->targetId) >= maxTargets) { return AGTIAPI_FAIL; } pDevice = &pCard->pDevList[INDEX(pCard, pTMccb->targetId)]; if ((pDevice == NULL) || !(pDevice->flags & ACTIVE)) { return AGTIAPI_FAIL; } /* save pending io to issue local abort at Task mgmt CB */ pTMccb->pccbIO = pccb; AGTIAPI_PRINTK( "agtiapi_StartTM: pTMccb %p flag %x tid %d via TM " "request !\n", pTMccb, pTMccb->flags, pTMccb->targetId ); pTMccb->flags &= ~(TASK_SUCCESS | ACTIVE); pTMccb->flags |= TASK_MANAGEMENT; TMstatus = 
tiINITaskManagement(&pCard->tiRoot, pccb->devHandle, AG_ABORT_TASK, &pccb->tiSuperScsiRequest.scsiCmnd.lun, &pccb->tiIORequest, &pTMccb->tiIORequest); if (TMstatus == tiSuccess) { AGTIAPI_PRINTK( "agtiapi_StartTM: TM_ABORT_TASK request success ccb " "%p, pTMccb %p\n", pccb, pTMccb ); pTMccb->startTime = ticks; status = AGTIAPI_SUCCESS; } else if (TMstatus == tiIONoDevice) { AGTIAPI_PRINTK( "agtiapi_StartTM: TM_ABORT_TASK request tiIONoDevice ccb " "%p, pTMccb %p\n", pccb, pTMccb ); status = AGTIAPI_SUCCESS; } else { AGTIAPI_PRINTK( "agtiapi_StartTM: TM_ABORT_TASK request failed ccb %p, " "pTMccb %p\n", pccb, pTMccb ); status = AGTIAPI_FAIL; agtiapi_FreeTMCCB(pCard, pTMccb); /* TODO */ /* call TM_TARGET_RESET */ } } } ext: AGTIAPI_PRINTK("agtiapi_StartTM: return %d flgs %x\n", status, (pccb) ? pccb->flags : -1); return status; } /* agtiapi_StartTM */ #if __FreeBSD_version > 901000 /****************************************************************************** agtiapi_PrepareSGList() Purpose: This function prepares scatter-gather list for the given ccb Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - success 1 - failure Note: ******************************************************************************/ static int agtiapi_PrepareSGList(struct agtiapi_softc *pmcsc, ccb_t *pccb) { union ccb *ccb = pccb->ccb; struct ccb_scsiio *csio = &ccb->csio; struct ccb_hdr *ccbh = &ccb->ccb_h; AGTIAPI_IO( "agtiapi_PrepareSGList: start\n" ); // agtiapi_DumpCDB("agtiapi_PrepareSGList", pccb); AGTIAPI_IO( "agtiapi_PrepareSGList: dxfer_len %d\n", csio->dxfer_len ); if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { switch((ccbh->flags & CAM_DATA_MASK)) { int error; struct bus_dma_segment seg; case CAM_DATA_VADDR: /* Virtual address that needs to translated into one or more physical address ranges. */ // int error; // AG_LOCAL_LOCK(&(pmcsc->pCardInfo->pmIOLock)); AGTIAPI_IO( "agtiapi_PrepareSGList: virtual address\n" ); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csio->data_ptr, csio->dxfer_len, agtiapi_PrepareSGListCB, pccb, BUS_DMA_NOWAIT/* 0 */ ); // AG_LOCAL_UNLOCK( &(pmcsc->pCardInfo->pmIOLock) ); if (error == EINPROGRESS) { /* So as to maintain ordering, freeze the controller queue until our mapping is returned. */ AGTIAPI_PRINTK("agtiapi_PrepareSGList: EINPROGRESS\n"); xpt_freeze_simq(pmcsc->sim, 1); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } break; case CAM_DATA_PADDR: /* We have been given a pointer to single physical buffer. 
*/ /* pccb->tiSuperScsiRequest.sglVirtualAddr = seg.ds_addr; */ //struct bus_dma_segment seg; AGTIAPI_PRINTK("agtiapi_PrepareSGList: physical address\n"); seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; // * 0xFF to be defined agtiapi_PrepareSGListCB(pccb, &seg, 1, 0xAABBCCDD); break; default: AGTIAPI_PRINTK("agtiapi_PrepareSGList: unexpected case\n"); return tiReject; } } else { agtiapi_PrepareSGListCB(pccb, NULL, 0, 0xAAAAAAAA); } return tiSuccess; } #else /****************************************************************************** agtiapi_PrepareSGList() Purpose: This function prepares scatter-gather list for the given ccb Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - success 1 - failure Note: ******************************************************************************/ static int agtiapi_PrepareSGList(struct agtiapi_softc *pmcsc, ccb_t *pccb) { union ccb *ccb = pccb->ccb; struct ccb_scsiio *csio = &ccb->csio; struct ccb_hdr *ccbh = &ccb->ccb_h; AGTIAPI_IO( "agtiapi_PrepareSGList: start\n" ); // agtiapi_DumpCDB("agtiapi_PrepareSGList", pccb); AGTIAPI_IO( "agtiapi_PrepareSGList: dxfer_len %d\n", csio->dxfer_len ); if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) { if ((ccbh->flags & CAM_SCATTER_VALID) == 0) { /* We've been given a pointer to a single buffer. */ if ((ccbh->flags & CAM_DATA_PHYS) == 0) { /* Virtual address that needs to translated into one or more physical address ranges. */ int error; // AG_LOCAL_LOCK(&(pmcsc->pCardInfo->pmIOLock)); AGTIAPI_IO( "agtiapi_PrepareSGList: virtual address\n" ); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csio->data_ptr, csio->dxfer_len, agtiapi_PrepareSGListCB, pccb, BUS_DMA_NOWAIT/* 0 */ ); // AG_LOCAL_UNLOCK( &(pmcsc->pCardInfo->pmIOLock) ); if (error == EINPROGRESS) { /* So as to maintain ordering, freeze the controller queue until our mapping is returned. */ AGTIAPI_PRINTK("agtiapi_PrepareSGList: EINPROGRESS\n"); xpt_freeze_simq(pmcsc->sim, 1); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } else { /* We have been given a pointer to single physical buffer. */ /* pccb->tiSuperScsiRequest.sglVirtualAddr = seg.ds_addr; */ struct bus_dma_segment seg; AGTIAPI_PRINTK("agtiapi_PrepareSGList: physical address\n"); seg.ds_addr = (bus_addr_t)(vm_offset_t)csio->data_ptr; seg.ds_len = csio->dxfer_len; // * 0xFF to be defined agtiapi_PrepareSGListCB(pccb, &seg, 1, 0xAABBCCDD); } } else { AGTIAPI_PRINTK("agtiapi_PrepareSGList: unexpected case\n"); return tiReject; } } else { agtiapi_PrepareSGListCB(pccb, NULL, 0, 0xAAAAAAAA); } return tiSuccess; } #endif /****************************************************************************** agtiapi_PrepareSGListCB() Purpose: Callback function for bus_dmamap_load() This fuctions sends IO to LL layer. 
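  It converts the bus_dma segment list into the tiSgl_t form expected by the
  TI layer (a single inline entry, or an external list addressed through
  tisgl_busaddr), copies the CDB, and validates the target index before the
  request is handed down.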
Parameters: void *arg (IN) Pointer to the HBA data structure bus_dma_segment_t *segs (IN) Pointer to dma segment int nsegs (IN) number of dma segment int error (IN) error Return: Note: ******************************************************************************/ static void agtiapi_PrepareSGListCB( void *arg, bus_dma_segment_t *segs, int nsegs, int error ) { pccb_t pccb = arg; union ccb *ccb = pccb->ccb; struct ccb_scsiio *csio = &ccb->csio; struct agtiapi_softc *pmcsc; tiIniScsiCmnd_t *pScsiCmnd; bit32 i; bus_dmasync_op_t op; U32_64 phys_addr; U08 *CDB; int io_is_encryptable = 0; unsigned long long start_lba = 0; ag_device_t *pDev; U32 TID = CMND_TO_TARGET(ccb); AGTIAPI_IO( "agtiapi_PrepareSGListCB: start, nsegs %d error 0x%x\n", nsegs, error ); pmcsc = pccb->pmcsc; if (error != tiSuccess) { if (error == 0xAABBCCDD || error == 0xAAAAAAAA) { // do nothing } else { AGTIAPI_PRINTK("agtiapi_PrepareSGListCB: error status 0x%x\n", error); bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap); bus_dmamap_destroy(pmcsc->buffer_dmat, pccb->CCB_dmamap); agtiapi_FreeCCB(pmcsc, pccb); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } } if (nsegs > AGTIAPI_MAX_DMA_SEGS) { AGTIAPI_PRINTK( "agtiapi_PrepareSGListCB: over the limit. nsegs %d" " AGTIAPI_MAX_DMA_SEGS %d\n", nsegs, AGTIAPI_MAX_DMA_SEGS ); bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap); bus_dmamap_destroy(pmcsc->buffer_dmat, pccb->CCB_dmamap); agtiapi_FreeCCB(pmcsc, pccb); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return; } /* fill in IO information */ pccb->dataLen = csio->dxfer_len; /* start fill in sgl structure */ if (nsegs == 1 && error == 0xAABBCCDD) { /* to be tested */ /* A single physical buffer */ AGTIAPI_PRINTK("agtiapi_PrepareSGListCB: nsegs is 1\n"); CPU_TO_LE32(pccb->tiSuperScsiRequest.agSgl1, segs[0].ds_addr); pccb->tiSuperScsiRequest.agSgl1.len = htole32(pccb->dataLen); pccb->tiSuperScsiRequest.agSgl1.type = htole32(tiSgl); pccb->tiSuperScsiRequest.sglVirtualAddr = (void *)segs->ds_addr; pccb->numSgElements = 1; } else if (nsegs == 0 && error == 0xAAAAAAAA) { /* no data transfer */ AGTIAPI_IO( "agtiapi_PrepareSGListCB: no data transfer\n" ); pccb->tiSuperScsiRequest.agSgl1.len = 0; pccb->dataLen = 0; pccb->numSgElements = 0; } else { /* virtual/logical buffer */ if (nsegs == 1) { pccb->dataLen = segs[0].ds_len; CPU_TO_LE32(pccb->tiSuperScsiRequest.agSgl1, segs[0].ds_addr); pccb->tiSuperScsiRequest.agSgl1.type = htole32(tiSgl); pccb->tiSuperScsiRequest.agSgl1.len = htole32(segs[0].ds_len); pccb->tiSuperScsiRequest.sglVirtualAddr = (void *)csio->data_ptr; pccb->numSgElements = nsegs; } else { pccb->dataLen = 0; /* loop */ for (i = 0; i < nsegs; i++) { pccb->sgList[i].len = htole32(segs[i].ds_len); CPU_TO_LE32(pccb->sgList[i], segs[i].ds_addr); pccb->sgList[i].type = htole32(tiSgl); pccb->dataLen += segs[i].ds_len; } /* for */ pccb->numSgElements = nsegs; /* set up sgl buffer address */ CPU_TO_LE32(pccb->tiSuperScsiRequest.agSgl1, pccb->tisgl_busaddr); pccb->tiSuperScsiRequest.agSgl1.type = htole32(tiSglList); pccb->tiSuperScsiRequest.agSgl1.len = htole32(pccb->dataLen); pccb->tiSuperScsiRequest.sglVirtualAddr = (void *)csio->data_ptr; pccb->numSgElements = nsegs; } /* else */ } /* set data transfer direction */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { op = BUS_DMASYNC_PREWRITE; pccb->tiSuperScsiRequest.dataDirection = tiDirectionOut; } else { op = BUS_DMASYNC_PREREAD; pccb->tiSuperScsiRequest.dataDirection = tiDirectionIn; } pScsiCmnd = &pccb->tiSuperScsiRequest.scsiCmnd; 
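  /*
   * The CDB is copied from CAM next (pointer or inline form) and the opcode
   * is examined: REQUEST_SENSE is given a private sense buffer via
   * vtophys(), INQUIRY is flagged for device type detection, and the
   * READ/WRITE 6/10/12/16 variants have their starting LBA extracted so it
   * can feed the optional encryption and DIF setup further below.
   */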
pScsiCmnd->expDataLength = pccb->dataLen; if (csio->ccb_h.flags & CAM_CDB_POINTER) { bcopy(csio->cdb_io.cdb_ptr, &pScsiCmnd->cdb[0], csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, &pScsiCmnd->cdb[0],csio->cdb_len); } CDB = &pScsiCmnd->cdb[0]; switch (CDB[0]) { case REQUEST_SENSE: /* requires different buffer */ /* This code should not be excercised because SAS support auto sense For the completeness, vtophys() is still used here. */ AGTIAPI_PRINTK("agtiapi_PrepareSGListCB: QueueCmnd - REQUEST SENSE new\n"); pccb->tiSuperScsiRequest.agSgl1.len = htole32(pccb->senseLen); phys_addr = vtophys(&csio->sense_data); CPU_TO_LE32(pccb->tiSuperScsiRequest.agSgl1, phys_addr); pccb->tiSuperScsiRequest.agSgl1.type = htole32(tiSgl); pccb->dataLen = pccb->senseLen; pccb->numSgElements = 1; break; case INQUIRY: /* only using lun 0 for device type detection */ pccb->flags |= AGTIAPI_INQUIRY; break; case TEST_UNIT_READY: case RESERVE: case RELEASE: case START_STOP: pccb->tiSuperScsiRequest.agSgl1.len = 0; pccb->dataLen = 0; break; case READ_6: case WRITE_6: /* Extract LBA */ start_lba = ((CDB[1] & 0x1f) << 16) | (CDB[2] << 8) | (CDB[3]); #ifdef HIALEAH_ENCRYPTION io_is_encryptable = 1; #endif break; case READ_10: case WRITE_10: case READ_12: case WRITE_12: /* Extract LBA */ start_lba = (CDB[2] << 24) | (CDB[3] << 16) | (CDB[4] << 8) | (CDB[5]); #ifdef HIALEAH_ENCRYPTION io_is_encryptable = 1; #endif break; case READ_16: case WRITE_16: /* Extract LBA */ start_lba = (CDB[2] << 24) | (CDB[3] << 16) | (CDB[4] << 8) | (CDB[5]); start_lba <<= 32; start_lba |= ((CDB[6] << 24) | (CDB[7] << 16) | (CDB[8] << 8) | (CDB[9])); #ifdef HIALEAH_ENCRYPTION io_is_encryptable = 1; #endif break; default: break; } /* fill device lun based one address mode */ agtiapi_SetLunField(pccb); if (pccb->targetId < 0 || pccb->targetId >= maxTargets) { pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailNoLogin; agtiapi_FreeCCB(pmcsc, pccb); ccb->ccb_h.status = CAM_DEV_NOT_THERE; // ## v. CAM_FUNC_NOTAVAIL xpt_done(ccb); pccb->ccb = NULL; return; } if (INDEX(pmcsc, pccb->targetId) >= maxTargets) { pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailNoLogin; agtiapi_FreeCCB(pmcsc, pccb); ccb->ccb_h.status = CAM_DEV_NOT_THERE; // ## v. CAM_FUNC_NOTAVAIL xpt_done(ccb); pccb->ccb = NULL; return; } pDev = &pmcsc->pDevList[INDEX(pmcsc, pccb->targetId)]; #if 1 if ((pmcsc->flags & EDC_DATA) && (pDev->flags & EDC_DATA)) { /* * EDC support: * * Possible command supported - * READ_6, READ_10, READ_12, READ_16, READ_LONG, READ_BUFFER, * READ_DEFECT_DATA, etc. * WRITE_6, WRITE_10, WRITE_12, WRITE_16, WRITE_LONG, WRITE_LONG2, * WRITE_BUFFER, WRITE_VERIFY, WRITE_VERIFY_12, etc. * * Do some data length adjustment and set chip operation instruction. 
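 * Under AGTIAPI_TEST_DIF the verify/insert DIF flags, the block size
 * selection, and the application/reference tags (derived from the CDB LBA
 * bytes) are also programmed in the switch below.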
*/ switch (CDB[0]) { case READ_6: case READ_10: case READ_12: case READ_16: // BUG_ON(pccb->tiSuperScsiRequest.flags & TI_SCSI_INITIATOR_ENCRYPT); #ifdef AGTIAPI_TEST_DIF pccb->tiSuperScsiRequest.flags |= TI_SCSI_INITIATOR_DIF; #endif pccb->flags |= EDC_DATA; #ifdef TEST_VERIFY_AND_FORWARD pccb->tiSuperScsiRequest.Dif.flags = DIF_VERIFY_FORWARD | DIF_UDT_REF_BLOCK_COUNT; if(pDev->sector_size == 520) { pScsiCmnd->expDataLength += (pccb->dataLen / 512) * 8; } else if(pDev->sector_size == 4104) { pScsiCmnd->expDataLength += (pccb->dataLen / 4096) * 8; } #else #ifdef AGTIAPI_TEST_DIF pccb->tiSuperScsiRequest.Dif.flags = DIF_VERIFY_DELETE | DIF_UDT_REF_BLOCK_COUNT; #endif #endif #ifdef AGTIAPI_TEST_DIF switch(pDev->sector_size) { case 528: pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_520 << 16 ); break; case 4104: pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_4096 << 16 ); break; case 4168: pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_4160 << 16 ); break; } if(pCard->flags & EDC_DATA_CRC) pccb->tiSuperScsiRequest.Dif.flags |= DIF_CRC_VERIFICATION; /* Turn on upper 4 bits of UVM */ pccb->tiSuperScsiRequest.Dif.flags |= 0x03c00000; #endif #ifdef AGTIAPI_TEST_DPL if(agtiapi_SetupDifPerLA(pCard, pccb, start_lba) < 0) { printk(KERN_ERR "SetupDifPerLA Failed.\n"); cmnd->result = SCSI_HOST(DID_ERROR); goto err; } pccb->tiSuperScsiRequest.Dif.enableDIFPerLA = TRUE; #endif #ifdef AGTIAPI_TEST_DIF /* Set App Tag */ pccb->tiSuperScsiRequest.Dif.udtArray[0] = 0xaa; pccb->tiSuperScsiRequest.Dif.udtArray[1] = 0xbb; /* Set LBA in UDT array */ if(CDB[0] == READ_6) { pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[3]; pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[2]; pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[1] & 0x1f; pccb->tiSuperScsiRequest.Dif.udtArray[5] = 0; } else if(CDB[0] == READ_10 || CDB[0] == READ_12) { pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[5]; pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[4]; pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[3]; pccb->tiSuperScsiRequest.Dif.udtArray[5] = CDB[2]; } else if(CDB[0] == READ_16) { pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[9]; pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[8]; pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[7]; pccb->tiSuperScsiRequest.Dif.udtArray[5] = CDB[6]; /* Note: 32 bits lost */ } #endif break; case WRITE_6: case WRITE_10: case WRITE_12: case WRITE_16: // BUG_ON(pccb->tiSuperScsiRequest.flags & TI_SCSI_INITIATOR_ENCRYPT); pccb->flags |= EDC_DATA; #ifdef AGTIAPI_TEST_DIF pccb->tiSuperScsiRequest.flags |= TI_SCSI_INITIATOR_DIF; pccb->tiSuperScsiRequest.Dif.flags = DIF_INSERT | DIF_UDT_REF_BLOCK_COUNT; switch(pDev->sector_size) { case 528: pccb->tiSuperScsiRequest.Dif.flags |= (DIF_BLOCK_SIZE_520 << 16); break; case 4104: pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_4096 << 16 ); break; case 4168: pccb->tiSuperScsiRequest.Dif.flags |= ( DIF_BLOCK_SIZE_4160 << 16 ); break; } /* Turn on upper 4 bits of UUM */ pccb->tiSuperScsiRequest.Dif.flags |= 0xf0000000; #endif #ifdef AGTIAPI_TEST_DPL if(agtiapi_SetupDifPerLA(pCard, pccb, start_lba) < 0) { printk(KERN_ERR "SetupDifPerLA Failed.\n"); cmnd->result = SCSI_HOST(DID_ERROR); goto err; } pccb->tiSuperScsiRequest.Dif.enableDIFPerLA = TRUE; #endif #ifdef AGTIAPI_TEST_DIF /* Set App Tag */ pccb->tiSuperScsiRequest.Dif.udtArray[0] = 0xaa; pccb->tiSuperScsiRequest.Dif.udtArray[1] = 0xbb; /* Set LBA in UDT array */ if(CDB[0] == WRITE_6) { pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[3]; pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[2]; 
pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[1] & 0x1f; } else if(CDB[0] == WRITE_10 || CDB[0] == WRITE_12) { pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[5]; pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[4]; pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[3]; pccb->tiSuperScsiRequest.Dif.udtArray[5] = CDB[2]; } else if(CDB[0] == WRITE_16) { pccb->tiSuperScsiRequest.Dif.udtArray[2] = CDB[5]; pccb->tiSuperScsiRequest.Dif.udtArray[3] = CDB[4]; pccb->tiSuperScsiRequest.Dif.udtArray[4] = CDB[3]; pccb->tiSuperScsiRequest.Dif.udtArray[5] = CDB[2]; /* Note: 32 bits lost */ } #endif break; } } #endif /* end of DIF */ if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { switch(csio->tag_action) { case MSG_HEAD_OF_Q_TAG: pScsiCmnd->taskAttribute = TASK_HEAD_OF_QUEUE; break; case MSG_ACA_TASK: pScsiCmnd->taskAttribute = TASK_ACA; break; case MSG_ORDERED_Q_TAG: pScsiCmnd->taskAttribute = TASK_ORDERED; break; case MSG_SIMPLE_Q_TAG: /* fall through */ default: pScsiCmnd->taskAttribute = TASK_SIMPLE; break; } } if (pccb->tiSuperScsiRequest.agSgl1.len != 0 && pccb->dataLen != 0) { /* should be just before start IO */ bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op); } /* * If assigned pDevHandle is not available * then there is no need to send it to StartIO() */ if (pccb->targetId < 0 || pccb->targetId >= maxTargets) { pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailNoLogin; agtiapi_FreeCCB(pmcsc, pccb); ccb->ccb_h.status = CAM_DEV_NOT_THERE; // ## v. CAM_FUNC_NOTAVAIL xpt_done(ccb); pccb->ccb = NULL; return; } TID = INDEX(pmcsc, pccb->targetId); if ((TID >= pmcsc->devDiscover) || !(pccb->devHandle = pmcsc->pDevList[TID].pDevHandle)) { /* AGTIAPI_PRINTK( "agtiapi_PrepareSGListCB: not sending ccb devH %p," " target %d tid %d/%d card %p ERROR pccb %p\n", pccb->devHandle, pccb->targetId, TID, pmcsc->devDiscover, pmcsc, pccb ); */ pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailNoLogin; agtiapi_FreeCCB(pmcsc, pccb); ccb->ccb_h.status = CAM_DEV_NOT_THERE; // ## v. CAM_FUNC_NOTAVAIL xpt_done(ccb); pccb->ccb = NULL; return; } AGTIAPI_IO( "agtiapi_PrepareSGListCB: send ccb pccb->devHandle %p, " "pccb->targetId %d TID %d pmcsc->devDiscover %d card %p\n", pccb->devHandle, pccb->targetId, TID, pmcsc->devDiscover, pmcsc ); #ifdef HIALEAH_ENCRYPTION if(pmcsc->encrypt && io_is_encryptable) { agtiapi_SetupEncryptedIO(pmcsc, pccb, start_lba); } else{ io_is_encryptable = 0; pccb->tiSuperScsiRequest.flags = 0; } #endif // put the request in send queue agtiapi_QueueCCB( pmcsc, &pmcsc->ccbSendHead, &pmcsc->ccbSendTail AG_CARD_LOCAL_LOCK(&pmcsc->sendLock), pccb ); agtiapi_StartIO(pmcsc); return; } /****************************************************************************** agtiapi_StartIO() Purpose: Send IO request down for processing. 
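  The pending send queue is detached under sendLock and each CCB is handed
  to tiINISuperIOStart() (or agtiapi_FastIOTest() when FAST_IO_TEST is
  defined); requests that come back busy, without a device, or in error are
  completed through agtiapi_Done().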
Parameters: (struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure Return: Note: ******************************************************************************/ STATIC void agtiapi_StartIO( struct agtiapi_softc *pmcsc ) { ccb_t *pccb; int TID; ag_device_t *targ; struct ccb_relsim crs; AGTIAPI_IO( "agtiapi_StartIO: start\n" ); AG_LOCAL_LOCK( &pmcsc->sendLock ); pccb = pmcsc->ccbSendHead; /* if link is down, do nothing */ if ((pccb == NULL) || pmcsc->flags & AGTIAPI_RESET) { AG_LOCAL_UNLOCK( &pmcsc->sendLock ); AGTIAPI_PRINTK( "agtiapi_StartIO: goto ext\n" ); goto ext; } if (pmcsc != NULL && pccb->targetId >= 0 && pccb->targetId < maxTargets) { TID = INDEX(pmcsc, pccb->targetId); targ = &pmcsc->pDevList[TID]; } /* clear send queue */ pmcsc->ccbSendHead = NULL; pmcsc->ccbSendTail = NULL; AG_LOCAL_UNLOCK( &pmcsc->sendLock ); /* send all ccbs down */ while (pccb) { pccb_t pccb_next; U32 status; pccb_next = pccb->pccbNext; pccb->pccbNext = NULL; if (!pccb->ccb) { AGTIAPI_PRINTK( "agtiapi_StartIO: pccb->ccb is NULL ERROR!\n" ); pccb = pccb_next; continue; } AG_IO_DUMPCCB( pccb ); if (!pccb->devHandle) { agtiapi_DumpCCB( pccb ); AGTIAPI_PRINTK( "agtiapi_StartIO: ccb NULL device ERROR!\n" ); pccb = pccb_next; continue; } AGTIAPI_IO( "agtiapi_StartIO: ccb %p retry %d\n", pccb, pccb->retryCount ); #ifndef ABORT_TEST if( !pccb->devHandle || !pccb->devHandle->osData || /* in rmmod case */ !(((ag_device_t *)(pccb->devHandle->osData))->flags & ACTIVE)) { AGTIAPI_PRINTK( "agtiapi_StartIO: device %p not active! ERROR\n", pccb->devHandle ); if( pccb->devHandle ) { AGTIAPI_PRINTK( "agtiapi_StartIO: device not active detail" " -- osData:%p\n", pccb->devHandle->osData ); if( pccb->devHandle->osData ) { AGTIAPI_PRINTK( "agtiapi_StartIO: more device not active detail" " -- active flag:%d\n", ( (ag_device_t *) (pccb->devHandle->osData))->flags & ACTIVE ); } } pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailNoLogin; agtiapi_Done( pmcsc, pccb ); pccb = pccb_next; continue; } #endif #ifdef FAST_IO_TEST status = agtiapi_FastIOTest( pmcsc, pccb ); #else status = tiINISuperIOStart( &pmcsc->tiRoot, &pccb->tiIORequest, pccb->devHandle, &pccb->tiSuperScsiRequest, (void *)&pccb->tdIOReqBody, tiInterruptContext ); #endif switch( status ) { case tiSuccess: /* static int squelchCount = 0; if ( 200000 == squelchCount++ ) // squelch prints { AGTIAPI_PRINTK( "agtiapi_StartIO: tiINIIOStart stat tiSuccess %p\n", pccb ); squelchCount = 0; // reset count } */ break; case tiDeviceBusy: AGTIAPI_PRINTK( "agtiapi_StartIO: tiINIIOStart status tiDeviceBusy %p\n", pccb->ccb ); #ifdef LOGEVENT agtiapi_LogEvent( pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "tiINIIOStart tiDeviceBusy " ); #endif pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDeviceBusy; agtiapi_Done(pmcsc, pccb); break; case tiBusy: AGTIAPI_PRINTK( "agtiapi_StartIO: tiINIIOStart status tiBusy %p\n", pccb->ccb ); #ifdef LOGEVENT agtiapi_LogEvent( pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "tiINIIOStart tiBusy " ); #endif pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiBusy; agtiapi_Done(pmcsc, pccb); break; case tiIONoDevice: AGTIAPI_PRINTK( "agtiapi_StartIO: tiINIIOStart status tiNoDevice %p " "ERROR\n", pccb->ccb ); #ifdef LOGEVENT agtiapi_LogEvent( pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "tiINIIOStart invalid device handle " ); #endif #ifndef ABORT_TEST /* return command back to OS due to no device available */ ((ag_device_t *)(pccb->devHandle->osData))->flags &= ~ACTIVE; pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = 
tiDetailNoLogin; agtiapi_Done(pmcsc, pccb); #else /* for short cable pull, we want IO retried - 3-18-2005 */ agtiapi_QueueCCB(pmcsc, &pmcsc->ccbSendHead, &pmcsc->ccbSendTail AG_CARD_LOCAL_LOCK(&pmcsc->sendLock), pccb); #endif break; case tiError: AGTIAPI_PRINTK("agtiapi_StartIO: tiINIIOStart status tiError %p\n", pccb->ccb); #ifdef LOGEVENT agtiapi_LogEvent(pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "tiINIIOStart tiError "); #endif pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailOtherError; agtiapi_Done(pmcsc, pccb); break; default: AGTIAPI_PRINTK("agtiapi_StartIO: tiINIIOStart status default %x %p\n", status, pccb->ccb); #ifdef LOGEVENT agtiapi_LogEvent(pmcsc, IOCTL_EVT_SEV_ERROR, 0, agNULL, 0, "tiINIIOStart unexpected status "); #endif pccb->ccbStatus = tiIOFailed; pccb->scsiStatus = tiDetailOtherError; agtiapi_Done(pmcsc, pccb); } pccb = pccb_next; } ext: /* some IO requests might have been completed */ AG_GET_DONE_PCCB(pccb, pmcsc); return; } /****************************************************************************** agtiapi_StartSMP() Purpose: Send SMP request down for processing. Parameters: (struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure Return: Note: ******************************************************************************/ STATIC void agtiapi_StartSMP(struct agtiapi_softc *pmcsc) { ccb_t *pccb; AGTIAPI_PRINTK("agtiapi_StartSMP: start\n"); AG_LOCAL_LOCK(&pmcsc->sendSMPLock); pccb = pmcsc->smpSendHead; /* if link is down, do nothing */ if ((pccb == NULL) || pmcsc->flags & AGTIAPI_RESET) { AG_LOCAL_UNLOCK(&pmcsc->sendSMPLock); AGTIAPI_PRINTK("agtiapi_StartSMP: goto ext\n"); goto ext; } /* clear send queue */ pmcsc->smpSendHead = NULL; pmcsc->smpSendTail = NULL; AG_LOCAL_UNLOCK(&pmcsc->sendSMPLock); /* send all ccbs down */ while (pccb) { pccb_t pccb_next; U32 status; pccb_next = pccb->pccbNext; pccb->pccbNext = NULL; if (!pccb->ccb) { AGTIAPI_PRINTK("agtiapi_StartSMP: pccb->ccb is NULL ERROR!\n"); pccb = pccb_next; continue; } if (!pccb->devHandle) { AGTIAPI_PRINTK("agtiapi_StartSMP: ccb NULL device ERROR!\n"); pccb = pccb_next; continue; } pccb->flags |= TAG_SMP; // mark as SMP for later tracking AGTIAPI_PRINTK( "agtiapi_StartSMP: ccb %p retry %d\n", pccb, pccb->retryCount ); status = tiINISMPStart( &pmcsc->tiRoot, &pccb->tiIORequest, pccb->devHandle, &pccb->tiSMPFrame, (void *)&pccb->tdIOReqBody, tiInterruptContext); switch (status) { case tiSuccess: break; case tiBusy: AGTIAPI_PRINTK("agtiapi_StartSMP: tiINISMPStart status tiBusy %p\n", pccb->ccb); /* pending ccb back to send queue */ agtiapi_QueueCCB(pmcsc, &pmcsc->smpSendHead, &pmcsc->smpSendTail AG_CARD_LOCAL_LOCK(&pmcsc->sendSMPLock), pccb); break; case tiError: AGTIAPI_PRINTK("agtiapi_StartIO: tiINIIOStart status tiError %p\n", pccb->ccb); pccb->ccbStatus = tiSMPFailed; agtiapi_SMPDone(pmcsc, pccb); break; default: AGTIAPI_PRINTK("agtiapi_StartIO: tiINIIOStart status default %x %p\n", status, pccb->ccb); pccb->ccbStatus = tiSMPFailed; agtiapi_SMPDone(pmcsc, pccb); } pccb = pccb_next; } ext: /* some SMP requests might have been completed */ AG_GET_DONE_SMP_PCCB(pccb, pmcsc); return; } #if __FreeBSD_version > 901000 /****************************************************************************** agtiapi_PrepareSMPSGList() Purpose: This function prepares scatter-gather list for the given ccb Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - success 1 - failure Note: 
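  Only a single request buffer and a single response buffer are supported;
  physical addresses and multi-entry SG lists are rejected by setting
  CAM_REQ_INVALID and returning tiReject.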
******************************************************************************/ static int agtiapi_PrepareSMPSGList( struct agtiapi_softc *pmcsc, ccb_t *pccb ) { /* Pointer to CAM's ccb */ union ccb *ccb = pccb->ccb; struct ccb_smpio *csmpio = &ccb->smpio; struct ccb_hdr *ccbh = &ccb->ccb_h; AGTIAPI_PRINTK("agtiapi_PrepareSMPSGList: start\n"); switch((ccbh->flags & CAM_DATA_MASK)) { case CAM_DATA_PADDR: case CAM_DATA_SG_PADDR: AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: Physical Address not supported\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return tiReject; case CAM_DATA_SG: /* * Currently we do not support Multiple SG list * return error for now */ if ( (csmpio->smp_request_sglist_cnt > 1) || (csmpio->smp_response_sglist_cnt > 1) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: Multiple SG list not supported\n"); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return tiReject; } } if ( csmpio->smp_request_sglist_cnt != 0 ) { /* * Virtual address that needs to translated into * one or more physical address ranges. */ int error; //AG_LOCAL_LOCK(&(pmcsc->pCardInfo->pmIOLock)); AGTIAPI_PRINTK("agtiapi_PrepareSGList: virtual address\n"); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csmpio->smp_request, csmpio->smp_request_len, agtiapi_PrepareSMPSGListCB, pccb, BUS_DMA_NOWAIT /* 0 */ ); //AG_LOCAL_UNLOCK(&(pmcsc->pCardInfo->pmIOLock)); if (error == EINPROGRESS) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. */ AGTIAPI_PRINTK( "agtiapi_PrepareSGList: EINPROGRESS\n" ); xpt_freeze_simq( pmcsc->sim, 1 ); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } if( csmpio->smp_response_sglist_cnt != 0 ) { /* * Virtual address that needs to translated into * one or more physical address ranges. */ int error; //AG_LOCAL_LOCK( &(pmcsc->pCardInfo->pmIOLock) ); AGTIAPI_PRINTK( "agtiapi_PrepareSGList: virtual address\n" ); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csmpio->smp_response, csmpio->smp_response_len, agtiapi_PrepareSMPSGListCB, pccb, BUS_DMA_NOWAIT /* 0 */ ); //AG_LOCAL_UNLOCK( &(pmcsc->pCardInfo->pmIOLock) ); if ( error == EINPROGRESS ) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. 
*/ AGTIAPI_PRINTK( "agtiapi_PrepareSGList: EINPROGRESS\n" ); xpt_freeze_simq( pmcsc->sim, 1 ); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } else { if ( (csmpio->smp_request_sglist_cnt == 0) && (csmpio->smp_response_sglist_cnt == 0) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: physical address\n" ); pccb->tiSMPFrame.outFrameBuf = (void *)csmpio->smp_request; pccb->tiSMPFrame.outFrameLen = csmpio->smp_request_len; pccb->tiSMPFrame.expectedRespLen = csmpio->smp_response_len; // 0xFF to be defined agtiapi_PrepareSMPSGListCB( pccb, NULL, 0, 0xAABBCCDD ); } pccb->tiSMPFrame.flag = 0; } return tiSuccess; } #else /****************************************************************************** agtiapi_PrepareSMPSGList() Purpose: This function prepares scatter-gather list for the given ccb Parameters: struct agtiapi_softc *pmsc (IN) Pointer to the HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - success 1 - failure Note: ******************************************************************************/ static int agtiapi_PrepareSMPSGList( struct agtiapi_softc *pmcsc, ccb_t *pccb ) { /* Pointer to CAM's ccb */ union ccb *ccb = pccb->ccb; struct ccb_smpio *csmpio = &ccb->smpio; struct ccb_hdr *ccbh = &ccb->ccb_h; AGTIAPI_PRINTK("agtiapi_PrepareSMPSGList: start\n"); if (ccbh->flags & (CAM_DATA_PHYS|CAM_SG_LIST_PHYS)) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: Physical Address " "not supported\n" ); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return tiReject;; } if (ccbh->flags & CAM_SCATTER_VALID) { /* * Currently we do not support Multiple SG list * return error for now */ if ( (csmpio->smp_request_sglist_cnt > 1) || (csmpio->smp_response_sglist_cnt > 1) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: Multiple SG list " "not supported\n" ); ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); return tiReject;; } if ( csmpio->smp_request_sglist_cnt != 0 ) { /* * Virtual address that needs to translated into * one or more physical address ranges. */ int error; //AG_LOCAL_LOCK(&(pmcsc->pCardInfo->pmIOLock)); AGTIAPI_PRINTK("agtiapi_PrepareSGList: virtual address\n"); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csmpio->smp_request, csmpio->smp_request_len, agtiapi_PrepareSMPSGListCB, pccb, BUS_DMA_NOWAIT /* 0 */ ); //AG_LOCAL_UNLOCK(&(pmcsc->pCardInfo->pmIOLock)); if (error == EINPROGRESS) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. */ AGTIAPI_PRINTK( "agtiapi_PrepareSGList: EINPROGRESS\n" ); xpt_freeze_simq( pmcsc->sim, 1 ); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } if( csmpio->smp_response_sglist_cnt != 0 ) { /* * Virtual address that needs to translated into * one or more physical address ranges. */ int error; //AG_LOCAL_LOCK( &(pmcsc->pCardInfo->pmIOLock) ); AGTIAPI_PRINTK( "agtiapi_PrepareSGList: virtual address\n" ); error = bus_dmamap_load( pmcsc->buffer_dmat, pccb->CCB_dmamap, csmpio->smp_response, csmpio->smp_response_len, agtiapi_PrepareSMPSGListCB, pccb, BUS_DMA_NOWAIT /* 0 */ ); //AG_LOCAL_UNLOCK( &(pmcsc->pCardInfo->pmIOLock) ); if ( error == EINPROGRESS ) { /* * So as to maintain ordering, * freeze the controller queue * until our mapping is * returned. 
*/ AGTIAPI_PRINTK( "agtiapi_PrepareSGList: EINPROGRESS\n" ); xpt_freeze_simq( pmcsc->sim, 1 ); pmcsc->SimQFrozen = agTRUE; ccbh->status |= CAM_RELEASE_SIMQ; } } } else { if ( (csmpio->smp_request_sglist_cnt == 0) && (csmpio->smp_response_sglist_cnt == 0) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGList: physical address\n" ); pccb->tiSMPFrame.outFrameBuf = (void *)csmpio->smp_request; pccb->tiSMPFrame.outFrameLen = csmpio->smp_request_len; pccb->tiSMPFrame.expectedRespLen = csmpio->smp_response_len; // 0xFF to be defined agtiapi_PrepareSMPSGListCB( pccb, NULL, 0, 0xAABBCCDD ); } pccb->tiSMPFrame.flag = 0; } return tiSuccess; } #endif /****************************************************************************** agtiapi_PrepareSMPSGListCB() Purpose: Callback function for bus_dmamap_load() This fuctions sends IO to LL layer. Parameters: void *arg (IN) Pointer to the HBA data structure bus_dma_segment_t *segs (IN) Pointer to dma segment int nsegs (IN) number of dma segment int error (IN) error Return: Note: ******************************************************************************/ static void agtiapi_PrepareSMPSGListCB( void *arg, bus_dma_segment_t *segs, int nsegs, int error ) { pccb_t pccb = arg; union ccb *ccb = pccb->ccb; struct agtiapi_softc *pmcsc; U32 TID = CMND_TO_TARGET(ccb); int status; tiDeviceHandle_t *tiExpDevHandle; tiPortalContext_t *tiExpPortalContext; ag_portal_info_t *tiExpPortalInfo; AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: start, nsegs %d error 0x%x\n", nsegs, error ); pmcsc = pccb->pmcsc; if ( error != tiSuccess ) { if (error == 0xAABBCCDD) { // do nothing } else { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: error status 0x%x\n", error ); bus_dmamap_unload( pmcsc->buffer_dmat, pccb->CCB_dmamap ); bus_dmamap_destroy( pmcsc->buffer_dmat, pccb->CCB_dmamap ); agtiapi_FreeCCB( pmcsc, pccb ); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done( ccb ); return; } } if ( nsegs > AGTIAPI_MAX_DMA_SEGS ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: over the limit. 
nsegs %d " "AGTIAPI_MAX_DMA_SEGS %d\n", nsegs, AGTIAPI_MAX_DMA_SEGS ); bus_dmamap_unload( pmcsc->buffer_dmat, pccb->CCB_dmamap ); bus_dmamap_destroy( pmcsc->buffer_dmat, pccb->CCB_dmamap ); agtiapi_FreeCCB( pmcsc, pccb ); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done( ccb ); return; } /* * If assigned pDevHandle is not available * then there is no need to send it to StartIO() */ /* TODO: Add check for deviceType */ if ( pccb->targetId < 0 || pccb->targetId >= maxTargets ) { agtiapi_FreeCCB( pmcsc, pccb ); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done(ccb); pccb->ccb = NULL; return; } TID = INDEX( pmcsc, pccb->targetId ); if ( (TID >= pmcsc->devDiscover) || !(pccb->devHandle = pmcsc->pDevList[TID].pDevHandle) ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: not sending ccb devH %p, " "target %d tid %d/%d " "card %p ERROR pccb %p\n", pccb->devHandle, pccb->targetId, TID, pmcsc->devDiscover, pmcsc, pccb ); agtiapi_FreeCCB( pmcsc, pccb ); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done( ccb ); pccb->ccb = NULL; return; } /* TODO: add indirect handling */ /* set the flag correctly based on Indiret SMP request and responce */ AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: send ccb pccb->devHandle %p, " "pccb->targetId %d TID %d pmcsc->devDiscover %d card %p\n", pccb->devHandle, pccb->targetId, TID, pmcsc->devDiscover, pmcsc ); tiExpDevHandle = pccb->devHandle; tiExpPortalInfo = pmcsc->pDevList[TID].pPortalInfo; tiExpPortalContext = &tiExpPortalInfo->tiPortalContext; /* Look for the expander associated with the ses device */ status = tiINIGetExpander( &pmcsc->tiRoot, tiExpPortalContext, pccb->devHandle, &tiExpDevHandle ); if ( status != tiSuccess ) { AGTIAPI_PRINTK( "agtiapi_PrepareSMPSGListCB: Error getting Expander " "device\n" ); agtiapi_FreeCCB( pmcsc, pccb ); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; xpt_done( ccb ); pccb->ccb = NULL; return; } /* this is expander device */ pccb->devHandle = tiExpDevHandle; /* put the request in send queue */ agtiapi_QueueCCB( pmcsc, &pmcsc->smpSendHead, &pmcsc->smpSendTail AG_CARD_LOCAL_LOCK(&pmcsc->sendSMPLock), pccb ); agtiapi_StartSMP( pmcsc ); return; } /****************************************************************************** agtiapi_Done() Purpose: Processing completed ccbs Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: ******************************************************************************/ STATIC void agtiapi_Done(struct agtiapi_softc *pmcsc, ccb_t *pccb) { pccb_t pccb_curr = pccb; pccb_t pccb_next; tiIniScsiCmnd_t *cmnd; union ccb * ccb; AGTIAPI_IO("agtiapi_Done: start\n"); while (pccb_curr) { /* start from 1st ccb in the chain */ pccb_next = pccb_curr->pccbNext; if (agtiapi_CheckError(pmcsc, pccb_curr) != 0) { /* send command back and release the ccb */ cmnd = &pccb_curr->tiSuperScsiRequest.scsiCmnd; if (cmnd->cdb[0] == RECEIVE_DIAGNOSTIC) { AGTIAPI_PRINTK("agtiapi_Done: RECEIVE_DIAG pg %d id %d cmnd %p pccb " "%p\n", cmnd->cdb[2], pccb_curr->targetId, cmnd, pccb_curr); } CMND_DMA_UNMAP(pmcsc, ccb); /* send the request back to the CAM */ ccb = pccb_curr->ccb; agtiapi_FreeCCB(pmcsc, pccb_curr); xpt_done(ccb); } pccb_curr = pccb_next; } return; } /****************************************************************************** agtiapi_SMPDone() Purpose: Processing completed ccbs Parameters: struct agtiapi_softc *pmcsc (IN) Ponter to HBA data structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: 
******************************************************************************/ STATIC void agtiapi_SMPDone(struct agtiapi_softc *pmcsc, ccb_t *pccb) { pccb_t pccb_curr = pccb; pccb_t pccb_next; union ccb * ccb; AGTIAPI_PRINTK("agtiapi_SMPDone: start\n"); while (pccb_curr) { /* start from 1st ccb in the chain */ pccb_next = pccb_curr->pccbNext; if (agtiapi_CheckSMPError(pmcsc, pccb_curr) != 0) { CMND_DMA_UNMAP(pmcsc, ccb); /* send the request back to the CAM */ ccb = pccb_curr->ccb; agtiapi_FreeSMPCCB(pmcsc, pccb_curr); xpt_done(ccb); } pccb_curr = pccb_next; } AGTIAPI_PRINTK("agtiapi_SMPDone: Done\n"); return; } /****************************************************************************** agtiapi_hexdump() Purpose: Utility function for dumping in hex Parameters: const char *ptitle (IN) A string to be printed bit8 *pbuf (IN) A pointer to a buffer to be printed. int len (IN) The lengther of the buffer Return: Note: ******************************************************************************/ void agtiapi_hexdump(const char *ptitle, bit8 *pbuf, int len) { int i; AGTIAPI_PRINTK("%s - hexdump(len=%d):\n", ptitle, (int)len); if (!pbuf) { AGTIAPI_PRINTK("pbuf is NULL\n"); return; } for (i = 0; i < len; ) { if (len - i > 4) { AGTIAPI_PRINTK( " 0x%02x, 0x%02x, 0x%02x, 0x%02x,\n", pbuf[i], pbuf[i+1], pbuf[i+2], pbuf[i+3] ); i += 4; } else { AGTIAPI_PRINTK(" 0x%02x,", pbuf[i]); i++; } } AGTIAPI_PRINTK("\n"); } /****************************************************************************** agtiapi_CheckError() Purpose: Processes status pertaining to the ccb -- whether it was completed successfully, aborted, or error encountered. Parameters: ag_card_t *pCard (IN) Pointer to HBA data structure ccb_t *pccd (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - the command retry is required 1 - the command process is completed Note: ******************************************************************************/ STATIC U32 agtiapi_CheckError(struct agtiapi_softc *pmcsc, ccb_t *pccb) { ag_device_t *pDevice; // union ccb * ccb = pccb->ccb; union ccb * ccb; int is_error, TID; if (pccb == NULL) { return 0; } ccb = pccb->ccb; AGTIAPI_IO("agtiapi_CheckError: start\n"); if (ccb == NULL) { /* shouldn't be here but just in case we do */ AGTIAPI_PRINTK("agtiapi_CheckError: CCB orphan = %p ERROR\n", pccb); agtiapi_FreeCCB(pmcsc, pccb); return 0; } is_error = 1; pDevice = NULL; if (pmcsc != NULL && pccb->targetId >= 0 && pccb->targetId < maxTargets) { if (pmcsc->pWWNList != NULL) { TID = INDEX(pmcsc, pccb->targetId); if (TID < maxTargets) { pDevice = &pmcsc->pDevList[TID]; if (pDevice != NULL) { is_error = 0; } } } } if (is_error) { AGTIAPI_PRINTK("agtiapi_CheckError: pDevice == NULL\n"); agtiapi_FreeCCB(pmcsc, pccb); return 0; } /* SCSI status */ ccb->csio.scsi_status = pccb->scsiStatus; if(pDevice->CCBCount > 0){ atomic_subtract_int(&pDevice->CCBCount,1); } AG_LOCAL_LOCK(&pmcsc->freezeLock); if(pmcsc->freezeSim == agTRUE) { pmcsc->freezeSim = agFALSE; xpt_release_simq(pmcsc->sim, 1); } AG_LOCAL_UNLOCK(&pmcsc->freezeLock); switch (pccb->ccbStatus) { case tiIOSuccess: AGTIAPI_IO("agtiapi_CheckError: tiIOSuccess pccb %p\n", pccb); /* CAM status */ if (pccb->scsiStatus == SCSI_STATUS_OK) { ccb->ccb_h.status = CAM_REQ_CMP; } else if (pccb->scsiStatus == SCSI_TASK_ABORTED) { ccb->ccb_h.status = CAM_REQ_ABORTED; } else { ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; } if (ccb->csio.scsi_status == SCSI_CHECK_CONDITION) { ccb->ccb_h.status |= CAM_AUTOSNS_VALID; } break; case tiIOOverRun: 
AGTIAPI_PRINTK("agtiapi_CheckError: tiIOOverRun pccb %p\n", pccb); /* resid is ignored for this condition */ ccb->csio.resid = 0; ccb->ccb_h.status = CAM_DATA_RUN_ERR; break; case tiIOUnderRun: AGTIAPI_PRINTK("agtiapi_CheckError: tiIOUnderRun pccb %p\n", pccb); ccb->csio.resid = pccb->scsiStatus; ccb->ccb_h.status = CAM_REQ_CMP; ccb->csio.scsi_status = SCSI_STATUS_OK; break; case tiIOFailed: AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed %d id %d ERROR\n", pccb, pccb->scsiStatus, pccb->targetId ); if (pccb->scsiStatus == tiDeviceBusy) { AGTIAPI_IO( "agtiapi_CheckError: pccb %p tiIOFailed - tiDetailBusy\n", pccb ); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_REQUEUE_REQ; if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); } } else if(pccb->scsiStatus == tiBusy) { AG_LOCAL_LOCK(&pmcsc->freezeLock); if(pmcsc->freezeSim == agFALSE) { pmcsc->freezeSim = agTRUE; xpt_freeze_simq(pmcsc->sim, 1); } AG_LOCAL_UNLOCK(&pmcsc->freezeLock); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; ccb->ccb_h.status |= CAM_REQUEUE_REQ; } else if (pccb->scsiStatus == tiDetailNoLogin) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailNoLogin ERROR\n", pccb ); ccb->ccb_h.status = CAM_DEV_NOT_THERE; } else if (pccb->scsiStatus == tiDetailNotValid) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailNotValid ERROR\n", pccb ); ccb->ccb_h.status = CAM_REQ_INVALID; } else if (pccb->scsiStatus == tiDetailAbortLogin) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailAbortLogin ERROR\n", pccb ); ccb->ccb_h.status = CAM_REQ_ABORTED; } else if (pccb->scsiStatus == tiDetailAbortReset) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailAbortReset ERROR\n", pccb ); ccb->ccb_h.status = CAM_REQ_ABORTED; } else if (pccb->scsiStatus == tiDetailAborted) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailAborted ERROR\n", pccb ); ccb->ccb_h.status = CAM_REQ_ABORTED; } else if (pccb->scsiStatus == tiDetailOtherError) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailOtherError ERROR\n", pccb ); ccb->ccb_h.status = CAM_REQ_ABORTED; } break; case tiIODifError: AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed %d id %d ERROR\n", pccb, pccb->scsiStatus, pccb->targetId ); if (pccb->scsiStatus == tiDetailDifAppTagMismatch) { AGTIAPI_IO( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailDifAppTagMismatch\n", pccb ); ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else if (pccb->scsiStatus == tiDetailDifRefTagMismatch) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailDifRefTagMismatch\n", pccb ); ccb->ccb_h.status = CAM_REQ_CMP_ERR; } else if (pccb->scsiStatus == tiDetailDifCrcMismatch) { AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed - " "tiDetailDifCrcMismatch\n", pccb ); ccb->ccb_h.status = CAM_REQ_CMP_ERR; } break; #ifdef HIALEAH_ENCRYPTION case tiIOEncryptError: AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOFailed %d id %d ERROR\n", pccb, pccb->scsiStatus, pccb->targetId ); if (pccb->scsiStatus == tiDetailDekKeyCacheMiss) { AGTIAPI_PRINTK( "agtiapi_CheckError: %s: pccb %p tiIOFailed - " "tiDetailDekKeyCacheMiss ERROR\n", __FUNCTION__, pccb ); ccb->ccb_h.status = CAM_REQ_ABORTED; agtiapi_HandleEncryptedIOFailure(pDevice, pccb); } else if (pccb->scsiStatus == tiDetailDekIVMismatch) { AGTIAPI_PRINTK( "agtiapi_CheckError: %s: pccb %p tiIOFailed - " "tiDetailDekIVMismatch ERROR\n", __FUNCTION__, 
pccb ); ccb->ccb_h.status = CAM_REQ_ABORTED; agtiapi_HandleEncryptedIOFailure(pDevice, pccb); } break; #endif default: AGTIAPI_PRINTK( "agtiapi_CheckError: pccb %p tiIOdefault %d id %d ERROR\n", pccb, pccb->ccbStatus, pccb->targetId ); ccb->ccb_h.status = CAM_REQ_CMP_ERR; break; } return 1; } /****************************************************************************** agtiapi_SMPCheckError() Purpose: Processes status pertaining to the ccb -- whether it was completed successfully, aborted, or error encountered. Parameters: ag_card_t *pCard (IN) Pointer to HBA data structure ccb_t *pccd (IN) A pointer to the driver's own CCB, not CAM's CCB Return: 0 - the command retry is required 1 - the command process is completed Note: ******************************************************************************/ STATIC U32 agtiapi_CheckSMPError( struct agtiapi_softc *pmcsc, ccb_t *pccb ) { union ccb * ccb = pccb->ccb; AGTIAPI_PRINTK("agtiapi_CheckSMPError: start\n"); if (!ccb) { /* shouldn't be here but just in case we do */ AGTIAPI_PRINTK( "agtiapi_CheckSMPError: CCB orphan = %p ERROR\n", pccb ); agtiapi_FreeSMPCCB(pmcsc, pccb); return 0; } switch (pccb->ccbStatus) { case tiSMPSuccess: AGTIAPI_PRINTK( "agtiapi_CheckSMPError: tiSMPSuccess pccb %p\n", pccb ); /* CAM status */ ccb->ccb_h.status = CAM_REQ_CMP; break; case tiSMPFailed: AGTIAPI_PRINTK( "agtiapi_CheckSMPError: tiSMPFailed pccb %p\n", pccb ); /* CAM status */ ccb->ccb_h.status = CAM_REQ_CMP_ERR; break; default: AGTIAPI_PRINTK( "agtiapi_CheckSMPError: pccb %p tiSMPdefault %d " "id %d ERROR\n", pccb, pccb->ccbStatus, pccb->targetId ); ccb->ccb_h.status = CAM_REQ_CMP_ERR; break; } return 1; } /****************************************************************************** agtiapi_HandleEncryptedIOFailure(): Purpose: Parameters: Return: Note: Currently not used. ******************************************************************************/ void agtiapi_HandleEncryptedIOFailure(ag_device_t *pDev, ccb_t *pccb) { AGTIAPI_PRINTK("agtiapi_HandleEncryptedIOFailure: start\n"); return; } /****************************************************************************** agtiapi_Retry() Purpose: Retry a ccb. Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to the HBA structure ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: Currently not used. 
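  If re-enabled, the CCB's status fields are cleared, it is marked
  ACTIVE | AGTIAPI_RETRY with a fresh start time, and it is placed back on
  the regular send queue.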
******************************************************************************/ STATIC void agtiapi_Retry(struct agtiapi_softc *pmcsc, ccb_t *pccb) { pccb->retryCount++; pccb->flags = ACTIVE | AGTIAPI_RETRY; pccb->ccbStatus = 0; pccb->scsiStatus = 0; pccb->startTime = ticks; AGTIAPI_PRINTK( "agtiapi_Retry: start\n" ); AGTIAPI_PRINTK( "agtiapi_Retry: ccb %p retry %d flgs x%x\n", pccb, pccb->retryCount, pccb->flags ); agtiapi_QueueCCB(pmcsc, &pmcsc->ccbSendHead, &pmcsc->ccbSendTail AG_CARD_LOCAL_LOCK(&pmcsc->sendLock), pccb); return; } /****************************************************************************** agtiapi_DumpCCB() Purpose: Dump CCB for debuging Parameters: ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: ******************************************************************************/ STATIC void agtiapi_DumpCCB(ccb_t *pccb) { AGTIAPI_PRINTK("agtiapi_DumpCCB: pccb %p, devHandle %p, tid %d, lun %d\n", pccb, pccb->devHandle, pccb->targetId, pccb->lun); AGTIAPI_PRINTK("flag 0x%x, add_mode 0x%x, ccbStatus 0x%x, scsiStatus 0x%x\n", pccb->flags, pccb->addrMode, pccb->ccbStatus, pccb->scsiStatus); AGTIAPI_PRINTK("scsi comand = 0x%x, numSgElements = %d\n", pccb->tiSuperScsiRequest.scsiCmnd.cdb[0], pccb->numSgElements); AGTIAPI_PRINTK("dataLen = 0x%x, sens_len = 0x%x\n", pccb->dataLen, pccb->senseLen); AGTIAPI_PRINTK("tiSuperScsiRequest:\n"); AGTIAPI_PRINTK("scsiCmnd: expDataLength 0x%x, taskAttribute 0x%x\n", pccb->tiSuperScsiRequest.scsiCmnd.expDataLength, pccb->tiSuperScsiRequest.scsiCmnd.taskAttribute); AGTIAPI_PRINTK("cdb[0] = 0x%x, cdb[1] = 0x%x, cdb[2] = 0x%x, cdb[3] = 0x%x\n", pccb->tiSuperScsiRequest.scsiCmnd.cdb[0], pccb->tiSuperScsiRequest.scsiCmnd.cdb[1], pccb->tiSuperScsiRequest.scsiCmnd.cdb[2], pccb->tiSuperScsiRequest.scsiCmnd.cdb[3]); AGTIAPI_PRINTK("cdb[4] = 0x%x, cdb[5] = 0x%x, cdb[6] = 0x%x, cdb[7] = 0x%x\n", pccb->tiSuperScsiRequest.scsiCmnd.cdb[4], pccb->tiSuperScsiRequest.scsiCmnd.cdb[5], pccb->tiSuperScsiRequest.scsiCmnd.cdb[6], pccb->tiSuperScsiRequest.scsiCmnd.cdb[7]); AGTIAPI_PRINTK( "cdb[8] = 0x%x, cdb[9] = 0x%x, cdb[10] = 0x%x, " "cdb[11] = 0x%x\n", pccb->tiSuperScsiRequest.scsiCmnd.cdb[8], pccb->tiSuperScsiRequest.scsiCmnd.cdb[9], pccb->tiSuperScsiRequest.scsiCmnd.cdb[10], pccb->tiSuperScsiRequest.scsiCmnd.cdb[11] ); AGTIAPI_PRINTK("agSgl1: upper 0x%x, lower 0x%x, len 0x%x, type %d\n", pccb->tiSuperScsiRequest.agSgl1.upper, pccb->tiSuperScsiRequest.agSgl1.lower, pccb->tiSuperScsiRequest.agSgl1.len, pccb->tiSuperScsiRequest.agSgl1.type); } /****************************************************************************** agtiapi_eh_HostReset() Purpose: A new error handler of Host Reset command. Parameters: scsi_cmnd *cmnd (IN) Pointer to a command to the HBA to be reset Return: SUCCESS - success FAILED - fail Note: ******************************************************************************/ int agtiapi_eh_HostReset( struct agtiapi_softc *pmcsc, union ccb *cmnd ) { AGTIAPI_PRINTK( "agtiapi_eh_HostReset: ccb pointer %p\n", cmnd ); if( cmnd == NULL ) { printf( "agtiapi_eh_HostReset: null command, skipping reset.\n" ); return tiInvalidHandle; } #ifdef LOGEVENT agtiapi_LogEvent( pmcsc, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "agtiapi_eh_HostReset! 
" ); #endif return agtiapi_DoSoftReset( pmcsc ); } int agtiapi_eh_DeviceReset( struct agtiapi_softc *pmcsc, union ccb *cmnd ) { AGTIAPI_PRINTK( "agtiapi_eh_HostReset: ccb pointer %p\n", cmnd ); if( cmnd == NULL ) { printf( "agtiapi_eh_HostReset: null command, skipping reset.\n" ); return tiInvalidHandle; } return agtiapi_DoSoftReset( pmcsc ); } /****************************************************************************** agtiapi_QueueCCB() Purpose: Put ccb in ccb queue at the tail Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure pccb_t *phead (IN) Double pointer to ccb queue head pccb_t *ptail (IN) Double pointer to ccb queue tail ccb_t *pccb (IN) Poiner to a ccb to be queued Return: Note: Put the ccb to the tail of queue ******************************************************************************/ STATIC void agtiapi_QueueCCB( struct agtiapi_softc *pmcsc, pccb_t *phead, pccb_t *ptail, #ifdef AGTIAPI_LOCAL_LOCK struct mtx *mutex, #endif ccb_t *pccb ) { AGTIAPI_IO( "agtiapi_QueueCCB: start\n" ); AGTIAPI_IO( "agtiapi_QueueCCB: %p to %p\n", pccb, phead ); if (phead == NULL || ptail == NULL) { panic( "agtiapi_QueueCCB: phead %p ptail %p", phead, ptail ); } pccb->pccbNext = NULL; AG_LOCAL_LOCK( mutex ); if (*phead == NULL) { //WARN_ON(*ptail != NULL); /* critical, just get more logs */ *phead = pccb; } else { //WARN_ON(*ptail == NULL); /* critical, just get more logs */ if (*ptail) (*ptail)->pccbNext = pccb; } *ptail = pccb; AG_LOCAL_UNLOCK( mutex ); return; } /****************************************************************************** agtiapi_QueueCCB() Purpose: Parameters: Return: Note: ******************************************************************************/ static int agtiapi_QueueSMP(struct agtiapi_softc *pmcsc, union ccb * ccb) { pccb_t pccb = agNULL; /* call dequeue */ int status = tiSuccess; int targetID = xpt_path_target_id(ccb->ccb_h.path); AGTIAPI_PRINTK("agtiapi_QueueSMP: start\n"); /* get a ccb */ if ((pccb = agtiapi_GetCCB(pmcsc)) == NULL) { AGTIAPI_PRINTK("agtiapi_QueueSMP: GetCCB ERROR\n"); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); return tiBusy; } pccb->pmcsc = pmcsc; /* initialize Command Control Block (CCB) */ pccb->targetId = targetID; pccb->ccb = ccb; /* for struct scsi_cmnd */ status = agtiapi_PrepareSMPSGList(pmcsc, pccb); if (status != tiSuccess) { AGTIAPI_PRINTK("agtiapi_QueueSMP: agtiapi_PrepareSMPSGList failure\n"); agtiapi_FreeCCB(pmcsc, pccb); if (status == tiReject) { ccb->ccb_h.status = CAM_REQ_INVALID; } else { ccb->ccb_h.status = CAM_REQ_CMP; } xpt_done(ccb); return tiError; } return status; } /****************************************************************************** agtiapi_SetLunField() Purpose: Set LUN field based on different address mode Parameters: ccb_t *pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Return: Note: ******************************************************************************/ void agtiapi_SetLunField(ccb_t *pccb) { U08 *pchar; pchar = (U08 *)&pccb->tiSuperScsiRequest.scsiCmnd.lun; // AGTIAPI_PRINTK("agtiapi_SetLunField: start\n"); switch (pccb->addrMode) { case AGTIAPI_PERIPHERAL: *pchar++ = 0; *pchar = (U08)pccb->lun; break; case AGTIAPI_VOLUME_SET: *pchar++ = (AGTIAPI_VOLUME_SET << AGTIAPI_ADDRMODE_SHIFT) | (U08)((pccb->lun >> 8) & 0x3F); *pchar = (U08)pccb->lun; break; case AGTIAPI_LUN_ADDR: *pchar++ = (AGTIAPI_LUN_ADDR << AGTIAPI_ADDRMODE_SHIFT) | pccb->targetId; *pchar = (U08)pccb->lun; break; } } 
/***************************************************************************** agtiapi_FreeCCB() Purpose: Free a ccb and put it back to ccbFreeList. Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure pccb_t pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Returns: Note: *****************************************************************************/ STATIC void agtiapi_FreeCCB(struct agtiapi_softc *pmcsc, pccb_t pccb) { union ccb *ccb = pccb->ccb; bus_dmasync_op_t op; AG_LOCAL_LOCK(&pmcsc->ccbLock); AGTIAPI_IO( "agtiapi_FreeCCB: start %p\n", pccb ); #ifdef AGTIAPI_TEST_EPL tiEncrypt_t *encrypt; #endif agtiapi_DumpCDB( "agtiapi_FreeCCB", pccb ); if (pccb->sgList != agNULL) { AGTIAPI_IO( "agtiapi_FreeCCB: pccb->sgList is NOT null\n" ); } else { AGTIAPI_PRINTK( "agtiapi_FreeCCB: pccb->sgList is null\n" ); } /* set data transfer direction */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { op = BUS_DMASYNC_POSTWRITE; } else { op = BUS_DMASYNC_POSTREAD; } if (pccb->numSgElements == 0) { // do nothing AGTIAPI_IO( "agtiapi_FreeCCB: numSgElements zero\n" ); } else if (pccb->numSgElements == 1) { AGTIAPI_IO( "agtiapi_FreeCCB: numSgElements is one\n" ); //op is either BUS_DMASYNC_POSTWRITE or BUS_DMASYNC_POSTREAD bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op); bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap); } else { AGTIAPI_PRINTK( "agtiapi_FreeCCB: numSgElements 2 or higher \n" ); //op is either BUS_DMASYNC_POSTWRITE or BUS_DMASYNC_POSTREAD bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op); bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap); } #ifdef AGTIAPI_TEST_DPL if (pccb->tiSuperScsiRequest.Dif.enableDIFPerLA == TRUE) { if(pccb->dplPtr) memset( (char *) pccb->dplPtr, 0, MAX_DPL_REGIONS * sizeof(dplaRegion_t) ); pccb->tiSuperScsiRequest.Dif.enableDIFPerLA = FALSE; pccb->tiSuperScsiRequest.Dif.DIFPerLAAddrLo = 0; pccb->tiSuperScsiRequest.Dif.DIFPerLAAddrHi = 0; } #endif #ifdef AGTIAPI_TEST_EPL encrypt = &pccb->tiSuperScsiRequest.Encrypt; if (encrypt->enableEncryptionPerLA == TRUE) { encrypt->enableEncryptionPerLA = FALSE; encrypt->EncryptionPerLAAddrLo = 0; encrypt->EncryptionPerLAAddrHi = 0; } #endif #ifdef ENABLE_SATA_DIF if (pccb->holePtr && pccb->dmaHandleHole) pci_free_consistent( pmcsc->pCardInfo->pPCIDev, 512, pccb->holePtr, pccb->dmaHandleHole ); pccb->holePtr = 0; pccb->dmaHandleHole = 0; #endif pccb->dataLen = 0; pccb->retryCount = 0; pccb->ccbStatus = 0; pccb->scsiStatus = 0; pccb->startTime = 0; pccb->dmaHandle = 0; pccb->numSgElements = 0; pccb->tiIORequest.tdData = 0; memset((void *)&pccb->tiSuperScsiRequest, 0, AGSCSI_INIT_XCHG_LEN); #ifdef HIALEAH_ENCRYPTION if (pmcsc->encrypt) agtiapi_CleanupEncryptedIO(pmcsc, pccb); #endif pccb->flags = 0; pccb->ccb = NULL; pccb->pccbIO = NULL; pccb->pccbNext = (pccb_t)pmcsc->ccbFreeList; pmcsc->ccbFreeList = (caddr_t *)pccb; pmcsc->activeCCB--; AG_LOCAL_UNLOCK(&pmcsc->ccbLock); return; } /****************************************************************************** agtiapi_FlushCCBs() Purpose: Flush all in processed ccbs. 
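  Every CCB on ccbChainList that is still in use is released with the free
  routine matching its type (task management, SMP, or regular I/O); for the
  non-TM cases, when flag is AGTIAPI_CALLBACK the associated CAM ccb is
  completed with CAM_SCSI_BUS_RESET status.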
Parameters: ag_card_t *pCard (IN) Pointer to HBA data structure U32 flag (IN) Flag to call back Return: Note: ******************************************************************************/ STATIC void agtiapi_FlushCCBs( struct agtiapi_softc *pCard, U32 flag ) { union ccb *ccb; ccb_t *pccb; AGTIAPI_PRINTK( "agtiapi_FlushCCBs: enter \n" ); for( pccb = (pccb_t)pCard->ccbChainList; pccb != NULL; pccb = pccb->pccbChainNext ) { if( pccb->flags == 0 ) { // printf( "agtiapi_FlushCCBs: nothing, continue \n" ); continue; } ccb = pccb->ccb; if ( pccb->flags & ( TASK_MANAGEMENT | DEV_RESET ) ) { AGTIAPI_PRINTK( "agtiapi_FlushCCBs: agtiapi_FreeTMCCB \n" ); agtiapi_FreeTMCCB( pCard, pccb ); } else { if ( pccb->flags & TAG_SMP ) { AGTIAPI_PRINTK( "agtiapi_FlushCCBs: agtiapi_FreeSMPCCB \n" ); agtiapi_FreeSMPCCB( pCard, pccb ); } else { AGTIAPI_PRINTK( "agtiapi_FlushCCBs: agtiapi_FreeCCB \n" ); agtiapi_FreeCCB( pCard, pccb ); } if( ccb ) { CMND_DMA_UNMAP( pCard, ccb ); if( flag == AGTIAPI_CALLBACK ) { ccb->ccb_h.status = CAM_SCSI_BUS_RESET; xpt_done( ccb ); } } } } } /***************************************************************************** agtiapi_FreeSMPCCB() Purpose: Free a ccb and put it back to ccbFreeList. Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure pccb_t pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Returns: Note: *****************************************************************************/ STATIC void agtiapi_FreeSMPCCB(struct agtiapi_softc *pmcsc, pccb_t pccb) { union ccb *ccb = pccb->ccb; bus_dmasync_op_t op; AG_LOCAL_LOCK(&pmcsc->ccbLock); AGTIAPI_PRINTK("agtiapi_FreeSMPCCB: start %p\n", pccb); /* set data transfer direction */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { op = BUS_DMASYNC_POSTWRITE; } else { op = BUS_DMASYNC_POSTREAD; } if (pccb->numSgElements == 0) { // do nothing AGTIAPI_PRINTK("agtiapi_FreeSMPCCB: numSgElements 0\n"); } else if (pccb->numSgElements == 1) { AGTIAPI_PRINTK("agtiapi_FreeSMPCCB: numSgElements 1\n"); //op is either BUS_DMASYNC_POSTWRITE or BUS_DMASYNC_POSTREAD bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op); bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap); } else { AGTIAPI_PRINTK("agtiapi_FreeSMPCCB: numSgElements 2 or higher \n"); //op is either BUS_DMASYNC_POSTWRITE or BUS_DMASYNC_POSTREAD bus_dmamap_sync(pmcsc->buffer_dmat, pccb->CCB_dmamap, op); bus_dmamap_unload(pmcsc->buffer_dmat, pccb->CCB_dmamap); } /*dma api cleanning*/ pccb->dataLen = 0; pccb->retryCount = 0; pccb->ccbStatus = 0; pccb->startTime = 0; pccb->dmaHandle = 0; pccb->numSgElements = 0; pccb->tiIORequest.tdData = 0; memset((void *)&pccb->tiSMPFrame, 0, AGSMP_INIT_XCHG_LEN); pccb->flags = 0; pccb->ccb = NULL; pccb->pccbNext = (pccb_t)pmcsc->ccbFreeList; pmcsc->ccbFreeList = (caddr_t *)pccb; pmcsc->activeCCB--; AG_LOCAL_UNLOCK(&pmcsc->ccbLock); return; } /***************************************************************************** agtiapi_FreeTMCCB() Purpose: Free a ccb and put it back to ccbFreeList. 
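  Unlike agtiapi_FreeCCB(), no DMA map sync or unload is performed here;
  task-management requests carry no data buffer to unmap.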
Parameters: struct agtiapi_softc *pmcsc (IN) Pointer to HBA data structure pccb_t pccb (IN) A pointer to the driver's own CCB, not CAM's CCB Returns: Note: *****************************************************************************/ STATIC void agtiapi_FreeTMCCB(struct agtiapi_softc *pmcsc, pccb_t pccb) { AG_LOCAL_LOCK(&pmcsc->ccbLock); AGTIAPI_PRINTK("agtiapi_FreeTMCCB: start %p\n", pccb); pccb->dataLen = 0; pccb->retryCount = 0; pccb->ccbStatus = 0; pccb->scsiStatus = 0; pccb->startTime = 0; pccb->dmaHandle = 0; pccb->numSgElements = 0; pccb->tiIORequest.tdData = 0; memset((void *)&pccb->tiSuperScsiRequest, 0, AGSCSI_INIT_XCHG_LEN); pccb->flags = 0; pccb->ccb = NULL; pccb->pccbIO = NULL; pccb->pccbNext = (pccb_t)pmcsc->ccbFreeList; pmcsc->ccbFreeList = (caddr_t *)pccb; pmcsc->activeCCB--; AG_LOCAL_UNLOCK(&pmcsc->ccbLock); return; } /****************************************************************************** agtiapi_CheckAllVectors(): Purpose: Parameters: Return: Note: Currently, not used. ******************************************************************************/ void agtiapi_CheckAllVectors( struct agtiapi_softc *pCard, bit32 context ) { #ifdef SPC_MSIX_INTR if (!agtiapi_intx_mode) { int i; for (i = 0; i < pCard->pCardInfo->maxInterruptVectors; i++) if (tiCOMInterruptHandler(&pCard->tiRoot, i) == agTRUE) tiCOMDelayedInterruptHandler(&pCard->tiRoot, i, 100, context); } else if (tiCOMInterruptHandler(&pCard->tiRoot, 0) == agTRUE) tiCOMDelayedInterruptHandler(&pCard->tiRoot, 0, 100, context); #else if (tiCOMInterruptHandler(&pCard->tiRoot, 0) == agTRUE) tiCOMDelayedInterruptHandler(&pCard->tiRoot, 0, 100, context); #endif } /****************************************************************************** agtiapi_CheckCB() Purpose: Check call back function returned event for process completion Parameters: struct agtiapi_softc *pCard Pointer to card data structure U32 milisec (IN) Waiting time for expected event U32 flag (IN) Flag of the event to check U32 *pStatus (IN) Pointer to status of the card or port to check Return: AGTIAPI_SUCCESS - event comes as expected AGTIAPI_FAIL - event not coming Note: Currently, not used ******************************************************************************/ agBOOLEAN agtiapi_CheckCB( struct agtiapi_softc *pCard, U32 milisec, U32 flag, volatile U32 *pStatus ) { U32 msecsPerTick = pCard->pCardInfo->tiRscInfo.tiInitiatorResource. 
initiatorOption.usecsPerTick / 1000; S32 i = milisec/msecsPerTick; AG_GLOBAL_ARG( _flags ); AGTIAPI_PRINTK( "agtiapi_CheckCB: start\n" ); AGTIAPI_FLOW( "agtiapi_CheckCB: start\n" ); if( i <= 0 ) i = 1; while (i > 0) { if (*pStatus & TASK_MANAGEMENT) { if (*pStatus & AGTIAPI_CB_DONE) { if( flag == 0 || *pStatus & flag ) return AGTIAPI_SUCCESS; else return AGTIAPI_FAIL; } } else if (pCard->flags & AGTIAPI_CB_DONE) { if( flag == 0 || *pStatus & flag ) return AGTIAPI_SUCCESS; else return AGTIAPI_FAIL; } agtiapi_DelayMSec( msecsPerTick ); AG_SPIN_LOCK_IRQ( agtiapi_host_lock, _flags ); tiCOMTimerTick( &pCard->tiRoot ); agtiapi_CheckAllVectors( pCard, tiNonInterruptContext ); AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, _flags ); i--; } if( *pStatus & TASK_MANAGEMENT ) *pStatus |= TASK_TIMEOUT; return AGTIAPI_FAIL; } /****************************************************************************** agtiapi_DiscoverTgt() Purpose: Discover available devices Parameters: struct agtiapi_softc *pCard (IN) Pointer to the HBA data structure Return: Note: ******************************************************************************/ STATIC void agtiapi_DiscoverTgt(struct agtiapi_softc *pCard) { ag_portal_data_t *pPortalData; U32 count; AGTIAPI_PRINTK("agtiapi_DiscoverTgt: start\n"); AGTIAPI_FLOW("agtiapi_DiscoverTgt\n"); AGTIAPI_INIT("agtiapi_DiscoverTgt\n"); pPortalData = pCard->pPortalData; for (count = 0; count < pCard->portCount; count++, pPortalData++) { pCard->flags &= ~AGTIAPI_CB_DONE; if (!(PORTAL_STATUS(pPortalData) & AGTIAPI_PORT_DISC_READY)) { if (pCard->flags & AGTIAPI_INIT_TIME) { if (agtiapi_CheckCB(pCard, 5000, AGTIAPI_PORT_DISC_READY, &PORTAL_STATUS(pPortalData)) == AGTIAPI_FAIL) { AGTIAPI_PRINTK( "agtiapi_DiscoverTgt: Port %p / %d not ready for " "discovery\n", pPortalData, count ); /* * There is no need to spend time on discovering device * if port is not ready to do so. */ continue; } } else continue; } AGTIAPI_FLOW( "agtiapi_DiscoverTgt: Portal %p DiscoverTargets starts\n", pPortalData ); AGTIAPI_INIT_DELAY(1000); pCard->flags &= ~AGTIAPI_CB_DONE; if (tiINIDiscoverTargets(&pCard->tiRoot, &pPortalData->portalInfo.tiPortalContext, FORCE_PERSISTENT_ASSIGN_MASK) != tiSuccess) AGTIAPI_PRINTK("agtiapi_DiscoverTgt: tiINIDiscoverTargets ERROR\n"); /* * Should wait till discovery completion to start * next portal. However, lower layer have issue on * multi-portal case under Linux. */ } pPortalData = pCard->pPortalData; for (count = 0; count < pCard->portCount; count++, pPortalData++) { if ((PORTAL_STATUS(pPortalData) & AGTIAPI_PORT_DISC_READY)) { if (agtiapi_CheckCB(pCard, 20000, AGTIAPI_DISC_COMPLETE, &PORTAL_STATUS(pPortalData)) == AGTIAPI_FAIL) { if ((PORTAL_STATUS(pPortalData) & AGTIAPI_DISC_COMPLETE)) AGTIAPI_PRINTK( "agtiapi_DiscoverTgt: Portal %p discover complete, " "status 0x%x\n", pPortalData, PORTAL_STATUS(pPortalData) ); else AGTIAPI_PRINTK( "agtiapi_DiscoverTgt: Portal %p discover is not " "completed, status 0x%x\n", pPortalData, PORTAL_STATUS(pPortalData) ); continue; } AGTIAPI_PRINTK( "agtiapi_DiscoverTgt: Portal %d discover target " "success\n", count ); } } /* * Calling to get device handle should be done per portal based * and better right after discovery is done. However, lower iscsi * layer may not returns discovery complete in correct sequence or we * ran out time. We get device handle for all portals together * after discovery is done or timed out. 
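 * Each ready portal is polled up to AGTIAPI_GET_DEV_MAX times through
 * agtiapi_GetDevHandle(), with AGTIAPI_EXTRA_DELAY between attempts, and is
 * then marked AGTIAPI_DISC_DONE | AGTIAPI_PORT_LINK_UP once discovery has
 * completed or at least one target has been found.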
*/ pPortalData = pCard->pPortalData; for (count = 0; count < pCard->portCount; count++, pPortalData++) { /* * We try to get device handle no matter * if discovery is completed or not. */ if (PORTAL_STATUS(pPortalData) & AGTIAPI_PORT_DISC_READY) { U32 i; for (i = 0; i < AGTIAPI_GET_DEV_MAX; i++) { if (agtiapi_GetDevHandle(pCard, &pPortalData->portalInfo, 0, 0) != 0) break; agtiapi_DelayMSec(AGTIAPI_EXTRA_DELAY); } if ((PORTAL_STATUS(pPortalData) & AGTIAPI_DISC_COMPLETE) || (pCard->tgtCount > 0)) PORTAL_STATUS(pPortalData) |= ( AGTIAPI_DISC_DONE | AGTIAPI_PORT_LINK_UP ); } } return; } /****************************************************************************** agtiapi_PrepCCBs() Purpose: Prepares CCB including DMA map. Parameters: struct agtiapi_softc *pCard (IN) Pointer to the HBA data structure ccb_hdr_t *hdr (IN) Pointer to the CCB header U32 size (IN) size U32 max_ccb (IN) count Return: Note: ******************************************************************************/ STATIC void agtiapi_PrepCCBs( struct agtiapi_softc *pCard, ccb_hdr_t *hdr, U32 size, U32 max_ccb, int tid ) { int i; U32 hdr_sz, ccb_sz; ccb_t *pccb = 0; int offset = 0; int nsegs = 0; int sgl_sz = 0; AGTIAPI_PRINTK("agtiapi_PrepCCBs: start\n"); offset = tid * AGTIAPI_CCB_PER_DEVICE; nsegs = AGTIAPI_NSEGS; sgl_sz = sizeof(tiSgl_t) * nsegs; AGTIAPI_PRINTK( "agtiapi_PrepCCBs: tid %d offset %d nsegs %d sizeof(tiSgl_t) " "%lu, max_ccb %d\n", tid, offset, nsegs, sizeof(tiSgl_t), max_ccb ); ccb_sz = (AGTIAPI_CCB_SIZE + cache_line_size() - 1) & ~(cache_line_size() -1); hdr_sz = (sizeof(*hdr) + cache_line_size() - 1) & ~(cache_line_size() - 1); AGTIAPI_PRINTK("agtiapi_PrepCCBs: after cache line\n"); memset((void *)hdr, 0, size); hdr->next = pCard->ccbAllocList; pCard->ccbAllocList = hdr; AGTIAPI_PRINTK("agtiapi_PrepCCBs: after memset\n"); pccb = (ccb_t*) ((char*)hdr + hdr_sz); for (i = 0; i < max_ccb; i++, pccb = (ccb_t*)((char*)pccb + ccb_sz)) { pccb->tiIORequest.osData = (void *)pccb; /* * Initially put all the ccbs on the free list * in addition to chainlist. 
* ccbChainList is a list of all available ccbs * (free/active everything) */ pccb->pccbChainNext = (pccb_t)pCard->ccbChainList; pccb->pccbNext = (pccb_t)pCard->ccbFreeList; pCard->ccbChainList = (caddr_t *)pccb; pCard->ccbFreeList = (caddr_t *)pccb; pCard->ccbTotal++; #ifdef AGTIAPI_ALIGN_CHECK if (&pccb & 0x63) AGTIAPI_PRINTK("pccb = %p\n", pccb); if (pccb->devHandle & 0x63) AGTIAPI_PRINTK("devHandle addr = %p\n", &pccb->devHandle); if (&pccb->lun & 0x63) AGTIAPI_PRINTK("lun addr = %p\n", &pccb->lun); if (&pccb->targetId & 0x63) AGTIAPI_PRINTK("tig addr = %p\n", &pccb->targetId); if (&pccb->ccbStatus & 0x63) AGTIAPI_PRINTK("ccbStatus addr = %p\n", &pccb->ccbStatus); if (&pccb->scsiStatus & 0x63) AGTIAPI_PRINTK("scsiStatus addr = %p\n", &pccb->scsiStatus); if (&pccb->dataLen & 0x63) AGTIAPI_PRINTK("dataLen addr = %p\n", &pccb->dataLen); if (&pccb->senseLen & 0x63) AGTIAPI_PRINTK("senseLen addr = %p\n", &pccb->senseLen); if (&pccb->numSgElements & 0x63) AGTIAPI_PRINTK("numSgElements addr = %p\n", &pccb->numSgElements); if (&pccb->retryCount & 0x63) AGTIAPI_PRINTK("retry cnt addr = %p\n", &pccb->retryCount); if (&pccb->flags & 0x63) AGTIAPI_PRINTK("flag addr = %p\n", &pccb->flags); if (&pccb->pSenseData & 0x63) AGTIAPI_PRINTK("senseData addr = %p\n", &pccb->pSenseData); if (&pccb->sgList[0] & 0x63) AGTIAPI_PRINTK("SgList 0 = %p\n", &pccb->sgList[0]); if (&pccb->pccbNext & 0x63) AGTIAPI_PRINTK("ccb next = %p\n", &pccb->pccbNext); if (&pccb->pccbChainNext & 0x63) AGTIAPI_PRINTK("ccbChainNext = %p\n", &pccb->pccbChainNext); if (&pccb->cmd & 0x63) AGTIAPI_PRINTK("command = %p\n", &pccb->cmd); if( &pccb->startTime & 0x63 ) AGTIAPI_PRINTK( "startTime = %p\n", &pccb->startTime ); if (&pccb->tiIORequest & 0x63) AGTIAPI_PRINTK("tiIOReq addr = %p\n", &pccb->tiIORequest); if (&pccb->tdIOReqBody & 0x63) AGTIAPI_PRINTK("tdIORequestBody addr = %p\n", &pccb->tdIOReqBody); if (&pccb->tiSuperScsiRequest & 0x63) AGTIAPI_PRINTK( "InitiatorExchange addr = %p\n", &pccb->tiSuperScsiRequest ); #endif if ( bus_dmamap_create( pCard->buffer_dmat, 0, &pccb->CCB_dmamap ) != tiSuccess) { AGTIAPI_PRINTK("agtiapi_PrepCCBs: can't create dma\n"); return; } /* assigns tiSgl_t memory to pccb */ pccb->sgList = (void*)((U64)pCard->tisgl_mem + ((i + offset) * sgl_sz)); pccb->tisgl_busaddr = pCard->tisgl_busaddr + ((i + offset) * sgl_sz); pccb->ccb = NULL; pccb->pccbIO = NULL; pccb->startTime = 0; } #ifdef AGTIAPI_ALIGN_CHECK AGTIAPI_PRINTK("ccb size = %d / %d\n", sizeof(ccb_t), ccb_sz); #endif return; } /****************************************************************************** agtiapi_InitCCBs() Purpose: Create and initialize per card based CCB pool. 
Parameters: struct agtiapi_softc *pCard (IN) Pointer to the HBA data structure int tgtCount (IN) Count Return: Total number of ccb allocated Note: ******************************************************************************/ STATIC U32 agtiapi_InitCCBs(struct agtiapi_softc *pCard, int tgtCount, int tid) { U32 max_ccb, size, ccb_sz, hdr_sz; int no_allocs = 0, i; ccb_hdr_t *hdr = 0; AGTIAPI_PRINTK("agtiapi_InitCCBs: start\n"); AGTIAPI_PRINTK("agtiapi_InitCCBs: tgtCount %d tid %d\n", tgtCount, tid); AGTIAPI_FLOW("agtiapi_InitCCBs: tgtCount %d tid %d\n", tgtCount, tid); #ifndef HOTPLUG_SUPPORT if (pCard->tgtCount > AGSA_MAX_INBOUND_Q) return 1; #else if (tgtCount > AGSA_MAX_INBOUND_Q) tgtCount = AGSA_MAX_INBOUND_Q; #endif max_ccb = tgtCount * AGTIAPI_CCB_PER_DEVICE;// / 4; // TBR ccb_sz = ( (AGTIAPI_CCB_SIZE + cache_line_size() - 1) & ~(cache_line_size() -1) ); hdr_sz = (sizeof(*hdr) + cache_line_size() - 1) & ~(cache_line_size() - 1); size = ccb_sz * max_ccb + hdr_sz; for (i = 0; i < (1 << no_allocs); i++) { hdr = (ccb_hdr_t*)malloc( size, M_PMC_MCCB, M_NOWAIT ); if( !hdr ) { panic( "agtiapi_InitCCBs: bug!!!\n" ); } else { agtiapi_PrepCCBs( pCard, hdr, size, max_ccb, tid ); } } return 1; } #ifdef LINUX_PERBI_SUPPORT /****************************************************************************** agtiapi_GetWWNMappings() Purpose: Get the mappings from target IDs to WWNs, if any. Store them in the WWN_list array, indexed by target ID. Leave the devListIndex field blank; this will be filled-in later. Parameters: ag_card_t *pCard (IN) Pointer to HBA data structure ag_mapping_t *pMapList (IN) Pointer to mapped device list Return: Note: The boot command line parameters are used to load the mapping information, which is contained in the system configuration file. ******************************************************************************/ STATIC void agtiapi_GetWWNMappings( struct agtiapi_softc *pCard, ag_mapping_t *pMapList ) { int devDisc; int lIdx = 0; ag_tgt_map_t *pWWNList; ag_slr_map_t *pSLRList; ag_device_t *pDevList; if( !pCard ) panic( "agtiapi_GetWWNMappings: no pCard \n" ); AGTIAPI_PRINTK( "agtiapi_GetWWNMappings: start\n" ); pWWNList = pCard->pWWNList; pSLRList = pCard->pSLRList; pDevList = pCard->pDevList; pCard->numTgtHardMapped = 0; devDisc = pCard->devDiscover; pWWNList[devDisc-1].devListIndex = maxTargets; pSLRList[devDisc-1].localeNameLen = -2; pSLRList[devDisc-1].remoteNameLen = -2; pDevList[devDisc-1].targetId = maxTargets; /* * Get the mappings from holding area which contains * the input of the system file and store them * in the WWN_list array, indexed by target ID. 
*/ for ( lIdx = 0; lIdx < devDisc - 1; lIdx++) { pWWNList[lIdx].flags = 0; pWWNList[lIdx].devListIndex = maxTargets; pSLRList[lIdx].localeNameLen = -1; pSLRList[lIdx].remoteNameLen = -1; } // this is where we would propagate values fed to pMapList } /* agtiapi_GetWWNMappings */ #endif /****************************************************************************** agtiapi_FindWWNListNext() Purpose: finds first available new (unused) wwn list entry Parameters: ag_tgt_map_t *pWWNList Pointer to head of wwn list int lstMax Number of entries in WWNList Return: index into WWNList indicating available entry space; if available entry space is not found, return negative value ******************************************************************************/ STATIC int agtiapi_FindWWNListNext( ag_tgt_map_t *pWWNList, int lstMax ) { int lLstIdx; for ( lLstIdx = 0; lLstIdx < lstMax; lLstIdx++ ) { if ( pWWNList[lLstIdx].devListIndex == lstMax && pWWNList[lLstIdx].targetLen == 0 ) { AGTIAPI_PRINTK( "agtiapi_FindWWNListNext: %d %d %d %d v. %d\n", lLstIdx, pWWNList[lLstIdx].devListIndex, pWWNList[lLstIdx].targetLen, pWWNList[lLstIdx].portId, lstMax ); return lLstIdx; } } return -1; } /****************************************************************************** agtiapi_GetDevHandle() Purpose: Get device handle. Handles will be placed in the devlist array with same order as TargetList provided and will be mapped to a scsi target id and registered to OS later. Parameters: struct agtiapi_softc *pCard (IN) Pointer to the HBA data structure ag_portal_info_t *pPortalInfo (IN) Pointer to the portal data structure U32 eType (IN) Port event U32 eStatus (IN) Port event status Return: Number of device handle slot present Note: The sequence of device handle will match the sequence of taregt list ******************************************************************************/ STATIC U32 agtiapi_GetDevHandle( struct agtiapi_softc *pCard, ag_portal_info_t *pPortalInfo, U32 eType, U32 eStatus ) { ag_device_t *pDevice; // tiDeviceHandle_t *agDev[pCard->devDiscover]; tiDeviceHandle_t **agDev; int devIdx, szdv, devTotal, cmpsetRtn; int lDevIndex = 0, lRunScanFlag = FALSE; int *lDevFlags; tiPortInfo_t portInfT; ag_device_t lTmpDevice; ag_tgt_map_t *pWWNList; ag_slr_map_t *pSLRList; bit32 lReadRm; bit16 lReadCt; AGTIAPI_PRINTK( "agtiapi_GetDevHandle: start\n" ); AGTIAPI_PRINTK( "agtiapi_GetDevHandle: pCard->devDiscover %d / tgtCt %d\n", pCard->devDiscover, pCard->tgtCount ); AGTIAPI_FLOW( "agtiapi_GetDevHandle: portalInfo %p\n", pPortalInfo ); AGTIAPI_INIT_DELAY( 1000 ); agDev = (tiDeviceHandle_t **) malloc( sizeof(tiDeviceHandle_t *) * pCard->devDiscover, M_PMC_MDEV, M_ZERO | M_NOWAIT); if (agDev == NULL) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: failed to alloc agDev[]\n" ); return 0; } lDevFlags = (int *) malloc( sizeof(int) * pCard->devDiscover, M_PMC_MFLG, M_ZERO | M_NOWAIT ); if (lDevFlags == NULL) { free((caddr_t)agDev, M_PMC_MDEV); AGTIAPI_PRINTK( "agtiapi_GetDevHandle: failed to alloc lDevFlags[]\n" ); return 0; } pWWNList = pCard->pWWNList; pSLRList = pCard->pSLRList; memset( (void *)agDev, 0, sizeof(void *) * pCard->devDiscover ); memset( lDevFlags, 0, sizeof(int) * pCard->devDiscover ); // get device handles devTotal = tiINIGetDeviceHandles( &pCard->tiRoot, &pPortalInfo->tiPortalContext, (tiDeviceHandle_t **)agDev, pCard->devDiscover ); AGTIAPI_PRINTK( "agtiapi_GetDevHandle: portalInfo %p port id %d event %u " "status %u card %p pCard->devDiscover %d devTotal %d " "pPortalInfo->devTotal %d pPortalInfo->devPrev %d " 
"AGTIAPI_INIT_TIME %x\n", pPortalInfo, pPortalInfo->portID, eType, eStatus, pCard, pCard->devDiscover, devTotal, pPortalInfo->devTotal, pPortalInfo->devPrev, pCard->flags & AGTIAPI_INIT_TIME ); // reset devTotal from any previous runs of this pPortalInfo->devPrev = devTotal; pPortalInfo->devTotal = devTotal; AG_LIST_LOCK( &pCard->devListLock ); if ( tiCOMGetPortInfo( &pCard->tiRoot, &pPortalInfo->tiPortalContext, &portInfT ) != tiSuccess) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: tiCOMGetPortInfo did not succeed. \n" ); } szdv = sizeof( pPortalInfo->pDevList ) / sizeof( pPortalInfo->pDevList[0] ); if (szdv > pCard->devDiscover) { szdv = pCard->devDiscover; } // reconstructing dev list via comparison of wwn for ( devIdx = 0; devIdx < pCard->devDiscover; devIdx++ ) { if ( agDev[devIdx] != 0 ) { // AGTIAPI_PRINTK( "agtiapi_GetDevHandle: agDev %d not NULL %p\n", // devIdx, agDev[devIdx] ); // pack temp device structure for tiINIGetDeviceInfo call pDevice = &lTmpDevice; pDevice->devType = DIRECT_DEVICE; pDevice->pCard = (void *)pCard; pDevice->flags = ACTIVE; pDevice->pPortalInfo = pPortalInfo; pDevice->pDevHandle = agDev[devIdx]; pDevice->qbusy = agFALSE; //AGTIAPI_PRINTK( "agtiapi_GetDevHandle: idx %d / %d : %p \n", // devIdx, pCard->devDiscover, agDev[devIdx] ); tiINIGetDeviceInfo( &pCard->tiRoot, agDev[devIdx], &pDevice->devInfo ); //AGTIAPI_PRINTK( "agtiapi_GetDevHandle: wwn sizes %ld %d/%d ", // sizeof(pDevice->targetName), // pDevice->devInfo.osAddress1, // pDevice->devInfo.osAddress2 ); wwncpy( pDevice ); wwnprintk( (unsigned char*)pDevice->targetName, pDevice->targetLen ); for ( lDevIndex = 0; lDevIndex < szdv; lDevIndex++ ) // match w/ wwn list { if ( (pCard->pDevList[lDevIndex].portalId == pPortalInfo->portID) && pDevice->targetLen > 0 && portInfT.localNameLen > 0 && portInfT.remoteNameLen > 0 && pSLRList[pWWNList[lDevIndex].sasLrIdx].localeNameLen > 0 && pSLRList[pWWNList[lDevIndex].sasLrIdx].remoteNameLen > 0 && ( portInfT.localNameLen == pSLRList[pWWNList[lDevIndex].sasLrIdx].localeNameLen ) && ( portInfT.remoteNameLen == pSLRList[pWWNList[lDevIndex].sasLrIdx].remoteNameLen ) && memcmp( pWWNList[lDevIndex].targetName, pDevice->targetName, pDevice->targetLen ) == 0 && memcmp( pSLRList[pWWNList[lDevIndex].sasLrIdx].localeName, portInfT.localName, portInfT.localNameLen ) == 0 && memcmp( pSLRList[pWWNList[lDevIndex].sasLrIdx].remoteName, portInfT.remoteName, portInfT.remoteNameLen ) == 0 ) { AGTIAPI_PRINTK( " pWWNList match @ %d/%d/%d \n", lDevIndex, devIdx, pPortalInfo->portID ); if ( (pCard->pDevList[lDevIndex].targetId == lDevIndex) && ( pPortalInfo->pDevList[lDevIndex] == &pCard->pDevList[lDevIndex] ) ) // active { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: dev in use %d of %d/%d\n", lDevIndex, devTotal, pPortalInfo->portID ); lDevFlags[devIdx] |= DPMC_LEANFLAG_AGDEVUSED; // agDev handle lDevFlags[lDevIndex] |= DPMC_LEANFLAG_PDEVSUSED; // pDevice used lReadRm = atomic_readandclear_32( &pWWNList[lDevIndex].devRemoved ); if ( lReadRm ) // cleared timeout, now remove count for timer { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: clear timer count for" " %d of %d\n", lDevIndex, pPortalInfo->portID ); atomic_subtract_16( &pCard->rmChkCt, 1 ); lReadCt = atomic_load_acq_16( &pCard->rmChkCt ); if ( 0 == lReadCt ) { callout_stop( &pCard->devRmTimer ); } } break; } AGTIAPI_PRINTK( "agtiapi_GetDevHandle: goin fresh on %d of %d/%d\n", lDevIndex, // reactivate now devTotal, pPortalInfo->portID ); // pDevice going fresh lRunScanFlag = TRUE; // scan and clear outstanding removals // pCard->tgtCount++; ## 
pDevice->targetId = lDevIndex; pDevice->portalId = pPortalInfo->portID; memcpy ( &pCard->pDevList[lDevIndex], pDevice, sizeof(lTmpDevice) ); agDev[devIdx]->osData = (void *)&pCard->pDevList[lDevIndex]; if ( agtiapi_InitCCBs( pCard, 1, pDevice->targetId ) == 0 ) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: InitCCB " "tgtCnt %d ERROR!\n", pCard->tgtCount ); AG_LIST_UNLOCK( &pCard->devListLock ); free((caddr_t)lDevFlags, M_PMC_MFLG); free((caddr_t)agDev, M_PMC_MDEV); return 0; } pPortalInfo->pDevList[lDevIndex] = &pCard->pDevList[lDevIndex]; // (ag_device_t *) if ( 0 == lDevFlags[devIdx] ) { pPortalInfo->devTotal++; lDevFlags[devIdx] |= DPMC_LEANFLAG_AGDEVUSED; // agDev used lDevFlags[lDevIndex] |= DPMC_LEANFLAG_PDEVSUSED; // pDevice used } else { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: odd dev handle " "status inspect %d %d %d\n", lDevFlags[devIdx], devIdx, lDevIndex ); pPortalInfo->devTotal++; lDevFlags[devIdx] |= DPMC_LEANFLAG_AGDEVUSED; // agDev used lDevFlags[lDevIndex] |= DPMC_LEANFLAG_PDEVSUSED; // pDevice used } break; } } // end: match this wwn with previous wwn list // we have an agDev entry, but no pWWNList target for it if ( !(lDevFlags[devIdx] & DPMC_LEANFLAG_AGDEVUSED) ) { // flag dev handle not accounted for yet lDevFlags[devIdx] |= DPMC_LEANFLAG_NOWWNLIST; // later, get an empty pDevice and map this agDev. // AGTIAPI_PRINTK( "agtiapi_GetDevHandle: devIdx %d flags 0x%x, %d\n", // devIdx, lDevFlags[devIdx], (lDevFlags[devIdx] & 8) ); } } else { lDevFlags[devIdx] |= DPMC_LEANFLAG_NOAGDEVYT; // known empty agDev handle } } // AGTIAPI_PRINTK( "agtiapi_GetDevHandle: all WWN all the time, " // "devLstIdx/flags/(WWNL)portId ... \n" ); // review device list for further action needed for ( devIdx = 0; devIdx < pCard->devDiscover; devIdx++ ) { if ( lDevFlags[devIdx] & DPMC_LEANFLAG_NOWWNLIST ) // new target, register { int lNextDyad; // find next available dyad entry AGTIAPI_PRINTK( "agtiapi_GetDevHandle: register new target, " "devIdx %d -- %d \n", devIdx, pCard->devDiscover ); lRunScanFlag = TRUE; // scan and clear outstanding removals for ( lNextDyad = 0; lNextDyad < pCard->devDiscover; lNextDyad++ ) { if ( pSLRList[lNextDyad].localeNameLen < 0 && pSLRList[lNextDyad].remoteNameLen < 0 ) break; } if ( lNextDyad == pCard->devDiscover ) { printf( "agtiapi_GetDevHandle: failed to find available SAS LR\n" ); AG_LIST_UNLOCK( &pCard->devListLock ); free( (caddr_t)lDevFlags, M_PMC_MFLG ); free( (caddr_t)agDev, M_PMC_MDEV ); return 0; } // index of new entry lDevIndex = agtiapi_FindWWNListNext( pWWNList, pCard->devDiscover ); AGTIAPI_PRINTK( "agtiapi_GetDevHandle: listIdx new target %d of %d/%d\n", lDevIndex, devTotal, pPortalInfo->portID ); if ( 0 > lDevIndex ) { printf( "agtiapi_GetDevHandle: WARNING -- WWNList exhausted.\n" ); continue; } pDevice = &pCard->pDevList[lDevIndex]; tiINIGetDeviceInfo( &pCard->tiRoot, agDev[devIdx], &pDevice->devInfo ); wwncpy( pDevice ); agtiapi_InitCCBs( pCard, 1, lDevIndex ); pDevice->pCard = (void *)pCard; pDevice->devType = DIRECT_DEVICE; // begin to populate new WWNList entry memcpy( pWWNList[lDevIndex].targetName, pDevice->targetName, pDevice->targetLen ); pWWNList[lDevIndex].targetLen = pDevice->targetLen; pWWNList[lDevIndex].flags = SOFT_MAPPED; pWWNList[lDevIndex].portId = pPortalInfo->portID; pWWNList[lDevIndex].devListIndex = lDevIndex; pWWNList[lDevIndex].sasLrIdx = lNextDyad; pSLRList[lNextDyad].localeNameLen = portInfT.localNameLen; pSLRList[lNextDyad].remoteNameLen = portInfT.remoteNameLen; memcpy( pSLRList[lNextDyad].localeName, portInfT.localName, 
portInfT.localNameLen ); memcpy( pSLRList[lNextDyad].remoteName, portInfT.remoteName, portInfT.remoteNameLen ); // end of populating new WWNList entry pDevice->targetId = lDevIndex; pDevice->flags = ACTIVE; pDevice->CCBCount = 0; pDevice->pDevHandle = agDev[devIdx]; agDev[devIdx]->osData = (void*)pDevice; pDevice->pPortalInfo = pPortalInfo; pDevice->portalId = pPortalInfo->portID; pPortalInfo->pDevList[lDevIndex] = (void*)pDevice; lDevFlags[lDevIndex] |= DPMC_LEANFLAG_PDEVSUSED; // mark pDevice slot used } if ( (pCard->pDevList[devIdx].portalId == pPortalInfo->portID) && !(lDevFlags[devIdx] & DPMC_LEANFLAG_PDEVSUSED) ) // pDevice not used { pDevice = &pCard->pDevList[devIdx]; //pDevice->flags &= ~ACTIVE; if ( ( pDevice->pDevHandle != NULL || pPortalInfo->pDevList[devIdx] != NULL ) ) { atomic_add_16( &pCard->rmChkCt, 1 ); // show count of lost device if (FALSE == lRunScanFlag) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: targ dropped out %d of %d/%d\n", devIdx, devTotal, pPortalInfo->portID ); // if ( 0 == pWWNList[devIdx].devRemoved ) '.devRemoved = 5; cmpsetRtn = atomic_cmpset_32( &pWWNList[devIdx].devRemoved, 0, 5 ); if ( 0 == cmpsetRtn ) { AGTIAPI_PRINTK( "agtiapi_GetDevHandle: target %d timer already set\n", devIdx ); } else { callout_reset( &pCard->devRmTimer, 1 * hz, agtiapi_devRmCheck, pCard ); } } // else ... scan coming soon enough anyway, ignore timer for dropout } } } // end of for ( devIdx = 0; ... AG_LIST_UNLOCK( &pCard->devListLock ); free((caddr_t)lDevFlags, M_PMC_MFLG); free((caddr_t)agDev, M_PMC_MDEV); if ( TRUE == lRunScanFlag ) agtiapi_clrRmScan( pCard ); return devTotal; } // end agtiapi_GetDevHandle /****************************************************************************** agtiapi_scan() Purpose: Triggers CAM's scan Parameters: struct agtiapi_softc *pCard (IN) Pointer to the HBA data structure Return: Note: ******************************************************************************/ static void agtiapi_scan(struct agtiapi_softc *pmcsc) { union ccb *ccb; int bus, tid, lun, card_no; static int num=0; AGTIAPI_PRINTK("agtiapi_scan: start cardNO %d \n", pmcsc->cardNo); bus = cam_sim_path(pmcsc->sim); tid = CAM_TARGET_WILDCARD; lun = CAM_LUN_WILDCARD; mtx_lock(&(pmcsc->pCardInfo->pmIOLock)); ccb = xpt_alloc_ccb_nowait(); if (ccb == agNULL) { mtx_unlock(&(pmcsc->pCardInfo->pmIOLock)); return; } if (xpt_create_path(&ccb->ccb_h.path, agNULL, bus, tid, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mtx_unlock(&(pmcsc->pCardInfo->pmIOLock)); xpt_free_ccb(ccb); return; } mtx_unlock(&(pmcsc->pCardInfo->pmIOLock)); pmcsc->dev_scan = agTRUE; xpt_rescan(ccb); return; } /****************************************************************************** agtiapi_DeQueueCCB() Purpose: Remove a ccb from a queue Parameters: struct agtiapi_softc *pCard (IN) Pointer to the card structure pccb_t *phead (IN) Pointer to a head of ccb queue ccb_t *pccd (IN) Pointer to the ccb to be processed Return: AGTIAPI_SUCCESS - the ccb is removed from queue AGTIAPI_FAIL - the ccb is not found from queue Note: ******************************************************************************/ STATIC agBOOLEAN agtiapi_DeQueueCCB(struct agtiapi_softc *pCard, pccb_t *phead, pccb_t *ptail, #ifdef AGTIAPI_LOCAL_LOCK struct mtx *lock, #endif ccb_t *pccb) { ccb_t *pccb_curr; U32 status = AGTIAPI_FAIL; AGTIAPI_PRINTK("agtiapi_DeQueueCCB: %p from %p\n", pccb, phead); if (pccb == NULL || *phead == NULL) { return AGTIAPI_FAIL; } AGTIAPI_PRINTK("agtiapi_DeQueueCCB: %p from %p\n", pccb, phead); AG_LOCAL_LOCK(lock); if (pccb == *phead) 
{ *phead = (*phead)->pccbNext; if (pccb == *ptail) { *ptail = NULL; } else pccb->pccbNext = NULL; status = AGTIAPI_SUCCESS; } else { pccb_curr = *phead; while (pccb_curr->pccbNext != NULL) { if (pccb_curr->pccbNext == pccb) { pccb_curr->pccbNext = pccb->pccbNext; pccb->pccbNext = NULL; if (pccb == *ptail) { *ptail = pccb_curr; } else pccb->pccbNext = NULL; status = AGTIAPI_SUCCESS; break; } pccb_curr = pccb_curr->pccbNext; } } AG_LOCAL_UNLOCK(lock); return status; } STATIC void wwnprintk( unsigned char *name, int len ) { int i; for (i = 0; i < len; i++, name++) AGTIAPI_PRINTK("%02x", *name); AGTIAPI_PRINTK("\n"); } /* * SAS and SATA behind expander has 8 byte long unique address. * However, direct connect SATA device use 512 byte unique device id. * SPC uses remoteName to indicate length of ID and remoteAddress for the * address of memory that holding ID. */ STATIC int wwncpy( ag_device_t *pDevice ) { int rc = 0; if (sizeof(pDevice->targetName) >= pDevice->devInfo.osAddress1 + pDevice->devInfo.osAddress2) { memcpy(pDevice->targetName, pDevice->devInfo.remoteName, pDevice->devInfo.osAddress1); memcpy(pDevice->targetName + pDevice->devInfo.osAddress1, pDevice->devInfo.remoteAddress, pDevice->devInfo.osAddress2); pDevice->targetLen = pDevice->devInfo.osAddress1 + pDevice->devInfo.osAddress2; rc = pDevice->targetLen; } else { AGTIAPI_PRINTK("WWN wrong size: %d + %d ERROR\n", pDevice->devInfo.osAddress1, pDevice->devInfo.osAddress2); rc = -1; } return rc; } /****************************************************************************** agtiapi_ReleaseCCBs() Purpose: Free all allocated CCB memories for the Host Adapter. Parameters: struct agtiapi_softc *pCard (IN) Pointer to HBA data stucture Return: Note: ******************************************************************************/ STATIC void agtiapi_ReleaseCCBs( struct agtiapi_softc *pCard ) { ccb_hdr_t *hdr; U32 hdr_sz; ccb_t *pccb = 0; AGTIAPI_PRINTK( "agtiapi_ReleaseCCBs: start\n" ); #if ( defined AGTIAPI_TEST_DPL || defined AGTIAPI_TEST_EPL ) ccb_t *pccb; #endif #ifdef AGTIAPI_TEST_DPL for (pccb = (pccb_t)pCard->ccbChainList; pccb != NULL; pccb = pccb->pccbChainNext) { if(pccb->dplPtr && pccb->dplDma) pci_pool_free(pCard->dpl_ctx_pool, pccb->dplPtr, pccb->dplDma); } #endif #ifdef AGTIAPI_TEST_EPL for (pccb = (pccb_t)pCard->ccbChainList; pccb != NULL; pccb = pccb->pccbChainNext) { if(pccb->epl_ptr && pccb->epl_dma_ptr) pci_pool_free( pCard->epl_ctx_pool, pccb->epl_ptr, pccb->epl_dma_ptr ); } #endif while ((hdr = pCard->ccbAllocList) != NULL) { pCard->ccbAllocList = hdr->next; hdr_sz = (sizeof(*hdr) + cache_line_size() - 1) & ~(cache_line_size() - 1); pccb = (ccb_t*) ((char*)hdr + hdr_sz); if (pCard->buffer_dmat != NULL && pccb->CCB_dmamap != NULL) { bus_dmamap_destroy(pCard->buffer_dmat, pccb->CCB_dmamap); } free(hdr, M_PMC_MCCB); } pCard->ccbAllocList = NULL; return; } /****************************************************************************** agtiapi_TITimer() Purpose: Timer tick for tisa common layer Parameters: void *data (IN) Pointer to the HBA data structure Return: Note: ******************************************************************************/ STATIC void agtiapi_TITimer( void *data ) { U32 next_tick; struct agtiapi_softc *pCard; pCard = (struct agtiapi_softc *)data; // AGTIAPI_PRINTK("agtiapi_TITimer: start\n"); AG_GLOBAL_ARG( flags ); next_tick = pCard->pCardInfo->tiRscInfo.tiLoLevelResource. 
loLevelOption.usecsPerTick / USEC_PER_TICK; if( next_tick == 0 ) /* no timer required */ return; AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags ); if( pCard->flags & AGTIAPI_SHUT_DOWN ) goto ext; tiCOMTimerTick( &pCard->tiRoot ); /* tisa common layer timer tick */ //add for polling mode #ifdef PMC_SPC if( agtiapi_polling_mode ) agtiapi_CheckAllVectors( pCard, tiNonInterruptContext ); #endif callout_reset( &pCard->OS_timer, next_tick, agtiapi_TITimer, pCard ); ext: AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); return; } /****************************************************************************** agtiapi_clrRmScan() Purpose: Clears device list entries scheduled for timeout and calls scan Parameters: struct agtiapi_softc *pCard (IN) Pointer to HBA data structure ******************************************************************************/ STATIC void agtiapi_clrRmScan( struct agtiapi_softc *pCard ) { ag_tgt_map_t *pWWNList; ag_portal_info_t *pPortalInfo; ag_portal_data_t *pPortalData; int lIdx; bit32 lReadRm; bit16 lReadCt; pWWNList = pCard->pWWNList; AGTIAPI_PRINTK( "agtiapi_clrRmScan: start\n" ); AG_LIST_LOCK( &pCard->devListLock ); for ( lIdx = 0; lIdx < pCard->devDiscover; lIdx++ ) { lReadCt = atomic_load_acq_16( &pCard->rmChkCt ); if ( 0 == lReadCt ) { break; // trim to who cares } lReadRm = atomic_readandclear_32( &pWWNList[lIdx].devRemoved ); if ( lReadRm > 0 ) { pCard->pDevList[lIdx].flags &= ~ACTIVE; pCard->pDevList[lIdx].pDevHandle = NULL; pPortalData = &pCard->pPortalData[pWWNList[lIdx].portId]; pPortalInfo = &pPortalData->portalInfo; pPortalInfo->pDevList[lIdx] = NULL; AGTIAPI_PRINTK( "agtiapi_clrRmScan: cleared dev %d at port %d\n", lIdx, pWWNList[lIdx].portId ); atomic_subtract_16( &pCard->rmChkCt, 1 ); } } AG_LIST_UNLOCK( &pCard->devListLock ); agtiapi_scan( pCard ); } /****************************************************************************** agtiapi_devRmCheck() Purpose: Timer tick to check for timeout on missing targets Removes device list entry when timeout is reached Parameters: void *data (IN) Pointer to the HBA data structure ******************************************************************************/ STATIC void agtiapi_devRmCheck( void *data ) { struct agtiapi_softc *pCard; ag_tgt_map_t *pWWNList; int lIdx, cmpsetRtn, lRunScanFlag = FALSE; bit16 lReadCt; bit32 lReadRm; pCard = ( struct agtiapi_softc * )data; // routine overhead if ( callout_pending( &pCard->devRmTimer ) ) // callout was reset { return; } if ( !callout_active( &pCard->devRmTimer ) ) // callout was stopped { return; } callout_deactivate( &pCard->devRmTimer ); if( pCard->flags & AGTIAPI_SHUT_DOWN ) { return; // implicit timer clear } pWWNList = pCard->pWWNList; AG_LIST_LOCK( &pCard->devListLock ); lReadCt = atomic_load_acq_16( &pCard->rmChkCt ); if ( lReadCt ) { if ( callout_pending(&pCard->devRmTimer) == FALSE ) { callout_reset( &pCard->devRmTimer, 1 * hz, agtiapi_devRmCheck, pCard ); } else { AG_LIST_UNLOCK( &pCard->devListLock ); return; } for ( lIdx = 0; lIdx < pCard->devDiscover; lIdx++ ) { lReadCt = atomic_load_acq_16( &pCard->rmChkCt ); if ( 0 == lReadCt ) { break; // if handled somewhere else, get out } lReadRm = atomic_load_acq_32( &pWWNList[lIdx].devRemoved ); if ( lReadRm > 0 ) { if ( 1 == lReadRm ) // timed out { // no decrement of devRemoved as way to leave a clrRmScan marker lRunScanFlag = TRUE; // other devRemoved values are about to get wiped break; // ... 
so bail out } else { AGTIAPI_PRINTK( "agtiapi_devRmCheck: counting down dev %d @ %d; %d\n", lIdx, lReadRm, lReadCt ); cmpsetRtn = atomic_cmpset_32( &pWWNList[lIdx].devRemoved, lReadRm, lReadRm-1 ); if ( 0 == cmpsetRtn ) { printf( "agtiapi_devRmCheck: %d decrement already handled\n", lIdx ); } } } } AG_LIST_UNLOCK( &pCard->devListLock ); if ( TRUE == lRunScanFlag ) agtiapi_clrRmScan( pCard ); } else { AG_LIST_UNLOCK( &pCard->devListLock ); } return; } static void agtiapi_cam_poll( struct cam_sim *asim ) { return; } /***************************************************************************** agtiapi_ResetCard() Purpose: Hard or soft reset on the controller and resend any outstanding requests if needed. Parameters: struct agtiapi_softc *pCard (IN) Pointer to HBA data structure unsigned lomg flags (IN/OUT) Flags used in locking done from calling layers Return: AGTIAPI_SUCCESS - reset successful AGTIAPI_FAIL - reset failed Note: *****************************************************************************/ U32 agtiapi_ResetCard( struct agtiapi_softc *pCard, unsigned long *flags ) { ag_device_t *pDevice; U32 lIdx = 0; U32 lFlagVal; agBOOLEAN ret; ag_portal_info_t *pPortalInfo; ag_portal_data_t *pPortalData; U32 count, loop; int szdv; if( pCard->flags & AGTIAPI_RESET ) { AGTIAPI_PRINTK( "agtiapi_ResetCard: reset card already in progress!\n" ); return AGTIAPI_FAIL; } AGTIAPI_PRINTK( "agtiapi_ResetCard: Enter cnt %d\n", pCard->resetCount ); #ifdef LOGEVENT agtiapi_LogEvent( pCard, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "Reset initiator time = %d!", pCard->resetCount + 1 ); #endif pCard->flags |= AGTIAPI_RESET; pCard->flags &= ~(AGTIAPI_CB_DONE | AGTIAPI_RESET_SUCCESS); tiCOMSystemInterruptsActive( &pCard->tiRoot, FALSE ); pCard->flags &= ~AGTIAPI_SYS_INTR_ON; agtiapi_FlushCCBs( pCard, AGTIAPI_CALLBACK ); for ( lIdx = 1; 3 >= lIdx; lIdx++ ) // we try reset up to 3 times { if( pCard->flags & AGTIAPI_SOFT_RESET ) { AGTIAPI_PRINTK( "agtiapi_ResetCard: soft variant\n" ); tiCOMReset( &pCard->tiRoot, tiSoftReset ); } else { AGTIAPI_PRINTK( "agtiapi_ResetCard: no flag, no reset!\n" ); } lFlagVal = AGTIAPI_RESET_SUCCESS; AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, *flags ); ret = agtiapi_CheckCB( pCard, 50000, lFlagVal, &pCard->flags ); AG_SPIN_LOCK_IRQ( agtiapi_host_lock, *flags ); if( ret == AGTIAPI_FAIL ) { AGTIAPI_PRINTK( "agtiapi_ResetCard: CheckCB indicates failed reset call, " "try again?\n" ); } else { break; } } if ( 1 < lIdx ) { if ( AGTIAPI_FAIL == ret ) { AGTIAPI_PRINTK( "agtiapi_ResetCard: soft reset failed after try %d\n", lIdx ); } else { AGTIAPI_PRINTK( "agtiapi_ResetCard: soft reset success at try %d\n", lIdx ); } } if( AGTIAPI_FAIL == ret ) { printf( "agtiapi_ResetCard: reset ERROR\n" ); pCard->flags &= ~AGTIAPI_INSTALLED; return AGTIAPI_FAIL; } pCard->flags &= ~AGTIAPI_SOFT_RESET; // disable all devices pDevice = pCard->pDevList; for( lIdx = 0; lIdx < maxTargets; lIdx++, pDevice++ ) { /* if ( pDevice->flags & ACTIVE ) { printf( "agtiapi_ResetCard: before ... 
active device %d\n", lIdx ); } */ pDevice->flags &= ~ACTIVE; } AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, *flags ); if( tiCOMPortInit( &pCard->tiRoot, agFALSE ) != tiSuccess ) printf( "agtiapi_ResetCard: tiCOMPortInit FAILED \n" ); else AGTIAPI_PRINTK( "agtiapi_ResetCard: tiCOMPortInit success\n" ); if( !pCard->pDevList ) { // try to get a little sanity here AGTIAPI_PRINTK( "agtiapi_ResetCard: no pDevList ERROR %p\n", pCard->pDevList ); return AGTIAPI_FAIL; } AGTIAPI_PRINTK( "agtiapi_ResetCard: pre target-count %d port-count %d\n", pCard->tgtCount, pCard->portCount ); pCard->tgtCount = 0; DELAY( 500000 ); pCard->flags &= ~AGTIAPI_CB_DONE; pPortalData = pCard->pPortalData; for( count = 0; count < pCard->portCount; count++ ) { AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags ); pPortalInfo = &pPortalData->portalInfo; pPortalInfo->portStatus = 0; pPortalInfo->portStatus &= ~( AGTIAPI_PORT_START | AGTIAPI_PORT_DISC_READY | AGTIAPI_DISC_DONE | AGTIAPI_DISC_COMPLETE ); szdv = sizeof( pPortalInfo->pDevList ) / sizeof( pPortalInfo->pDevList[0] ); if (szdv > pCard->devDiscover) { szdv = pCard->devDiscover; } for( lIdx = 0, loop = 0; lIdx < szdv && loop < pPortalInfo->devTotal; lIdx++ ) { pDevice = (ag_device_t*)pPortalInfo->pDevList[lIdx]; if( pDevice ) { loop++; pDevice->pDevHandle = 0; // mark for availability in pCard->pDevList[] // don't erase more as the device is scheduled for removal on DPC } AGTIAPI_PRINTK( "agtiapi_ResetCard: reset pDev %p pDevList %p idx %d\n", pDevice, pPortalInfo->pDevList, lIdx ); pPortalInfo->devTotal = pPortalInfo->devPrev = 0; } for( lIdx = 0; lIdx < maxTargets; lIdx++ ) { // we reconstruct dev list later in get dev handle pPortalInfo->pDevList[lIdx] = NULL; } for( loop = 0; loop < AGTIAPI_LOOP_MAX; loop++ ) { AGTIAPI_PRINTK( "agtiapi_ResetCard: tiCOMPortStart entry data " "%p / %d / %p\n", &pCard->tiRoot, pPortalInfo->portID, &pPortalInfo->tiPortalContext ); if( tiCOMPortStart( &pCard->tiRoot, pPortalInfo->portID, &pPortalInfo->tiPortalContext, 0 ) != tiSuccess ) { printf( "agtiapi_ResetCard: tiCOMPortStart %d FAILED\n", pPortalInfo->portID ); } else { AGTIAPI_PRINTK( "agtiapi_ResetCard: tiCOMPortStart %d success\n", pPortalInfo->portID ); break; } } AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); tiCOMGetPortInfo( &pCard->tiRoot, &pPortalInfo->tiPortalContext, &pPortalInfo->tiPortInfo ); pPortalData++; } // ## fail case: pCard->flags &= ~AGTIAPI_INSTALLED; AG_SPIN_LOCK_IRQ(agtiapi_host_lock, *flags); if( !(pCard->flags & AGTIAPI_INSTALLED) ) // driver not installed ! { printf( "agtiapi_ResetCard: error, driver not intstalled? 
" "!AGTIAPI_INSTALLED \n" ); return AGTIAPI_FAIL; } AGTIAPI_PRINTK( "agtiapi_ResetCard: total device %d\n", pCard->tgtCount ); #ifdef LOGEVENT agtiapi_LogEvent( pCard, IOCTL_EVT_SEV_INFORMATIONAL, 0, agNULL, 0, "Reset initiator total device = %d!", pCard->tgtCount ); #endif pCard->resetCount++; AGTIAPI_PRINTK( "agtiapi_ResetCard: clear send and done queues\n" ); // clear send & done queue AG_LOCAL_LOCK( &pCard->sendLock ); pCard->ccbSendHead = NULL; pCard->ccbSendTail = NULL; AG_LOCAL_UNLOCK( &pCard->sendLock ); AG_LOCAL_LOCK( &pCard->doneLock ); pCard->ccbDoneHead = NULL; pCard->ccbDoneTail = NULL; AG_LOCAL_UNLOCK( &pCard->doneLock ); // clear smp queues also AG_LOCAL_LOCK( &pCard->sendSMPLock ); pCard->smpSendHead = NULL; pCard->smpSendTail = NULL; AG_LOCAL_UNLOCK( &pCard->sendSMPLock ); AG_LOCAL_LOCK( &pCard->doneSMPLock ); pCard->smpDoneHead = NULL; pCard->smpDoneTail = NULL; AG_LOCAL_UNLOCK( &pCard->doneSMPLock ); // finished with all reset stuff, now start things back up tiCOMSystemInterruptsActive( &pCard->tiRoot, TRUE ); pCard->flags |= AGTIAPI_SYS_INTR_ON; pCard->flags |= AGTIAPI_HAD_RESET; pCard->flags &= ~AGTIAPI_RESET; // ## agtiapi_StartIO( pCard ); AGTIAPI_PRINTK( "agtiapi_ResetCard: local return success\n" ); return AGTIAPI_SUCCESS; } // agtiapi_ResetCard /****************************************************************************** agtiapi_ReleaseHBA() Purpose: Releases all resources previously acquired to support a specific Host Adapter, including the I/O Address range, and unregisters the agtiapi Host Adapter. Parameters: device_t dev (IN) - device pointer Return: always return 0 - success Note: ******************************************************************************/ int agtiapi_ReleaseHBA( device_t dev ) { int thisCard = device_get_unit( dev ); // keeping get_unit call to once int i; ag_card_info_t *thisCardInst = &agCardInfoList[ thisCard ]; struct ccb_setasync csa; struct agtiapi_softc *pCard; pCard = device_get_softc( dev ); ag_card_info_t *pCardInfo = pCard->pCardInfo; ag_resource_info_t *pRscInfo = &thisCardInst->tiRscInfo; AG_GLOBAL_ARG(flags); AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: start\n" ); if (thisCardInst != pCardInfo) { AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: Wrong ag_card_info_t thisCardInst %p " "pCardInfo %p\n", thisCardInst, pCardInfo ); panic( "agtiapi_ReleaseHBA: Wrong ag_card_info_t thisCardInst %p pCardInfo " "%p\n", thisCardInst, pCardInfo ); return( EIO ); } AGTIAPI_PRINTK( "agtiapi_ReleaseHBA card %p\n", pCard ); pCard->flags |= AGTIAPI_SHUT_DOWN; // remove timer if (pCard->flags & AGTIAPI_TIMER_ON) { AG_SPIN_LOCK_IRQ( agtiapi_host_lock, flags ); callout_drain( &pCard->OS_timer ); callout_drain( &pCard->devRmTimer ); callout_drain(&pCard->IO_timer); AG_SPIN_UNLOCK_IRQ( agtiapi_host_lock, flags ); AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: timer released\n" ); } #ifdef HIALEAH_ENCRYPTION //Release encryption table memory - Fix it //if(pCard->encrypt && (pCard->flags & AGTIAPI_INSTALLED)) //agtiapi_CleanupEncryption(pCard); #endif /* * Shutdown the channel so that chip gets frozen * and it does not do any more pci-bus accesses. 
*/ if (pCard->flags & AGTIAPI_SYS_INTR_ON) { tiCOMSystemInterruptsActive( &pCard->tiRoot, FALSE ); pCard->flags &= ~AGTIAPI_SYS_INTR_ON; AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: card interrupt off\n" ); } if (pCard->flags & AGTIAPI_INSTALLED) { tiCOMShutDown( &pCard->tiRoot ); AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: low layers shutdown\n" ); } /* * first release IRQ, so that we do not get any more interrupts * from this host */ if (pCard->flags & AGTIAPI_IRQ_REQUESTED) { if (!agtiapi_intx_mode) { int i; for (i = 0; i< MAX_MSIX_NUM_VECTOR; i++) { if (pCard->irq[i] != agNULL && pCard->rscID[i] != 0) { bus_teardown_intr(dev, pCard->irq[i], pCard->intrcookie[i]); bus_release_resource( dev, SYS_RES_IRQ, pCard->rscID[i], pCard->irq[i] ); } } pci_release_msi(dev); } pCard->flags &= ~AGTIAPI_IRQ_REQUESTED; #ifdef AGTIAPI_DPC for (i = 0; i < MAX_MSIX_NUM_DPC; i++) tasklet_kill(&pCard->tasklet_dpc[i]); #endif AGTIAPI_PRINTK("agtiapi_ReleaseHBA: IRQ released\n"); } // release memory vs. alloc in agtiapi_alloc_ostimem; used in ostiAllocMemory if( pCard->osti_busaddr != 0 ) { bus_dmamap_unload( pCard->osti_dmat, pCard->osti_mapp ); } if( pCard->osti_mem != NULL ) { bus_dmamem_free( pCard->osti_dmat, pCard->osti_mem, pCard->osti_mapp ); } if( pCard->osti_dmat != NULL ) { bus_dma_tag_destroy( pCard->osti_dmat ); } /* unmap the mapped PCI memory */ /* calls bus_release_resource( ,SYS_RES_MEMORY, ..) */ agtiapi_ReleasePCIMem(thisCardInst); /* release all ccbs */ if (pCard->ccbTotal) { //calls bus_dmamap_destroy() for all pccbs agtiapi_ReleaseCCBs(pCard); AGTIAPI_PRINTK("agtiapi_ReleaseHBA: CCB released\n"); } #ifdef HIALEAH_ENCRYPTION /*release encryption resources - Fix it*/ if(pCard->encrypt) { /*Check that all IO's are completed */ if(atomic_read (&outstanding_encrypted_io_count) > 0) { printf("%s: WARNING: %d outstanding encrypted IOs !\n", __FUNCTION__, atomic_read(&outstanding_encrypted_io_count)); } //agtiapi_CleanupEncryptionPools(pCard); } #endif /* release device list */ if( pCard->pDevList ) { free((caddr_t)pCard->pDevList, M_PMC_MDVT); pCard->pDevList = NULL; AGTIAPI_PRINTK("agtiapi_ReleaseHBA: device list released\n"); } #ifdef LINUX_PERBI_SUPPORT // ## review use of PERBI AGTIAPI_PRINTK( "agtiapi_ReleaseHBA: WWN list %p \n", pCard->pWWNList ); if( pCard->pWWNList ) { free( (caddr_t)pCard->pWWNList, M_PMC_MTGT ); pCard->pWWNList = NULL; AGTIAPI_PRINTK("agtiapi_ReleaseHBA: WWN list released\n"); } if( pCard->pSLRList ) { free( (caddr_t)pCard->pSLRList, M_PMC_MSLR ); pCard->pSLRList = NULL; AGTIAPI_PRINTK("agtiapi_ReleaseHBA: SAS Local Remote list released\n"); } #endif if (pCard->pPortalData) { free((caddr_t)pCard->pPortalData, M_PMC_MPRT); pCard->pPortalData = NULL; AGTIAPI_PRINTK("agtiapi_ReleaseHBA: PortalData released\n"); } //calls contigfree() or free() agtiapi_MemFree(pCardInfo); AGTIAPI_PRINTK("agtiapi_ReleaseHBA: low level resource released\n"); #ifdef HOTPLUG_SUPPORT if (pCard->flags & AGTIAPI_PORT_INITIALIZED) { // agtiapi_FreeDevWorkList(pCard); AGTIAPI_PRINTK("agtiapi_ReleaseHBA: (HP dev) work resources released\n"); } #endif /* * TBD, scsi_unregister may release wrong host data structure * which cause NULL pointer shows up. 
*/ if (pCard->flags & AGTIAPI_SCSI_REGISTERED) { pCard->flags &= ~AGTIAPI_SCSI_REGISTERED; #ifdef AGTIAPI_LOCAL_LOCK if (pCard->STLock) { //destroy mtx int maxLocks; maxLocks = pRscInfo->tiLoLevelResource.loLevelOption.numOfQueuesPerPort; for( i = 0; i < maxLocks; i++ ) { mtx_destroy(&pCard->STLock[i]); } free(pCard->STLock, M_PMC_MSTL); pCard->STLock = NULL; } #endif } ag_card_good--; /* reset agtiapi_1st_time if this is the only card */ if (!ag_card_good && !agtiapi_1st_time) { agtiapi_1st_time = 1; } /* for tiSgl_t memeory */ if (pCard->tisgl_busaddr != 0) { bus_dmamap_unload(pCard->tisgl_dmat, pCard->tisgl_map); } if (pCard->tisgl_mem != NULL) { bus_dmamem_free(pCard->tisgl_dmat, pCard->tisgl_mem, pCard->tisgl_map); } if (pCard->tisgl_dmat != NULL) { bus_dma_tag_destroy(pCard->tisgl_dmat); } if (pCard->buffer_dmat != agNULL) { bus_dma_tag_destroy(pCard->buffer_dmat); } if (pCard->sim != NULL) { mtx_lock(&thisCardInst->pmIOLock); xpt_setup_ccb(&csa.ccb_h, pCard->path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = 0; csa.callback = agtiapi_async; csa.callback_arg = pCard; xpt_action((union ccb *)&csa); xpt_free_path(pCard->path); // if (pCard->ccbTotal == 0) if (pCard->ccbTotal <= thisCard) { /* no link up so that simq has not been released. In order to remove cam, we call this. */ xpt_release_simq(pCard->sim, 1); } xpt_bus_deregister(cam_sim_path(pCard->sim)); cam_sim_free(pCard->sim, FALSE); mtx_unlock(&thisCardInst->pmIOLock); } if (pCard->devq != NULL) { cam_simq_free(pCard->devq); } //destroy mtx mtx_destroy( &thisCardInst->pmIOLock ); mtx_destroy( &pCard->sendLock ); mtx_destroy( &pCard->doneLock ); mtx_destroy( &pCard->sendSMPLock ); mtx_destroy( &pCard->doneSMPLock ); mtx_destroy( &pCard->ccbLock ); mtx_destroy( &pCard->devListLock ); mtx_destroy( &pCard->OS_timer_lock ); mtx_destroy( &pCard->devRmTimerLock ); mtx_destroy( &pCard->memLock ); mtx_destroy( &pCard->freezeLock ); destroy_dev( pCard->my_cdev ); memset((void *)pCardInfo, 0, sizeof(ag_card_info_t)); return 0; } // Called during system shutdown after sync static int agtiapi_shutdown( device_t dev ) { AGTIAPI_PRINTK( "agtiapi_shutdown\n" ); return( 0 ); } static int agtiapi_suspend( device_t dev ) // Device suspend routine. { AGTIAPI_PRINTK( "agtiapi_suspend\n" ); return( 0 ); } static int agtiapi_resume( device_t dev ) // Device resume routine. { AGTIAPI_PRINTK( "agtiapi_resume\n" ); return( 0 ); } static device_method_t agtiapi_methods[] = { // Device interface DEVMETHOD( device_probe, agtiapi_probe ), DEVMETHOD( device_attach, agtiapi_attach ), DEVMETHOD( device_detach, agtiapi_ReleaseHBA ), DEVMETHOD( device_shutdown, agtiapi_shutdown ), DEVMETHOD( device_suspend, agtiapi_suspend ), DEVMETHOD( device_resume, agtiapi_resume ), { 0, 0 } }; static devclass_t pmspcv_devclass; static driver_t pmspcv_driver = { "pmspcv", agtiapi_methods, sizeof( struct agtiapi_softc ) }; DRIVER_MODULE( pmspcv, pci, pmspcv_driver, pmspcv_devclass, 0, 0 ); MODULE_DEPEND( pmspcv, cam, 1, 1, 1 ); MODULE_DEPEND( pmspcv, pci, 1, 1, 1 ); #include #include #include #include Index: head/sys/dev/rt/if_rt.c =================================================================== --- head/sys/dev/rt/if_rt.c (revision 295879) +++ head/sys/dev/rt/if_rt.c (revision 295880) @@ -1,2814 +1,2813 @@ /*- * Copyright (c) 2015, Stanislav Galabov * Copyright (c) 2014, Aleksandr A. Mityaev * Copyright (c) 2011, Aleksandr Rybalko * based on hard work * by Alexander Egorenkov * and by Damien Bergamini * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "if_rtvar.h" #include "if_rtreg.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include "opt_platform.h" #include "opt_rt305x.h" #ifdef FDT #include #include #include #endif #include #include #include #include #ifdef IF_RT_PHY_SUPPORT #include "miibus_if.h" #endif /* * Defines and macros */ #define RT_MAX_AGG_SIZE 3840 #define RT_TX_DATA_SEG0_SIZE MJUMPAGESIZE #define RT_MS(_v, _f) (((_v) & _f) >> _f##_S) #define RT_SM(_v, _f) (((_v) << _f##_S) & _f) #define RT_TX_WATCHDOG_TIMEOUT 5 #define RT_CHIPID_RT3050 0x3050 #define RT_CHIPID_RT3052 0x3052 #define RT_CHIPID_RT5350 0x5350 #define RT_CHIPID_RT6855 0x6855 #define RT_CHIPID_MT7620 0x7620 #ifdef FDT /* more specific and new models should go first */ static const struct ofw_compat_data rt_compat_data[] = { { "ralink,rt6855-eth", (uintptr_t)RT_CHIPID_RT6855 }, { "ralink,rt5350-eth", (uintptr_t)RT_CHIPID_RT5350 }, { "ralink,rt3052-eth", (uintptr_t)RT_CHIPID_RT3052 }, { "ralink,rt305x-eth", (uintptr_t)RT_CHIPID_RT3050 }, { NULL, (uintptr_t)NULL } }; #endif /* * Static function prototypes */ static int rt_probe(device_t dev); static int rt_attach(device_t dev); static int rt_detach(device_t dev); static int rt_shutdown(device_t dev); static int rt_suspend(device_t dev); static int rt_resume(device_t dev); static void rt_init_locked(void *priv); static void rt_init(void *priv); static void rt_stop_locked(void *priv); static void rt_stop(void *priv); static void rt_start(struct ifnet *ifp); static int rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static void rt_periodic(void *arg); static void rt_tx_watchdog(void *arg); static void rt_intr(void *arg); static void rt_rt5350_intr(void *arg); static void rt_tx_coherent_intr(struct rt_softc *sc); static void rt_rx_coherent_intr(struct rt_softc *sc); static void rt_rx_delay_intr(struct rt_softc *sc); static void rt_tx_delay_intr(struct rt_softc *sc); static void rt_rx_intr(struct rt_softc *sc, int qid); static void rt_tx_intr(struct rt_softc *sc, int qid); static void rt_rx_done_task(void *context, int pending); static void rt_tx_done_task(void *context, int pending); static 
void rt_periodic_task(void *context, int pending); static int rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit); static void rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring); static void rt_update_stats(struct rt_softc *sc); static void rt_watchdog(struct rt_softc *sc); static void rt_update_raw_counters(struct rt_softc *sc); static void rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask); static void rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask); static int rt_txrx_enable(struct rt_softc *sc); static int rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid); static void rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring); static void rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring); static int rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid); static void rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring); static void rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring); static void rt_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void rt_sysctl_attach(struct rt_softc *sc); #ifdef IF_RT_PHY_SUPPORT void rt_miibus_statchg(device_t); static int rt_miibus_readreg(device_t, int, int); static int rt_miibus_writereg(device_t, int, int, int); #endif static int rt_ifmedia_upd(struct ifnet *); static void rt_ifmedia_sts(struct ifnet *, struct ifmediareq *); static SYSCTL_NODE(_hw, OID_AUTO, rt, CTLFLAG_RD, 0, "RT driver parameters"); #ifdef IF_RT_DEBUG static int rt_debug = 0; SYSCTL_INT(_hw_rt, OID_AUTO, debug, CTLFLAG_RWTUN, &rt_debug, 0, "RT debug level"); #endif static int rt_probe(device_t dev) { struct rt_softc *sc = device_get_softc(dev); char buf[80]; #ifdef FDT const struct ofw_compat_data * cd; cd = ofw_bus_search_compatible(dev, rt_compat_data); if (cd->ocd_data == (uintptr_t)NULL) return (ENXIO); sc->rt_chipid = (unsigned int)(cd->ocd_data); #else #if defined(MT7620) sc->rt_chipid = RT_CHIPID_MT7620; #elif defined(RT5350) sc->rt_chipid = RT_CHIPID_RT5350; #else sc->rt_chipid = RT_CHIPID_RT3050; #endif #endif snprintf(buf, sizeof(buf), "Ralink RT%x onChip Ethernet driver", sc->rt_chipid); device_set_desc_copy(dev, buf); return (BUS_PROBE_GENERIC); } /* * macaddr_atoi - translate string MAC address to uint8_t array */ static int macaddr_atoi(const char *str, uint8_t *mac) { int count, i; unsigned int amac[ETHER_ADDR_LEN]; /* Aligned version */ count = sscanf(str, "%x%*c%x%*c%x%*c%x%*c%x%*c%x", &amac[0], &amac[1], &amac[2], &amac[3], &amac[4], &amac[5]); if (count < ETHER_ADDR_LEN) { memset(mac, 0, ETHER_ADDR_LEN); return (1); } /* Copy aligned to result */ for (i = 0; i < ETHER_ADDR_LEN; i ++) mac[i] = (amac[i] & 0xff); return (0); } #ifdef USE_GENERATED_MAC_ADDRESS /* * generate_mac(uin8_t *mac) * This is MAC address generator for cases when real device MAC address * unknown or not yet accessible. * Use 'b','s','d' signature and 3 octets from CRC32 on kenv. * MAC = 'b', 's', 'd', CRC[3]^CRC[2], CRC[1], CRC[0] * * Output - MAC address, that do not change between reboots, if hints or * bootloader info unchange. 
*/ static void generate_mac(uint8_t *mac) { unsigned char *cp; int i = 0; uint32_t crc = 0xffffffff; /* Generate CRC32 on kenv */ for (cp = kenvp[0]; cp != NULL; cp = kenvp[++i]) { crc = calculate_crc32c(crc, cp, strlen(cp) + 1); } crc = ~crc; mac[0] = 'b'; mac[1] = 's'; mac[2] = 'd'; mac[3] = (crc >> 24) ^ ((crc >> 16) & 0xff); mac[4] = (crc >> 8) & 0xff; mac[5] = crc & 0xff; } #endif /* * ether_request_mac - try to find usable MAC address. */ static int ether_request_mac(device_t dev, uint8_t *mac) { char *var; /* * "ethaddr" is passed via envp on RedBoot platforms * "kmac" is passed via argv on RouterBOOT platforms */ #if defined(RT305X_UBOOT) || defined(__REDBOOT__) || defined(__ROUTERBOOT__) if ((var = kern_getenv("ethaddr")) != NULL || (var = kern_getenv("kmac")) != NULL ) { if(!macaddr_atoi(var, mac)) { printf("%s: use %s macaddr from KENV\n", device_get_nameunit(dev), var); freeenv(var); return (0); } freeenv(var); } #endif /* * Try from hints * hint.[dev].[unit].macaddr */ if (!resource_string_value(device_get_name(dev), device_get_unit(dev), "macaddr", (const char **)&var)) { if(!macaddr_atoi(var, mac)) { printf("%s: use %s macaddr from hints\n", device_get_nameunit(dev), var); return (0); } } #ifdef USE_GENERATED_MAC_ADDRESS generate_mac(mac); device_printf(dev, "use generated %02x:%02x:%02x:%02x:%02x:%02x " "macaddr\n", mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); #else /* Hardcoded */ mac[0] = 0x00; mac[1] = 0x18; mac[2] = 0xe7; mac[3] = 0xd5; mac[4] = 0x83; mac[5] = 0x90; device_printf(dev, "use hardcoded 00:18:e7:d5:83:90 macaddr\n"); #endif return (0); } /* * Reset hardware */ static void reset_freng(struct rt_softc *sc) { /* XXX hard reset kills everything so skip it ... */ return; } static int rt_attach(device_t dev) { struct rt_softc *sc; struct ifnet *ifp; int error, i; sc = device_get_softc(dev); sc->dev = dev; mtx_init(&sc->lock, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); sc->mem_rid = 0; sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); error = ENXIO; goto fail; } sc->bst = rman_get_bustag(sc->mem); sc->bsh = rman_get_bushandle(sc->mem); sc->irq_rid = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (sc->irq == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); error = ENXIO; goto fail; } #ifdef IF_RT_DEBUG sc->debug = rt_debug; SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "rt debug level"); #endif /* Reset hardware */ reset_freng(sc); /* Fill in soc-specific registers map */ switch(sc->rt_chipid) { case RT_CHIPID_MT7620: case RT_CHIPID_RT5350: device_printf(dev, "RT%x Ethernet MAC (rev 0x%08x)\n", sc->rt_chipid, sc->mac_rev); /* RT5350: No GDMA, PSE, CDMA, PPE */ RT_WRITE(sc, GE_PORT_BASE + 0x0C00, // UDPCS, TCPCS, IPCS=1 RT_READ(sc, GE_PORT_BASE + 0x0C00) | (0x7<<16)); sc->delay_int_cfg=RT5350_PDMA_BASE+RT5350_DELAY_INT_CFG; sc->fe_int_status=RT5350_FE_INT_STATUS; sc->fe_int_enable=RT5350_FE_INT_ENABLE; sc->pdma_glo_cfg=RT5350_PDMA_BASE+RT5350_PDMA_GLO_CFG; sc->pdma_rst_idx=RT5350_PDMA_BASE+RT5350_PDMA_RST_IDX; for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) { sc->tx_base_ptr[i]=RT5350_PDMA_BASE+RT5350_TX_BASE_PTR(i); sc->tx_max_cnt[i]=RT5350_PDMA_BASE+RT5350_TX_MAX_CNT(i); sc->tx_ctx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_CTX_IDX(i); sc->tx_dtx_idx[i]=RT5350_PDMA_BASE+RT5350_TX_DTX_IDX(i); } 
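/*
 * RT5350/MT7620 parts expose two RX DMA rings; record the PDMA
 * register offsets for both rings along with the per-chip interrupt
 * masks used by the RX/TX done handlers.
 */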
sc->rx_ring_count=2; sc->rx_base_ptr[0]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR0; sc->rx_max_cnt[0]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT0; sc->rx_calc_idx[0]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX0; sc->rx_drx_idx[0]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX0; sc->rx_base_ptr[1]=RT5350_PDMA_BASE+RT5350_RX_BASE_PTR1; sc->rx_max_cnt[1]=RT5350_PDMA_BASE+RT5350_RX_MAX_CNT1; sc->rx_calc_idx[1]=RT5350_PDMA_BASE+RT5350_RX_CALC_IDX1; sc->rx_drx_idx[1]=RT5350_PDMA_BASE+RT5350_RX_DRX_IDX1; sc->int_rx_done_mask=RT5350_INT_RXQ0_DONE; sc->int_tx_done_mask=RT5350_INT_TXQ0_DONE; break; case RT_CHIPID_RT6855: device_printf(dev, "RT6855 Ethernet MAC (rev 0x%08x)\n", sc->mac_rev); break; default: device_printf(dev, "RT305XF Ethernet MAC (rev 0x%08x)\n", sc->mac_rev); RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG, ( GDM_ICS_EN | /* Enable IP Csum */ GDM_TCS_EN | /* Enable TCP Csum */ GDM_UCS_EN | /* Enable UDP Csum */ GDM_STRPCRC | /* Strip CRC from packet */ GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* fwd UCast to CPU */ GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* fwd BCast to CPU */ GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* fwd MCast to CPU */ GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT /* fwd Other to CPU */ )); sc->delay_int_cfg=PDMA_BASE+DELAY_INT_CFG; sc->fe_int_status=GE_PORT_BASE+FE_INT_STATUS; sc->fe_int_enable=GE_PORT_BASE+FE_INT_ENABLE; sc->pdma_glo_cfg=PDMA_BASE+PDMA_GLO_CFG; sc->pdma_glo_cfg=PDMA_BASE+PDMA_GLO_CFG; sc->pdma_rst_idx=PDMA_BASE+PDMA_RST_IDX; for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) { sc->tx_base_ptr[i]=PDMA_BASE+TX_BASE_PTR(i); sc->tx_max_cnt[i]=PDMA_BASE+TX_MAX_CNT(i); sc->tx_ctx_idx[i]=PDMA_BASE+TX_CTX_IDX(i); sc->tx_dtx_idx[i]=PDMA_BASE+TX_DTX_IDX(i); } sc->rx_ring_count=1; sc->rx_base_ptr[0]=PDMA_BASE+RX_BASE_PTR0; sc->rx_max_cnt[0]=PDMA_BASE+RX_MAX_CNT0; sc->rx_calc_idx[0]=PDMA_BASE+RX_CALC_IDX0; sc->rx_drx_idx[0]=PDMA_BASE+RX_DRX_IDX0; sc->int_rx_done_mask=INT_RX_DONE; sc->int_tx_done_mask=INT_TXQ0_DONE; }; /* allocate Tx and Rx rings */ for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) { error = rt_alloc_tx_ring(sc, &sc->tx_ring[i], i); if (error != 0) { device_printf(dev, "could not allocate Tx ring #%d\n", i); goto fail; } } sc->tx_ring_mgtqid = 5; for (i = 0; i < sc->rx_ring_count; i++) { error = rt_alloc_rx_ring(sc, &sc->rx_ring[i], i); if (error != 0) { device_printf(dev, "could not allocate Rx ring\n"); goto fail; } } callout_init(&sc->periodic_ch, 0); callout_init_mtx(&sc->tx_watchdog_ch, &sc->lock, 0); ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "could not if_alloc()\n"); error = ENOMEM; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = rt_init; ifp->if_ioctl = rt_ioctl; ifp->if_start = rt_start; #define RT_TX_QLEN 256 IFQ_SET_MAXLEN(&ifp->if_snd, RT_TX_QLEN); ifp->if_snd.ifq_drv_maxlen = RT_TX_QLEN; IFQ_SET_READY(&ifp->if_snd); #ifdef IF_RT_PHY_SUPPORT error = mii_attach(dev, &sc->rt_miibus, ifp, rt_ifmedia_upd, rt_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); error = ENXIO; goto fail; } #else ifmedia_init(&sc->rt_ifmedia, 0, rt_ifmedia_upd, rt_ifmedia_sts); ifmedia_add(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); ifmedia_set(&sc->rt_ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX); #endif /* IF_RT_PHY_SUPPORT */ ether_request_mac(dev, sc->mac_addr); ether_ifattach(ifp, sc->mac_addr); /* * Tell the upper layer(s) we support long frames. 
*/ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable |= IFCAP_VLAN_MTU; ifp->if_capabilities |= IFCAP_RXCSUM|IFCAP_TXCSUM; ifp->if_capenable |= IFCAP_RXCSUM|IFCAP_TXCSUM; /* init task queue */ TASK_INIT(&sc->rx_done_task, 0, rt_rx_done_task, sc); TASK_INIT(&sc->tx_done_task, 0, rt_tx_done_task, sc); TASK_INIT(&sc->periodic_task, 0, rt_periodic_task, sc); sc->rx_process_limit = 100; sc->taskqueue = taskqueue_create("rt_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->taskqueue); taskqueue_start_threads(&sc->taskqueue, 1, PI_NET, "%s taskq", device_get_nameunit(sc->dev)); rt_sysctl_attach(sc); /* set up interrupt */ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, (sc->rt_chipid == RT_CHIPID_RT5350 || sc->rt_chipid == RT_CHIPID_MT7620) ? rt_rt5350_intr : rt_intr, sc, &sc->irqh); if (error != 0) { printf("%s: could not set up interrupt\n", device_get_nameunit(dev)); goto fail; } #ifdef IF_RT_DEBUG device_printf(dev, "debug var at %#08x\n", (u_int)&(sc->debug)); #endif return (0); fail: /* free Tx and Rx rings */ for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) rt_free_tx_ring(sc, &sc->tx_ring[i]); for (i = 0; i < sc->rx_ring_count; i++) rt_free_rx_ring(sc, &sc->rx_ring[i]); mtx_destroy(&sc->lock); if (sc->mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); if (sc->irq != NULL) bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); return (error); } /* * Set media options. */ static int rt_ifmedia_upd(struct ifnet *ifp) { struct rt_softc *sc; #ifdef IF_RT_PHY_SUPPORT struct mii_data *mii; struct mii_softc *miisc; int error = 0; sc = ifp->if_softc; RT_SOFTC_LOCK(sc); mii = device_get_softc(sc->rt_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); RT_SOFTC_UNLOCK(sc); return (error); #else /* !IF_RT_PHY_SUPPORT */ struct ifmedia *ifm; struct ifmedia_entry *ife; sc = ifp->if_softc; ifm = &sc->rt_ifmedia; ife = ifm->ifm_cur; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { device_printf(sc->dev, "AUTO is not supported for multiphy MAC"); return (EINVAL); } /* * Ignore everything */ return (0); #endif /* IF_RT_PHY_SUPPORT */ } /* * Report current media status. 
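rt_attach() above hands the heavy lifting to a taskqueue: TASK_INIT() registers rt_rx_done_task/rt_tx_done_task/rt_periodic_task, taskqueue_create() and taskqueue_start_threads() provide the worker thread, and the interrupt handlers only enqueue work. A stripped-down sketch of that same create/enqueue pattern; all names here are illustrative and not part of the driver.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/priority.h>
#include <sys/taskqueue.h>

struct demo_softc {
    struct taskqueue    *tq;
    struct task          work;
};

static void
demo_work(void *context, int pending)
{
    /* Runs in the taskqueue thread, outside interrupt context. */
    printf("deferred work, pending=%d\n", pending);
}

static void
demo_setup(struct demo_softc *sc)
{
    TASK_INIT(&sc->work, 0, demo_work, sc);
    sc->tq = taskqueue_create("demo_taskq", M_NOWAIT,
        taskqueue_thread_enqueue, &sc->tq);
    taskqueue_start_threads(&sc->tq, 1, PI_NET, "demo taskq");
}

static void
demo_intr(void *arg)
{
    struct demo_softc *sc = arg;

    /* Defer the real work, exactly as rt_intr() does above. */
    taskqueue_enqueue(sc->tq, &sc->work);
}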
*/ static void rt_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { #ifdef IF_RT_PHY_SUPPORT struct rt_softc *sc; struct mii_data *mii; sc = ifp->if_softc; RT_SOFTC_LOCK(sc); mii = device_get_softc(sc->rt_miibus); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; RT_SOFTC_UNLOCK(sc); #else /* !IF_RT_PHY_SUPPORT */ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; #endif /* IF_RT_PHY_SUPPORT */ } static int rt_detach(device_t dev) { struct rt_softc *sc; struct ifnet *ifp; int i; sc = device_get_softc(dev); ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_ANY, "detaching\n"); RT_SOFTC_LOCK(sc); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->periodic_ch); callout_stop(&sc->tx_watchdog_ch); taskqueue_drain(sc->taskqueue, &sc->rx_done_task); taskqueue_drain(sc->taskqueue, &sc->tx_done_task); taskqueue_drain(sc->taskqueue, &sc->periodic_task); /* free Tx and Rx rings */ for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) rt_free_tx_ring(sc, &sc->tx_ring[i]); for (i = 0; i < sc->rx_ring_count; i++) rt_free_rx_ring(sc, &sc->rx_ring[i]); RT_SOFTC_UNLOCK(sc); #ifdef IF_RT_PHY_SUPPORT if (sc->rt_miibus != NULL) device_delete_child(dev, sc->rt_miibus); #endif ether_ifdetach(ifp); if_free(ifp); taskqueue_free(sc->taskqueue); mtx_destroy(&sc->lock); bus_generic_detach(dev); bus_teardown_intr(dev, sc->irq, sc->irqh); bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq); bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem); return (0); } static int rt_shutdown(device_t dev) { struct rt_softc *sc; sc = device_get_softc(dev); RT_DPRINTF(sc, RT_DEBUG_ANY, "shutting down\n"); rt_stop(sc); return (0); } static int rt_suspend(device_t dev) { struct rt_softc *sc; sc = device_get_softc(dev); RT_DPRINTF(sc, RT_DEBUG_ANY, "suspending\n"); rt_stop(sc); return (0); } static int rt_resume(device_t dev) { struct rt_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_ANY, "resuming\n"); if (ifp->if_flags & IFF_UP) rt_init(sc); return (0); } /* * rt_init_locked - Run initialization process having locked mtx. 
*/ static void rt_init_locked(void *priv) { struct rt_softc *sc; struct ifnet *ifp; #ifdef IF_RT_PHY_SUPPORT struct mii_data *mii; #endif int i, ntries; uint32_t tmp; sc = priv; ifp = sc->ifp; #ifdef IF_RT_PHY_SUPPORT mii = device_get_softc(sc->rt_miibus); #endif RT_DPRINTF(sc, RT_DEBUG_ANY, "initializing\n"); RT_SOFTC_ASSERT_LOCKED(sc); /* hardware reset */ //RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET); //rt305x_sysctl_set(SYSCTL_RSTCTRL, SYSCTL_RSTCTRL_FRENG); /* Fwd to CPU (uni|broad|multi)cast and Unknown */ if(sc->rt_chipid == RT_CHIPID_RT3050 || sc->rt_chipid == RT_CHIPID_RT3052) RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG, ( GDM_ICS_EN | /* Enable IP Csum */ GDM_TCS_EN | /* Enable TCP Csum */ GDM_UCS_EN | /* Enable UDP Csum */ GDM_STRPCRC | /* Strip CRC from packet */ GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */ GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */ GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */ GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT /* Forward Other to CPU */ )); /* disable DMA engine */ RT_WRITE(sc, sc->pdma_glo_cfg, 0); RT_WRITE(sc, sc->pdma_rst_idx, 0xffffffff); /* wait while DMA engine is busy */ for (ntries = 0; ntries < 100; ntries++) { tmp = RT_READ(sc, sc->pdma_glo_cfg); if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY))) break; DELAY(1000); } if (ntries == 100) { device_printf(sc->dev, "timeout waiting for DMA engine\n"); goto fail; } /* reset Rx and Tx rings */ tmp = FE_RST_DRX_IDX0 | FE_RST_DTX_IDX3 | FE_RST_DTX_IDX2 | FE_RST_DTX_IDX1 | FE_RST_DTX_IDX0; RT_WRITE(sc, sc->pdma_rst_idx, tmp); /* XXX switch set mac address */ for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) rt_reset_tx_ring(sc, &sc->tx_ring[i]); for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) { /* update TX_BASE_PTRx */ RT_WRITE(sc, sc->tx_base_ptr[i], sc->tx_ring[i].desc_phys_addr); RT_WRITE(sc, sc->tx_max_cnt[i], RT_SOFTC_TX_RING_DESC_COUNT); RT_WRITE(sc, sc->tx_ctx_idx[i], 0); } /* init Rx ring */ for (i = 0; i < sc->rx_ring_count; i++) rt_reset_rx_ring(sc, &sc->rx_ring[i]); /* update RX_BASE_PTRx */ for (i = 0; i < sc->rx_ring_count; i++) { RT_WRITE(sc, sc->rx_base_ptr[i], sc->rx_ring[i].desc_phys_addr); RT_WRITE(sc, sc->rx_max_cnt[i], RT_SOFTC_RX_RING_DATA_COUNT); RT_WRITE(sc, sc->rx_calc_idx[i], RT_SOFTC_RX_RING_DATA_COUNT - 1); } /* write back DDONE, 16byte burst enable RX/TX DMA */ tmp = FE_TX_WB_DDONE | FE_DMA_BT_SIZE16 | FE_RX_DMA_EN | FE_TX_DMA_EN; if (sc->rt_chipid == RT_CHIPID_MT7620) tmp |= (1<<31); RT_WRITE(sc, sc->pdma_glo_cfg, tmp); /* disable interrupts mitigation */ RT_WRITE(sc, sc->delay_int_cfg, 0); /* clear pending interrupts */ RT_WRITE(sc, sc->fe_int_status, 0xffffffff); /* enable interrupts */ if (sc->rt_chipid == RT_CHIPID_RT5350 || sc->rt_chipid == RT_CHIPID_MT7620) tmp = RT5350_INT_TX_COHERENT | RT5350_INT_RX_COHERENT | RT5350_INT_TXQ3_DONE | RT5350_INT_TXQ2_DONE | RT5350_INT_TXQ1_DONE | RT5350_INT_TXQ0_DONE | RT5350_INT_RXQ1_DONE | RT5350_INT_RXQ0_DONE; else tmp = CNT_PPE_AF | CNT_GDM_AF | PSE_P2_FC | GDM_CRC_DROP | PSE_BUF_DROP | GDM_OTHER_DROP | PSE_P1_FC | PSE_P0_FC | PSE_FQ_EMPTY | INT_TX_COHERENT | INT_RX_COHERENT | INT_TXQ3_DONE | INT_TXQ2_DONE | INT_TXQ1_DONE | INT_TXQ0_DONE | INT_RX_DONE; sc->intr_enable_mask = tmp; RT_WRITE(sc, sc->fe_int_enable, tmp); if (rt_txrx_enable(sc) != 0) goto fail; #ifdef IF_RT_PHY_SUPPORT if (mii) mii_mediachg(mii); #endif /* IF_RT_PHY_SUPPORT */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->periodic_round = 0; callout_reset(&sc->periodic_ch, hz / 10, 
rt_periodic, sc); return; fail: rt_stop_locked(sc); } /* * rt_init - lock and initialize device. */ static void rt_init(void *priv) { struct rt_softc *sc; sc = priv; RT_SOFTC_LOCK(sc); rt_init_locked(sc); RT_SOFTC_UNLOCK(sc); } /* * rt_stop_locked - stop TX/RX w/ lock */ static void rt_stop_locked(void *priv) { struct rt_softc *sc; struct ifnet *ifp; sc = priv; ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_ANY, "stopping\n"); RT_SOFTC_ASSERT_LOCKED(sc); sc->tx_timer = 0; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); callout_stop(&sc->periodic_ch); callout_stop(&sc->tx_watchdog_ch); RT_SOFTC_UNLOCK(sc); taskqueue_block(sc->taskqueue); /* * Sometime rt_stop_locked called from isr and we get panic * When found, I fix it */ #ifdef notyet taskqueue_drain(sc->taskqueue, &sc->rx_done_task); taskqueue_drain(sc->taskqueue, &sc->tx_done_task); taskqueue_drain(sc->taskqueue, &sc->periodic_task); #endif RT_SOFTC_LOCK(sc); /* disable interrupts */ RT_WRITE(sc, sc->fe_int_enable, 0); if(sc->rt_chipid == RT_CHIPID_RT5350 || sc->rt_chipid == RT_CHIPID_MT7620) { } else { /* reset adapter */ RT_WRITE(sc, GE_PORT_BASE + FE_RST_GLO, PSE_RESET); RT_WRITE(sc, GDMA1_BASE + GDMA_FWD_CFG, ( GDM_ICS_EN | /* Enable IP Csum */ GDM_TCS_EN | /* Enable TCP Csum */ GDM_UCS_EN | /* Enable UDP Csum */ GDM_STRPCRC | /* Strip CRC from packet */ GDM_DST_PORT_CPU << GDM_UFRC_P_SHIFT | /* Forward UCast to CPU */ GDM_DST_PORT_CPU << GDM_BFRC_P_SHIFT | /* Forward BCast to CPU */ GDM_DST_PORT_CPU << GDM_MFRC_P_SHIFT | /* Forward MCast to CPU */ GDM_DST_PORT_CPU << GDM_OFRC_P_SHIFT /* Forward Other to CPU */ )); } } static void rt_stop(void *priv) { struct rt_softc *sc; sc = priv; RT_SOFTC_LOCK(sc); rt_stop_locked(sc); RT_SOFTC_UNLOCK(sc); } /* * rt_tx_data - transmit packet. */ static int rt_tx_data(struct rt_softc *sc, struct mbuf *m, int qid) { struct ifnet *ifp; struct rt_softc_tx_ring *ring; struct rt_softc_tx_data *data; struct rt_txdesc *desc; struct mbuf *m_d; bus_dma_segment_t dma_seg[RT_SOFTC_MAX_SCATTER]; int error, ndmasegs, ndescs, i; KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT, ("%s: Tx data: invalid qid=%d\n", device_get_nameunit(sc->dev), qid)); RT_SOFTC_TX_RING_ASSERT_LOCKED(&sc->tx_ring[qid]); ifp = sc->ifp; ring = &sc->tx_ring[qid]; desc = &ring->desc[ring->desc_cur]; data = &ring->data[ring->data_cur]; error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m, dma_seg, &ndmasegs, 0); if (error != 0) { /* too many fragments, linearize */ RT_DPRINTF(sc, RT_DEBUG_TX, "could not load mbuf DMA map, trying to linearize " "mbuf: ndmasegs=%d, len=%d, error=%d\n", ndmasegs, m->m_pkthdr.len, error); m_d = m_collapse(m, M_NOWAIT, 16); if (m_d == NULL) { m_freem(m); m = NULL; return (ENOMEM); } m = m_d; sc->tx_defrag_packets++; error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, m, dma_seg, &ndmasegs, 0); if (error != 0) { device_printf(sc->dev, "could not load mbuf DMA map: " "ndmasegs=%d, len=%d, error=%d\n", ndmasegs, m->m_pkthdr.len, error); m_freem(m); return (error); } } if (m->m_pkthdr.len == 0) ndmasegs = 0; /* determine how many Tx descs are required */ ndescs = 1 + ndmasegs / 2; if ((ring->desc_queued + ndescs) > (RT_SOFTC_TX_RING_DESC_COUNT - 2)) { RT_DPRINTF(sc, RT_DEBUG_TX, "there are not enough Tx descs\n"); sc->no_tx_desc_avail++; bus_dmamap_unload(ring->data_dma_tag, data->dma_map); m_freem(m); return (EFBIG); } data->m = m; /* set up Tx descs */ for (i = 0; i < ndmasegs; i += 2) { /* TODO: this needs to be refined as MT7620 for example has * a different word3 layout than 
RT305x and RT5350 (the last * one doesn't use word3 at all). */ /* Set destination */ if (sc->rt_chipid != RT_CHIPID_MT7620) desc->dst = (TXDSCR_DST_PORT_GDMA1); if ((ifp->if_capenable & IFCAP_TXCSUM) != 0) desc->dst |= (TXDSCR_IP_CSUM_GEN|TXDSCR_UDP_CSUM_GEN| TXDSCR_TCP_CSUM_GEN); /* Set queue id */ desc->qn = qid; /* No PPPoE */ desc->pppoe = 0; /* No VLAN */ desc->vid = 0; desc->sdp0 = htole32(dma_seg[i].ds_addr); desc->sdl0 = htole16(dma_seg[i].ds_len | ( ((i+1) == ndmasegs )?RT_TXDESC_SDL0_LASTSEG:0 )); if ((i+1) < ndmasegs) { desc->sdp1 = htole32(dma_seg[i+1].ds_addr); desc->sdl1 = htole16(dma_seg[i+1].ds_len | ( ((i+2) == ndmasegs )?RT_TXDESC_SDL1_LASTSEG:0 )); } else { desc->sdp1 = 0; desc->sdl1 = 0; } if ((i+2) < ndmasegs) { ring->desc_queued++; ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT; } desc = &ring->desc[ring->desc_cur]; } RT_DPRINTF(sc, RT_DEBUG_TX, "sending data: len=%d, ndmasegs=%d, " "DMA ds_len=%d/%d/%d/%d/%d\n", m->m_pkthdr.len, ndmasegs, (int) dma_seg[0].ds_len, (int) dma_seg[1].ds_len, (int) dma_seg[2].ds_len, (int) dma_seg[3].ds_len, (int) dma_seg[4].ds_len); bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_PREWRITE); ring->desc_queued++; ring->desc_cur = (ring->desc_cur + 1) % RT_SOFTC_TX_RING_DESC_COUNT; ring->data_queued++; ring->data_cur = (ring->data_cur + 1) % RT_SOFTC_TX_RING_DATA_COUNT; /* kick Tx */ RT_WRITE(sc, sc->tx_ctx_idx[qid], ring->desc_cur); return (0); } /* * rt_start - start Transmit/Receive */ static void rt_start(struct ifnet *ifp) { struct rt_softc *sc; struct mbuf *m; int qid = 0 /* XXX must check QoS priority */; sc = ifp->if_softc; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; for (;;) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; m->m_pkthdr.rcvif = NULL; RT_SOFTC_TX_RING_LOCK(&sc->tx_ring[qid]); if (sc->tx_ring[qid].data_queued >= RT_SOFTC_TX_RING_DATA_COUNT) { RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]); RT_DPRINTF(sc, RT_DEBUG_TX, "if_start: Tx ring with qid=%d is full\n", qid); m_freem(m); ifp->if_drv_flags |= IFF_DRV_OACTIVE; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); sc->tx_data_queue_full[qid]++; break; } if (rt_tx_data(sc, m, qid) != 0) { RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); break; } RT_SOFTC_TX_RING_UNLOCK(&sc->tx_ring[qid]); sc->tx_timer = RT_TX_WATCHDOG_TIMEOUT; callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc); } } /* * rt_update_promisc - set/clear promiscuous mode. Unused yet, because * filtering done by attached Ethernet switch. */ static void rt_update_promisc(struct ifnet *ifp) { struct rt_softc *sc; sc = ifp->if_softc; printf("%s: %s promiscuous mode\n", device_get_nameunit(sc->dev), (ifp->if_flags & IFF_PROMISC) ? "entering" : "leaving"); } /* * rt_ioctl - ioctl handler. 
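rt_tx_data() above loads the mbuf chain with bus_dmamap_load_mbuf_sg(), collapses it with m_collapse() when it has too many fragments, and then packs two DMA segments into each hardware descriptor (sdp0/sdl0 and sdp1/sdl1). Its ring-space accounting can be shown in isolation; the ring size below is a stand-in, not the driver's actual RT_SOFTC_TX_RING_DESC_COUNT value.

#include <stdio.h>

#define TX_RING_DESC_COUNT  256     /* stand-in for RT_SOFTC_TX_RING_DESC_COUNT */

static int
tx_ring_has_room(int desc_queued, int ndmasegs)
{
    int ndescs;

    ndescs = 1 + ndmasegs / 2;      /* same formula as rt_tx_data() */
    return ((desc_queued + ndescs) <= (TX_RING_DESC_COUNT - 2));
}

int
main(void)
{
    /* e.g. 5 segments need 3 descriptors; the ring is "full" 2 slots early */
    printf("room(250, 5) = %d\n", tx_ring_has_room(250, 5));   /* 1 */
    printf("room(252, 5) = %d\n", tx_ring_has_room(252, 5));   /* 0 */
    return (0);
}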
*/ static int rt_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct rt_softc *sc; struct ifreq *ifr; #ifdef IF_RT_PHY_SUPPORT struct mii_data *mii; #endif /* IF_RT_PHY_SUPPORT */ int error, startall; sc = ifp->if_softc; ifr = (struct ifreq *) data; error = 0; switch (cmd) { case SIOCSIFFLAGS: startall = 0; RT_SOFTC_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->if_flags) & IFF_PROMISC) rt_update_promisc(ifp); } else { rt_init_locked(sc); startall = 1; } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) rt_stop_locked(sc); } sc->if_flags = ifp->if_flags; RT_SOFTC_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: #ifdef IF_RT_PHY_SUPPORT mii = device_get_softc(sc->rt_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); #else error = ifmedia_ioctl(ifp, ifr, &sc->rt_ifmedia, cmd); #endif /* IF_RT_PHY_SUPPORT */ break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } /* * rt_periodic - Handler of PERIODIC interrupt */ static void rt_periodic(void *arg) { struct rt_softc *sc; sc = arg; RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic\n"); taskqueue_enqueue(sc->taskqueue, &sc->periodic_task); } /* * rt_tx_watchdog - Handler of TX Watchdog */ static void rt_tx_watchdog(void *arg) { struct rt_softc *sc; struct ifnet *ifp; sc = arg; ifp = sc->ifp; if (sc->tx_timer == 0) return; if (--sc->tx_timer == 0) { device_printf(sc->dev, "Tx watchdog timeout: resetting\n"); #ifdef notyet /* * XXX: Commented out, because reset break input. */ rt_stop_locked(sc); rt_init_locked(sc); #endif if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); sc->tx_watchdog_timeouts++; } callout_reset(&sc->tx_watchdog_ch, hz, rt_tx_watchdog, sc); } /* * rt_cnt_ppe_af - Handler of PPE Counter Table Almost Full interrupt */ static void rt_cnt_ppe_af(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "PPE Counter Table Almost Full\n"); } /* * rt_cnt_gdm_af - Handler of GDMA 1 & 2 Counter Table Almost Full interrupt */ static void rt_cnt_gdm_af(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "GDMA 1 & 2 Counter Table Almost Full\n"); } /* * rt_pse_p2_fc - Handler of PSE port2 (GDMA 2) flow control interrupt */ static void rt_pse_p2_fc(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "PSE port2 (GDMA 2) flow control asserted.\n"); } /* * rt_gdm_crc_drop - Handler of GDMA 1/2 discard a packet due to CRC error * interrupt */ static void rt_gdm_crc_drop(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "GDMA 1 & 2 discard a packet due to CRC error\n"); } /* * rt_pse_buf_drop - Handler of buffer sharing limitation interrupt */ static void rt_pse_buf_drop(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "PSE discards a packet due to buffer sharing limitation\n"); } /* * rt_gdm_other_drop - Handler of discard on other reason interrupt */ static void rt_gdm_other_drop(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "GDMA 1 & 2 discard a packet due to other reason\n"); } /* * rt_pse_p1_fc - Handler of PSE port1 (GDMA 1) flow control interrupt */ static void rt_pse_p1_fc(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "PSE port1 (GDMA 1) flow control asserted.\n"); } /* * rt_pse_p0_fc - Handler of PSE port0 (CDMA) flow control interrupt */ static void rt_pse_p0_fc(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "PSE port0 (CDMA) flow control asserted.\n"); } /* * rt_pse_fq_empty - Handler of PSE free Q empty threshold reached interrupt */ static void rt_pse_fq_empty(struct rt_softc *sc) { RT_DPRINTF(sc, 
RT_DEBUG_INTR, "PSE free Q empty threshold reached & forced drop " "condition occurred.\n"); } /* * rt_intr - main ISR */ static void rt_intr(void *arg) { struct rt_softc *sc; struct ifnet *ifp; uint32_t status; sc = arg; ifp = sc->ifp; /* acknowledge interrupts */ status = RT_READ(sc, sc->fe_int_status); RT_WRITE(sc, sc->fe_int_status, status); RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status); if (status == 0xffffffff || /* device likely went away */ status == 0) /* not for us */ return; sc->interrupts++; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; if (status & CNT_PPE_AF) rt_cnt_ppe_af(sc); if (status & CNT_GDM_AF) rt_cnt_gdm_af(sc); if (status & PSE_P2_FC) rt_pse_p2_fc(sc); if (status & GDM_CRC_DROP) rt_gdm_crc_drop(sc); if (status & PSE_BUF_DROP) rt_pse_buf_drop(sc); if (status & GDM_OTHER_DROP) rt_gdm_other_drop(sc); if (status & PSE_P1_FC) rt_pse_p1_fc(sc); if (status & PSE_P0_FC) rt_pse_p0_fc(sc); if (status & PSE_FQ_EMPTY) rt_pse_fq_empty(sc); if (status & INT_TX_COHERENT) rt_tx_coherent_intr(sc); if (status & INT_RX_COHERENT) rt_rx_coherent_intr(sc); if (status & RX_DLY_INT) rt_rx_delay_intr(sc); if (status & TX_DLY_INT) rt_tx_delay_intr(sc); if (status & INT_RX_DONE) rt_rx_intr(sc, 0); if (status & INT_TXQ3_DONE) rt_tx_intr(sc, 3); if (status & INT_TXQ2_DONE) rt_tx_intr(sc, 2); if (status & INT_TXQ1_DONE) rt_tx_intr(sc, 1); if (status & INT_TXQ0_DONE) rt_tx_intr(sc, 0); } /* * rt_rt5350_intr - main ISR for Ralink 5350 SoC */ static void rt_rt5350_intr(void *arg) { struct rt_softc *sc; struct ifnet *ifp; uint32_t status; sc = arg; ifp = sc->ifp; /* acknowledge interrupts */ status = RT_READ(sc, sc->fe_int_status); RT_WRITE(sc, sc->fe_int_status, status); RT_DPRINTF(sc, RT_DEBUG_INTR, "interrupt: status=0x%08x\n", status); if (status == 0xffffffff || /* device likely went away */ status == 0) /* not for us */ return; sc->interrupts++; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; if (status & RT5350_INT_TX_COHERENT) rt_tx_coherent_intr(sc); if (status & RT5350_INT_RX_COHERENT) rt_rx_coherent_intr(sc); if (status & RT5350_RX_DLY_INT) rt_rx_delay_intr(sc); if (status & RT5350_TX_DLY_INT) rt_tx_delay_intr(sc); if (status & RT5350_INT_RXQ1_DONE) rt_rx_intr(sc, 1); if (status & RT5350_INT_RXQ0_DONE) rt_rx_intr(sc, 0); if (status & RT5350_INT_TXQ3_DONE) rt_tx_intr(sc, 3); if (status & RT5350_INT_TXQ2_DONE) rt_tx_intr(sc, 2); if (status & RT5350_INT_TXQ1_DONE) rt_tx_intr(sc, 1); if (status & RT5350_INT_TXQ0_DONE) rt_tx_intr(sc, 0); } static void rt_tx_coherent_intr(struct rt_softc *sc) { uint32_t tmp; int i; RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx coherent interrupt\n"); sc->tx_coherent_interrupts++; /* restart DMA engine */ tmp = RT_READ(sc, sc->pdma_glo_cfg); tmp &= ~(FE_TX_WB_DDONE | FE_TX_DMA_EN); RT_WRITE(sc, sc->pdma_glo_cfg, tmp); for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) rt_reset_tx_ring(sc, &sc->tx_ring[i]); for (i = 0; i < RT_SOFTC_TX_RING_COUNT; i++) { RT_WRITE(sc, sc->tx_base_ptr[i], sc->tx_ring[i].desc_phys_addr); RT_WRITE(sc, sc->tx_max_cnt[i], RT_SOFTC_TX_RING_DESC_COUNT); RT_WRITE(sc, sc->tx_ctx_idx[i], 0); } rt_txrx_enable(sc); } /* * rt_rx_coherent_intr */ static void rt_rx_coherent_intr(struct rt_softc *sc) { uint32_t tmp; int i; RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx coherent interrupt\n"); sc->rx_coherent_interrupts++; /* restart DMA engine */ tmp = RT_READ(sc, sc->pdma_glo_cfg); tmp &= ~(FE_RX_DMA_EN); RT_WRITE(sc, sc->pdma_glo_cfg, tmp); /* init Rx ring */ for (i = 0; i < sc->rx_ring_count; i++) rt_reset_rx_ring(sc, &sc->rx_ring[i]); for (i 
= 0; i < sc->rx_ring_count; i++) { RT_WRITE(sc, sc->rx_base_ptr[i], sc->rx_ring[i].desc_phys_addr); RT_WRITE(sc, sc->rx_max_cnt[i], RT_SOFTC_RX_RING_DATA_COUNT); RT_WRITE(sc, sc->rx_calc_idx[i], RT_SOFTC_RX_RING_DATA_COUNT - 1); } rt_txrx_enable(sc); } /* * rt_rx_intr - a packet received */ static void rt_rx_intr(struct rt_softc *sc, int qid) { KASSERT(qid >= 0 && qid < sc->rx_ring_count, ("%s: Rx interrupt: invalid qid=%d\n", device_get_nameunit(sc->dev), qid)); RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx interrupt\n"); sc->rx_interrupts[qid]++; RT_SOFTC_LOCK(sc); if (!(sc->intr_disable_mask & (sc->int_rx_done_mask << qid))) { rt_intr_disable(sc, (sc->int_rx_done_mask << qid)); taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task); } sc->intr_pending_mask |= (sc->int_rx_done_mask << qid); RT_SOFTC_UNLOCK(sc); } static void rt_rx_delay_intr(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "Rx delay interrupt\n"); sc->rx_delay_interrupts++; } static void rt_tx_delay_intr(struct rt_softc *sc) { RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx delay interrupt\n"); sc->tx_delay_interrupts++; } /* * rt_tx_intr - Transsmition of packet done */ static void rt_tx_intr(struct rt_softc *sc, int qid) { KASSERT(qid >= 0 && qid < RT_SOFTC_TX_RING_COUNT, ("%s: Tx interrupt: invalid qid=%d\n", device_get_nameunit(sc->dev), qid)); RT_DPRINTF(sc, RT_DEBUG_INTR, "Tx interrupt: qid=%d\n", qid); sc->tx_interrupts[qid]++; RT_SOFTC_LOCK(sc); if (!(sc->intr_disable_mask & (sc->int_tx_done_mask << qid))) { rt_intr_disable(sc, (sc->int_tx_done_mask << qid)); taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task); } sc->intr_pending_mask |= (sc->int_tx_done_mask << qid); RT_SOFTC_UNLOCK(sc); } /* * rt_rx_done_task - run RX task */ static void rt_rx_done_task(void *context, int pending) { struct rt_softc *sc; struct ifnet *ifp; int again; sc = context; ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task\n"); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; sc->intr_pending_mask &= ~sc->int_rx_done_mask; again = rt_rx_eof(sc, &sc->rx_ring[0], sc->rx_process_limit); RT_SOFTC_LOCK(sc); if ((sc->intr_pending_mask & sc->int_rx_done_mask) || again) { RT_DPRINTF(sc, RT_DEBUG_RX, "Rx done task: scheduling again\n"); taskqueue_enqueue(sc->taskqueue, &sc->rx_done_task); } else { rt_intr_enable(sc, sc->int_rx_done_mask); } RT_SOFTC_UNLOCK(sc); } /* * rt_tx_done_task - check for pending TX task in all queues */ static void rt_tx_done_task(void *context, int pending) { struct rt_softc *sc; struct ifnet *ifp; uint32_t intr_mask; int i; sc = context; ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task\n"); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; for (i = RT_SOFTC_TX_RING_COUNT - 1; i >= 0; i--) { if (sc->intr_pending_mask & (sc->int_tx_done_mask << i)) { sc->intr_pending_mask &= ~(sc->int_tx_done_mask << i); rt_tx_eof(sc, &sc->tx_ring[i]); } } sc->tx_timer = 0; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if(sc->rt_chipid == RT_CHIPID_RT5350 || sc->rt_chipid == RT_CHIPID_MT7620) intr_mask = ( RT5350_INT_TXQ3_DONE | RT5350_INT_TXQ2_DONE | RT5350_INT_TXQ1_DONE | RT5350_INT_TXQ0_DONE); else intr_mask = ( INT_TXQ3_DONE | INT_TXQ2_DONE | INT_TXQ1_DONE | INT_TXQ0_DONE); RT_SOFTC_LOCK(sc); rt_intr_enable(sc, ~sc->intr_pending_mask & (sc->intr_disable_mask & intr_mask)); if (sc->intr_pending_mask & intr_mask) { RT_DPRINTF(sc, RT_DEBUG_TX, "Tx done task: scheduling again\n"); taskqueue_enqueue(sc->taskqueue, &sc->tx_done_task); } RT_SOFTC_UNLOCK(sc); if (!IFQ_IS_EMPTY(&ifp->if_snd)) rt_start(ifp); } /* * rt_periodic_task - run periodic task */ 
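rt_rx_intr()/rt_tx_intr() above mask their interrupt bit, record it in intr_pending_mask and enqueue the corresponding task; rt_rx_done_task()/rt_tx_done_task() re-enable the bit once the queue has been drained. rt_intr_enable()/rt_intr_disable(), defined further down, always program FE_INT_ENABLE with "wanted and not currently masked off". A small self-contained sketch of that mask arithmetic; the bit positions and example values are made up.

#include <stdio.h>
#include <stdint.h>

static uint32_t intr_enable_mask;   /* bits the driver wants at all */
static uint32_t intr_disable_mask;  /* bits temporarily masked off */

static uint32_t
effective_mask(void)
{
    return (intr_enable_mask & ~intr_disable_mask);
}

int
main(void)
{
    uint32_t rx_done = 1u << 16;    /* stand-in for INT_RX_DONE */

    intr_enable_mask = 0x000700ffu; /* arbitrary example value */

    intr_disable_mask |= rx_done;   /* rt_rx_intr(): mask while the task runs */
    printf("FE_INT_ENABLE <- %#010x\n", effective_mask());

    intr_disable_mask &= ~rx_done;  /* rt_rx_done_task(): re-enable */
    printf("FE_INT_ENABLE <- %#010x\n", effective_mask());
    return (0);
}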
static void rt_periodic_task(void *context, int pending) { struct rt_softc *sc; struct ifnet *ifp; sc = context; ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_PERIODIC, "periodic task: round=%lu\n", sc->periodic_round); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) return; RT_SOFTC_LOCK(sc); sc->periodic_round++; rt_update_stats(sc); if ((sc->periodic_round % 10) == 0) { rt_update_raw_counters(sc); rt_watchdog(sc); } RT_SOFTC_UNLOCK(sc); callout_reset(&sc->periodic_ch, hz / 10, rt_periodic, sc); } /* * rt_rx_eof - check for frames that done by DMA engine and pass it into * network subsystem. */ static int rt_rx_eof(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int limit) { struct ifnet *ifp; /* struct rt_softc_rx_ring *ring; */ struct rt_rxdesc *desc; struct rt_softc_rx_data *data; struct mbuf *m, *mnew; bus_dma_segment_t segs[1]; bus_dmamap_t dma_map; uint32_t index, desc_flags; int error, nsegs, len, nframes; ifp = sc->ifp; /* ring = &sc->rx_ring[0]; */ nframes = 0; while (limit != 0) { index = RT_READ(sc, sc->rx_drx_idx[0]); if (ring->cur == index) break; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); #ifdef IF_RT_DEBUG if ( sc->debug & RT_DEBUG_RX ) { printf("\nRX Descriptor[%#08x] dump:\n", (u_int)desc); hexdump(desc, 16, 0, 0); printf("-----------------------------------\n"); } #endif /* XXX Sometime device don`t set DDONE bit */ #ifdef DDONE_FIXED if (!(desc->sdl0 & htole16(RT_RXDESC_SDL0_DDONE))) { RT_DPRINTF(sc, RT_DEBUG_RX, "DDONE=0, try next\n"); break; } #endif len = le16toh(desc->sdl0) & 0x3fff; RT_DPRINTF(sc, RT_DEBUG_RX, "new frame len=%d\n", len); nframes++; mnew = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (mnew == NULL) { sc->rx_mbuf_alloc_errors++; if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto skip; } mnew->m_len = mnew->m_pkthdr.len = MJUMPAGESIZE; error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, ring->spare_dma_map, mnew, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { RT_DPRINTF(sc, RT_DEBUG_RX, "could not load Rx mbuf DMA map: " "error=%d, nsegs=%d\n", error, nsegs); m_freem(mnew); sc->rx_mbuf_dmamap_errors++; if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto skip; } KASSERT(nsegs == 1, ("%s: too many DMA segments", device_get_nameunit(sc->dev))); bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dma_tag, data->dma_map); dma_map = data->dma_map; data->dma_map = ring->spare_dma_map; ring->spare_dma_map = dma_map; bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_PREREAD); m = data->m; desc_flags = desc->src; data->m = mnew; /* Add 2 for proper align of RX IP header */ desc->sdp0 = htole32(segs[0].ds_addr+2); desc->sdl0 = htole32(segs[0].ds_len-2); desc->src = 0; desc->ai = 0; desc->foe = 0; RT_DPRINTF(sc, RT_DEBUG_RX, "Rx frame: rxdesc flags=0x%08x\n", desc_flags); m->m_pkthdr.rcvif = ifp; /* Add 2 to fix data align, after sdp0 = addr + 2 */ m->m_data += 2; m->m_pkthdr.len = m->m_len = len; /* check for crc errors */ if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) { /*check for valid checksum*/ if (desc_flags & (RXDSXR_SRC_IP_CSUM_FAIL| RXDSXR_SRC_L4_CSUM_FAIL)) { RT_DPRINTF(sc, RT_DEBUG_RX, "rxdesc: crc error\n"); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); if (!(ifp->if_flags & IFF_PROMISC)) { m_freem(m); goto skip; } } if ((desc_flags & RXDSXR_SRC_IP_CSUM_FAIL) != 0) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; m->m_pkthdr.csum_flags |= CSUM_IP_VALID; m->m_pkthdr.csum_data = 
0xffff; } m->m_flags &= ~M_HASFCS; } (*ifp->if_input)(ifp, m); skip: desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE); bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ring->cur = (ring->cur + 1) % RT_SOFTC_RX_RING_DATA_COUNT; limit--; } if (ring->cur == 0) RT_WRITE(sc, sc->rx_calc_idx[0], RT_SOFTC_RX_RING_DATA_COUNT - 1); else RT_WRITE(sc, sc->rx_calc_idx[0], ring->cur - 1); RT_DPRINTF(sc, RT_DEBUG_RX, "Rx eof: nframes=%d\n", nframes); sc->rx_packets += nframes; return (limit == 0); } /* * rt_tx_eof - check for successful transmitted frames and mark their * descriptor as free. */ static void rt_tx_eof(struct rt_softc *sc, struct rt_softc_tx_ring *ring) { struct ifnet *ifp; struct rt_txdesc *desc; struct rt_softc_tx_data *data; uint32_t index; int ndescs, nframes; ifp = sc->ifp; ndescs = 0; nframes = 0; for (;;) { index = RT_READ(sc, sc->tx_dtx_idx[ring->qid]); if (ring->desc_next == index) break; ndescs++; desc = &ring->desc[ring->desc_next]; bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (desc->sdl0 & htole16(RT_TXDESC_SDL0_LASTSEG) || desc->sdl1 & htole16(RT_TXDESC_SDL1_LASTSEG)) { nframes++; data = &ring->data[ring->data_next]; bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dma_tag, data->dma_map); m_freem(data->m); data->m = NULL; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); RT_SOFTC_TX_RING_LOCK(ring); ring->data_queued--; ring->data_next = (ring->data_next + 1) % RT_SOFTC_TX_RING_DATA_COUNT; RT_SOFTC_TX_RING_UNLOCK(ring); } desc->sdl0 &= ~htole16(RT_TXDESC_SDL0_DDONE); bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); RT_SOFTC_TX_RING_LOCK(ring); ring->desc_queued--; ring->desc_next = (ring->desc_next + 1) % RT_SOFTC_TX_RING_DESC_COUNT; RT_SOFTC_TX_RING_UNLOCK(ring); } RT_DPRINTF(sc, RT_DEBUG_TX, "Tx eof: qid=%d, ndescs=%d, nframes=%d\n", ring->qid, ndescs, nframes); } /* * rt_update_stats - query statistics counters and update related variables. */ static void rt_update_stats(struct rt_softc *sc) { struct ifnet *ifp; ifp = sc->ifp; RT_DPRINTF(sc, RT_DEBUG_STATS, "update statistic: \n"); /* XXX do update stats here */ } /* * rt_watchdog - reinit device on watchdog event. */ static void rt_watchdog(struct rt_softc *sc) { uint32_t tmp; #ifdef notyet int ntries; #endif if(sc->rt_chipid != RT_CHIPID_RT5350 && sc->rt_chipid != RT_CHIPID_MT7620) { tmp = RT_READ(sc, PSE_BASE + CDMA_OQ_STA); RT_DPRINTF(sc, RT_DEBUG_WATCHDOG, "watchdog: PSE_IQ_STA=0x%08x\n", tmp); } /* XXX: do not reset */ #ifdef notyet if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) != 0) { sc->tx_queue_not_empty[0]++; for (ntries = 0; ntries < 10; ntries++) { tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA); if (((tmp >> P0_IQ_PCNT_SHIFT) & 0xff) == 0) break; DELAY(1); } } if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) != 0) { sc->tx_queue_not_empty[1]++; for (ntries = 0; ntries < 10; ntries++) { tmp = RT_READ(sc, PSE_BASE + PSE_IQ_STA); if (((tmp >> P1_IQ_PCNT_SHIFT) & 0xff) == 0) break; DELAY(1); } } #endif } /* * rt_update_raw_counters - update counters. 
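rt_rx_eof() above consumes descriptors until its software cursor catches up with the hardware DRX index, then publishes the index of the last consumed slot back through RX_CALC_IDX, wrapping to the end of the ring when the cursor sits at slot 0. The index arithmetic in isolation; the ring size is a stand-in for RT_SOFTC_RX_RING_DATA_COUNT.

#include <stdio.h>

#define RX_RING_COUNT   256

/* Value written to RX_CALC_IDX after consuming up to (not including) cur. */
static int
rx_calc_idx(int cur)
{
    return (cur == 0 ? RX_RING_COUNT - 1 : cur - 1);
}

int
main(void)
{
    int cur = 0, drx = 3, consumed = 0;

    while (cur != drx) {            /* same loop condition as rt_rx_eof() */
        cur = (cur + 1) % RX_RING_COUNT;
        consumed++;
    }
    printf("consumed %d frames, RX_CALC_IDX <- %d\n",
        consumed, rx_calc_idx(cur));
    return (0);
}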
*/ static void rt_update_raw_counters(struct rt_softc *sc) { sc->tx_bytes += RT_READ(sc, CNTR_BASE + GDMA_TX_GBCNT0); sc->tx_packets += RT_READ(sc, CNTR_BASE + GDMA_TX_GPCNT0); sc->tx_skip += RT_READ(sc, CNTR_BASE + GDMA_TX_SKIPCNT0); sc->tx_collision+= RT_READ(sc, CNTR_BASE + GDMA_TX_COLCNT0); sc->rx_bytes += RT_READ(sc, CNTR_BASE + GDMA_RX_GBCNT0); sc->rx_packets += RT_READ(sc, CNTR_BASE + GDMA_RX_GPCNT0); sc->rx_crc_err += RT_READ(sc, CNTR_BASE + GDMA_RX_CSUM_ERCNT0); sc->rx_short_err+= RT_READ(sc, CNTR_BASE + GDMA_RX_SHORT_ERCNT0); sc->rx_long_err += RT_READ(sc, CNTR_BASE + GDMA_RX_LONG_ERCNT0); sc->rx_phy_err += RT_READ(sc, CNTR_BASE + GDMA_RX_FERCNT0); sc->rx_fifo_overflows+= RT_READ(sc, CNTR_BASE + GDMA_RX_OERCNT0); } static void rt_intr_enable(struct rt_softc *sc, uint32_t intr_mask) { uint32_t tmp; sc->intr_disable_mask &= ~intr_mask; tmp = sc->intr_enable_mask & ~sc->intr_disable_mask; RT_WRITE(sc, sc->fe_int_enable, tmp); } static void rt_intr_disable(struct rt_softc *sc, uint32_t intr_mask) { uint32_t tmp; sc->intr_disable_mask |= intr_mask; tmp = sc->intr_enable_mask & ~sc->intr_disable_mask; RT_WRITE(sc, sc->fe_int_enable, tmp); } /* * rt_txrx_enable - enable TX/RX DMA */ static int rt_txrx_enable(struct rt_softc *sc) { struct ifnet *ifp; uint32_t tmp; int ntries; ifp = sc->ifp; /* enable Tx/Rx DMA engine */ for (ntries = 0; ntries < 200; ntries++) { tmp = RT_READ(sc, sc->pdma_glo_cfg); if (!(tmp & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY))) break; DELAY(1000); } if (ntries == 200) { device_printf(sc->dev, "timeout waiting for DMA engine\n"); return (-1); } DELAY(50); tmp |= FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN; RT_WRITE(sc, sc->pdma_glo_cfg, tmp); /* XXX set Rx filter */ return (0); } /* * rt_alloc_rx_ring - allocate RX DMA ring buffer */ static int rt_alloc_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring, int qid) { struct rt_rxdesc *desc; struct rt_softc_rx_data *data; bus_dma_segment_t segs[1]; int i, nsegs, error; error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 1, RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), 0, NULL, NULL, &ring->desc_dma_tag); if (error != 0) { device_printf(sc->dev, "could not create Rx desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map); if (error != 0) { device_printf(sc->dev, "could not allocate Rx desc DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map, ring->desc, RT_SOFTC_RX_RING_DATA_COUNT * sizeof(struct rt_rxdesc), rt_dma_map_addr, &ring->desc_phys_addr, 0); if (error != 0) { device_printf(sc->dev, "could not load Rx desc DMA map\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, 1, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dma_tag); if (error != 0) { device_printf(sc->dev, "could not create Rx data DMA tag\n"); goto fail; } for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) { desc = &ring->desc[i]; data = &ring->data[i]; error = bus_dmamap_create(ring->data_dma_tag, 0, &data->dma_map); if (error != 0) { device_printf(sc->dev, "could not create Rx data DMA " "map\n"); goto fail; } data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (data->m == NULL) { device_printf(sc->dev, "could not allocate Rx mbuf\n"); error = ENOMEM; goto fail; } 
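rt_alloc_rx_ring() wires down the descriptor array with the usual busdma sequence: bus_dma_tag_create(), bus_dmamem_alloc(), then bus_dmamap_load() with rt_dma_map_addr() as the callback that hands the bus address back to the caller. A condensed sketch of that callback pattern; the function and variable names here are illustrative only.

#include <sys/param.h>
#include <sys/systm.h>
#include <machine/bus.h>

static void
demo_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    bus_addr_t *addrp = arg;

    if (error != 0)
        return;
    KASSERT(nseg == 1, ("unexpected number of DMA segments: %d", nseg));
    *addrp = segs[0].ds_addr;   /* hand the bus address back to the caller */
}

/*
 * Typical call site, assuming tag, map and mem were created as above:
 *
 *  bus_addr_t paddr;
 *  error = bus_dmamap_load(tag, map, mem, size, demo_dmamap_cb, &paddr, 0);
 */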
data->m->m_len = data->m->m_pkthdr.len = MJUMPAGESIZE; error = bus_dmamap_load_mbuf_sg(ring->data_dma_tag, data->dma_map, data->m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->dev, "could not load Rx mbuf DMA map\n"); goto fail; } KASSERT(nsegs == 1, ("%s: too many DMA segments", device_get_nameunit(sc->dev))); /* Add 2 for proper align of RX IP header */ desc->sdp0 = htole32(segs[0].ds_addr+2); desc->sdl0 = htole32(segs[0].ds_len-2); } error = bus_dmamap_create(ring->data_dma_tag, 0, &ring->spare_dma_map); if (error != 0) { device_printf(sc->dev, "could not create Rx spare DMA map\n"); goto fail; } bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ring->qid = qid; return (0); fail: rt_free_rx_ring(sc, ring); return (error); } /* * rt_reset_rx_ring - reset RX ring buffer */ static void rt_reset_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring) { struct rt_rxdesc *desc; int i; for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) { desc = &ring->desc[i]; desc->sdl0 &= ~htole16(RT_RXDESC_SDL0_DDONE); } bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); ring->cur = 0; } /* * rt_free_rx_ring - free memory used by RX ring buffer */ static void rt_free_rx_ring(struct rt_softc *sc, struct rt_softc_rx_ring *ring) { struct rt_softc_rx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map); bus_dmamem_free(ring->desc_dma_tag, ring->desc, ring->desc_dma_map); } if (ring->desc_dma_tag != NULL) bus_dma_tag_destroy(ring->desc_dma_tag); for (i = 0; i < RT_SOFTC_RX_RING_DATA_COUNT; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dma_tag, data->dma_map); m_freem(data->m); } if (data->dma_map != NULL) bus_dmamap_destroy(ring->data_dma_tag, data->dma_map); } if (ring->spare_dma_map != NULL) bus_dmamap_destroy(ring->data_dma_tag, ring->spare_dma_map); if (ring->data_dma_tag != NULL) bus_dma_tag_destroy(ring->data_dma_tag); } /* * rt_alloc_tx_ring - allocate TX ring buffer */ static int rt_alloc_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring, int qid) { struct rt_softc_tx_data *data; int error, i; mtx_init(&ring->lock, device_get_nameunit(sc->dev), NULL, MTX_DEF); error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 1, RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc), 0, NULL, NULL, &ring->desc_dma_tag); if (error != 0) { device_printf(sc->dev, "could not create Tx desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dma_tag, (void **) &ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_dma_map); if (error != 0) { device_printf(sc->dev, "could not allocate Tx desc DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dma_tag, ring->desc_dma_map, ring->desc, (RT_SOFTC_TX_RING_DESC_COUNT * sizeof(struct rt_txdesc)), rt_dma_map_addr, &ring->desc_phys_addr, 0); if (error != 0) { device_printf(sc->dev, "could not load Tx desc DMA map\n"); goto fail; } ring->desc_queued = 0; ring->desc_cur = 0; ring->desc_next = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 1, 
RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, 0, NULL, NULL, &ring->seg0_dma_tag); if (error != 0) { device_printf(sc->dev, "could not create Tx seg0 DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->seg0_dma_tag, (void **) &ring->seg0, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->seg0_dma_map); if (error != 0) { device_printf(sc->dev, "could not allocate Tx seg0 DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->seg0_dma_tag, ring->seg0_dma_map, ring->seg0, RT_SOFTC_TX_RING_DATA_COUNT * RT_TX_DATA_SEG0_SIZE, rt_dma_map_addr, &ring->seg0_phys_addr, 0); if (error != 0) { device_printf(sc->dev, "could not load Tx seg0 DMA map\n"); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->dev), PAGE_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MJUMPAGESIZE, RT_SOFTC_MAX_SCATTER, MJUMPAGESIZE, 0, NULL, NULL, &ring->data_dma_tag); if (error != 0) { device_printf(sc->dev, "could not create Tx data DMA tag\n"); goto fail; } for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) { data = &ring->data[i]; error = bus_dmamap_create(ring->data_dma_tag, 0, &data->dma_map); if (error != 0) { device_printf(sc->dev, "could not create Tx data DMA " "map\n"); goto fail; } } ring->data_queued = 0; ring->data_cur = 0; ring->data_next = 0; ring->qid = qid; return (0); fail: rt_free_tx_ring(sc, ring); return (error); } /* * rt_reset_tx_ring - reset TX ring buffer to empty state */ static void rt_reset_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring) { struct rt_softc_tx_data *data; struct rt_txdesc *desc; int i; for (i = 0; i < RT_SOFTC_TX_RING_DESC_COUNT; i++) { desc = &ring->desc[i]; desc->sdl0 = 0; desc->sdl1 = 0; } ring->desc_queued = 0; ring->desc_cur = 0; ring->desc_next = 0; bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map, BUS_DMASYNC_PREWRITE); for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dma_tag, data->dma_map); m_freem(data->m); data->m = NULL; } } ring->data_queued = 0; ring->data_cur = 0; ring->data_next = 0; } /* * rt_free_tx_ring - free RX ring buffer */ static void rt_free_tx_ring(struct rt_softc *sc, struct rt_softc_tx_ring *ring) { struct rt_softc_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dma_tag, ring->desc_dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dma_tag, ring->desc_dma_map); bus_dmamem_free(ring->desc_dma_tag, ring->desc, ring->desc_dma_map); } if (ring->desc_dma_tag != NULL) bus_dma_tag_destroy(ring->desc_dma_tag); if (ring->seg0 != NULL) { bus_dmamap_sync(ring->seg0_dma_tag, ring->seg0_dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->seg0_dma_tag, ring->seg0_dma_map); bus_dmamem_free(ring->seg0_dma_tag, ring->seg0, ring->seg0_dma_map); } if (ring->seg0_dma_tag != NULL) bus_dma_tag_destroy(ring->seg0_dma_tag); for (i = 0; i < RT_SOFTC_TX_RING_DATA_COUNT; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dma_tag, data->dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dma_tag, data->dma_map); m_freem(data->m); } if (data->dma_map != NULL) bus_dmamap_destroy(ring->data_dma_tag, data->dma_map); } if (ring->data_dma_tag != NULL) bus_dma_tag_destroy(ring->data_dma_tag); mtx_destroy(&ring->lock); } /* * rt_dma_map_addr - get address of busdma segment */ static void rt_dma_map_addr(void *arg, bus_dma_segment_t 
*segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *) arg = segs[0].ds_addr; } /* * rt_sysctl_attach - attach sysctl nodes for NIC counters. */ static void rt_sysctl_attach(struct rt_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid *stats; ctx = device_get_sysctl_ctx(sc->dev); tree = device_get_sysctl_tree(sc->dev); /* statistic counters */ stats = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats", CTLFLAG_RD, 0, "statistic"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "interrupts", CTLFLAG_RD, &sc->interrupts, "all interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_coherent_interrupts", CTLFLAG_RD, &sc->tx_coherent_interrupts, "Tx coherent interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_coherent_interrupts", CTLFLAG_RD, &sc->rx_coherent_interrupts, "Rx coherent interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_interrupts", CTLFLAG_RD, &sc->rx_interrupts[0], "Rx interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_delay_interrupts", CTLFLAG_RD, &sc->rx_delay_interrupts, "Rx delay interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ3_interrupts", CTLFLAG_RD, &sc->tx_interrupts[3], "Tx AC3 interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ2_interrupts", CTLFLAG_RD, &sc->tx_interrupts[2], "Tx AC2 interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ1_interrupts", CTLFLAG_RD, &sc->tx_interrupts[1], "Tx AC1 interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ0_interrupts", CTLFLAG_RD, &sc->tx_interrupts[0], "Tx AC0 interrupts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_delay_interrupts", CTLFLAG_RD, &sc->tx_delay_interrupts, "Tx delay interrupts"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ3_desc_queued", CTLFLAG_RD, &sc->tx_ring[3].desc_queued, 0, "Tx AC3 descriptors queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ3_data_queued", CTLFLAG_RD, &sc->tx_ring[3].data_queued, 0, "Tx AC3 data queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ2_desc_queued", CTLFLAG_RD, &sc->tx_ring[2].desc_queued, 0, "Tx AC2 descriptors queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ2_data_queued", CTLFLAG_RD, &sc->tx_ring[2].data_queued, 0, "Tx AC2 data queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ1_desc_queued", CTLFLAG_RD, &sc->tx_ring[1].desc_queued, 0, "Tx AC1 descriptors queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ1_data_queued", CTLFLAG_RD, &sc->tx_ring[1].data_queued, 0, "Tx AC1 data queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ0_desc_queued", CTLFLAG_RD, &sc->tx_ring[0].desc_queued, 0, "Tx AC0 descriptors queued"); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ0_data_queued", CTLFLAG_RD, &sc->tx_ring[0].data_queued, 0, "Tx AC0 data queued"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ3_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[3], "Tx AC3 data queue full"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ2_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[2], "Tx AC2 data queue full"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ1_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[1], "Tx AC1 data queue full"); SYSCTL_ADD_ULONG(ctx, 
SYSCTL_CHILDREN(stats), OID_AUTO, "TXQ0_data_queue_full", CTLFLAG_RD, &sc->tx_data_queue_full[0], "Tx AC0 data queue full"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_watchdog_timeouts", CTLFLAG_RD, &sc->tx_watchdog_timeouts, "Tx watchdog timeouts"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_defrag_packets", CTLFLAG_RD, &sc->tx_defrag_packets, "Tx defragmented packets"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "no_tx_desc_avail", CTLFLAG_RD, &sc->no_tx_desc_avail, "no Tx descriptors available"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_mbuf_alloc_errors", CTLFLAG_RD, &sc->rx_mbuf_alloc_errors, "Rx mbuf allocation errors"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_mbuf_dmamap_errors", CTLFLAG_RD, &sc->rx_mbuf_dmamap_errors, "Rx mbuf DMA mapping errors"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_queue_0_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[0], "Tx queue 0 not empty"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_queue_1_not_empty", CTLFLAG_RD, &sc->tx_queue_not_empty[1], "Tx queue 1 not empty"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_packets", CTLFLAG_RD, &sc->rx_packets, "Rx packets"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_crc_errors", CTLFLAG_RD, &sc->rx_crc_err, "Rx CRC errors"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_phy_errors", CTLFLAG_RD, &sc->rx_phy_err, "Rx PHY errors"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_dup_packets", CTLFLAG_RD, &sc->rx_dup_packets, "Rx duplicate packets"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_fifo_overflows", CTLFLAG_RD, &sc->rx_fifo_overflows, "Rx FIFO overflows"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_bytes", CTLFLAG_RD, &sc->rx_bytes, "Rx bytes"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_long_err", CTLFLAG_RD, &sc->rx_long_err, "Rx too long frame errors"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "rx_short_err", CTLFLAG_RD, &sc->rx_short_err, "Rx too short frame errors"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_bytes", CTLFLAG_RD, &sc->tx_bytes, "Tx bytes"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_packets", CTLFLAG_RD, &sc->tx_packets, "Tx packets"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_skip", CTLFLAG_RD, &sc->tx_skip, "Tx skip count for GDMA ports"); SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(stats), OID_AUTO, "tx_collision", CTLFLAG_RD, &sc->tx_collision, "Tx collision count for GDMA ports"); } #ifdef IF_RT_PHY_SUPPORT static int rt_miibus_readreg(device_t dev, int phy, int reg) { struct rt_softc *sc = device_get_softc(dev); /* * PSEUDO_PHYAD is a special value for indicate switch attached. * No one PHY use PSEUDO_PHYAD (0x1e) address. 
*/ if (phy == 31) { /* Fake PHY ID for bfeswitch attach */ switch (reg) { case MII_BMSR: return (BMSR_EXTSTAT|BMSR_MEDIAMASK); case MII_PHYIDR1: return (0x40); /* As result of faking */ case MII_PHYIDR2: /* PHY will detect as */ return (0x6250); /* bfeswitch */ } } /* Wait prev command done if any */ while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO); RT_WRITE(sc, MDIO_ACCESS, MDIO_CMD_ONGO || ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) || ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK)); while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO); return (RT_READ(sc, MDIO_ACCESS) & MDIO_PHY_DATA_MASK); } static int rt_miibus_writereg(device_t dev, int phy, int reg, int val) { struct rt_softc *sc = device_get_softc(dev); /* Wait prev command done if any */ while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO); RT_WRITE(sc, MDIO_ACCESS, MDIO_CMD_ONGO || MDIO_CMD_WR || ((phy << MDIO_PHY_ADDR_SHIFT) & MDIO_PHY_ADDR_MASK) || ((reg << MDIO_PHYREG_ADDR_SHIFT) & MDIO_PHYREG_ADDR_MASK) || (val & MDIO_PHY_DATA_MASK)); while (RT_READ(sc, MDIO_ACCESS) & MDIO_CMD_ONGO); return (0); } void rt_miibus_statchg(device_t dev) { struct rt_softc *sc = device_get_softc(dev); struct mii_data *mii; mii = device_get_softc(sc->rt_miibus); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: /* XXX check link here */ sc->flags |= 1; break; default: break; } } } #endif /* IF_RT_PHY_SUPPORT */ static device_method_t rt_dev_methods[] = { DEVMETHOD(device_probe, rt_probe), DEVMETHOD(device_attach, rt_attach), DEVMETHOD(device_detach, rt_detach), DEVMETHOD(device_shutdown, rt_shutdown), DEVMETHOD(device_suspend, rt_suspend), DEVMETHOD(device_resume, rt_resume), #ifdef IF_RT_PHY_SUPPORT /* MII interface */ DEVMETHOD(miibus_readreg, rt_miibus_readreg), DEVMETHOD(miibus_writereg, rt_miibus_writereg), DEVMETHOD(miibus_statchg, rt_miibus_statchg), #endif DEVMETHOD_END }; static driver_t rt_driver = { "rt", rt_dev_methods, sizeof(struct rt_softc) }; static devclass_t rt_dev_class; DRIVER_MODULE(rt, nexus, rt_driver, rt_dev_class, 0, 0); #ifdef FDT DRIVER_MODULE(rt, simplebus, rt_driver, rt_dev_class, 0, 0); #endif MODULE_DEPEND(rt, ether, 1, 1, 1); MODULE_DEPEND(rt, miibus, 1, 1, 1); Index: head/sys/dev/siba/siba_pcib.c =================================================================== --- head/sys/dev/siba/siba_pcib.c (revision 295879) +++ head/sys/dev/siba/siba_pcib.c (revision 295880) @@ -1,431 +1,430 @@ /*- * Copyright (c) 2007 Bruce M. Simpson. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Child driver for PCI host bridge core. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include "pcib_if.h" #include #include #include #include #ifndef MIPS_MEM_RID #define MIPS_MEM_RID 0x20 #endif #define SBPCI_SLOTMAX 15 #define SBPCI_READ_4(sc, reg) \ bus_space_write_4((sc)->sc_bt, (sc)->sc_bh, (reg)) #define SBPCI_WRITE_4(sc, reg, val) \ bus_space_write_4((sc)->sc_bt, (sc)->sc_bh, (reg), (val)) /* * PCI Configuration space window (64MB). * contained in SBTOPCI1 window. */ #define SBPCI_CFGBASE 0x0C000000 #define SBPCI_CFGSIZE 0x01000000 /* * TODO: implement type 1 config space access (ie beyond bus 0) * we may need to tweak the windows to do this * TODO: interrupt routing. * TODO: fully implement bus allocation. * TODO: implement resource managers. * TODO: code cleanup. */ static int siba_pcib_activate_resource(device_t, device_t, int, int, struct resource *); static struct resource * siba_pcib_alloc_resource(device_t, device_t, int, int *, rman_res_t , rman_res_t, rman_res_t, u_int); static int siba_pcib_attach(device_t); static int siba_pcib_deactivate_resource(device_t, device_t, int, int, struct resource *); static int siba_pcib_maxslots(device_t); static int siba_pcib_probe(device_t); static u_int32_t siba_pcib_read_config(device_t, u_int, u_int, u_int, u_int, int); static int siba_pcib_read_ivar(device_t, device_t, int, uintptr_t *); static int siba_pcib_release_resource(device_t, device_t, int, int, struct resource *); static int siba_pcib_route_interrupt(device_t, device_t, int); static int siba_pcib_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int siba_pcib_teardown_intr(device_t, device_t, struct resource *, void *); static void siba_pcib_write_config(device_t, u_int, u_int, u_int, u_int, u_int32_t, int); static int siba_pcib_write_ivar(device_t, device_t, int, uintptr_t); static int siba_pcib_probe(device_t dev) { /* TODO: support earlier cores. */ /* TODO: Check if PCI host mode is enabled in the SPROM. */ if (siba_get_vendor(dev) == SIBA_VID_BROADCOM && siba_get_device(dev) == SIBA_DEVID_PCI) { device_set_desc(dev, "SiBa-to-PCI host bridge"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } //extern int rman_debug; static int siba_pcib_attach(device_t dev) { struct siba_pcib_softc *sc = device_get_softc(dev); int rid; /* * Allocate the resources which the parent bus has already * determined for us. 
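siba_pcib_read_config() below composes a type 0 configuration address by shifting a one-hot slot bit into the upper half of the tag, placing the function number at bits 10:8, and OR-ing in the 32-bit-aligned register offset on top of SBPCI_CFGBASE. A worked example of that arithmetic; treating the slot bit as an IDSEL-style select is an interpretation, while the constant and the formula follow the code.

#include <stdio.h>
#include <stdint.h>

#define SBPCI_CFGBASE   0x0C000000u     /* as defined in this file */

static uint32_t
cfg_addr(unsigned slot, unsigned func, unsigned reg)
{
    uint32_t cfgtag = ((1u << slot) << 16) | (func << 8);

    return (SBPCI_CFGBASE | cfgtag | (reg & ~3u));
}

int
main(void)
{
    /* slot 1, function 0, register 0x08 (class code dword) */
    printf("cfgaddr = %#010x\n", cfg_addr(1, 0, 0x08));    /* 0x0c020008 */
    return (0);
}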
*/ rid = MIPS_MEM_RID; /* XXX */ //rman_debug = 1; sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->sc_mem == NULL) { device_printf(dev, "unable to allocate memory\n"); return (ENXIO); } sc->sc_bt = rman_get_bustag(sc->sc_mem); sc->sc_bh = rman_get_bushandle(sc->sc_mem); device_printf(dev, "bridge registers addr 0x%08x vaddr %p\n", (uint32_t)sc->sc_bh, rman_get_virtual(sc->sc_mem)); SBPCI_WRITE_4(sc, 0x0000, 0x05); SBPCI_WRITE_4(sc, 0x0000, 0x0D); DELAY(150); SBPCI_WRITE_4(sc, 0x0000, 0x0F); SBPCI_WRITE_4(sc, 0x0010, 0x01); DELAY(1); bus_space_handle_t sc_cfg_hand; int error; /* * XXX this doesn't actually do anything on mips; however... should * we not be mapping to KSEG1? we need to wire down the range. */ error = bus_space_map(sc->sc_bt, SBPCI_CFGBASE, SBPCI_CFGSIZE, 0, &sc_cfg_hand); if (error) { device_printf(dev, "cannot map PCI configuration space\n"); return (ENXIO); } device_printf(dev, "mapped pci config space at 0x%08x\n", (uint32_t)sc_cfg_hand); /* * Setup configuration, io, and dma space windows. * XXX we need to be able to do type 1 too. * we probably don't need to be able to do i/o cycles. */ /* I/O read/write window */ SBPCI_WRITE_4(sc, SIBA_PCICORE_SBTOPCI0, 1); /* type 0 configuration only */ SBPCI_WRITE_4(sc, SIBA_PCICORE_SBTOPCI1, 2); SBPCI_WRITE_4(sc, SIBA_PCICORE_SBTOPCI2, 1 << 30); /* memory only */ DELAY(500); /* XXX resource managers */ device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } /* bus functions */ static int siba_pcib_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct siba_pcib_softc *sc; sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: *result = sc->sc_bus; return (0); } return (ENOENT); } static int siba_pcib_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { struct siba_pcib_softc *sc; sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_bus = value; return (0); } return (ENOENT); } static int siba_pcib_setup_intr(device_t dev, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { return (BUS_SETUP_INTR(device_get_parent(dev), child, ires, flags, filt, intr, arg, cookiep)); } static int siba_pcib_teardown_intr(device_t dev, device_t child, struct resource *vec, void *cookie) { return (BUS_TEARDOWN_INTR(device_get_parent(dev), child, vec, cookie)); } static struct resource * siba_pcib_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { #if 1 //device_printf(bus, "%s: not yet implemented\n", __func__); return (NULL); #else bus_space_tag_t tag; struct siba_pcib_softc *sc = device_get_softc(bus); struct rman *rmanp; struct resource *rv; tag = 0; rv = NULL; switch (type) { case SYS_RES_IRQ: rmanp = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rmanp = &sc->sc_mem_rman; tag = &sc->sc_pci_memt; break; default: return (rv); } rv = rman_reserve_resource(rmanp, start, end, count, flags, child); if (rv != NULL) { rman_set_rid(rv, *rid); if (type == SYS_RES_MEMORY) { #if 0 rman_set_bustag(rv, tag); rman_set_bushandle(rv, rman_get_bushandle(sc->sc_mem) + (rman_get_start(rv) - IXP425_PCI_MEM_HWBASE)); #endif } } return (rv); #endif } static int siba_pcib_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { device_printf(bus, "%s: not yet implemented\n", __func__); device_printf(bus, "%s called activate_resource\n", device_get_nameunit(child)); return 
(ENXIO); } static int siba_pcib_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { device_printf(bus, "%s: not yet implemented\n", __func__); device_printf(bus, "%s called deactivate_resource\n", device_get_nameunit(child)); return (ENXIO); } static int siba_pcib_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { device_printf(bus, "%s: not yet implemented\n", __func__); device_printf(bus, "%s called release_resource\n", device_get_nameunit(child)); return (ENXIO); } /* pcib interface functions */ static int siba_pcib_maxslots(device_t dev) { return (SBPCI_SLOTMAX); } /* * This needs hacking and fixery. It is currently broke and hangs. * Debugging it will be tricky; there seems to be no way to enable * a target abort which would cause a nice target abort. * Look at linux again? */ static u_int32_t siba_pcib_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct siba_pcib_softc *sc = device_get_softc(dev); bus_addr_t cfgaddr; uint32_t cfgtag; uint32_t val; /* XXX anything higher than slot 2 currently seems to hang the bus. * not sure why this is; look at linux again */ if (bus != 0 || slot > 2) { printf("%s: bad b/s/f %d/%d/%d\n", __func__, bus, slot, func); return 0xffffffff; // XXX } device_printf(dev, "requested %d bytes from b/s/f %d/%d/%d reg %d\n", bytes, bus, slot, func, reg); /* * The configuration tag on the broadcom is weird. */ SBPCI_WRITE_4(sc, SIBA_PCICORE_SBTOPCI1, 2); /* XXX again??? */ cfgtag = ((1 << slot) << 16) | (func << 8); cfgaddr = SBPCI_CFGBASE | cfgtag | (reg & ~3); /* cfg space i/o is always 32 bits on this bridge */ printf("reading 4 bytes from %08x\n", cfgaddr); val = *(volatile uint32_t *)MIPS_PHYS_TO_KSEG1(cfgaddr); /* XXX MIPS */ val = bswap32(val); /* XXX seems to be needed for now */ /* swizzle and return what was asked for */ val &= 0xffffffff >> ((4 - bytes) * 8); return (val); } static void siba_pcib_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, u_int32_t val, int bytes) { /* write to pci configuration space */ //device_printf(dev, "%s: not yet implemented\n", __func__); } static int siba_pcib_route_interrupt(device_t bridge, device_t device, int pin) { //device_printf(bridge, "%s: not yet implemented\n", __func__); return (-1); } static device_method_t siba_pcib_methods[] = { /* Device interface */ DEVMETHOD(device_attach, siba_pcib_attach), DEVMETHOD(device_probe, siba_pcib_probe), /* Bus interface */ DEVMETHOD(bus_read_ivar, siba_pcib_read_ivar), DEVMETHOD(bus_write_ivar, siba_pcib_write_ivar), DEVMETHOD(bus_setup_intr, siba_pcib_setup_intr), DEVMETHOD(bus_teardown_intr, siba_pcib_teardown_intr), DEVMETHOD(bus_alloc_resource, siba_pcib_alloc_resource), DEVMETHOD(bus_activate_resource, siba_pcib_activate_resource), DEVMETHOD(bus_deactivate_resource, siba_pcib_deactivate_resource), DEVMETHOD(bus_release_resource, siba_pcib_release_resource), /* pcib interface */ DEVMETHOD(pcib_maxslots, siba_pcib_maxslots), DEVMETHOD(pcib_read_config, siba_pcib_read_config), DEVMETHOD(pcib_write_config, siba_pcib_write_config), DEVMETHOD(pcib_route_interrupt, siba_pcib_route_interrupt), DEVMETHOD_END }; static driver_t siba_pcib_driver = { "pcib", siba_pcib_methods, sizeof(struct siba_softc), }; static devclass_t siba_pcib_devclass; DRIVER_MODULE(siba_pcib, siba, siba_pcib_driver, siba_pcib_devclass, 0, 0); Index: head/sys/dev/vt/hw/efifb/efifb.c =================================================================== --- 
head/sys/dev/vt/hw/efifb/efifb.c (revision 295879) +++ head/sys/dev/vt/hw/efifb/efifb.c (revision 295880) @@ -1,139 +1,138 @@ /*- * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by Aleksandr Rybalko under sponsorship from the * FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include "opt_platform.h" #include #include #include #include #include -#include #include #include #include static vd_init_t vt_efifb_init; static vd_probe_t vt_efifb_probe; static struct vt_driver vt_efifb_driver = { .vd_name = "efifb", .vd_probe = vt_efifb_probe, .vd_init = vt_efifb_init, .vd_blank = vt_fb_blank, .vd_bitblt_text = vt_fb_bitblt_text, .vd_bitblt_bmp = vt_fb_bitblt_bitmap, .vd_drawrect = vt_fb_drawrect, .vd_setpixel = vt_fb_setpixel, .vd_fb_ioctl = vt_fb_ioctl, .vd_fb_mmap = vt_fb_mmap, /* Better than VGA, but still generic driver. */ .vd_priority = VD_PRIORITY_GENERIC + 1, }; static struct fb_info local_info; VT_DRIVER_DECLARE(vt_efifb, vt_efifb_driver); static int vt_efifb_probe(struct vt_device *vd) { int disabled; struct efi_fb *efifb; caddr_t kmdp; disabled = 0; TUNABLE_INT_FETCH("hw.syscons.disable", &disabled); if (disabled != 0) return (CN_DEAD); kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); efifb = (struct efi_fb *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_FB); if (efifb == NULL) return (CN_DEAD); return (CN_INTERNAL); } static int vt_efifb_init(struct vt_device *vd) { struct fb_info *info; struct efi_fb *efifb; caddr_t kmdp; info = vd->vd_softc; if (info == NULL) info = vd->vd_softc = (void *)&local_info; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); efifb = (struct efi_fb *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_FB); if (efifb == NULL) return (CN_DEAD); info->fb_height = efifb->fb_height; info->fb_width = efifb->fb_width; info->fb_depth = fls(efifb->fb_mask_red | efifb->fb_mask_green | efifb->fb_mask_blue | efifb->fb_mask_reserved); /* Round to a multiple of the bits in a byte. 
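As a concrete illustration of the depth and stride arithmetic here (the numbers are hypothetical and serve only the example), consider a firmware mode reporting the usual 8:8:8:8 masks for a 1024-pixel-wide scanline:

    uint32_t red = 0x00ff0000, green = 0x0000ff00;
    uint32_t blue = 0x000000ff, resv = 0xff000000;
    int depth = fls(red | green | blue | resv);     /* fls(0xffffffff) == 32 */
    int bpp = (depth + NBBY - 1) & ~(NBBY - 1);     /* already a multiple of NBBY */
    int stride = 1024 * (bpp / NBBY);               /* 4096 bytes per scanline */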
*/ info->fb_bpp = (info->fb_depth + NBBY - 1) & ~(NBBY - 1); /* Stride in bytes, not pixels */ info->fb_stride = efifb->fb_stride * (info->fb_bpp / NBBY); vt_generate_cons_palette(info->fb_cmap, COLOR_FORMAT_RGB, efifb->fb_mask_red, ffs(efifb->fb_mask_red) - 1, efifb->fb_mask_green, ffs(efifb->fb_mask_green) - 1, efifb->fb_mask_blue, ffs(efifb->fb_mask_blue) - 1); info->fb_size = info->fb_height * info->fb_stride; info->fb_pbase = efifb->fb_addr; info->fb_vbase = (intptr_t)pmap_mapdev_attr(info->fb_pbase, info->fb_size, VM_MEMATTR_WRITE_COMBINING); vt_fb_init(vd); return (CN_INTERNAL); } Index: head/sys/i386/bios/mca_machdep.c =================================================================== --- head/sys/i386/bios/mca_machdep.c (revision 295879) +++ head/sys/i386/bios/mca_machdep.c (revision 295880) @@ -1,161 +1,160 @@ /*- * Copyright (c) 1999 Matthew N. Dodd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include -#include #include #include #include #include #include #include /* Global MCA bus flag */ int MCA_system = 0; /* System Configuration Block */ struct sys_config { u_int16_t count; u_int8_t model; u_int8_t submodel; u_int8_t bios_rev; u_int8_t feature; #define FEATURE_MCAISA 0x01 /* Machine contains both MCA and ISA bus*/ #define FEATURE_MCABUS 0x02 /* MicroChannel Architecture */ #define FEATURE_EBDA 0x04 /* Extended BIOS data area allocated */ #define FEATURE_WAITEV 0x08 /* Wait for external event is supported */ #define FEATURE_KBDINT 0x10 /* Keyboard intercept called by Int 09h */ #define FEATURE_RTC 0x20 /* Real-time clock present */ #define FEATURE_IC2 0x40 /* Second interrupt chip present */ #define FEATURE_DMA3 0x80 /* DMA channel 3 used by hard disk BIOS */ u_int8_t pad[3]; } __packed; /* Function Prototypes */ static void bios_mcabus_present (void *); SYSINIT(mca_present, SI_SUB_CPU, SI_ORDER_ANY, bios_mcabus_present, NULL); /* Functions */ static void bios_mcabus_present(void * dummy) { struct vm86frame vmf; struct sys_config * scp; vm_offset_t paddr; bzero(&vmf, sizeof(struct vm86frame)); vmf.vmf_ah = 0xc0; if (vm86_intcall(0x15, &vmf)) { if (bootverbose) { printf("BIOS SDT: INT call failed.\n"); } return; } if ((vmf.vmf_ah != 0) && (vmf.vmf_flags & 0x01)) { if (bootverbose) { printf("BIOS SDT: Not supported. Not PS/2?\n"); printf("BIOS SDT: AH 0x%02x, Flags 0x%04x\n", vmf.vmf_ah, vmf.vmf_flags); } return; } paddr = vmf.vmf_es; paddr = (paddr << 4) + vmf.vmf_bx; scp = (struct sys_config *)BIOS_PADDRTOVADDR(paddr); if (bootverbose) { printf("BIOS SDT: model 0x%02x, submodel 0x%02x, bios_rev 0x%02x\n", scp->model, scp->submodel, scp->bios_rev); printf("BIOS SDT: features 0x%b\n", scp->feature, "\20" "\01MCA+ISA" "\02MCA" "\03EBDA" "\04WAITEV" "\05KBDINT" "\06RTC" "\07IC2" "\08DMA3" "\n"); } MCA_system = ((scp->feature & FEATURE_MCABUS) ? 1 : 0); if (MCA_system) printf("MicroChannel Architecture System detected.\n"); return; } int mca_bus_nmi (void) { int slot; int retval = 0; int pos5 = 0; /* Disable motherboard setup */ outb(MCA_MB_SETUP_REG, MCA_MB_SETUP_DIS); /* For each slot */ for (slot = 0; slot < MCA_MAX_SLOTS; slot++) { /* Select the slot */ outb(MCA_ADAP_SETUP_REG, slot | MCA_ADAP_SET); pos5 = inb(MCA_POS_REG(MCA_POS5)); /* If Adapter Check is low */ if ((pos5 & MCA_POS5_CHCK) == 0) { retval++; /* If Adapter Check Status is available */ if ((pos5 & MCA_POS5_CHCK_STAT) == 0) { printf("MCA NMI: slot %d, POS6=0x%02x, POS7=0x%02x\n", slot+1, inb( MCA_POS_REG(MCA_POS6) ), inb( MCA_POS_REG(MCA_POS7) )); } else { printf("MCA NMI: slot %d\n", slot+1); } } /* Disable adapter setup */ outb(MCA_ADAP_SETUP_REG, MCA_ADAP_SETUP_DIS); } return (retval); } Index: head/sys/i386/pci/pci_cfgreg.c =================================================================== --- head/sys/i386/pci/pci_cfgreg.c (revision 295879) +++ head/sys/i386/pci/pci_cfgreg.c (revision 295880) @@ -1,718 +1,717 @@ /*- * Copyright (c) 1997, Stefan Esser * Copyright (c) 2000, Michael Smith * Copyright (c) 2000, BSDi * Copyright (c) 2004, Scott Long * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_xbox.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #ifdef XBOX #include #endif #define PRVERB(a) do { \ if (bootverbose) \ printf a ; \ } while(0) #define PCIE_CACHE 8 struct pcie_cfg_elem { TAILQ_ENTRY(pcie_cfg_elem) elem; vm_offset_t vapage; vm_paddr_t papage; }; enum { CFGMECH_NONE = 0, CFGMECH_1, CFGMECH_2, CFGMECH_PCIE, }; SYSCTL_DECL(_hw_pci); static TAILQ_HEAD(pcie_cfg_list, pcie_cfg_elem) pcie_list[MAXCPU]; static uint64_t pcie_base; static int pcie_minbus, pcie_maxbus; static uint32_t pcie_badslots; static int cfgmech; static int devmax; static struct mtx pcicfg_mtx; static int mcfg_enable = 1; SYSCTL_INT(_hw_pci, OID_AUTO, mcfg, CTLFLAG_RDTUN, &mcfg_enable, 0, "Enable support for PCI-e memory mapped config access"); static uint32_t pci_docfgregread(int bus, int slot, int func, int reg, int bytes); static int pcireg_cfgread(int bus, int slot, int func, int reg, int bytes); static void pcireg_cfgwrite(int bus, int slot, int func, int reg, int data, int bytes); static int pcireg_cfgopen(void); static int pciereg_cfgread(int bus, unsigned slot, unsigned func, unsigned reg, unsigned bytes); static void pciereg_cfgwrite(int bus, unsigned slot, unsigned func, unsigned reg, int data, unsigned bytes); /* * Some BIOS writers seem to want to ignore the spec and put * 0 in the intline rather than 255 to indicate none. Some use * numbers in the range 128-254 to indicate something strange and * apparently undocumented anywhere. Assume these are completely bogus * and map them to 255, which means "none". 
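The helper just below implements that policy. A few sample mappings, with the inputs chosen purely for illustration:

    pci_i386_map_intline(0);    /* 0 means "none": returns PCI_INVALID_IRQ (255) */
    pci_i386_map_intline(200);  /* 128-254 treated as bogus: PCI_INVALID_IRQ */
    pci_i386_map_intline(11);   /* a sane BIOS value passes through unchanged */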
*/ static __inline int pci_i386_map_intline(int line) { if (line == 0 || line >= 128) return (PCI_INVALID_IRQ); return (line); } static u_int16_t pcibios_get_version(void) { struct bios_regs args; if (PCIbios.ventry == 0) { PRVERB(("pcibios: No call entry point\n")); return (0); } args.eax = PCIBIOS_BIOS_PRESENT; if (bios32(&args, PCIbios.ventry, GSEL(GCODE_SEL, SEL_KPL))) { PRVERB(("pcibios: BIOS_PRESENT call failed\n")); return (0); } if (args.edx != 0x20494350) { PRVERB(("pcibios: BIOS_PRESENT didn't return 'PCI ' in edx\n")); return (0); } return (args.ebx & 0xffff); } /* * Initialise access to PCI configuration space */ int pci_cfgregopen(void) { static int opened = 0; uint64_t pciebar; u_int16_t vid, did; u_int16_t v; if (opened) return (1); if (cfgmech == CFGMECH_NONE && pcireg_cfgopen() == 0) return (0); v = pcibios_get_version(); if (v > 0) PRVERB(("pcibios: BIOS version %x.%02x\n", (v & 0xff00) >> 8, v & 0xff)); mtx_init(&pcicfg_mtx, "pcicfg", NULL, MTX_SPIN); opened = 1; /* $PIR requires PCI BIOS 2.10 or greater. */ if (v >= 0x0210) pci_pir_open(); if (cfgmech == CFGMECH_PCIE) return (1); /* * Grope around in the PCI config space to see if this is a * chipset that is capable of doing memory-mapped config cycles. * This also implies that it can do PCIe extended config cycles. */ /* Check for supported chipsets */ vid = pci_cfgregread(0, 0, 0, PCIR_VENDOR, 2); did = pci_cfgregread(0, 0, 0, PCIR_DEVICE, 2); switch (vid) { case 0x8086: switch (did) { case 0x3590: case 0x3592: /* Intel 7520 or 7320 */ pciebar = pci_cfgregread(0, 0, 0, 0xce, 2) << 16; pcie_cfgregopen(pciebar, 0, 255); break; case 0x2580: case 0x2584: case 0x2590: /* Intel 915, 925, or 915GM */ pciebar = pci_cfgregread(0, 0, 0, 0x48, 4); pcie_cfgregopen(pciebar, 0, 255); break; } } return(1); } static uint32_t pci_docfgregread(int bus, int slot, int func, int reg, int bytes) { if (cfgmech == CFGMECH_PCIE && (bus >= pcie_minbus && bus <= pcie_maxbus) && (bus != 0 || !(1 << slot & pcie_badslots))) return (pciereg_cfgread(bus, slot, func, reg, bytes)); else return (pcireg_cfgread(bus, slot, func, reg, bytes)); } /* * Read configuration space register */ u_int32_t pci_cfgregread(int bus, int slot, int func, int reg, int bytes) { uint32_t line; /* * Some BIOS writers seem to want to ignore the spec and put * 0 in the intline rather than 255 to indicate none. The rest of * the code uses 255 as an invalid IRQ. */ if (reg == PCIR_INTLINE && bytes == 1) { line = pci_docfgregread(bus, slot, func, PCIR_INTLINE, 1); return (pci_i386_map_intline(line)); } return (pci_docfgregread(bus, slot, func, reg, bytes)); } /* * Write configuration space register */ void pci_cfgregwrite(int bus, int slot, int func, int reg, u_int32_t data, int bytes) { if (cfgmech == CFGMECH_PCIE && (bus >= pcie_minbus && bus <= pcie_maxbus) && (bus != 0 || !(1 << slot & pcie_badslots))) pciereg_cfgwrite(bus, slot, func, reg, data, bytes); else pcireg_cfgwrite(bus, slot, func, reg, data, bytes); } /* * Configuration space access using direct register operations */ /* enable configuration space accesses and return data port address */ static int pci_cfgenable(unsigned bus, unsigned slot, unsigned func, int reg, int bytes) { int dataport = 0; #ifdef XBOX if (arch_i386_is_xbox) { /* * The Xbox MCPX chipset is a derivative of the nForce 1 * chipset. It almost has the same bus layout; some devices * cannot be used, because they have been removed. 
*/ /* * Devices 00:00.1 and 00:00.2 used to be memory controllers on * the nForce chipset, but on the Xbox, using them will lockup * the chipset. */ if (bus == 0 && slot == 0 && (func == 1 || func == 2)) return dataport; /* * Bus 1 only contains a VGA controller at 01:00.0. When you try * to probe beyond that device, you only get garbage, which * could cause lockups. */ if (bus == 1 && (slot != 0 || func != 0)) return dataport; /* * Bus 2 used to contain the AGP controller, but the Xbox MCPX * doesn't have one. Probing it can cause lockups. */ if (bus >= 2) return dataport; } #endif if (bus <= PCI_BUSMAX && slot < devmax && func <= PCI_FUNCMAX && (unsigned)reg <= PCI_REGMAX && bytes != 3 && (unsigned)bytes <= 4 && (reg & (bytes - 1)) == 0) { switch (cfgmech) { case CFGMECH_PCIE: case CFGMECH_1: outl(CONF1_ADDR_PORT, (1U << 31) | (bus << 16) | (slot << 11) | (func << 8) | (reg & ~0x03)); dataport = CONF1_DATA_PORT + (reg & 0x03); break; case CFGMECH_2: outb(CONF2_ENABLE_PORT, 0xf0 | (func << 1)); outb(CONF2_FORWARD_PORT, bus); dataport = 0xc000 | (slot << 8) | reg; break; } } return (dataport); } /* disable configuration space accesses */ static void pci_cfgdisable(void) { switch (cfgmech) { case CFGMECH_PCIE: case CFGMECH_1: /* * Do nothing for the config mechanism 1 case. * Writing a 0 to the address port can apparently * confuse some bridges and cause spurious * access failures. */ break; case CFGMECH_2: outb(CONF2_ENABLE_PORT, 0); break; } } static int pcireg_cfgread(int bus, int slot, int func, int reg, int bytes) { int data = -1; int port; mtx_lock_spin(&pcicfg_mtx); port = pci_cfgenable(bus, slot, func, reg, bytes); if (port != 0) { switch (bytes) { case 1: data = inb(port); break; case 2: data = inw(port); break; case 4: data = inl(port); break; } pci_cfgdisable(); } mtx_unlock_spin(&pcicfg_mtx); return (data); } static void pcireg_cfgwrite(int bus, int slot, int func, int reg, int data, int bytes) { int port; mtx_lock_spin(&pcicfg_mtx); port = pci_cfgenable(bus, slot, func, reg, bytes); if (port != 0) { switch (bytes) { case 1: outb(port, data); break; case 2: outw(port, data); break; case 4: outl(port, data); break; } pci_cfgdisable(); } mtx_unlock_spin(&pcicfg_mtx); } /* check whether the configuration mechanism has been correctly identified */ static int pci_cfgcheck(int maxdev) { uint32_t id, class; uint8_t header; uint8_t device; int port; if (bootverbose) printf("pci_cfgcheck:\tdevice "); for (device = 0; device < maxdev; device++) { if (bootverbose) printf("%d ", device); port = pci_cfgenable(0, device, 0, 0, 4); id = inl(port); if (id == 0 || id == 0xffffffff) continue; port = pci_cfgenable(0, device, 0, 8, 4); class = inl(port) >> 8; if (bootverbose) printf("[class=%06x] ", class); if (class == 0 || (class & 0xf870ff) != 0) continue; port = pci_cfgenable(0, device, 0, 14, 1); header = inb(port); if (bootverbose) printf("[hdr=%02x] ", header); if ((header & 0x7e) != 0) continue; if (bootverbose) printf("is there (id=%08x)\n", id); pci_cfgdisable(); return (1); } if (bootverbose) printf("-- nothing found\n"); pci_cfgdisable(); return (0); } static int pcireg_cfgopen(void) { uint32_t mode1res, oldval1; uint8_t mode2res, oldval2; /* Check for type #1 first. 
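For reference, a "type #1" (configuration mechanism 1) access is the 0xcf8/0xcfc sequence already used by pci_cfgenable() and pcireg_cfgread() above; a minimal sketch with hypothetical bus/slot/func values:

    int bus = 0, slot = 3, func = 0, reg = PCIR_VENDOR;
    uint32_t val;
    outl(CONF1_ADDR_PORT, (1U << 31) | (bus << 16) | (slot << 11) |
        (func << 8) | (reg & ~0x03));
    val = inl(CONF1_DATA_PORT);     /* narrower reads use inb()/inw() at
                                       CONF1_DATA_PORT + (reg & 0x03) */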
*/ oldval1 = inl(CONF1_ADDR_PORT); if (bootverbose) { printf("pci_open(1):\tmode 1 addr port (0x0cf8) is 0x%08x\n", oldval1); } cfgmech = CFGMECH_1; devmax = 32; outl(CONF1_ADDR_PORT, CONF1_ENABLE_CHK); DELAY(1); mode1res = inl(CONF1_ADDR_PORT); outl(CONF1_ADDR_PORT, oldval1); if (bootverbose) printf("pci_open(1a):\tmode1res=0x%08x (0x%08lx)\n", mode1res, CONF1_ENABLE_CHK); if (mode1res) { if (pci_cfgcheck(32)) return (cfgmech); } outl(CONF1_ADDR_PORT, CONF1_ENABLE_CHK1); mode1res = inl(CONF1_ADDR_PORT); outl(CONF1_ADDR_PORT, oldval1); if (bootverbose) printf("pci_open(1b):\tmode1res=0x%08x (0x%08lx)\n", mode1res, CONF1_ENABLE_CHK1); if ((mode1res & CONF1_ENABLE_MSK1) == CONF1_ENABLE_RES1) { if (pci_cfgcheck(32)) return (cfgmech); } /* Type #1 didn't work, so try type #2. */ oldval2 = inb(CONF2_ENABLE_PORT); if (bootverbose) { printf("pci_open(2):\tmode 2 enable port (0x0cf8) is 0x%02x\n", oldval2); } if ((oldval2 & 0xf0) == 0) { cfgmech = CFGMECH_2; devmax = 16; outb(CONF2_ENABLE_PORT, CONF2_ENABLE_CHK); mode2res = inb(CONF2_ENABLE_PORT); outb(CONF2_ENABLE_PORT, oldval2); if (bootverbose) printf("pci_open(2a):\tmode2res=0x%02x (0x%02x)\n", mode2res, CONF2_ENABLE_CHK); if (mode2res == CONF2_ENABLE_RES) { if (bootverbose) printf("pci_open(2a):\tnow trying mechanism 2\n"); if (pci_cfgcheck(16)) return (cfgmech); } } /* Nothing worked, so punt. */ cfgmech = CFGMECH_NONE; devmax = 0; return (cfgmech); } int pcie_cfgregopen(uint64_t base, uint8_t minbus, uint8_t maxbus) { struct pcie_cfg_list *pcielist; struct pcie_cfg_elem *pcie_array, *elem; #ifdef SMP struct pcpu *pc; #endif vm_offset_t va; uint32_t val1, val2; int i, slot; if (!mcfg_enable) return (0); if (minbus != 0) return (0); #ifndef PAE if (base >= 0x100000000) { if (bootverbose) printf( "PCI: Memory Mapped PCI configuration area base 0x%jx too high\n", (uintmax_t)base); return (0); } #endif if (bootverbose) printf("PCIe: Memory Mapped configuration base @ 0x%jx\n", (uintmax_t)base); #ifdef SMP STAILQ_FOREACH(pc, &cpuhead, pc_allcpu) #endif { pcie_array = malloc(sizeof(struct pcie_cfg_elem) * PCIE_CACHE, M_DEVBUF, M_NOWAIT); if (pcie_array == NULL) return (0); va = kva_alloc(PCIE_CACHE * PAGE_SIZE); if (va == 0) { free(pcie_array, M_DEVBUF); return (0); } #ifdef SMP pcielist = &pcie_list[pc->pc_cpuid]; #else pcielist = &pcie_list[0]; #endif TAILQ_INIT(pcielist); for (i = 0; i < PCIE_CACHE; i++) { elem = &pcie_array[i]; elem->vapage = va + (i * PAGE_SIZE); elem->papage = 0; TAILQ_INSERT_HEAD(pcielist, elem, elem); } } pcie_base = base; pcie_minbus = minbus; pcie_maxbus = maxbus; cfgmech = CFGMECH_PCIE; devmax = 32; /* * On some AMD systems, some of the devices on bus 0 are * inaccessible using memory-mapped PCI config access. Walk * bus 0 looking for such devices. For these devices, we will * fall back to using type 1 config access instead. 
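The memory-mapped path reaches a register by address arithmetic rather than I/O ports. The layout packed by PCIE_PADDR() further down places the bus in bits 27:20, the slot in 19:15, the function in 14:12 and the register in 11:0; a sketch with a hypothetical 0xe0000000 window base:

    /* bus 0, slot 2, function 1, register 0x44 */
    vm_paddr_t pa = 0xe0000000ULL +
        (((0 & 0xff) << 20) | ((2 & 0x1f) << 15) | ((1 & 0x7) << 12) |
        (0x44 & 0xfff));
    /* pa == 0xe0011044 */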
*/ if (pci_cfgregopen() != 0) { for (slot = 0; slot <= PCI_SLOTMAX; slot++) { val1 = pcireg_cfgread(0, slot, 0, 0, 4); if (val1 == 0xffffffff) continue; val2 = pciereg_cfgread(0, slot, 0, 0, 4); if (val2 != val1) pcie_badslots |= (1 << slot); } } return (1); } #define PCIE_PADDR(base, reg, bus, slot, func) \ ((base) + \ ((((bus) & 0xff) << 20) | \ (((slot) & 0x1f) << 15) | \ (((func) & 0x7) << 12) | \ ((reg) & 0xfff))) static __inline vm_offset_t pciereg_findaddr(int bus, unsigned slot, unsigned func, unsigned reg) { struct pcie_cfg_list *pcielist; struct pcie_cfg_elem *elem; vm_paddr_t pa, papage; pa = PCIE_PADDR(pcie_base, reg, bus, slot, func); papage = pa & ~PAGE_MASK; /* * Find an element in the cache that matches the physical page desired, * or create a new mapping from the least recently used element. * A very simple LRU algorithm is used here, does it need to be more * efficient? */ pcielist = &pcie_list[PCPU_GET(cpuid)]; TAILQ_FOREACH(elem, pcielist, elem) { if (elem->papage == papage) break; } if (elem == NULL) { elem = TAILQ_LAST(pcielist, pcie_cfg_list); if (elem->papage != 0) { pmap_kremove(elem->vapage); invlpg(elem->vapage); } pmap_kenter(elem->vapage, papage); elem->papage = papage; } if (elem != TAILQ_FIRST(pcielist)) { TAILQ_REMOVE(pcielist, elem, elem); TAILQ_INSERT_HEAD(pcielist, elem, elem); } return (elem->vapage | (pa & PAGE_MASK)); } /* * AMD BIOS And Kernel Developer's Guides for CPU families starting with 10h * have a requirement that all accesses to the memory mapped PCI configuration * space are done using AX class of registers. * Since other vendors do not currently have any contradicting requirements * the AMD access pattern is applied universally. */ static int pciereg_cfgread(int bus, unsigned slot, unsigned func, unsigned reg, unsigned bytes) { vm_offset_t va; int data = -1; if (bus < pcie_minbus || bus > pcie_maxbus || slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX) return (-1); critical_enter(); va = pciereg_findaddr(bus, slot, func, reg); switch (bytes) { case 4: __asm("movl %1, %0" : "=a" (data) : "m" (*(volatile uint32_t *)va)); break; case 2: __asm("movzwl %1, %0" : "=a" (data) : "m" (*(volatile uint16_t *)va)); break; case 1: __asm("movzbl %1, %0" : "=a" (data) : "m" (*(volatile uint8_t *)va)); break; } critical_exit(); return (data); } static void pciereg_cfgwrite(int bus, unsigned slot, unsigned func, unsigned reg, int data, unsigned bytes) { vm_offset_t va; if (bus < pcie_minbus || bus > pcie_maxbus || slot > PCI_SLOTMAX || func > PCI_FUNCMAX || reg > PCIE_REGMAX) return; critical_enter(); va = pciereg_findaddr(bus, slot, func, reg); switch (bytes) { case 4: __asm("movl %1, %0" : "=m" (*(volatile uint32_t *)va) : "a" (data)); break; case 2: __asm("movw %1, %0" : "=m" (*(volatile uint16_t *)va) : "a" ((uint16_t)data)); break; case 1: __asm("movb %1, %0" : "=m" (*(volatile uint8_t *)va) : "a" ((uint8_t)data)); break; } critical_exit(); } Index: head/sys/mips/adm5120/admpci.c =================================================================== --- head/sys/mips/adm5120/admpci.c (revision 295879) +++ head/sys/mips/adm5120/admpci.c (revision 295880) @@ -1,502 +1,501 @@ /* $NetBSD: admpci.c,v 1.1 2007/03/20 08:52:02 dyoung Exp $ */ /*- * Copyright (c) 2007 David Young. All rights reserved. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. */ /*- * Copyright (c) 2006 Itronix Inc. * All rights reserved. * * Written by Garrett D'Amore for Itronix Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of Itronix Inc. may not be used to endorse * or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include "pcib_if.h" #include #ifdef ADMPCI_DEBUG int admpci_debug = 1; #define ADMPCI_DPRINTF(__fmt, ...) \ do { \ if (admpci_debug) \ printf((__fmt), __VA_ARGS__); \ } while (/*CONSTCOND*/0) #else /* !ADMPCI_DEBUG */ #define ADMPCI_DPRINTF(__fmt, ...) do { } while (/*CONSTCOND*/0) #endif /* ADMPCI_DEBUG */ #define ADMPCI_TAG_BUS_MASK __BITS(23, 16) /* Bit 11 is reserved. It selects the AHB-PCI bridge. Let device 0 * be the bridge. For all other device numbers, let bit[11] == 0. 
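Besides composing the configuration tag, the bridge has to carve sub-word reads out of the 32-bit CONFDATA word; admpci_read_config() further down derives the shift from reg % 4. A worked example with made-up data:

    /* Hypothetical: the dword at register 0x0c reads back 0x00804008 and the
     * caller asked for the single byte at register 0x0e (the header type). */
    uint32_t word = 0x00804008;
    uint32_t shift = (0x0e % 4) * 8;            /* same result as the switch: 16 */
    uint8_t hdrtype = (word >> shift) & 0xff;   /* 0x80 */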
*/ #define ADMPCI_TAG_DEVICE_MASK __BITS(15, 11) #define ADMPCI_TAG_DEVICE_SUBMASK __BITS(15, 12) #define ADMPCI_TAG_DEVICE_BRIDGE __BIT(11) #define ADMPCI_TAG_FUNCTION_MASK __BITS(10, 8) #define ADMPCI_TAG_REGISTER_MASK __BITS(7, 0) #define ADMPCI_MAX_DEVICE struct admpci_softc { device_t sc_dev; bus_space_tag_t sc_st; /* Access to PCI config registers */ bus_space_handle_t sc_addrh; bus_space_handle_t sc_datah; int sc_busno; struct rman sc_mem_rman; struct rman sc_io_rman; struct rman sc_irq_rman; uint32_t sc_mem; uint32_t sc_io; }; static int admpci_probe(device_t dev) { return (0); } static int admpci_attach(device_t dev) { int busno = 0; struct admpci_softc *sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_busno = busno; /* Use KSEG1 to access IO ports for it is uncached */ sc->sc_io = MIPS_PHYS_TO_KSEG1(ADM5120_BASE_PCI_IO); sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "ADMPCI I/O Ports"; if (rman_init(&sc->sc_io_rman) != 0 || rman_manage_region(&sc->sc_io_rman, 0, 0xffff) != 0) { panic("admpci_attach: failed to set up I/O rman"); } /* Use KSEG1 to access PCI memory for it is uncached */ sc->sc_mem = MIPS_PHYS_TO_KSEG1(ADM5120_BASE_PCI_MEM); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "ADMPCI PCI Memory"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, sc->sc_mem, sc->sc_mem + 0x100000) != 0) { panic("admpci_attach: failed to set up memory rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "ADMPCI PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, 1, 31) != 0) panic("admpci_attach: failed to set up IRQ rman"); if (bus_space_map(sc->sc_st, ADM5120_BASE_PCI_CONFADDR, 4, 0, &sc->sc_addrh) != 0) { device_printf(sc->sc_dev, "unable to address space\n"); panic("bus_space_map failed"); } if (bus_space_map(sc->sc_st, ADM5120_BASE_PCI_CONFDATA, 4, 0, &sc->sc_datah) != 0) { device_printf(sc->sc_dev, "unable to address space\n"); panic("bus_space_map failed"); } device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int admpci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static uint32_t admpci_make_addr(int bus, int slot, int func, int reg) { return (0x80000000 | (bus << 16) | (slot << 11) | (func << 8) | reg); } static uint32_t admpci_read_config(device_t dev, int bus, int slot, int func, int reg, int bytes) { struct admpci_softc *sc = device_get_softc(dev); uint32_t data; uint32_t shift, mask; bus_addr_t addr; ADMPCI_DPRINTF("%s: sc %p tag (%x, %x, %x) reg %d\n", __func__, (void *)sc, bus, slot, func, reg); addr = admpci_make_addr(bus, slot, func, reg); ADMPCI_DPRINTF("%s: sc_addrh %p sc_datah %p addr %p\n", __func__, (void *)sc->sc_addrh, (void *)sc->sc_datah, (void *)addr); bus_space_write_4(sc->sc_io, sc->sc_addrh, 0, addr); data = bus_space_read_4(sc->sc_io, sc->sc_datah, 0); switch (reg % 4) { case 3: shift = 24; break; case 2: shift = 16; break; case 1: shift = 8; break; default: shift = 0; break; } switch (bytes) { case 1: mask = 0xff; data = (data >> shift) & mask; break; case 2: mask = 0xffff; if (reg % 4 == 0) data = data & mask; else data = (data >> 16) & mask; break; case 4: break; default: panic("%s: wrong bytes count", __func__); break; } ADMPCI_DPRINTF("%s: read 0x%x\n", __func__, data); return (data); } static void admpci_write_config(device_t dev, int bus, int slot, int func, int reg, uint32_t data, int bytes) { struct admpci_softc *sc = device_get_softc(dev); bus_addr_t addr; uint32_t reg_data; uint32_t shift, mask; 
ADMPCI_DPRINTF("%s: sc %p tag (%x, %x, %x) reg %d\n", __func__, (void *)sc, bus, slot, func, reg); if (bytes != 4) { reg_data = admpci_read_config(dev, bus, slot, func, reg, 4); switch (reg % 4) { case 3: shift = 24; break; case 2: shift = 16; break; case 1: shift = 8; break; default: shift = 0; break; } switch (bytes) { case 1: mask = 0xff; data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 2: mask = 0xffff; if (reg % 4 == 0) data = (reg_data & ~mask) | data; else data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 4: break; default: panic("%s: wrong bytes count", __func__); break; } } addr = admpci_make_addr(bus, slot, func, reg); ADMPCI_DPRINTF("%s: sc_addrh %p sc_datah %p addr %p\n", __func__, (void *)sc->sc_addrh, (void *)sc->sc_datah, (void *)addr); bus_space_write_4(sc->sc_io, sc->sc_addrh, 0, addr); bus_space_write_4(sc->sc_io, sc->sc_datah, 0, data); } static int admpci_route_interrupt(device_t pcib, device_t dev, int pin) { /* TODO: implement */ return (0); } static int admpci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct admpci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int admpci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct admpci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * admpci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { return (NULL); #if 0 struct admpci_softc *sc = device_get_softc(bus); struct resource *rv = NULL; struct rman *rm; bus_space_handle_t bh = 0; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; bh = sc->sc_mem; break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; bh = sc->sc_io; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (type != SYS_RES_IRQ) { bh += (rman_get_start(rv)); rman_set_bustag(rv, sc->sc_st); rman_set_bushandle(rv, bh); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } } return (rv); #endif } static int admpci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { bus_space_handle_t p; int error; if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { error = bus_space_map(rman_get_bustag(r), rman_get_bushandle(r), rman_get_size(r), 0, &p); if (error) return (error); rman_set_bushandle(r, p); } return (rman_activate_resource(r)); } static int admpci_setup_intr(device_t dev, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { #if 0 struct admpci_softc *sc = device_get_softc(dev); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq >= ICU_LEN || irq == 2) panic("%s: bad irq or type", __func__); event = sc->sc_eventstab[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, (void (*)(void *))NULL, "admpci intr%d:", irq); if (error) return 0; sc->sc_eventstab[irq] = event; } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); /* Enable it, set trigger mode. 
*/ sc->sc_imask &= ~(1 << irq); sc->sc_elcr &= ~(1 << irq); admpci_set_icus(sc); #endif return (0); } static int admpci_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { return (intr_event_remove_handler(cookie)); } static device_method_t admpci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, admpci_probe), DEVMETHOD(device_attach, admpci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, admpci_read_ivar), DEVMETHOD(bus_write_ivar, admpci_write_ivar), DEVMETHOD(bus_alloc_resource, admpci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, admpci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, admpci_setup_intr), DEVMETHOD(bus_teardown_intr, admpci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, admpci_maxslots), DEVMETHOD(pcib_read_config, admpci_read_config), DEVMETHOD(pcib_write_config, admpci_write_config), DEVMETHOD(pcib_route_interrupt, admpci_route_interrupt), DEVMETHOD_END }; static driver_t admpci_driver = { "pcib", admpci_methods, sizeof(struct admpci_softc), }; static devclass_t admpci_devclass; DRIVER_MODULE(admpci, obio, admpci_driver, admpci_devclass, 0, 0); Index: head/sys/mips/atheros/ar71xx_fixup.c =================================================================== --- head/sys/mips/atheros/ar71xx_fixup.c (revision 295879) +++ head/sys/mips/atheros/ar71xx_fixup.c (revision 295880) @@ -1,153 +1,152 @@ /*- * Copyright (c) 2009, Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ar71xx.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include "pcib_if.h" #include #include #include #include #include #include /* * Take a copy of the EEPROM contents and squirrel it away in a firmware. * The SPI flash will eventually cease to be memory-mapped, so we need * to take a copy of this before the SPI driver initialises. 
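The routine below publishes the copied calibration data through firmware(9) under a name derived from the bridge unit and the device's bus/slot/func. A consumer would later look it up by that name; a hedged sketch assuming unit 0 and slot 17 (both made up here) and #include <sys/firmware.h>:

    const struct firmware *fw;
    fw = firmware_get("pcib.0.bus.0.17.0.eeprom_firmware");
    if (fw != NULL) {
        /* fw->data and fw->datasize hold the EEPROM copy. */
        firmware_put(fw, FIRMWARE_UNLOAD);
    }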
*/ void ar71xx_pci_slot_create_eeprom_firmware(device_t dev, u_int bus, u_int slot, u_int func, long int flash_addr, int size) { char buf[64]; uint16_t *cal_data = (uint16_t *) MIPS_PHYS_TO_KSEG1(flash_addr); void *eeprom = NULL; const struct firmware *fw = NULL; device_printf(dev, "EEPROM firmware: 0x%lx @ %d bytes\n", flash_addr, size); eeprom = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO); if (! eeprom) { device_printf(dev, "%s: malloc failed for '%s', aborting EEPROM\n", __func__, buf); return; } memcpy(eeprom, cal_data, size); /* * Generate a flash EEPROM 'firmware' from the given memory * region. Since the SPI controller will eventually * go into port-IO mode instead of memory-mapped IO * mode, a copy of the EEPROM contents is required. */ snprintf(buf, sizeof(buf), "%s.%d.bus.%d.%d.%d.eeprom_firmware", device_get_name(dev), device_get_unit(dev), bus, slot, func); fw = firmware_register(buf, eeprom, size, 1, NULL); if (fw == NULL) { device_printf(dev, "%s: firmware_register (%s) failed\n", __func__, buf); free(eeprom, M_DEVBUF); return; } device_printf(dev, "device EEPROM '%s' registered\n", buf); } #if 0 static void ar71xx_pci_slot_fixup(device_t dev, u_int bus, u_int slot, u_int func) { long int flash_addr; char buf[64]; int size; /* * Check whether the given slot has a hint to poke. */ if (bootverbose) device_printf(dev, "%s: checking dev %s, %d/%d/%d\n", __func__, device_get_nameunit(dev), bus, slot, func); snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_addr", bus, slot, func); if (resource_long_value(device_get_name(dev), device_get_unit(dev), buf, &flash_addr) == 0) { snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_size", bus, slot, func); if (resource_int_value(device_get_name(dev), device_get_unit(dev), buf, &size) != 0) { device_printf(dev, "%s: missing hint '%s', aborting EEPROM\n", __func__, buf); return; } device_printf(dev, "found EEPROM at 0x%lx on %d.%d.%d\n", flash_addr, bus, slot, func); ar71xx_pci_fixup(dev, bus, slot, func, flash_addr, size); ar71xx_pci_slot_create_eeprom_firmware(dev, bus, slot, func, flash_addr, size); } } #endif /* 0 */ Index: head/sys/mips/atheros/ar71xx_pci.c =================================================================== --- head/sys/mips/atheros/ar71xx_pci.c (revision 295879) +++ head/sys/mips/atheros/ar71xx_pci.c (revision 295880) @@ -1,705 +1,704 @@ /*- * Copyright (c) 2009, Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ar71xx.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include "pcib_if.h" #include #include #include #ifdef AR71XX_ATH_EEPROM #include #endif /* AR71XX_ATH_EEPROM */ #undef AR71XX_PCI_DEBUG #ifdef AR71XX_PCI_DEBUG #define dprintf printf #else #define dprintf(x, arg...) #endif struct mtx ar71xx_pci_mtx; MTX_SYSINIT(ar71xx_pci_mtx, &ar71xx_pci_mtx, "ar71xx PCI space mutex", MTX_SPIN); struct ar71xx_pci_softc { device_t sc_dev; int sc_busno; int sc_baseslot; struct rman sc_mem_rman; struct rman sc_irq_rman; struct intr_event *sc_eventstab[AR71XX_PCI_NIRQS]; mips_intrcnt_t sc_intr_counter[AR71XX_PCI_NIRQS]; struct resource *sc_irq; void *sc_ih; }; static int ar71xx_pci_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int ar71xx_pci_teardown_intr(device_t, device_t, struct resource *, void *); static int ar71xx_pci_intr(void *); static void ar71xx_pci_mask_irq(void *source) { uint32_t reg; unsigned int irq = (unsigned int)source; /* XXX is the PCI lock required here? */ reg = ATH_READ_REG(AR71XX_PCI_INTR_MASK); /* flush */ reg = ATH_READ_REG(AR71XX_PCI_INTR_MASK); ATH_WRITE_REG(AR71XX_PCI_INTR_MASK, reg & ~(1 << irq)); } static void ar71xx_pci_unmask_irq(void *source) { uint32_t reg; unsigned int irq = (unsigned int)source; /* XXX is the PCI lock required here? */ reg = ATH_READ_REG(AR71XX_PCI_INTR_MASK); ATH_WRITE_REG(AR71XX_PCI_INTR_MASK, reg | (1 << irq)); /* flush */ reg = ATH_READ_REG(AR71XX_PCI_INTR_MASK); } /* * get bitmask for bytes of interest: * 0 - we want this byte, 1 - ignore it. e.g: we read 1 byte * from register 7. 
Bitmask would be: 0111 */ static uint32_t ar71xx_get_bytes_to_read(int reg, int bytes) { uint32_t bytes_to_read = 0; if ((bytes % 4) == 0) bytes_to_read = 0; else if ((bytes % 4) == 1) bytes_to_read = (~(1 << (reg % 4))) & 0xf; else if ((bytes % 4) == 2) bytes_to_read = (~(3 << (reg % 4))) & 0xf; else panic("%s: wrong combination", __func__); return (bytes_to_read); } static int ar71xx_pci_check_bus_error(void) { uint32_t error, addr, has_errors = 0; mtx_assert(&ar71xx_pci_mtx, MA_OWNED); error = ATH_READ_REG(AR71XX_PCI_ERROR) & 0x3; dprintf("%s: PCI error = %02x\n", __func__, error); if (error) { addr = ATH_READ_REG(AR71XX_PCI_ERROR_ADDR); /* Do not report it yet */ #if 0 printf("PCI bus error %d at addr 0x%08x\n", error, addr); #endif ATH_WRITE_REG(AR71XX_PCI_ERROR, error); has_errors = 1; } error = ATH_READ_REG(AR71XX_PCI_AHB_ERROR) & 0x1; dprintf("%s: AHB error = %02x\n", __func__, error); if (error) { addr = ATH_READ_REG(AR71XX_PCI_AHB_ERROR_ADDR); /* Do not report it yet */ #if 0 printf("AHB bus error %d at addr 0x%08x\n", error, addr); #endif ATH_WRITE_REG(AR71XX_PCI_AHB_ERROR, error); has_errors = 1; } return (has_errors); } static uint32_t ar71xx_pci_make_addr(int bus, int slot, int func, int reg) { if (bus == 0) { return ((1 << slot) | (func << 8) | (reg & ~3)); } else { return ((bus << 16) | (slot << 11) | (func << 8) | (reg & ~3) | 1); } } static int ar71xx_pci_conf_setup(int bus, int slot, int func, int reg, int bytes, uint32_t cmd) { uint32_t addr = ar71xx_pci_make_addr(bus, slot, func, (reg & ~3)); mtx_assert(&ar71xx_pci_mtx, MA_OWNED); cmd |= (ar71xx_get_bytes_to_read(reg, bytes) << 4); ATH_WRITE_REG(AR71XX_PCI_CONF_ADDR, addr); ATH_WRITE_REG(AR71XX_PCI_CONF_CMD, cmd); dprintf("%s: tag (%x, %x, %x) %d/%d addr=%08x, cmd=%08x\n", __func__, bus, slot, func, reg, bytes, addr, cmd); return ar71xx_pci_check_bus_error(); } static uint32_t ar71xx_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t data; uint32_t shift, mask; /* register access is 32-bit aligned */ shift = (reg & 3) * 8; /* Create a mask based on the width, post-shift */ if (bytes == 2) mask = 0xffff; else if (bytes == 1) mask = 0xff; else mask = 0xffffffff; dprintf("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); mtx_lock_spin(&ar71xx_pci_mtx); if (ar71xx_pci_conf_setup(bus, slot, func, reg, bytes, PCI_CONF_CMD_READ) == 0) data = ATH_READ_REG(AR71XX_PCI_CONF_READ_DATA); else data = -1; mtx_unlock_spin(&ar71xx_pci_mtx); /* get request bytes from 32-bit word */ data = (data >> shift) & mask; dprintf("%s: read 0x%x\n", __func__, data); return (data); } static void ar71xx_pci_local_write(device_t dev, uint32_t reg, uint32_t data, int bytes) { uint32_t cmd; dprintf("%s: local write reg %d(%d)\n", __func__, reg, bytes); data = data << (8*(reg % 4)); cmd = PCI_LCONF_CMD_WRITE | (reg & ~3); cmd |= (ar71xx_get_bytes_to_read(reg, bytes) << 20); mtx_lock_spin(&ar71xx_pci_mtx); ATH_WRITE_REG(AR71XX_PCI_LCONF_CMD, cmd); ATH_WRITE_REG(AR71XX_PCI_LCONF_WRITE_DATA, data); mtx_unlock_spin(&ar71xx_pci_mtx); } static void ar71xx_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { dprintf("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); data = data << (8*(reg % 4)); mtx_lock_spin(&ar71xx_pci_mtx); if (ar71xx_pci_conf_setup(bus, slot, func, reg, bytes, PCI_CONF_CMD_WRITE) == 0) ATH_WRITE_REG(AR71XX_PCI_CONF_WRITE_DATA, data); mtx_unlock_spin(&ar71xx_pci_mtx); } #ifdef 
AR71XX_ATH_EEPROM /* * Some embedded boards (eg AP94) have the MAC attached via PCI but they * don't have the MAC-attached EEPROM. The register initialisation * values and calibration data are stored in the on-board flash. * This routine initialises the NIC via the EEPROM register contents * before the probe/attach routines get a go at things. */ static void ar71xx_pci_fixup(device_t dev, u_int bus, u_int slot, u_int func, long flash_addr, int len) { uint16_t *cal_data = (uint16_t *) MIPS_PHYS_TO_KSEG1(flash_addr); uint32_t reg, val, bar0; if (bootverbose) device_printf(dev, "%s: flash_addr=%lx, cal_data=%p\n", __func__, flash_addr, cal_data); /* XXX check 0xa55a */ /* Save bar(0) address - just to flush bar(0) (SoC WAR) ? */ bar0 = ar71xx_pci_read_config(dev, bus, slot, func, PCIR_BAR(0), 4); ar71xx_pci_write_config(dev, bus, slot, func, PCIR_BAR(0), AR71XX_PCI_MEM_BASE, 4); val = ar71xx_pci_read_config(dev, bus, slot, func, PCIR_COMMAND, 2); val |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); ar71xx_pci_write_config(dev, bus, slot, func, PCIR_COMMAND, val, 2); cal_data += 3; while (*cal_data != 0xffff) { reg = *cal_data++; val = *cal_data++; val |= (*cal_data++) << 16; if (bootverbose) printf(" reg: %x, val=%x\n", reg, val); /* Write eeprom fixup data to device memory */ ATH_WRITE_REG(AR71XX_PCI_MEM_BASE + reg, val); DELAY(100); } val = ar71xx_pci_read_config(dev, bus, slot, func, PCIR_COMMAND, 2); val &= ~(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); ar71xx_pci_write_config(dev, bus, slot, func, PCIR_COMMAND, val, 2); /* Write the saved bar(0) address */ ar71xx_pci_write_config(dev, bus, slot, func, PCIR_BAR(0), bar0, 4); } static void ar71xx_pci_slot_fixup(device_t dev, u_int bus, u_int slot, u_int func) { long int flash_addr; char buf[64]; int size; /* * Check whether the given slot has a hint to poke. */ if (bootverbose) device_printf(dev, "%s: checking dev %s, %d/%d/%d\n", __func__, device_get_nameunit(dev), bus, slot, func); snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_addr", bus, slot, func); if (resource_long_value(device_get_name(dev), device_get_unit(dev), buf, &flash_addr) == 0) { snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_size", bus, slot, func); if (resource_int_value(device_get_name(dev), device_get_unit(dev), buf, &size) != 0) { device_printf(dev, "%s: missing hint '%s', aborting EEPROM\n", __func__, buf); return; } device_printf(dev, "found EEPROM at 0x%lx on %d.%d.%d\n", flash_addr, bus, slot, func); ar71xx_pci_fixup(dev, bus, slot, func, flash_addr, size); ar71xx_pci_slot_create_eeprom_firmware(dev, bus, slot, func, flash_addr, size); } } #endif /* AR71XX_ATH_EEPROM */ static int ar71xx_pci_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int ar71xx_pci_attach(device_t dev) { int rid = 0; struct ar71xx_pci_softc *sc = device_get_softc(dev); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "ar71xx PCI memory window"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, AR71XX_PCI_MEM_BASE, AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1) != 0) { panic("ar71xx_pci_attach: failed to set up I/O rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "ar71xx PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, AR71XX_PCI_IRQ_START, AR71XX_PCI_IRQ_END) != 0) panic("ar71xx_pci_attach: failed to set up IRQ rman"); /* * Check if there is a base slot hint. Otherwise use default value. 
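resource_int_value() resolves that hint from the kernel environment. Since this driver attaches as pcib, an entry of the following shape in device.hints would override the default; the unit and slot number are only illustrative:

    hint.pcib.0.baseslot="17"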
*/ if (resource_int_value(device_get_name(dev), device_get_unit(dev), "baseslot", &sc->sc_baseslot) != 0) { device_printf(dev, "%s: missing hint '%s', default to AR71XX_PCI_BASE_SLOT\n", __func__, "baseslot"); sc->sc_baseslot = AR71XX_PCI_BASE_SLOT; } ATH_WRITE_REG(AR71XX_PCI_INTR_STATUS, 0); ATH_WRITE_REG(AR71XX_PCI_INTR_MASK, 0); /* Hook up our interrupt handler. */ if ((sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return ENXIO; } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, ar71xx_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return ENXIO; } /* reset PCI core and PCI bus */ ar71xx_device_stop(RST_RESET_PCI_CORE | RST_RESET_PCI_BUS); DELAY(100000); ar71xx_device_start(RST_RESET_PCI_CORE | RST_RESET_PCI_BUS); DELAY(100000); /* Init PCI windows */ ATH_WRITE_REG(AR71XX_PCI_WINDOW0, PCI_WINDOW0_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW1, PCI_WINDOW1_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW2, PCI_WINDOW2_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW3, PCI_WINDOW3_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW4, PCI_WINDOW4_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW5, PCI_WINDOW5_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW6, PCI_WINDOW6_ADDR); ATH_WRITE_REG(AR71XX_PCI_WINDOW7, PCI_WINDOW7_CONF_ADDR); DELAY(100000); mtx_lock_spin(&ar71xx_pci_mtx); ar71xx_pci_check_bus_error(); mtx_unlock_spin(&ar71xx_pci_mtx); /* Fixup internal PCI bridge */ ar71xx_pci_local_write(dev, PCIR_COMMAND, PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN, 4); #ifdef AR71XX_ATH_EEPROM /* * Hard-code a check for slot 17 and 18 - these are * the two PCI slots which may have a PCI device that * requires "fixing". 
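ar71xx_pci_slot_fixup() (defined above) only acts when per-slot hints supply the flash address and size of the calibration data. Hypothetical device.hints entries for slot 17, with made-up values:

    hint.pcib.0.bus.0.17.0.ath_fixup_addr="0x1fff1000"
    hint.pcib.0.bus.0.17.0.ath_fixup_size="4096"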
*/ ar71xx_pci_slot_fixup(dev, 0, 17, 0); ar71xx_pci_slot_fixup(dev, 0, 18, 0); #endif /* AR71XX_ATH_EEPROM */ device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int ar71xx_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ar71xx_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int ar71xx_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct ar71xx_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * ar71xx_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int ar71xx_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int res = (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); if (!res) { switch(type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rman_set_bustag(r, ar71xx_bus_space_pcimem); break; } } return (res); } static int ar71xx_pci_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); event = sc->sc_eventstab[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, irq, ar71xx_pci_mask_irq, ar71xx_pci_unmask_irq, NULL, NULL, "pci intr%d:", irq); if (error == 0) { sc->sc_eventstab[irq] = event; sc->sc_intr_counter[irq] = mips_intrcnt_create(event->ie_name); } else return (error); } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(sc->sc_intr_counter[irq], event->ie_fullname); ar71xx_pci_unmask_irq((void*)irq); return (0); } static int ar71xx_pci_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int irq, result; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); if (sc->sc_eventstab[irq] == NULL) panic("Trying to teardown unoccupied IRQ"); ar71xx_pci_mask_irq((void*)irq); result = intr_event_remove_handler(cookie); if (!result) sc->sc_eventstab[irq] = NULL; return (result); } static int ar71xx_pci_intr(void *arg) { struct ar71xx_pci_softc *sc = arg; struct intr_event *event; uint32_t reg, irq, mask; reg = ATH_READ_REG(AR71XX_PCI_INTR_STATUS); mask = ATH_READ_REG(AR71XX_PCI_INTR_MASK); /* * Handle only unmasked interrupts */ reg &= mask; for (irq = AR71XX_PCI_IRQ_START; irq <= AR71XX_PCI_IRQ_END; irq++) { if (reg & (1 << irq)) { event = sc->sc_eventstab[irq]; if (!event || TAILQ_EMPTY(&event->ie_handlers)) { 
/* Ignore timer interrupts */ if (irq != 0) printf("Stray IRQ %d\n", irq); continue; } /* Flush DDR FIFO for PCI/PCIe */ ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_PCIE); /* TODO: frame instead of NULL? */ intr_event_handle(event, NULL); mips_intrcnt_inc(sc->sc_intr_counter[irq]); } } return (FILTER_HANDLED); } static int ar71xx_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int ar71xx_pci_route_interrupt(device_t pcib, device_t device, int pin) { struct ar71xx_pci_softc *sc = device_get_softc(pcib); if (pci_get_slot(device) < sc->sc_baseslot) panic("%s: PCI slot %d is less then AR71XX_PCI_BASE_SLOT", __func__, pci_get_slot(device)); return (pci_get_slot(device) - sc->sc_baseslot); } static device_method_t ar71xx_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ar71xx_pci_probe), DEVMETHOD(device_attach, ar71xx_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, ar71xx_pci_read_ivar), DEVMETHOD(bus_write_ivar, ar71xx_pci_write_ivar), DEVMETHOD(bus_alloc_resource, ar71xx_pci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, ar71xx_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, ar71xx_pci_setup_intr), DEVMETHOD(bus_teardown_intr, ar71xx_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, ar71xx_pci_maxslots), DEVMETHOD(pcib_read_config, ar71xx_pci_read_config), DEVMETHOD(pcib_write_config, ar71xx_pci_write_config), DEVMETHOD(pcib_route_interrupt, ar71xx_pci_route_interrupt), DEVMETHOD_END }; static driver_t ar71xx_pci_driver = { "pcib", ar71xx_pci_methods, sizeof(struct ar71xx_pci_softc), }; static devclass_t ar71xx_pci_devclass; DRIVER_MODULE(ar71xx_pci, nexus, ar71xx_pci_driver, ar71xx_pci_devclass, 0, 0); Index: head/sys/mips/atheros/ar71xx_spi.c =================================================================== --- head/sys/mips/atheros/ar71xx_spi.c (revision 295879) +++ head/sys/mips/atheros/ar71xx_spi.c (revision 295880) @@ -1,292 +1,291 @@ /*- * Copyright (c) 2009, Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include "spibus_if.h" #include #undef AR71XX_SPI_DEBUG #ifdef AR71XX_SPI_DEBUG #define dprintf printf #else #define dprintf(x, arg...) #endif /* * register space access macros */ #define SPI_BARRIER_WRITE(sc) bus_barrier((sc)->sc_mem_res, 0, 0, \ BUS_SPACE_BARRIER_WRITE) #define SPI_BARRIER_READ(sc) bus_barrier((sc)->sc_mem_res, 0, 0, \ BUS_SPACE_BARRIER_READ) #define SPI_BARRIER_RW(sc) bus_barrier((sc)->sc_mem_res, 0, 0, \ BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE) #define SPI_WRITE(sc, reg, val) do { \ bus_write_4(sc->sc_mem_res, (reg), (val)); \ } while (0) #define SPI_READ(sc, reg) bus_read_4(sc->sc_mem_res, (reg)) #define SPI_SET_BITS(sc, reg, bits) \ SPI_WRITE(sc, reg, SPI_READ(sc, (reg)) | (bits)) #define SPI_CLEAR_BITS(sc, reg, bits) \ SPI_WRITE(sc, reg, SPI_READ(sc, (reg)) & ~(bits)) struct ar71xx_spi_softc { device_t sc_dev; struct resource *sc_mem_res; uint32_t sc_reg_ctrl; }; static int ar71xx_spi_probe(device_t dev) { device_set_desc(dev, "AR71XX SPI"); return (BUS_PROBE_NOWILDCARD); } static int ar71xx_spi_attach(device_t dev) { struct ar71xx_spi_softc *sc = device_get_softc(dev); int rid; sc->sc_dev = dev; rid = 0; sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->sc_mem_res) { device_printf(dev, "Could not map memory\n"); return (ENXIO); } SPI_WRITE(sc, AR71XX_SPI_FS, 1); /* Flush out read before reading the control register */ SPI_BARRIER_WRITE(sc); sc->sc_reg_ctrl = SPI_READ(sc, AR71XX_SPI_CTRL); /* * XXX TODO: document what the SPI control register does. */ SPI_WRITE(sc, AR71XX_SPI_CTRL, 0x43); /* * Ensure the config register write has gone out before configuring * the chip select mask. */ SPI_BARRIER_WRITE(sc); SPI_WRITE(sc, AR71XX_SPI_IO_CTRL, SPI_IO_CTRL_CSMASK); /* * .. and ensure the write has gone out before continuing. */ SPI_BARRIER_WRITE(sc); device_add_child(dev, "spibus", -1); return (bus_generic_attach(dev)); } static void ar71xx_spi_chip_activate(struct ar71xx_spi_softc *sc, int cs) { uint32_t ioctrl = SPI_IO_CTRL_CSMASK; /* * Put respective CSx to low */ ioctrl &= ~(SPI_IO_CTRL_CS0 << cs); /* * Make sure any other writes have gone out to the * device before changing the chip select line; * then ensure that it has made it out to the device * before continuing. */ SPI_BARRIER_WRITE(sc); SPI_WRITE(sc, AR71XX_SPI_IO_CTRL, ioctrl); SPI_BARRIER_WRITE(sc); } static void ar71xx_spi_chip_deactivate(struct ar71xx_spi_softc *sc, int cs) { /* * Put all CSx to high */ SPI_WRITE(sc, AR71XX_SPI_IO_CTRL, SPI_IO_CTRL_CSMASK); } static uint8_t ar71xx_spi_txrx(struct ar71xx_spi_softc *sc, int cs, uint8_t data) { int bit; /* CS0 */ uint32_t ioctrl = SPI_IO_CTRL_CSMASK; /* * low-level for selected CS */ ioctrl &= ~(SPI_IO_CTRL_CS0 << cs); uint32_t iod, rds; for (bit = 7; bit >=0; bit--) { if (data & (1 << bit)) iod = ioctrl | SPI_IO_CTRL_DO; else iod = ioctrl & ~SPI_IO_CTRL_DO; SPI_BARRIER_WRITE(sc); SPI_WRITE(sc, AR71XX_SPI_IO_CTRL, iod); SPI_BARRIER_WRITE(sc); SPI_WRITE(sc, AR71XX_SPI_IO_CTRL, iod | SPI_IO_CTRL_CLK); } /* * Provide falling edge for connected device by clear clock bit. 
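 * (The loop above bit-bangs the byte MSB first: each pass drives DO with the
 * current data bit and then raises CLK; the write below drops CLK again, and
 * the byte clocked in from the device is then read back from the RDS
 * register.)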
*/ SPI_BARRIER_WRITE(sc); SPI_WRITE(sc, AR71XX_SPI_IO_CTRL, iod); SPI_BARRIER_WRITE(sc); rds = SPI_READ(sc, AR71XX_SPI_RDS); return (rds & 0xff); } static int ar71xx_spi_transfer(device_t dev, device_t child, struct spi_command *cmd) { struct ar71xx_spi_softc *sc; uint8_t *buf_in, *buf_out; struct spibus_ivar *devi = SPIBUS_IVAR(child); int i; sc = device_get_softc(dev); ar71xx_spi_chip_activate(sc, devi->cs); KASSERT(cmd->tx_cmd_sz == cmd->rx_cmd_sz, ("TX/RX command sizes should be equal")); KASSERT(cmd->tx_data_sz == cmd->rx_data_sz, ("TX/RX data sizes should be equal")); /* * Transfer command */ buf_out = (uint8_t *)cmd->tx_cmd; buf_in = (uint8_t *)cmd->rx_cmd; for (i = 0; i < cmd->tx_cmd_sz; i++) buf_in[i] = ar71xx_spi_txrx(sc, devi->cs, buf_out[i]); /* * Receive/transmit data (depends on command) */ buf_out = (uint8_t *)cmd->tx_data; buf_in = (uint8_t *)cmd->rx_data; for (i = 0; i < cmd->tx_data_sz; i++) buf_in[i] = ar71xx_spi_txrx(sc, devi->cs, buf_out[i]); ar71xx_spi_chip_deactivate(sc, devi->cs); return (0); } static int ar71xx_spi_detach(device_t dev) { struct ar71xx_spi_softc *sc = device_get_softc(dev); /* * Ensure any other writes to the device are finished * before we tear down the SPI device. */ SPI_BARRIER_WRITE(sc); /* * Restore the control register; ensure it has hit the * hardware before continuing. */ SPI_WRITE(sc, AR71XX_SPI_CTRL, sc->sc_reg_ctrl); SPI_BARRIER_WRITE(sc); /* * And now, put the flash back into mapped IO mode and * ensure _that_ has completed before we finish up. */ SPI_WRITE(sc, AR71XX_SPI_FS, 0); SPI_BARRIER_WRITE(sc); if (sc->sc_mem_res) bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem_res); return (0); } static device_method_t ar71xx_spi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ar71xx_spi_probe), DEVMETHOD(device_attach, ar71xx_spi_attach), DEVMETHOD(device_detach, ar71xx_spi_detach), DEVMETHOD(spibus_transfer, ar71xx_spi_transfer), {0, 0} }; static driver_t ar71xx_spi_driver = { "spi", ar71xx_spi_methods, sizeof(struct ar71xx_spi_softc), }; static devclass_t ar71xx_spi_devclass; DRIVER_MODULE(ar71xx_spi, nexus, ar71xx_spi_driver, ar71xx_spi_devclass, 0, 0); Index: head/sys/mips/atheros/ar724x_pci.c =================================================================== --- head/sys/mips/atheros/ar724x_pci.c (revision 295879) +++ head/sys/mips/atheros/ar724x_pci.c (revision 295880) @@ -1,665 +1,664 @@ /*- * Copyright (c) 2009, Oleksandr Tymoshenko * Copyright (c) 2011, Luiz Otavio O Souza. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ar71xx.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include "pcib_if.h" #include #include #include #include #include #ifdef AR71XX_ATH_EEPROM #include #endif /* AR71XX_ATH_EEPROM */ #undef AR724X_PCI_DEBUG #ifdef AR724X_PCI_DEBUG #define dprintf printf #else #define dprintf(x, arg...) #endif struct ar71xx_pci_softc { device_t sc_dev; int sc_busno; struct rman sc_mem_rman; struct rman sc_irq_rman; struct intr_event *sc_eventstab[AR71XX_PCI_NIRQS]; mips_intrcnt_t sc_intr_counter[AR71XX_PCI_NIRQS]; struct resource *sc_irq; void *sc_ih; }; static int ar724x_pci_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int ar724x_pci_teardown_intr(device_t, device_t, struct resource *, void *); static int ar724x_pci_intr(void *); static void ar724x_pci_write(uint32_t reg, uint32_t offset, uint32_t data, int bytes) { uint32_t val, mask, shift; /* Register access is 32-bit aligned */ shift = (offset & 3) * 8; if (bytes % 4) mask = (1 << (bytes * 8)) - 1; else mask = 0xffffffff; val = ATH_READ_REG(reg + (offset & ~3)); val &= ~(mask << shift); val |= ((data & mask) << shift); ATH_WRITE_REG(reg + (offset & ~3), val); dprintf("%s: %#x/%#x addr=%#x, data=%#x(%#x), bytes=%d\n", __func__, reg, reg + (offset & ~3), offset, data, val, bytes); } static uint32_t ar724x_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t data, shift, mask; /* Register access is 32-bit aligned */ shift = (reg & 3) * 8; /* Create a mask based on the width, post-shift */ if (bytes == 2) mask = 0xffff; else if (bytes == 1) mask = 0xff; else mask = 0xffffffff; dprintf("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); if ((bus == 0) && (slot == 0) && (func == 0)) data = ATH_READ_REG(AR724X_PCI_CFG_BASE + (reg & ~3)); else data = -1; /* Get request bytes from 32-bit word */ data = (data >> shift) & mask; dprintf("%s: read 0x%x\n", __func__, data); return (data); } static void ar724x_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { dprintf("%s: tag (%x, %x, %x) reg %d(%d): %x\n", __func__, bus, slot, func, reg, bytes, data); if ((bus != 0) || (slot != 0) || (func != 0)) return; /* * WAR for BAR issue on AR7240 - We are unable to access the PCI * device space if we set the BAR with proper base address. * * However, we _do_ want to allow programming in the probe value * (0xffffffff) so the PCI code can find out how big the memory * map is for this device. Without it, it'll think the memory * map is 32 bits wide, the PCI code will then end up thinking * the register window is '0' and fail to allocate resources. 
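 * (In other words: the 0xffffffff sizing write is passed through untouched,
 * while any other 32-bit write to BAR(0) on the AR7240 is forced to 0xffff
 * below.)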
*/ if (reg == PCIR_BAR(0) && bytes == 4 && ar71xx_soc == AR71XX_SOC_AR7240 && data != 0xffffffff) ar724x_pci_write(AR724X_PCI_CFG_BASE, reg, 0xffff, bytes); else ar724x_pci_write(AR724X_PCI_CFG_BASE, reg, data, bytes); } static void ar724x_pci_mask_irq(void *source) { uint32_t reg; unsigned int irq = (unsigned int)source; /* XXX - Only one interrupt ? Only one device ? */ if (irq != AR71XX_PCI_IRQ_START) return; /* Update the interrupt mask reg */ reg = ATH_READ_REG(AR724X_PCI_INTR_MASK); ATH_WRITE_REG(AR724X_PCI_INTR_MASK, reg & ~AR724X_PCI_INTR_DEV0); /* Clear any pending interrupt */ reg = ATH_READ_REG(AR724X_PCI_INTR_STATUS); ATH_WRITE_REG(AR724X_PCI_INTR_STATUS, reg | AR724X_PCI_INTR_DEV0); } static void ar724x_pci_unmask_irq(void *source) { uint32_t reg; unsigned int irq = (unsigned int)source; /* XXX */ if (irq != AR71XX_PCI_IRQ_START) return; /* Update the interrupt mask reg */ reg = ATH_READ_REG(AR724X_PCI_INTR_MASK); ATH_WRITE_REG(AR724X_PCI_INTR_MASK, reg | AR724X_PCI_INTR_DEV0); } static int ar724x_pci_setup(device_t dev) { uint32_t reg; /* setup COMMAND register */ reg = PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN; ar724x_pci_write(AR724X_PCI_CRP_BASE, PCIR_COMMAND, reg, 2); ar724x_pci_write(AR724X_PCI_CRP_BASE, 0x20, 0x1ff01000, 4); ar724x_pci_write(AR724X_PCI_CRP_BASE, 0x24, 0x1ff01000, 4); reg = ATH_READ_REG(AR724X_PCI_RESET); if (reg != 0x7) { DELAY(100000); ATH_WRITE_REG(AR724X_PCI_RESET, 0); DELAY(100); ATH_WRITE_REG(AR724X_PCI_RESET, 4); DELAY(100000); } if (ar71xx_soc == AR71XX_SOC_AR7240) reg = AR724X_PCI_APP_LTSSM_ENABLE; else reg = 0x1ffc1; ATH_WRITE_REG(AR724X_PCI_APP, reg); /* Flush write */ (void) ATH_READ_REG(AR724X_PCI_APP); DELAY(1000); reg = ATH_READ_REG(AR724X_PCI_RESET); if ((reg & AR724X_PCI_RESET_LINK_UP) == 0) { device_printf(dev, "no PCIe controller found\n"); return (ENXIO); } if (ar71xx_soc == AR71XX_SOC_AR7241 || ar71xx_soc == AR71XX_SOC_AR7242) { reg = ATH_READ_REG(AR724X_PCI_APP); reg |= (1 << 16); ATH_WRITE_REG(AR724X_PCI_APP, reg); } return (0); } #ifdef AR71XX_ATH_EEPROM #define AR5416_EEPROM_MAGIC 0xa55a /* * XXX - This should not be here ! And this looks like Atheros (if_ath) only. */ static void ar724x_pci_fixup(device_t dev, long flash_addr, int len) { uint32_t bar0, reg, val; uint16_t *cal_data = (uint16_t *) MIPS_PHYS_TO_KSEG1(flash_addr); #if 0 if (cal_data[0] != AR5416_EEPROM_MAGIC) { device_printf(dev, "%s: Invalid calibration data from 0x%x\n", __func__, (uintptr_t) flash_addr); return; } #endif /* Save bar(0) address - just to flush bar(0) (SoC WAR) ? 
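 * (Sequence below: save BAR(0), temporarily point it at AR71XX_PCI_MEM_BASE,
 * enable memory access and bus mastering, stream the calibration words from
 * flash into the device registers, then clear those command bits again and
 * write back the saved BAR(0).)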
*/ bar0 = ar724x_pci_read_config(dev, 0, 0, 0, PCIR_BAR(0), 4); /* Write temporary BAR0 to map the NIC into a fixed location */ ar724x_pci_write_config(dev, 0, 0, 0, PCIR_BAR(0), AR71XX_PCI_MEM_BASE, 4); val = ar724x_pci_read_config(dev, 0, 0, 0, PCIR_COMMAND, 2); val |= (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); ar724x_pci_write_config(dev, 0, 0, 0, PCIR_COMMAND, val, 2); /* set pointer to first reg address */ cal_data += 3; while (*cal_data != 0xffff) { reg = *cal_data++; val = *cal_data++; val |= (*cal_data++) << 16; if (bootverbose) printf(" 0x%08x=0x%04x\n", reg, val); /* Write eeprom fixup data to device memory */ ATH_WRITE_REG(AR71XX_PCI_MEM_BASE + reg, val); DELAY(100); } val = ar724x_pci_read_config(dev, 0, 0, 0, PCIR_COMMAND, 2); val &= ~(PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); ar724x_pci_write_config(dev, 0, 0, 0, PCIR_COMMAND, val, 2); /* Write the saved bar(0) address */ ar724x_pci_write_config(dev, 0, 0, 0, PCIR_BAR(0), bar0, 4); } #undef AR5416_EEPROM_MAGIC /* * XXX This is (mostly) duplicated with ar71xx_pci.c. * It should at some point be fixed. */ static void ar724x_pci_slot_fixup(device_t dev) { long int flash_addr; char buf[64]; int size; /* * Check whether the given slot has a hint to poke. */ if (bootverbose) device_printf(dev, "%s: checking dev %s, %d/%d/%d\n", __func__, device_get_nameunit(dev), 0, 0, 0); snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_addr", 0, 0, 0); if (resource_long_value(device_get_name(dev), device_get_unit(dev), buf, &flash_addr) == 0) { snprintf(buf, sizeof(buf), "bus.%d.%d.%d.ath_fixup_size", 0, 0, 0); if (resource_int_value(device_get_name(dev), device_get_unit(dev), buf, &size) != 0) { device_printf(dev, "%s: missing hint '%s', aborting EEPROM\n", __func__, buf); return; } device_printf(dev, "found EEPROM at 0x%lx on %d.%d.%d\n", flash_addr, 0, 0, 0); ar724x_pci_fixup(dev, flash_addr, size); ar71xx_pci_slot_create_eeprom_firmware(dev, 0, 0, 0, flash_addr, size); } } #endif /* AR71XX_ATH_EEPROM */ static int ar724x_pci_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int ar724x_pci_attach(device_t dev) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int rid = 0; sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "ar724x PCI memory window"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, AR71XX_PCI_MEM_BASE, AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1) != 0) { panic("ar724x_pci_attach: failed to set up I/O rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "ar724x PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, AR71XX_PCI_IRQ_START, AR71XX_PCI_IRQ_END) != 0) panic("ar724x_pci_attach: failed to set up IRQ rman"); /* Disable interrupts */ ATH_WRITE_REG(AR724X_PCI_INTR_STATUS, 0); ATH_WRITE_REG(AR724X_PCI_INTR_MASK, 0); /* Hook up our interrupt handler. 
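 * (ar724x_pci_intr() is registered as a filter routine; it demultiplexes the
 * single PCIe interrupt and dispatches to the registered child handler via
 * intr_event_handle().)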
*/ if ((sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return (ENXIO); } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, ar724x_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return (ENXIO); } /* Reset PCIe core and PCIe PHY */ ar71xx_device_stop(AR724X_RESET_PCIE); ar71xx_device_stop(AR724X_RESET_PCIE_PHY); ar71xx_device_stop(AR724X_RESET_PCIE_PHY_SERIAL); DELAY(100); ar71xx_device_start(AR724X_RESET_PCIE_PHY_SERIAL); DELAY(100); ar71xx_device_start(AR724X_RESET_PCIE_PHY); ar71xx_device_start(AR724X_RESET_PCIE); if (ar724x_pci_setup(dev)) return (ENXIO); #ifdef AR71XX_ATH_EEPROM ar724x_pci_slot_fixup(dev); #endif /* AR71XX_ATH_EEPROM */ /* Fixup internal PCI bridge */ ar724x_pci_write_config(dev, 0, 0, 0, PCIR_COMMAND, PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN, 2); device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int ar724x_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ar71xx_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int ar724x_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct ar71xx_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * ar724x_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int ar724x_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int res = (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); if (!res) { switch(type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rman_set_bustag(r, ar71xx_bus_space_pcimem); break; } } return (res); } static int ar724x_pci_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); event = sc->sc_eventstab[irq]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, irq, ar724x_pci_mask_irq, ar724x_pci_unmask_irq, NULL, NULL, "pci intr%d:", irq); if (error == 0) { sc->sc_eventstab[irq] = event; sc->sc_intr_counter[irq] = mips_intrcnt_create(event->ie_name); } else return error; } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(sc->sc_intr_counter[irq], event->ie_fullname); ar724x_pci_unmask_irq((void*)irq); return (0); } static int 
ar724x_pci_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int irq, result; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); if (sc->sc_eventstab[irq] == NULL) panic("Trying to teardown unoccupied IRQ"); ar724x_pci_mask_irq((void*)irq); result = intr_event_remove_handler(cookie); if (!result) sc->sc_eventstab[irq] = NULL; return (result); } static int ar724x_pci_intr(void *arg) { struct ar71xx_pci_softc *sc = arg; struct intr_event *event; uint32_t reg, irq, mask; reg = ATH_READ_REG(AR724X_PCI_INTR_STATUS); mask = ATH_READ_REG(AR724X_PCI_INTR_MASK); /* * Handle only unmasked interrupts */ reg &= mask; if (reg & AR724X_PCI_INTR_DEV0) { irq = AR71XX_PCI_IRQ_START; event = sc->sc_eventstab[irq]; if (!event || TAILQ_EMPTY(&event->ie_handlers)) { printf("Stray IRQ %d\n", irq); return (FILTER_STRAY); } /* Flush pending memory transactions */ ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_PCIE); /* TODO: frame instead of NULL? */ intr_event_handle(event, NULL); mips_intrcnt_inc(sc->sc_intr_counter[irq]); } return (FILTER_HANDLED); } static int ar724x_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int ar724x_pci_route_interrupt(device_t pcib, device_t device, int pin) { return (pci_get_slot(device)); } static device_method_t ar724x_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ar724x_pci_probe), DEVMETHOD(device_attach, ar724x_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, ar724x_pci_read_ivar), DEVMETHOD(bus_write_ivar, ar724x_pci_write_ivar), DEVMETHOD(bus_alloc_resource, ar724x_pci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, ar724x_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, ar724x_pci_setup_intr), DEVMETHOD(bus_teardown_intr, ar724x_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, ar724x_pci_maxslots), DEVMETHOD(pcib_read_config, ar724x_pci_read_config), DEVMETHOD(pcib_write_config, ar724x_pci_write_config), DEVMETHOD(pcib_route_interrupt, ar724x_pci_route_interrupt), DEVMETHOD_END }; static driver_t ar724x_pci_driver = { "pcib", ar724x_pci_methods, sizeof(struct ar71xx_pci_softc), }; static devclass_t ar724x_pci_devclass; DRIVER_MODULE(ar724x_pci, nexus, ar724x_pci_driver, ar724x_pci_devclass, 0, 0); Index: head/sys/mips/atheros/if_arge.c =================================================================== --- head/sys/mips/atheros/if_arge.c (revision 295879) +++ head/sys/mips/atheros/if_arge.c (revision 295880) @@ -1,2725 +1,2724 @@ /*- * Copyright (c) 2009, Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * AR71XX gigabit ethernet driver */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include "opt_arge.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include "opt_arge.h" #if defined(ARGE_MDIO) #include #include #include "mdio_if.h" #endif MODULE_DEPEND(arge, ether, 1, 1, 1); MODULE_DEPEND(arge, miibus, 1, 1, 1); MODULE_VERSION(arge, 1); #include "miibus_if.h" #include #include #include /* XXX tsk! */ #include /* XXX tsk! */ #include /* XXX tsk! */ #include #include #include #include typedef enum { ARGE_DBG_MII = 0x00000001, ARGE_DBG_INTR = 0x00000002, ARGE_DBG_TX = 0x00000004, ARGE_DBG_RX = 0x00000008, ARGE_DBG_ERR = 0x00000010, ARGE_DBG_RESET = 0x00000020, ARGE_DBG_PLL = 0x00000040, } arge_debug_flags; static const char * arge_miicfg_str[] = { "NONE", "GMII", "MII", "RGMII", "RMII", "SGMII" }; #ifdef ARGE_DEBUG #define ARGEDEBUG(_sc, _m, ...) \ do { \ if ((_m) & (_sc)->arge_debug) \ device_printf((_sc)->arge_dev, __VA_ARGS__); \ } while (0) #else #define ARGEDEBUG(_sc, _m, ...) 
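/*
 * With ARGE_DEBUG unset, ARGEDEBUG() expands to nothing, so the debug
 * statements sprinkled through the driver compile away entirely.
 */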
#endif static int arge_attach(device_t); static int arge_detach(device_t); static void arge_flush_ddr(struct arge_softc *); static int arge_ifmedia_upd(struct ifnet *); static void arge_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int arge_ioctl(struct ifnet *, u_long, caddr_t); static void arge_init(void *); static void arge_init_locked(struct arge_softc *); static void arge_link_task(void *, int); static void arge_update_link_locked(struct arge_softc *sc); static void arge_set_pll(struct arge_softc *, int, int); static int arge_miibus_readreg(device_t, int, int); static void arge_miibus_statchg(device_t); static int arge_miibus_writereg(device_t, int, int, int); static int arge_probe(device_t); static void arge_reset_dma(struct arge_softc *); static int arge_resume(device_t); static int arge_rx_ring_init(struct arge_softc *); static void arge_rx_ring_free(struct arge_softc *sc); static int arge_tx_ring_init(struct arge_softc *); static void arge_tx_ring_free(struct arge_softc *); #ifdef DEVICE_POLLING static int arge_poll(struct ifnet *, enum poll_cmd, int); #endif static int arge_shutdown(device_t); static void arge_start(struct ifnet *); static void arge_start_locked(struct ifnet *); static void arge_stop(struct arge_softc *); static int arge_suspend(device_t); static int arge_rx_locked(struct arge_softc *); static void arge_tx_locked(struct arge_softc *); static void arge_intr(void *); static int arge_intr_filter(void *); static void arge_tick(void *); static void arge_hinted_child(device_t bus, const char *dname, int dunit); /* * ifmedia callbacks for multiPHY MAC */ void arge_multiphy_mediastatus(struct ifnet *, struct ifmediareq *); int arge_multiphy_mediachange(struct ifnet *); static void arge_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int arge_dma_alloc(struct arge_softc *); static void arge_dma_free(struct arge_softc *); static int arge_newbuf(struct arge_softc *, int); static __inline void arge_fixup_rx(struct mbuf *); static device_method_t arge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, arge_probe), DEVMETHOD(device_attach, arge_attach), DEVMETHOD(device_detach, arge_detach), DEVMETHOD(device_suspend, arge_suspend), DEVMETHOD(device_resume, arge_resume), DEVMETHOD(device_shutdown, arge_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, arge_miibus_readreg), DEVMETHOD(miibus_writereg, arge_miibus_writereg), DEVMETHOD(miibus_statchg, arge_miibus_statchg), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), DEVMETHOD(bus_hinted_child, arge_hinted_child), DEVMETHOD_END }; static driver_t arge_driver = { "arge", arge_methods, sizeof(struct arge_softc) }; static devclass_t arge_devclass; DRIVER_MODULE(arge, nexus, arge_driver, arge_devclass, 0, 0); DRIVER_MODULE(miibus, arge, miibus_driver, miibus_devclass, 0, 0); #if defined(ARGE_MDIO) static int argemdio_probe(device_t); static int argemdio_attach(device_t); static int argemdio_detach(device_t); /* * Declare an additional, separate driver for accessing the MDIO bus. 
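 * (argemdio attaches at nexus and services register access through the same
 * arge_miibus_readreg()/arge_miibus_writereg() routines; the mdio bus hangs
 * off it, and each arge instance reaches its PHYs through miiproxy.)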
*/ static device_method_t argemdio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, argemdio_probe), DEVMETHOD(device_attach, argemdio_attach), DEVMETHOD(device_detach, argemdio_detach), /* bus interface */ DEVMETHOD(bus_add_child, device_add_child_ordered), /* MDIO access */ DEVMETHOD(mdio_readreg, arge_miibus_readreg), DEVMETHOD(mdio_writereg, arge_miibus_writereg), }; DEFINE_CLASS_0(argemdio, argemdio_driver, argemdio_methods, sizeof(struct arge_softc)); static devclass_t argemdio_devclass; DRIVER_MODULE(miiproxy, arge, miiproxy_driver, miiproxy_devclass, 0, 0); DRIVER_MODULE(argemdio, nexus, argemdio_driver, argemdio_devclass, 0, 0); DRIVER_MODULE(mdio, argemdio, mdio_driver, mdio_devclass, 0, 0); #endif static struct mtx miibus_mtx; MTX_SYSINIT(miibus_mtx, &miibus_mtx, "arge mii lock", MTX_DEF); /* * Flushes all * * XXX this needs to be done at interrupt time! Grr! */ static void arge_flush_ddr(struct arge_softc *sc) { switch (sc->arge_mac_unit) { case 0: ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_GE0); break; case 1: ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_GE1); break; default: device_printf(sc->arge_dev, "%s: unknown unit (%d)\n", __func__, sc->arge_mac_unit); break; } } static int arge_probe(device_t dev) { device_set_desc(dev, "Atheros AR71xx built-in ethernet interface"); return (BUS_PROBE_NOWILDCARD); } #ifdef ARGE_DEBUG static void arge_attach_intr_sysctl(device_t dev, struct sysctl_oid_list *parent) { struct arge_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); char sn[8]; int i; tree = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "intr", CTLFLAG_RD, NULL, "Interrupt statistics"); child = SYSCTL_CHILDREN(tree); for (i = 0; i < 32; i++) { snprintf(sn, sizeof(sn), "%d", i); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, sn, CTLFLAG_RD, &sc->intr_stats.count[i], 0, ""); } } #endif static void arge_attach_sysctl(device_t dev) { struct arge_softc *sc = device_get_softc(dev); struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); #ifdef ARGE_DEBUG SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->arge_debug, 0, "arge interface debugging flags"); arge_attach_intr_sysctl(dev, SYSCTL_CHILDREN(tree)); #endif SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_pkts_aligned", CTLFLAG_RW, &sc->stats.tx_pkts_aligned, 0, "number of TX aligned packets"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_pkts_unaligned", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned, 0, "number of TX unaligned packets"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_pkts_unaligned_start", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned_start, 0, "number of TX unaligned packets (start)"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_pkts_unaligned_len", CTLFLAG_RW, &sc->stats.tx_pkts_unaligned_len, 0, "number of TX unaligned packets (len)"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_pkts_nosegs", CTLFLAG_RW, &sc->stats.tx_pkts_nosegs, 0, "number of TX packets fail with no ring slots avail"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "intr_stray_filter", CTLFLAG_RW, &sc->stats.intr_stray, 0, "number of stray interrupts (filter)"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "intr_stray_intr", CTLFLAG_RW, &sc->stats.intr_stray2, 0, "number of stray interrupts (intr)"); SYSCTL_ADD_UINT(ctx, 
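 * (AR933x/AR934x and the QCA95xx parts use the extended ar933x table, the
 * AR724x family has its own, and everything else falls back to the original
 * AR71xx table.)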
SYSCTL_CHILDREN(tree), OID_AUTO, "intr_ok", CTLFLAG_RW, &sc->stats.intr_ok, 0, "number of OK interrupts"); #ifdef ARGE_DEBUG SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_prod", CTLFLAG_RW, &sc->arge_cdata.arge_tx_prod, 0, ""); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cons", CTLFLAG_RW, &sc->arge_cdata.arge_tx_cons, 0, ""); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "tx_cnt", CTLFLAG_RW, &sc->arge_cdata.arge_tx_cnt, 0, ""); #endif } static void arge_reset_mac(struct arge_softc *sc) { uint32_t reg; uint32_t reset_reg; ARGEDEBUG(sc, ARGE_DBG_RESET, "%s called\n", __func__); /* Step 1. Soft-reset MAC */ ARGE_SET_BITS(sc, AR71XX_MAC_CFG1, MAC_CFG1_SOFT_RESET); DELAY(20); /* Step 2. Punt the MAC core from the central reset register */ /* * XXX TODO: migrate this (and other) chip specific stuff into * a chipdef method. */ if (sc->arge_mac_unit == 0) { reset_reg = RST_RESET_GE0_MAC; } else { reset_reg = RST_RESET_GE1_MAC; } /* * AR934x (and later) also needs the MDIO block reset. * XXX should methodize this! */ if (ar71xx_soc == AR71XX_SOC_AR9341 || ar71xx_soc == AR71XX_SOC_AR9342 || ar71xx_soc == AR71XX_SOC_AR9344) { if (sc->arge_mac_unit == 0) { reset_reg |= AR934X_RESET_GE0_MDIO; } else { reset_reg |= AR934X_RESET_GE1_MDIO; } } if (ar71xx_soc == AR71XX_SOC_QCA9556 || ar71xx_soc == AR71XX_SOC_QCA9558) { if (sc->arge_mac_unit == 0) { reset_reg |= QCA955X_RESET_GE0_MDIO; } else { reset_reg |= QCA955X_RESET_GE1_MDIO; } } if (ar71xx_soc == AR71XX_SOC_QCA9533 || ar71xx_soc == AR71XX_SOC_QCA9533_V2) { if (sc->arge_mac_unit == 0) { reset_reg |= QCA953X_RESET_GE0_MDIO; } else { reset_reg |= QCA953X_RESET_GE1_MDIO; } } ar71xx_device_stop(reset_reg); DELAY(100); ar71xx_device_start(reset_reg); /* Step 3. Reconfigure MAC block */ ARGE_WRITE(sc, AR71XX_MAC_CFG1, MAC_CFG1_SYNC_RX | MAC_CFG1_RX_ENABLE | MAC_CFG1_SYNC_TX | MAC_CFG1_TX_ENABLE); reg = ARGE_READ(sc, AR71XX_MAC_CFG2); reg |= MAC_CFG2_ENABLE_PADCRC | MAC_CFG2_LENGTH_FIELD ; ARGE_WRITE(sc, AR71XX_MAC_CFG2, reg); ARGE_WRITE(sc, AR71XX_MAC_MAX_FRAME_LEN, 1536); } /* * These values map to the divisor values programmed into * AR71XX_MAC_MII_CFG. * * The index of each value corresponds to the divisor section * value in AR71XX_MAC_MII_CFG (ie, table[0] means '0' in * AR71XX_MAC_MII_CFG, table[1] means '1', etc.) */ static const uint32_t ar71xx_mdio_div_table[] = { 4, 4, 6, 8, 10, 14, 20, 28, }; static const uint32_t ar7240_mdio_div_table[] = { 2, 2, 4, 6, 8, 12, 18, 26, 32, 40, 48, 56, 62, 70, 78, 96, }; static const uint32_t ar933x_mdio_div_table[] = { 4, 4, 6, 8, 10, 14, 20, 28, 34, 42, 50, 58, 66, 74, 82, 98, }; /* * Lookup the divisor to use based on the given frequency. * * Returns the divisor to use, or -ve on error. */ static int arge_mdio_get_divider(struct arge_softc *sc, unsigned long mdio_clock) { unsigned long ref_clock, t; const uint32_t *table; int ndivs; int i; /* * This is the base MDIO frequency on the SoC. * The dividers .. well, divide. Duh. */ ref_clock = ar71xx_mdio_freq(); /* * If either clock is undefined, just tell the * caller to fall through to the defaults. */ if (ref_clock == 0 || mdio_clock == 0) return (-EINVAL); /* * Pick the correct table! 
*/ switch (ar71xx_soc) { case AR71XX_SOC_AR9330: case AR71XX_SOC_AR9331: case AR71XX_SOC_AR9341: case AR71XX_SOC_AR9342: case AR71XX_SOC_AR9344: case AR71XX_SOC_QCA9533: case AR71XX_SOC_QCA9533_V2: case AR71XX_SOC_QCA9556: case AR71XX_SOC_QCA9558: table = ar933x_mdio_div_table; ndivs = nitems(ar933x_mdio_div_table); break; case AR71XX_SOC_AR7240: case AR71XX_SOC_AR7241: case AR71XX_SOC_AR7242: table = ar7240_mdio_div_table; ndivs = nitems(ar7240_mdio_div_table); break; default: table = ar71xx_mdio_div_table; ndivs = nitems(ar71xx_mdio_div_table); } /* * Now, walk through the list and find the first divisor * that falls under the target MDIO frequency. * * The divisors go up, but the corresponding frequencies * are actually decreasing. */ for (i = 0; i < ndivs; i++) { t = ref_clock / table[i]; if (t <= mdio_clock) { return (i); } } ARGEDEBUG(sc, ARGE_DBG_RESET, "No divider found; MDIO=%lu Hz; target=%lu Hz\n", ref_clock, mdio_clock); return (-ENOENT); } /* * Fetch the MDIO bus clock rate. * * For now, the default is DIV_28 for everything * bar AR934x, which will be DIV_58. * * It will definitely need updating to take into account * the MDIO bus core clock rate and the target clock * rate for the chip. */ static uint32_t arge_fetch_mdiobus_clock_rate(struct arge_softc *sc) { int mdio_freq, div; /* * Is the MDIO frequency defined? If so, find a divisor that * makes reasonable sense. Don't overshoot the frequency. */ if (resource_int_value(device_get_name(sc->arge_dev), device_get_unit(sc->arge_dev), "mdio_freq", &mdio_freq) == 0) { sc->arge_mdiofreq = mdio_freq; div = arge_mdio_get_divider(sc, sc->arge_mdiofreq); if (bootverbose) device_printf(sc->arge_dev, "%s: mdio ref freq=%llu Hz, target freq=%llu Hz," " divisor index=%d\n", __func__, (unsigned long long) ar71xx_mdio_freq(), (unsigned long long) mdio_freq, div); if (div >= 0) return (div); } /* * Default value(s). * * XXX obviously these need .. fixing. * * From Linux/OpenWRT: * * + 7240? DIV_6 * + Builtin-switch port and not 934x? DIV_10 * + Not built-in switch port and 934x? DIV_58 * + .. else DIV_28. */ switch (ar71xx_soc) { case AR71XX_SOC_AR9341: case AR71XX_SOC_AR9342: case AR71XX_SOC_AR9344: case AR71XX_SOC_QCA9533: case AR71XX_SOC_QCA9533_V2: case AR71XX_SOC_QCA9556: case AR71XX_SOC_QCA9558: return (MAC_MII_CFG_CLOCK_DIV_58); break; default: return (MAC_MII_CFG_CLOCK_DIV_28); } } static void arge_reset_miibus(struct arge_softc *sc) { uint32_t mdio_div; mdio_div = arge_fetch_mdiobus_clock_rate(sc); /* * XXX AR934x and later; should we be also resetting the * MDIO block(s) using the reset register block? 
*/ /* Reset MII bus; program in the default divisor */ ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, MAC_MII_CFG_RESET | mdio_div); DELAY(100); ARGE_WRITE(sc, AR71XX_MAC_MII_CFG, mdio_div); DELAY(100); } static void arge_fetch_pll_config(struct arge_softc *sc) { long int val; if (resource_long_value(device_get_name(sc->arge_dev), device_get_unit(sc->arge_dev), "pll_10", &val) == 0) { sc->arge_pllcfg.pll_10 = val; device_printf(sc->arge_dev, "%s: pll_10 = 0x%x\n", __func__, (int) val); } if (resource_long_value(device_get_name(sc->arge_dev), device_get_unit(sc->arge_dev), "pll_100", &val) == 0) { sc->arge_pllcfg.pll_100 = val; device_printf(sc->arge_dev, "%s: pll_100 = 0x%x\n", __func__, (int) val); } if (resource_long_value(device_get_name(sc->arge_dev), device_get_unit(sc->arge_dev), "pll_1000", &val) == 0) { sc->arge_pllcfg.pll_1000 = val; device_printf(sc->arge_dev, "%s: pll_1000 = 0x%x\n", __func__, (int) val); } } static int arge_attach(device_t dev) { struct ifnet *ifp; struct arge_softc *sc; int error = 0, rid, i; uint32_t hint; long eeprom_mac_addr = 0; int miicfg = 0; int readascii = 0; int local_mac = 0; uint8_t local_macaddr[ETHER_ADDR_LEN]; char * local_macstr; char devid_str[32]; int count; sc = device_get_softc(dev); sc->arge_dev = dev; sc->arge_mac_unit = device_get_unit(dev); /* * See if there's a "board" MAC address hint available for * this particular device. * * This is in the environment - it'd be nice to use the resource_*() * routines, but at the moment the system is booting, the resource hints * are set to the 'static' map so they're not pulling from kenv. */ snprintf(devid_str, 32, "hint.%s.%d.macaddr", device_get_name(dev), device_get_unit(dev)); if ((local_macstr = kern_getenv(devid_str)) != NULL) { uint32_t tmpmac[ETHER_ADDR_LEN]; /* Have a MAC address; should use it */ device_printf(dev, "Overriding MAC address from environment: '%s'\n", local_macstr); /* Extract out the MAC address */ /* XXX this should all be a generic method */ count = sscanf(local_macstr, "%x%*c%x%*c%x%*c%x%*c%x%*c%x", &tmpmac[0], &tmpmac[1], &tmpmac[2], &tmpmac[3], &tmpmac[4], &tmpmac[5]); if (count == 6) { /* Valid! */ local_mac = 1; for (i = 0; i < ETHER_ADDR_LEN; i++) local_macaddr[i] = tmpmac[i]; } /* Done! */ freeenv(local_macstr); local_macstr = NULL; } /* * Hardware workarounds. */ switch (ar71xx_soc) { case AR71XX_SOC_AR9330: case AR71XX_SOC_AR9331: case AR71XX_SOC_AR9341: case AR71XX_SOC_AR9342: case AR71XX_SOC_AR9344: case AR71XX_SOC_QCA9533: case AR71XX_SOC_QCA9533_V2: case AR71XX_SOC_QCA9556: case AR71XX_SOC_QCA9558: /* Arbitrary alignment */ sc->arge_hw_flags |= ARGE_HW_FLG_TX_DESC_ALIGN_1BYTE; sc->arge_hw_flags |= ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE; break; default: sc->arge_hw_flags |= ARGE_HW_FLG_TX_DESC_ALIGN_4BYTE; sc->arge_hw_flags |= ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE; break; } /* * Some units (eg the TP-Link WR-1043ND) do not have a convenient * EEPROM location to read the ethernet MAC address from. * OpenWRT simply snaffles it from a fixed location. * * Since multiple units seem to use this feature, include * a method of setting the MAC address based on an flash location * in CPU address space. * * Some vendors have decided to store the mac address as a literal * string of 18 characters in xx:xx:xx:xx:xx:xx format instead of * an array of numbers. 
Expose a hint to turn on this conversion * feature via strtol() */ if (local_mac == 0 && resource_long_value(device_get_name(dev), device_get_unit(dev), "eeprommac", &eeprom_mac_addr) == 0) { local_mac = 1; int i; const char *mac = (const char *) MIPS_PHYS_TO_KSEG1(eeprom_mac_addr); device_printf(dev, "Overriding MAC from EEPROM\n"); if (resource_int_value(device_get_name(dev), device_get_unit(dev), "readascii", &readascii) == 0) { device_printf(dev, "Vendor stores MAC in ASCII format\n"); for (i = 0; i < 6; i++) { local_macaddr[i] = strtol(&(mac[i*3]), NULL, 16); } } else { for (i = 0; i < 6; i++) { local_macaddr[i] = mac[i]; } } } KASSERT(((sc->arge_mac_unit == 0) || (sc->arge_mac_unit == 1)), ("if_arge: Only MAC0 and MAC1 supported")); /* * Fetch the PLL configuration. */ arge_fetch_pll_config(sc); /* * Get the MII configuration, if applicable. */ if (resource_int_value(device_get_name(dev), device_get_unit(dev), "miimode", &miicfg) == 0) { /* XXX bounds check? */ device_printf(dev, "%s: overriding MII mode to '%s'\n", __func__, arge_miicfg_str[miicfg]); sc->arge_miicfg = miicfg; } /* * Get which PHY of 5 available we should use for this unit */ if (resource_int_value(device_get_name(dev), device_get_unit(dev), "phymask", &sc->arge_phymask) != 0) { /* * Use port 4 (WAN) for GE0. For any other port use * its PHY the same as its unit number */ if (sc->arge_mac_unit == 0) sc->arge_phymask = (1 << 4); else /* Use all phys up to 4 */ sc->arge_phymask = (1 << 4) - 1; device_printf(dev, "No PHY specified, using mask %d\n", sc->arge_phymask); } /* * Get default/hard-coded media & duplex mode. */ if (resource_int_value(device_get_name(dev), device_get_unit(dev), "media", &hint) != 0) hint = 0; if (hint == 1000) sc->arge_media_type = IFM_1000_T; else if (hint == 100) sc->arge_media_type = IFM_100_TX; else if (hint == 10) sc->arge_media_type = IFM_10_T; else sc->arge_media_type = 0; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fduplex", &hint) != 0) hint = 1; if (hint) sc->arge_duplex_mode = IFM_FDX; else sc->arge_duplex_mode = 0; mtx_init(&sc->arge_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->arge_stat_callout, &sc->arge_mtx, 0); TASK_INIT(&sc->arge_link_task, 0, arge_link_task, sc); /* Map control/status registers. */ sc->arge_rid = 0; sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->arge_rid, RF_ACTIVE | RF_SHAREABLE); if (sc->arge_res == NULL) { device_printf(dev, "couldn't map memory\n"); error = ENXIO; goto fail; } /* Allocate interrupts */ rid = 0; sc->arge_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->arge_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Allocate ifnet structure. */ ifp = sc->arge_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "couldn't allocate ifnet structure\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = arge_ioctl; ifp->if_start = arge_start; ifp->if_init = arge_init; sc->arge_if_flags = ifp->if_flags; /* XXX: add real size */ IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); /* Tell the upper layer(s) we support long frames. 
*/ ifp->if_capabilities |= IFCAP_VLAN_MTU; ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif /* If there's a local mac defined, copy that in */ if (local_mac == 1) { (void) ar71xx_mac_addr_init(sc->arge_eaddr, local_macaddr, 0, 0); } else { /* * No MAC address configured. Generate the random one. */ if (bootverbose) device_printf(dev, "Generating random ethernet address.\n"); (void) ar71xx_mac_addr_random_init(sc->arge_eaddr); } if (arge_dma_alloc(sc) != 0) { error = ENXIO; goto fail; } /* * Don't do this for the MDIO bus case - it's already done * as part of the MDIO bus attachment. * * XXX TODO: if we don't do this, we don't ever release the MAC * from reset and we can't use the port. Now, if we define ARGE_MDIO * but we /don't/ define two MDIO busses, then we can't actually * use both MACs. */ #if !defined(ARGE_MDIO) /* Initialize the MAC block */ arge_reset_mac(sc); arge_reset_miibus(sc); #endif /* Configure MII mode, just for convienence */ if (sc->arge_miicfg != 0) ar71xx_device_set_mii_if(sc->arge_mac_unit, sc->arge_miicfg); /* * Set all Ethernet address registers to the same initial values * set all four addresses to 66-88-aa-cc-dd-ee */ ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR1, (sc->arge_eaddr[2] << 24) | (sc->arge_eaddr[3] << 16) | (sc->arge_eaddr[4] << 8) | sc->arge_eaddr[5]); ARGE_WRITE(sc, AR71XX_MAC_STA_ADDR2, (sc->arge_eaddr[0] << 8) | sc->arge_eaddr[1]); ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG0, FIFO_CFG0_ALL << FIFO_CFG0_ENABLE_SHIFT); /* * SoC specific bits. */ switch (ar71xx_soc) { case AR71XX_SOC_AR7240: case AR71XX_SOC_AR7241: case AR71XX_SOC_AR7242: case AR71XX_SOC_AR9330: case AR71XX_SOC_AR9331: case AR71XX_SOC_AR9341: case AR71XX_SOC_AR9342: case AR71XX_SOC_AR9344: case AR71XX_SOC_QCA9533: case AR71XX_SOC_QCA9533_V2: case AR71XX_SOC_QCA9556: case AR71XX_SOC_QCA9558: ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0010ffff); ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x015500aa); break; /* AR71xx, AR913x */ default: ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG1, 0x0fff0000); ARGE_WRITE(sc, AR71XX_MAC_FIFO_CFG2, 0x00001fff); } ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMATCH, FIFO_RX_FILTMATCH_DEFAULT); ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK, FIFO_RX_FILTMASK_DEFAULT); #if defined(ARGE_MDIO) sc->arge_miiproxy = mii_attach_proxy(sc->arge_dev); #endif device_printf(sc->arge_dev, "finishing attachment, phymask %04x" ", proxy %s \n", sc->arge_phymask, sc->arge_miiproxy == NULL ? "null" : "set"); for (i = 0; i < ARGE_NPHY; i++) { if (((1 << i) & sc->arge_phymask) != 0) { error = mii_attach(sc->arge_miiproxy != NULL ? sc->arge_miiproxy : sc->arge_dev, &sc->arge_miibus, sc->arge_ifp, arge_ifmedia_upd, arge_ifmedia_sts, BMSR_DEFCAPMASK, i, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(sc->arge_dev, "unable to attach" " PHY %d: %d\n", i, error); goto fail; } } } if (sc->arge_miibus == NULL) { /* no PHY, so use hard-coded values */ ifmedia_init(&sc->arge_ifmedia, 0, arge_multiphy_mediachange, arge_multiphy_mediastatus); ifmedia_add(&sc->arge_ifmedia, IFM_ETHER | sc->arge_media_type | sc->arge_duplex_mode, 0, NULL); ifmedia_set(&sc->arge_ifmedia, IFM_ETHER | sc->arge_media_type | sc->arge_duplex_mode); arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode); } /* Call MI attach routine. 
*/ ether_ifattach(sc->arge_ifp, sc->arge_eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(sc->arge_dev, sc->arge_irq, INTR_TYPE_NET | INTR_MPSAFE, arge_intr_filter, arge_intr, sc, &sc->arge_intrhand); if (error) { device_printf(sc->arge_dev, "couldn't set up irq\n"); ether_ifdetach(sc->arge_ifp); goto fail; } /* setup sysctl variables */ arge_attach_sysctl(sc->arge_dev); fail: if (error) arge_detach(dev); return (error); } static int arge_detach(device_t dev) { struct arge_softc *sc = device_get_softc(dev); struct ifnet *ifp = sc->arge_ifp; KASSERT(mtx_initialized(&sc->arge_mtx), ("arge mutex not initialized")); /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { ARGE_LOCK(sc); sc->arge_detach = 1; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif arge_stop(sc); ARGE_UNLOCK(sc); taskqueue_drain(taskqueue_swi, &sc->arge_link_task); ether_ifdetach(ifp); } if (sc->arge_miibus) device_delete_child(dev, sc->arge_miibus); if (sc->arge_miiproxy) device_delete_child(dev, sc->arge_miiproxy); bus_generic_detach(dev); if (sc->arge_intrhand) bus_teardown_intr(dev, sc->arge_irq, sc->arge_intrhand); if (sc->arge_res) bus_release_resource(dev, SYS_RES_MEMORY, sc->arge_rid, sc->arge_res); if (ifp) if_free(ifp); arge_dma_free(sc); mtx_destroy(&sc->arge_mtx); return (0); } static int arge_suspend(device_t dev) { panic("%s", __func__); return 0; } static int arge_resume(device_t dev) { panic("%s", __func__); return 0; } static int arge_shutdown(device_t dev) { struct arge_softc *sc; sc = device_get_softc(dev); ARGE_LOCK(sc); arge_stop(sc); ARGE_UNLOCK(sc); return (0); } static void arge_hinted_child(device_t bus, const char *dname, int dunit) { BUS_ADD_CHILD(bus, 0, dname, dunit); device_printf(bus, "hinted child %s%d\n", dname, dunit); } static int arge_mdio_busy(struct arge_softc *sc) { int i,result; for (i = 0; i < ARGE_MII_TIMEOUT; i++) { DELAY(5); ARGE_MDIO_BARRIER_READ(sc); result = ARGE_MDIO_READ(sc, AR71XX_MAC_MII_INDICATOR); if (! result) return (0); DELAY(5); } return (-1); } static int arge_miibus_readreg(device_t dev, int phy, int reg) { struct arge_softc * sc = device_get_softc(dev); int result; uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK); mtx_lock(&miibus_mtx); ARGE_MDIO_BARRIER_RW(sc); ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE); ARGE_MDIO_BARRIER_WRITE(sc); ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_ADDR, addr); ARGE_MDIO_BARRIER_WRITE(sc); ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_READ); if (arge_mdio_busy(sc) != 0) { mtx_unlock(&miibus_mtx); ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__); /* XXX: return ERRNO istead? 
*/ return (-1); } ARGE_MDIO_BARRIER_READ(sc); result = ARGE_MDIO_READ(sc, AR71XX_MAC_MII_STATUS) & MAC_MII_STATUS_MASK; ARGE_MDIO_BARRIER_RW(sc); ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CMD, MAC_MII_CMD_WRITE); mtx_unlock(&miibus_mtx); ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value[%08x]=%04x\n", __func__, phy, reg, addr, result); return (result); } static int arge_miibus_writereg(device_t dev, int phy, int reg, int data) { struct arge_softc * sc = device_get_softc(dev); uint32_t addr = (phy << MAC_MII_PHY_ADDR_SHIFT) | (reg & MAC_MII_REG_MASK); ARGEDEBUG(sc, ARGE_DBG_MII, "%s: phy=%d, reg=%02x, value=%04x\n", __func__, phy, reg, data); mtx_lock(&miibus_mtx); ARGE_MDIO_BARRIER_RW(sc); ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_ADDR, addr); ARGE_MDIO_BARRIER_WRITE(sc); ARGE_MDIO_WRITE(sc, AR71XX_MAC_MII_CONTROL, data); ARGE_MDIO_BARRIER_WRITE(sc); if (arge_mdio_busy(sc) != 0) { mtx_unlock(&miibus_mtx); ARGEDEBUG(sc, ARGE_DBG_MII, "%s timedout\n", __func__); /* XXX: return ERRNO istead? */ return (-1); } mtx_unlock(&miibus_mtx); return (0); } static void arge_miibus_statchg(device_t dev) { struct arge_softc *sc; sc = device_get_softc(dev); taskqueue_enqueue(taskqueue_swi, &sc->arge_link_task); } static void arge_link_task(void *arg, int pending) { struct arge_softc *sc; sc = (struct arge_softc *)arg; ARGE_LOCK(sc); arge_update_link_locked(sc); ARGE_UNLOCK(sc); } static void arge_update_link_locked(struct arge_softc *sc) { struct mii_data *mii; struct ifnet *ifp; uint32_t media, duplex; mii = device_get_softc(sc->arge_miibus); ifp = sc->arge_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { return; } /* * If we have a static media type configured, then * use that. Some PHY configurations (eg QCA955x -> AR8327) * use a static speed/duplex between the SoC and switch, * even though the front-facing PHY speed changes. */ if (sc->arge_media_type != 0) { ARGEDEBUG(sc, ARGE_DBG_MII, "%s: fixed; media=%d, duplex=%d\n", __func__, sc->arge_media_type, sc->arge_duplex_mode); if (mii->mii_media_status & IFM_ACTIVE) { sc->arge_link_status = 1; } else { sc->arge_link_status = 0; } arge_set_pll(sc, sc->arge_media_type, sc->arge_duplex_mode); } if (mii->mii_media_status & IFM_ACTIVE) { media = IFM_SUBTYPE(mii->mii_media_active); if (media != IFM_NONE) { sc->arge_link_status = 1; duplex = mii->mii_media_active & IFM_GMASK; ARGEDEBUG(sc, ARGE_DBG_MII, "%s: media=%d, duplex=%d\n", __func__, media, duplex); arge_set_pll(sc, media, duplex); } } else { sc->arge_link_status = 0; } } static void arge_set_pll(struct arge_softc *sc, int media, int duplex) { uint32_t cfg, ifcontrol, rx_filtmask; uint32_t fifo_tx, pll; int if_speed; /* * XXX Verify - is this valid for all chips? * QCA955x (and likely some of the earlier chips!) define * this as nibble mode and byte mode, and those have to do * with the interface type (MII/SMII versus GMII/RGMII.) */ ARGEDEBUG(sc, ARGE_DBG_PLL, "set_pll(%04x, %s)\n", media, duplex == IFM_FDX ? 
"full" : "half"); cfg = ARGE_READ(sc, AR71XX_MAC_CFG2); cfg &= ~(MAC_CFG2_IFACE_MODE_1000 | MAC_CFG2_IFACE_MODE_10_100 | MAC_CFG2_FULL_DUPLEX); if (duplex == IFM_FDX) cfg |= MAC_CFG2_FULL_DUPLEX; ifcontrol = ARGE_READ(sc, AR71XX_MAC_IFCONTROL); ifcontrol &= ~MAC_IFCONTROL_SPEED; rx_filtmask = ARGE_READ(sc, AR71XX_MAC_FIFO_RX_FILTMASK); rx_filtmask &= ~FIFO_RX_MASK_BYTE_MODE; switch(media) { case IFM_10_T: cfg |= MAC_CFG2_IFACE_MODE_10_100; if_speed = 10; break; case IFM_100_TX: cfg |= MAC_CFG2_IFACE_MODE_10_100; ifcontrol |= MAC_IFCONTROL_SPEED; if_speed = 100; break; case IFM_1000_T: case IFM_1000_SX: cfg |= MAC_CFG2_IFACE_MODE_1000; rx_filtmask |= FIFO_RX_MASK_BYTE_MODE; if_speed = 1000; break; default: if_speed = 100; device_printf(sc->arge_dev, "Unknown media %d\n", media); } ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: if_speed=%d\n", __func__, if_speed); switch (ar71xx_soc) { case AR71XX_SOC_AR7240: case AR71XX_SOC_AR7241: case AR71XX_SOC_AR7242: case AR71XX_SOC_AR9330: case AR71XX_SOC_AR9331: case AR71XX_SOC_AR9341: case AR71XX_SOC_AR9342: case AR71XX_SOC_AR9344: case AR71XX_SOC_QCA9533: case AR71XX_SOC_QCA9533_V2: case AR71XX_SOC_QCA9556: case AR71XX_SOC_QCA9558: fifo_tx = 0x01f00140; break; case AR71XX_SOC_AR9130: case AR71XX_SOC_AR9132: fifo_tx = 0x00780fff; break; /* AR71xx */ default: fifo_tx = 0x008001ff; } ARGE_WRITE(sc, AR71XX_MAC_CFG2, cfg); ARGE_WRITE(sc, AR71XX_MAC_IFCONTROL, ifcontrol); ARGE_WRITE(sc, AR71XX_MAC_FIFO_RX_FILTMASK, rx_filtmask); ARGE_WRITE(sc, AR71XX_MAC_FIFO_TX_THRESHOLD, fifo_tx); /* fetch PLL registers */ pll = ar71xx_device_get_eth_pll(sc->arge_mac_unit, if_speed); ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: pll=0x%x\n", __func__, pll); /* Override if required by platform data */ if (if_speed == 10 && sc->arge_pllcfg.pll_10 != 0) pll = sc->arge_pllcfg.pll_10; else if (if_speed == 100 && sc->arge_pllcfg.pll_100 != 0) pll = sc->arge_pllcfg.pll_100; else if (if_speed == 1000 && sc->arge_pllcfg.pll_1000 != 0) pll = sc->arge_pllcfg.pll_1000; ARGEDEBUG(sc, ARGE_DBG_PLL, "%s: final pll=0x%x\n", __func__, pll); /* XXX ensure pll != 0 */ ar71xx_device_set_pll_ge(sc->arge_mac_unit, if_speed, pll); /* set MII registers */ /* * This was introduced to match what the Linux ag71xx ethernet * driver does. For the AR71xx case, it does set the port * MII speed. However, if this is done, non-gigabit speeds * are not at all reliable when speaking via RGMII through * 'bridge' PHY port that's pretending to be a local PHY. * * Until that gets root caused, and until an AR71xx + normal * PHY board is tested, leave this disabled. 
*/ #if 0 ar71xx_device_set_mii_speed(sc->arge_mac_unit, if_speed); #endif } static void arge_reset_dma(struct arge_softc *sc) { ARGEDEBUG(sc, ARGE_DBG_RESET, "%s: called\n", __func__); ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, 0); ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, 0); ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, 0); ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, 0); /* Clear all possible RX interrupts */ while(ARGE_READ(sc, AR71XX_DMA_RX_STATUS) & DMA_RX_STATUS_PKT_RECVD) ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD); /* * Clear all possible TX interrupts */ while(ARGE_READ(sc, AR71XX_DMA_TX_STATUS) & DMA_TX_STATUS_PKT_SENT) ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT); /* * Now Rx/Tx errors */ ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR | DMA_RX_STATUS_OVERFLOW); ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR | DMA_TX_STATUS_UNDERRUN); /* * Force a DDR flush so any pending data is properly * flushed to RAM before underlying buffers are freed. */ arge_flush_ddr(sc); } static void arge_init(void *xsc) { struct arge_softc *sc = xsc; ARGE_LOCK(sc); arge_init_locked(sc); ARGE_UNLOCK(sc); } static void arge_init_locked(struct arge_softc *sc) { struct ifnet *ifp = sc->arge_ifp; struct mii_data *mii; ARGE_LOCK_ASSERT(sc); if ((ifp->if_flags & IFF_UP) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) return; /* Init circular RX list. */ if (arge_rx_ring_init(sc) != 0) { device_printf(sc->arge_dev, "initialization failed: no memory for rx buffers\n"); arge_stop(sc); return; } /* Init tx descriptors. */ arge_tx_ring_init(sc); arge_reset_dma(sc); if (sc->arge_miibus) { mii = device_get_softc(sc->arge_miibus); mii_mediachg(mii); } else { /* * Sun always shines over multiPHY interface */ sc->arge_link_status = 1; } ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; if (sc->arge_miibus) { callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc); arge_update_link_locked(sc); } ARGE_WRITE(sc, AR71XX_DMA_TX_DESC, ARGE_TX_RING_ADDR(sc, 0)); ARGE_WRITE(sc, AR71XX_DMA_RX_DESC, ARGE_RX_RING_ADDR(sc, 0)); /* Start listening */ ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN); /* Enable interrupts */ ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL); } /* * Return whether the mbuf chain is correctly aligned * for the arge TX engine. * * All the MACs have a length requirement: any non-final * fragment (ie, descriptor with MORE bit set) needs to have * a length divisible by 4. * * The AR71xx, AR913x require the start address also be * DWORD aligned. The later MACs don't. */ static int arge_mbuf_chain_is_tx_aligned(struct arge_softc *sc, struct mbuf *m0) { struct mbuf *m; for (m = m0; m != NULL; m = m->m_next) { /* * Only do this for chips that require it. */ if ((sc->arge_hw_flags & ARGE_HW_FLG_TX_DESC_ALIGN_4BYTE) && (mtod(m, intptr_t) & 3) != 0) { sc->stats.tx_pkts_unaligned_start++; return 0; } /* * All chips have this requirement for length. */ if ((m->m_next != NULL) && ((m->m_len & 0x03) != 0)) { sc->stats.tx_pkts_unaligned_len++; return 0; } } return 1; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int arge_encap(struct arge_softc *sc, struct mbuf **m_head) { struct arge_txdesc *txd; struct arge_desc *desc, *prev_desc; bus_dma_segment_t txsegs[ARGE_MAXFRAGS]; int error, i, nsegs, prod, prev_prod; struct mbuf *m; ARGE_LOCK_ASSERT(sc); /* * Fix mbuf chain based on hardware alignment constraints. */ m = *m_head; if (! 
arge_mbuf_chain_is_tx_aligned(sc, m)) { sc->stats.tx_pkts_unaligned++; m = m_defrag(*m_head, M_NOWAIT); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } *m_head = m; } else sc->stats.tx_pkts_aligned++; prod = sc->arge_cdata.arge_tx_prod; txd = &sc->arge_cdata.arge_txdesc[prod]; error = bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { panic("EFBIG"); } else if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* Check number of available descriptors. */ if (sc->arge_cdata.arge_tx_cnt + nsegs >= (ARGE_TX_RING_COUNT - 2)) { bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap); sc->stats.tx_pkts_nosegs++; return (ENOBUFS); } txd->tx_m = *m_head; bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap, BUS_DMASYNC_PREWRITE); /* * Make a list of descriptors for this packet. DMA controller will * walk through it while arge_link is not zero. * * Since we're in a endless circular buffer, ensure that * the first descriptor in a multi-descriptor ring is always * set to EMPTY, then un-do it when we're done populating. */ prev_prod = prod; desc = prev_desc = NULL; for (i = 0; i < nsegs; i++) { uint32_t tmp; desc = &sc->arge_rdata.arge_tx_ring[prod]; /* * Set DESC_EMPTY so the hardware (hopefully) stops at this * point. We don't want it to start transmitting descriptors * before we've finished fleshing this out. */ tmp = ARGE_DMASIZE(txsegs[i].ds_len); if (i == 0) tmp |= ARGE_DESC_EMPTY; desc->packet_ctrl = tmp; /* XXX Note: only relevant for older MACs; but check length! */ if ((sc->arge_hw_flags & ARGE_HW_FLG_TX_DESC_ALIGN_4BYTE) && (txsegs[i].ds_addr & 3)) panic("TX packet address unaligned\n"); desc->packet_addr = txsegs[i].ds_addr; /* link with previous descriptor */ if (prev_desc) prev_desc->packet_ctrl |= ARGE_DESC_MORE; sc->arge_cdata.arge_tx_cnt++; prev_desc = desc; ARGE_INC(prod, ARGE_TX_RING_COUNT); } /* Update producer index. */ sc->arge_cdata.arge_tx_prod = prod; /* * The descriptors are updated, so enable the first one. */ desc = &sc->arge_rdata.arge_tx_ring[prev_prod]; desc->packet_ctrl &= ~ ARGE_DESC_EMPTY; /* Sync descriptors. */ bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag, sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Flush writes */ ARGE_BARRIER_WRITE(sc); /* Start transmitting */ ARGEDEBUG(sc, ARGE_DBG_TX, "%s: setting DMA_TX_CONTROL_EN\n", __func__); ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN); return (0); } static void arge_start(struct ifnet *ifp) { struct arge_softc *sc; sc = ifp->if_softc; ARGE_LOCK(sc); arge_start_locked(ifp); ARGE_UNLOCK(sc); } static void arge_start_locked(struct ifnet *ifp) { struct arge_softc *sc; struct mbuf *m_head; int enq = 0; sc = ifp->if_softc; ARGE_LOCK_ASSERT(sc); ARGEDEBUG(sc, ARGE_DBG_TX, "%s: beginning\n", __func__); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || sc->arge_link_status == 0 ) return; /* * Before we go any further, check whether we're already full. * The below check errors out immediately if the ring is full * and never gets a chance to set this flag. Although it's * likely never needed, this at least avoids an unexpected * situation. 
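 * (arge_encap() enforces the same ARGE_TX_RING_COUNT - 2 limit, so the
 * main effect of this early check is that IFF_DRV_OACTIVE gets set
 * before any packet is dequeued from the send queue.)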
*/ if (sc->arge_cdata.arge_tx_cnt >= ARGE_TX_RING_COUNT - 2) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; ARGEDEBUG(sc, ARGE_DBG_ERR, "%s: tx_cnt %d >= max %d; setting IFF_DRV_OACTIVE\n", __func__, sc->arge_cdata.arge_tx_cnt, ARGE_TX_RING_COUNT - 2); return; } arge_flush_ddr(sc); for (enq = 0; !IFQ_DRV_IS_EMPTY(&ifp->if_snd) && sc->arge_cdata.arge_tx_cnt < ARGE_TX_RING_COUNT - 2; ) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* * Pack the data into the transmit ring. */ if (arge_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } enq++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ ETHER_BPF_MTAP(ifp, m_head); } ARGEDEBUG(sc, ARGE_DBG_TX, "%s: finished; queued %d packets\n", __func__, enq); } static void arge_stop(struct arge_softc *sc) { struct ifnet *ifp; ARGE_LOCK_ASSERT(sc); ifp = sc->arge_ifp; ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); if (sc->arge_miibus) callout_stop(&sc->arge_stat_callout); /* mask out interrupts */ ARGE_WRITE(sc, AR71XX_DMA_INTR, 0); arge_reset_dma(sc); /* Flush FIFO and free any existing mbufs */ arge_flush_ddr(sc); arge_rx_ring_free(sc); arge_tx_ring_free(sc); } static int arge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct arge_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii; int error; #ifdef DEVICE_POLLING int mask; #endif switch (command) { case SIOCSIFFLAGS: ARGE_LOCK(sc); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { if (((ifp->if_flags ^ sc->arge_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) != 0) { /* XXX: handle promisc & multi flags */ } } else { if (!sc->arge_detach) arge_init_locked(sc); } } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; arge_stop(sc); } sc->arge_if_flags = ifp->if_flags; ARGE_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: /* XXX: implement SIOCDELMULTI */ error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (sc->arge_miibus) { mii = device_get_softc(sc->arge_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } else error = ifmedia_ioctl(ifp, ifr, &sc->arge_ifmedia, command); break; case SIOCSIFCAP: /* XXX: Check other capabilities */ #ifdef DEVICE_POLLING mask = ifp->if_capenable ^ ifr->ifr_reqcap; if (mask & IFCAP_POLLING) { if (ifr->ifr_reqcap & IFCAP_POLLING) { ARGE_WRITE(sc, AR71XX_DMA_INTR, 0); error = ether_poll_register(arge_poll, ifp); if (error) return error; ARGE_LOCK(sc); ifp->if_capenable |= IFCAP_POLLING; ARGE_UNLOCK(sc); } else { ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL); error = ether_poll_deregister(ifp); ARGE_LOCK(sc); ifp->if_capenable &= ~IFCAP_POLLING; ARGE_UNLOCK(sc); } } error = 0; break; #endif default: error = ether_ioctl(ifp, command, data); break; } return (error); } /* * Set media options. */ static int arge_ifmedia_upd(struct ifnet *ifp) { struct arge_softc *sc; struct mii_data *mii; struct mii_softc *miisc; int error; sc = ifp->if_softc; ARGE_LOCK(sc); mii = device_get_softc(sc->arge_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) PHY_RESET(miisc); error = mii_mediachg(mii); ARGE_UNLOCK(sc); return (error); } /* * Report current media status. 
*/ static void arge_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct arge_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->arge_miibus); ARGE_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; ARGE_UNLOCK(sc); } struct arge_dmamap_arg { bus_addr_t arge_busaddr; }; static void arge_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct arge_dmamap_arg *ctx; if (error != 0) return; ctx = arg; ctx->arge_busaddr = segs[0].ds_addr; } static int arge_dma_alloc(struct arge_softc *sc) { struct arge_dmamap_arg ctx; struct arge_txdesc *txd; struct arge_rxdesc *rxd; int error, i; int arge_tx_align, arge_rx_align; /* Assume 4 byte alignment by default */ arge_tx_align = 4; arge_rx_align = 4; if (sc->arge_hw_flags & ARGE_HW_FLG_TX_DESC_ALIGN_1BYTE) arge_tx_align = 1; if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE) arge_rx_align = 1; /* Create parent DMA tag. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->arge_dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ 0, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->arge_cdata.arge_parent_tag); if (error != 0) { device_printf(sc->arge_dev, "failed to create parent DMA tag\n"); goto fail; } /* Create tag for Tx ring. */ error = bus_dma_tag_create( sc->arge_cdata.arge_parent_tag, /* parent */ ARGE_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ARGE_TX_DMA_SIZE, /* maxsize */ 1, /* nsegments */ ARGE_TX_DMA_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->arge_cdata.arge_tx_ring_tag); if (error != 0) { device_printf(sc->arge_dev, "failed to create Tx ring DMA tag\n"); goto fail; } /* Create tag for Rx ring. */ error = bus_dma_tag_create( sc->arge_cdata.arge_parent_tag, /* parent */ ARGE_RING_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ ARGE_RX_DMA_SIZE, /* maxsize */ 1, /* nsegments */ ARGE_RX_DMA_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->arge_cdata.arge_rx_ring_tag); if (error != 0) { device_printf(sc->arge_dev, "failed to create Rx ring DMA tag\n"); goto fail; } /* Create tag for Tx buffers. */ error = bus_dma_tag_create( sc->arge_cdata.arge_parent_tag, /* parent */ arge_tx_align, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES * ARGE_MAXFRAGS, /* maxsize */ ARGE_MAXFRAGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->arge_cdata.arge_tx_tag); if (error != 0) { device_printf(sc->arge_dev, "failed to create Tx DMA tag\n"); goto fail; } /* Create tag for Rx buffers. 
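 * The alignment passed in is the arge_rx_align value computed above:
 * 1 byte on MACs that can DMA RX buffers from any address, 4 bytes
 * otherwise.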
*/ error = bus_dma_tag_create( sc->arge_cdata.arge_parent_tag, /* parent */ arge_rx_align, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, /* maxsize */ ARGE_MAXFRAGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->arge_cdata.arge_rx_tag); if (error != 0) { device_printf(sc->arge_dev, "failed to create Rx DMA tag\n"); goto fail; } /* Allocate DMA'able memory and load the DMA map for Tx ring. */ error = bus_dmamem_alloc(sc->arge_cdata.arge_tx_ring_tag, (void **)&sc->arge_rdata.arge_tx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_tx_ring_map); if (error != 0) { device_printf(sc->arge_dev, "failed to allocate DMA'able memory for Tx ring\n"); goto fail; } ctx.arge_busaddr = 0; error = bus_dmamap_load(sc->arge_cdata.arge_tx_ring_tag, sc->arge_cdata.arge_tx_ring_map, sc->arge_rdata.arge_tx_ring, ARGE_TX_DMA_SIZE, arge_dmamap_cb, &ctx, 0); if (error != 0 || ctx.arge_busaddr == 0) { device_printf(sc->arge_dev, "failed to load DMA'able memory for Tx ring\n"); goto fail; } sc->arge_rdata.arge_tx_ring_paddr = ctx.arge_busaddr; /* Allocate DMA'able memory and load the DMA map for Rx ring. */ error = bus_dmamem_alloc(sc->arge_cdata.arge_rx_ring_tag, (void **)&sc->arge_rdata.arge_rx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->arge_cdata.arge_rx_ring_map); if (error != 0) { device_printf(sc->arge_dev, "failed to allocate DMA'able memory for Rx ring\n"); goto fail; } ctx.arge_busaddr = 0; error = bus_dmamap_load(sc->arge_cdata.arge_rx_ring_tag, sc->arge_cdata.arge_rx_ring_map, sc->arge_rdata.arge_rx_ring, ARGE_RX_DMA_SIZE, arge_dmamap_cb, &ctx, 0); if (error != 0 || ctx.arge_busaddr == 0) { device_printf(sc->arge_dev, "failed to load DMA'able memory for Rx ring\n"); goto fail; } sc->arge_rdata.arge_rx_ring_paddr = ctx.arge_busaddr; /* Create DMA maps for Tx buffers. */ for (i = 0; i < ARGE_TX_RING_COUNT; i++) { txd = &sc->arge_cdata.arge_txdesc[i]; txd->tx_m = NULL; txd->tx_dmamap = NULL; error = bus_dmamap_create(sc->arge_cdata.arge_tx_tag, 0, &txd->tx_dmamap); if (error != 0) { device_printf(sc->arge_dev, "failed to create Tx dmamap\n"); goto fail; } } /* Create DMA maps for Rx buffers. */ if ((error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0, &sc->arge_cdata.arge_rx_sparemap)) != 0) { device_printf(sc->arge_dev, "failed to create spare Rx dmamap\n"); goto fail; } for (i = 0; i < ARGE_RX_RING_COUNT; i++) { rxd = &sc->arge_cdata.arge_rxdesc[i]; rxd->rx_m = NULL; rxd->rx_dmamap = NULL; error = bus_dmamap_create(sc->arge_cdata.arge_rx_tag, 0, &rxd->rx_dmamap); if (error != 0) { device_printf(sc->arge_dev, "failed to create Rx dmamap\n"); goto fail; } } fail: return (error); } static void arge_dma_free(struct arge_softc *sc) { struct arge_txdesc *txd; struct arge_rxdesc *rxd; int i; /* Tx ring. */ if (sc->arge_cdata.arge_tx_ring_tag) { if (sc->arge_rdata.arge_tx_ring_paddr) bus_dmamap_unload(sc->arge_cdata.arge_tx_ring_tag, sc->arge_cdata.arge_tx_ring_map); if (sc->arge_rdata.arge_tx_ring) bus_dmamem_free(sc->arge_cdata.arge_tx_ring_tag, sc->arge_rdata.arge_tx_ring, sc->arge_cdata.arge_tx_ring_map); sc->arge_rdata.arge_tx_ring = NULL; sc->arge_rdata.arge_tx_ring_paddr = 0; bus_dma_tag_destroy(sc->arge_cdata.arge_tx_ring_tag); sc->arge_cdata.arge_tx_ring_tag = NULL; } /* Rx ring. 
*/ if (sc->arge_cdata.arge_rx_ring_tag) { if (sc->arge_rdata.arge_rx_ring_paddr) bus_dmamap_unload(sc->arge_cdata.arge_rx_ring_tag, sc->arge_cdata.arge_rx_ring_map); if (sc->arge_rdata.arge_rx_ring) bus_dmamem_free(sc->arge_cdata.arge_rx_ring_tag, sc->arge_rdata.arge_rx_ring, sc->arge_cdata.arge_rx_ring_map); sc->arge_rdata.arge_rx_ring = NULL; sc->arge_rdata.arge_rx_ring_paddr = 0; bus_dma_tag_destroy(sc->arge_cdata.arge_rx_ring_tag); sc->arge_cdata.arge_rx_ring_tag = NULL; } /* Tx buffers. */ if (sc->arge_cdata.arge_tx_tag) { for (i = 0; i < ARGE_TX_RING_COUNT; i++) { txd = &sc->arge_cdata.arge_txdesc[i]; if (txd->tx_dmamap) { bus_dmamap_destroy(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap); txd->tx_dmamap = NULL; } } bus_dma_tag_destroy(sc->arge_cdata.arge_tx_tag); sc->arge_cdata.arge_tx_tag = NULL; } /* Rx buffers. */ if (sc->arge_cdata.arge_rx_tag) { for (i = 0; i < ARGE_RX_RING_COUNT; i++) { rxd = &sc->arge_cdata.arge_rxdesc[i]; if (rxd->rx_dmamap) { bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap); rxd->rx_dmamap = NULL; } } if (sc->arge_cdata.arge_rx_sparemap) { bus_dmamap_destroy(sc->arge_cdata.arge_rx_tag, sc->arge_cdata.arge_rx_sparemap); sc->arge_cdata.arge_rx_sparemap = 0; } bus_dma_tag_destroy(sc->arge_cdata.arge_rx_tag); sc->arge_cdata.arge_rx_tag = NULL; } if (sc->arge_cdata.arge_parent_tag) { bus_dma_tag_destroy(sc->arge_cdata.arge_parent_tag); sc->arge_cdata.arge_parent_tag = NULL; } } /* * Initialize the transmit descriptors. */ static int arge_tx_ring_init(struct arge_softc *sc) { struct arge_ring_data *rd; struct arge_txdesc *txd; bus_addr_t addr; int i; sc->arge_cdata.arge_tx_prod = 0; sc->arge_cdata.arge_tx_cons = 0; sc->arge_cdata.arge_tx_cnt = 0; rd = &sc->arge_rdata; bzero(rd->arge_tx_ring, sizeof(rd->arge_tx_ring)); for (i = 0; i < ARGE_TX_RING_COUNT; i++) { if (i == ARGE_TX_RING_COUNT - 1) addr = ARGE_TX_RING_ADDR(sc, 0); else addr = ARGE_TX_RING_ADDR(sc, i + 1); rd->arge_tx_ring[i].packet_ctrl = ARGE_DESC_EMPTY; rd->arge_tx_ring[i].next_desc = addr; txd = &sc->arge_cdata.arge_txdesc[i]; txd->tx_m = NULL; } bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag, sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } /* * Free the Tx ring, unload any pending dma transaction and free the mbuf. */ static void arge_tx_ring_free(struct arge_softc *sc) { struct arge_txdesc *txd; int i; /* Free the Tx buffers. */ for (i = 0; i < ARGE_TX_RING_COUNT; i++) { txd = &sc->arge_cdata.arge_txdesc[i]; if (txd->tx_dmamap) { bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap); } if (txd->tx_m) m_freem(txd->tx_m); txd->tx_m = NULL; } } /* * Initialize the RX descriptors and allocate mbufs for them. Note that * we arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. 
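 *
 * Equivalently, the link field set up below is (sketch, using the same
 * macros as the code):
 *
 *	ring[i].next_desc = ARGE_RX_RING_ADDR(sc, (i + 1) % ARGE_RX_RING_COUNT);
 *
 * the loop just spells the wrap-around out with an explicit test on the
 * last index.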
*/ static int arge_rx_ring_init(struct arge_softc *sc) { struct arge_ring_data *rd; struct arge_rxdesc *rxd; bus_addr_t addr; int i; sc->arge_cdata.arge_rx_cons = 0; rd = &sc->arge_rdata; bzero(rd->arge_rx_ring, sizeof(rd->arge_rx_ring)); for (i = 0; i < ARGE_RX_RING_COUNT; i++) { rxd = &sc->arge_cdata.arge_rxdesc[i]; if (rxd->rx_m != NULL) { device_printf(sc->arge_dev, "%s: ring[%d] rx_m wasn't free?\n", __func__, i); } rxd->rx_m = NULL; rxd->desc = &rd->arge_rx_ring[i]; if (i == ARGE_RX_RING_COUNT - 1) addr = ARGE_RX_RING_ADDR(sc, 0); else addr = ARGE_RX_RING_ADDR(sc, i + 1); rd->arge_rx_ring[i].next_desc = addr; if (arge_newbuf(sc, i) != 0) { return (ENOBUFS); } } bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag, sc->arge_cdata.arge_rx_ring_map, BUS_DMASYNC_PREWRITE); return (0); } /* * Free all the buffers in the RX ring. * * TODO: ensure that DMA is disabled and no pending DMA * is lurking in the FIFO. */ static void arge_rx_ring_free(struct arge_softc *sc) { int i; struct arge_rxdesc *rxd; ARGE_LOCK_ASSERT(sc); for (i = 0; i < ARGE_RX_RING_COUNT; i++) { rxd = &sc->arge_cdata.arge_rxdesc[i]; /* Unmap the mbuf */ if (rxd->rx_m != NULL) { bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap); m_free(rxd->rx_m); rxd->rx_m = NULL; } } } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int arge_newbuf(struct arge_softc *sc, int idx) { struct arge_desc *desc; struct arge_rxdesc *rxd; struct mbuf *m; bus_dma_segment_t segs[1]; bus_dmamap_t map; int nsegs; /* XXX TODO: should just allocate an explicit 2KiB buffer */ m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = MCLBYTES; /* * Add extra space to "adjust" (copy) the packet back to be aligned * for purposes of IPv4/IPv6 header contents. */ if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE) m_adj(m, sizeof(uint64_t)); /* * If it's a 1-byte aligned buffer, then just offset it two bytes * and that will give us a hopefully correctly DWORD aligned * L3 payload - and we won't have to undo it afterwards. */ else if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_1BYTE) m_adj(m, sizeof(uint16_t)); if (bus_dmamap_load_mbuf_sg(sc->arge_cdata.arge_rx_tag, sc->arge_cdata.arge_rx_sparemap, m, segs, &nsegs, 0) != 0) { m_freem(m); return (ENOBUFS); } KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); rxd = &sc->arge_cdata.arge_rxdesc[idx]; if (rxd->rx_m != NULL) { bus_dmamap_unload(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap); } map = rxd->rx_dmamap; rxd->rx_dmamap = sc->arge_cdata.arge_rx_sparemap; sc->arge_cdata.arge_rx_sparemap = map; rxd->rx_m = m; desc = rxd->desc; if ((sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE) && segs[0].ds_addr & 3) panic("RX packet address unaligned"); desc->packet_addr = segs[0].ds_addr; desc->packet_ctrl = ARGE_DESC_EMPTY | ARGE_DMASIZE(segs[0].ds_len); bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag, sc->arge_cdata.arge_rx_ring_map, BUS_DMASYNC_PREWRITE); return (0); } /* * Move the data backwards 16 bits to (hopefully!) ensure the * IPv4/IPv6 payload is aligned. * * This is required for earlier hardware where the RX path * requires DWORD aligned buffers. 
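 *
 * Worked example: the cluster is DMA'd at a DWORD-aligned address, so
 * the 14-byte Ethernet header ends at offset 14 and the IPv4/IPv6
 * header starts off a 4-byte boundary.  Copying the whole frame back
 * by ETHER_ALIGN (2) bytes moves that header down to offset 12, which
 * is DWORD aligned, and m->m_data is rewound by the same 2 bytes.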
*/ static __inline void arge_fixup_rx(struct mbuf *m) { int i; uint16_t *src, *dst; src = mtod(m, uint16_t *); dst = src - 1; for (i = 0; i < m->m_len / sizeof(uint16_t); i++) { *dst++ = *src++; } if (m->m_len % sizeof(uint16_t)) *(uint8_t *)dst = *(uint8_t *)src; m->m_data -= ETHER_ALIGN; } #ifdef DEVICE_POLLING static int arge_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct arge_softc *sc = ifp->if_softc; int rx_npkts = 0; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ARGE_LOCK(sc); arge_tx_locked(sc); rx_npkts = arge_rx_locked(sc); ARGE_UNLOCK(sc); } return (rx_npkts); } #endif /* DEVICE_POLLING */ static void arge_tx_locked(struct arge_softc *sc) { struct arge_txdesc *txd; struct arge_desc *cur_tx; struct ifnet *ifp; uint32_t ctrl; int cons, prod; ARGE_LOCK_ASSERT(sc); cons = sc->arge_cdata.arge_tx_cons; prod = sc->arge_cdata.arge_tx_prod; ARGEDEBUG(sc, ARGE_DBG_TX, "%s: cons=%d, prod=%d\n", __func__, cons, prod); if (cons == prod) return; bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag, sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); ifp = sc->arge_ifp; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ for (; cons != prod; ARGE_INC(cons, ARGE_TX_RING_COUNT)) { cur_tx = &sc->arge_rdata.arge_tx_ring[cons]; ctrl = cur_tx->packet_ctrl; /* Check if descriptor has "finished" flag */ if ((ctrl & ARGE_DESC_EMPTY) == 0) break; ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_PKT_SENT); sc->arge_cdata.arge_tx_cnt--; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; txd = &sc->arge_cdata.arge_txdesc[cons]; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); bus_dmamap_sync(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->arge_cdata.arge_tx_tag, txd->tx_dmamap); /* Free only if it's first descriptor in list */ if (txd->tx_m) m_freem(txd->tx_m); txd->tx_m = NULL; /* reset descriptor */ cur_tx->packet_addr = 0; } sc->arge_cdata.arge_tx_cons = cons; bus_dmamap_sync(sc->arge_cdata.arge_tx_ring_tag, sc->arge_cdata.arge_tx_ring_map, BUS_DMASYNC_PREWRITE); } static int arge_rx_locked(struct arge_softc *sc) { struct arge_rxdesc *rxd; struct ifnet *ifp = sc->arge_ifp; int cons, prog, packet_len, i; struct arge_desc *cur_rx; struct mbuf *m; int rx_npkts = 0; ARGE_LOCK_ASSERT(sc); cons = sc->arge_cdata.arge_rx_cons; bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag, sc->arge_cdata.arge_rx_ring_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (prog = 0; prog < ARGE_RX_RING_COUNT; ARGE_INC(cons, ARGE_RX_RING_COUNT)) { cur_rx = &sc->arge_rdata.arge_rx_ring[cons]; rxd = &sc->arge_cdata.arge_rxdesc[cons]; m = rxd->rx_m; if ((cur_rx->packet_ctrl & ARGE_DESC_EMPTY) != 0) break; ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_PKT_RECVD); prog++; packet_len = ARGE_DMASIZE(cur_rx->packet_ctrl); bus_dmamap_sync(sc->arge_cdata.arge_rx_tag, rxd->rx_dmamap, BUS_DMASYNC_POSTREAD); m = rxd->rx_m; /* * If the MAC requires 4 byte alignment then the RX setup * routine will have pre-offset things; so un-offset it here. 
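 * (MACs that can DMA to byte-aligned addresses had the ETHER_ALIGN
 * offset applied up front in arge_newbuf(), so no copy is needed on
 * that path.)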
*/ if (sc->arge_hw_flags & ARGE_HW_FLG_RX_DESC_ALIGN_4BYTE) arge_fixup_rx(m); m->m_pkthdr.rcvif = ifp; /* Skip 4 bytes of CRC */ m->m_pkthdr.len = m->m_len = packet_len - ETHER_CRC_LEN; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); rx_npkts++; ARGE_UNLOCK(sc); (*ifp->if_input)(ifp, m); ARGE_LOCK(sc); cur_rx->packet_addr = 0; } if (prog > 0) { i = sc->arge_cdata.arge_rx_cons; for (; prog > 0 ; prog--) { if (arge_newbuf(sc, i) != 0) { device_printf(sc->arge_dev, "Failed to allocate buffer\n"); break; } ARGE_INC(i, ARGE_RX_RING_COUNT); } bus_dmamap_sync(sc->arge_cdata.arge_rx_ring_tag, sc->arge_cdata.arge_rx_ring_map, BUS_DMASYNC_PREWRITE); sc->arge_cdata.arge_rx_cons = cons; } return (rx_npkts); } static int arge_intr_filter(void *arg) { struct arge_softc *sc = arg; uint32_t status, ints; status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS); ints = ARGE_READ(sc, AR71XX_DMA_INTR); ARGEDEBUG(sc, ARGE_DBG_INTR, "int mask(filter) = %b\n", ints, "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD" "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT"); ARGEDEBUG(sc, ARGE_DBG_INTR, "status(filter) = %b\n", status, "\20\10RX_BUS_ERROR\7RX_OVERFLOW\5RX_PKT_RCVD" "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT"); if (status & DMA_INTR_ALL) { sc->arge_intr_status |= status; ARGE_WRITE(sc, AR71XX_DMA_INTR, 0); sc->stats.intr_ok++; return (FILTER_SCHEDULE_THREAD); } sc->arge_intr_status = 0; sc->stats.intr_stray++; return (FILTER_STRAY); } static void arge_intr(void *arg) { struct arge_softc *sc = arg; uint32_t status; struct ifnet *ifp = sc->arge_ifp; #ifdef ARGE_DEBUG int i; #endif status = ARGE_READ(sc, AR71XX_DMA_INTR_STATUS); status |= sc->arge_intr_status; ARGEDEBUG(sc, ARGE_DBG_INTR, "int status(intr) = %b\n", status, "\20\10\7RX_OVERFLOW\5RX_PKT_RCVD" "\4TX_BUS_ERROR\2TX_UNDERRUN\1TX_PKT_SENT"); /* * Is it our interrupt at all? */ if (status == 0) { sc->stats.intr_stray2++; return; } #ifdef ARGE_DEBUG for (i = 0; i < 32; i++) { if (status & (1U << i)) { sc->intr_stats.count[i]++; } } #endif if (status & DMA_INTR_RX_BUS_ERROR) { ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_BUS_ERROR); device_printf(sc->arge_dev, "RX bus error"); return; } if (status & DMA_INTR_TX_BUS_ERROR) { ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_BUS_ERROR); device_printf(sc->arge_dev, "TX bus error"); return; } ARGE_LOCK(sc); arge_flush_ddr(sc); if (status & DMA_INTR_RX_PKT_RCVD) arge_rx_locked(sc); /* * RX overrun disables the receiver. * Clear indication and re-enable rx. */ if ( status & DMA_INTR_RX_OVERFLOW) { ARGE_WRITE(sc, AR71XX_DMA_RX_STATUS, DMA_RX_STATUS_OVERFLOW); ARGE_WRITE(sc, AR71XX_DMA_RX_CONTROL, DMA_RX_CONTROL_EN); sc->stats.rx_overflow++; } if (status & DMA_INTR_TX_PKT_SENT) arge_tx_locked(sc); /* * Underrun turns off TX. Clear underrun indication. * If there's anything left in the ring, reactivate the tx. */ if (status & DMA_INTR_TX_UNDERRUN) { ARGE_WRITE(sc, AR71XX_DMA_TX_STATUS, DMA_TX_STATUS_UNDERRUN); sc->stats.tx_underflow++; ARGEDEBUG(sc, ARGE_DBG_TX, "%s: TX underrun; tx_cnt=%d\n", __func__, sc->arge_cdata.arge_tx_cnt); if (sc->arge_cdata.arge_tx_cnt > 0 ) { ARGE_WRITE(sc, AR71XX_DMA_TX_CONTROL, DMA_TX_CONTROL_EN); } } /* * If we've finished TXing and there's space for more packets * to be queued for TX, do so. 
Otherwise we may end up in a * situation where the interface send queue was filled * whilst the hardware queue was full, then the hardware * queue was drained by the interface send queue wasn't, * and thus if_start() is never called to kick-start * the send process (and all subsequent packets are simply * discarded. * * XXX TODO: make sure that the hardware deals nicely * with the possibility of the queue being enabled above * after a TX underrun, then having the hardware queue added * to below. */ if (status & (DMA_INTR_TX_PKT_SENT | DMA_INTR_TX_UNDERRUN) && (ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { if (!IFQ_IS_EMPTY(&ifp->if_snd)) arge_start_locked(ifp); } /* * We handled all bits, clear status */ sc->arge_intr_status = 0; ARGE_UNLOCK(sc); /* * re-enable all interrupts */ ARGE_WRITE(sc, AR71XX_DMA_INTR, DMA_INTR_ALL); } static void arge_tick(void *xsc) { struct arge_softc *sc = xsc; struct mii_data *mii; ARGE_LOCK_ASSERT(sc); if (sc->arge_miibus) { mii = device_get_softc(sc->arge_miibus); mii_tick(mii); callout_reset(&sc->arge_stat_callout, hz, arge_tick, sc); } } int arge_multiphy_mediachange(struct ifnet *ifp) { struct arge_softc *sc = ifp->if_softc; struct ifmedia *ifm = &sc->arge_ifmedia; struct ifmedia_entry *ife = ifm->ifm_cur; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if (IFM_SUBTYPE(ife->ifm_media) == IFM_AUTO) { device_printf(sc->arge_dev, "AUTO is not supported for multiphy MAC"); return (EINVAL); } /* * Ignore everything */ return (0); } void arge_multiphy_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) { struct arge_softc *sc = ifp->if_softc; ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; ifmr->ifm_active = IFM_ETHER | sc->arge_media_type | sc->arge_duplex_mode; } #if defined(ARGE_MDIO) static int argemdio_probe(device_t dev) { device_set_desc(dev, "Atheros AR71xx built-in ethernet interface, MDIO controller"); return (0); } static int argemdio_attach(device_t dev) { struct arge_softc *sc; int error = 0; sc = device_get_softc(dev); sc->arge_dev = dev; sc->arge_mac_unit = device_get_unit(dev); sc->arge_rid = 0; sc->arge_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->arge_rid, RF_ACTIVE | RF_SHAREABLE); if (sc->arge_res == NULL) { device_printf(dev, "couldn't map memory\n"); error = ENXIO; goto fail; } /* Reset MAC - required for AR71xx MDIO to successfully occur */ arge_reset_mac(sc); /* Reset MII bus */ arge_reset_miibus(sc); bus_generic_probe(dev); bus_enumerate_hinted_children(dev); error = bus_generic_attach(dev); fail: return (error); } static int argemdio_detach(device_t dev) { return (0); } #endif Index: head/sys/mips/atheros/qca955x_pci.c =================================================================== --- head/sys/mips/atheros/qca955x_pci.c (revision 295879) +++ head/sys/mips/atheros/qca955x_pci.c (revision 295880) @@ -1,606 +1,605 @@ /*- * Copyright (c) 2009, Oleksandr Tymoshenko * Copyright (c) 2011, Luiz Otavio O Souza. * Copyright (c) 2015, Adrian Chadd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ar71xx.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include "pcib_if.h" #include /* XXX aim to eliminate this! */ #include #include #include #include #undef AR724X_PCI_DEBUG //#define AR724X_PCI_DEBUG #ifdef AR724X_PCI_DEBUG #define dprintf printf #else #define dprintf(x, arg...) #endif /* * This is a PCI controller for the QCA955x and later SoCs. * It needs to be aware of >1 PCIe host endpoints. * * XXX TODO; it may be nice to merge this with ar724x_pci.c; * they're very similar. */ struct ar71xx_pci_irq { struct ar71xx_pci_softc *sc; int irq; }; struct ar71xx_pci_softc { device_t sc_dev; int sc_busno; struct rman sc_mem_rman; struct rman sc_irq_rman; uint32_t sc_pci_reg_base; /* XXX until bus stuff is done */ uint32_t sc_pci_crp_base; /* XXX until bus stuff is done */ uint32_t sc_pci_ctrl_base; /* XXX until bus stuff is done */ uint32_t sc_pci_mem_base; /* XXX until bus stuff is done */ uint32_t sc_pci_membase_limit; struct intr_event *sc_eventstab[AR71XX_PCI_NIRQS]; mips_intrcnt_t sc_intr_counter[AR71XX_PCI_NIRQS]; struct ar71xx_pci_irq sc_pci_irq[AR71XX_PCI_NIRQS]; struct resource *sc_irq; void *sc_ih; }; static int qca955x_pci_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int qca955x_pci_teardown_intr(device_t, device_t, struct resource *, void *); static int qca955x_pci_intr(void *); static void qca955x_pci_write(uint32_t reg, uint32_t offset, uint32_t data, int bytes) { uint32_t val, mask, shift; /* Register access is 32-bit aligned */ shift = (offset & 3) * 8; if (bytes % 4) mask = (1 << (bytes * 8)) - 1; else mask = 0xffffffff; val = ATH_READ_REG(reg + (offset & ~3)); val &= ~(mask << shift); val |= ((data & mask) << shift); ATH_WRITE_REG(reg + (offset & ~3), val); dprintf("%s: %#x/%#x addr=%#x, data=%#x(%#x), bytes=%d\n", __func__, reg, reg + (offset & ~3), offset, data, val, bytes); } static uint32_t qca955x_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct ar71xx_pci_softc *sc = device_get_softc(dev); uint32_t data, shift, mask; /* Register access is 32-bit aligned */ shift = (reg & 3) * 8; /* Create a mask based on the width, post-shift */ if (bytes == 2) mask = 0xffff; else if (bytes == 1) mask = 0xff; else mask = 0xffffffff; dprintf("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); if ((bus == 0) && (slot == 0) && (func == 0)) data = ATH_READ_REG(sc->sc_pci_reg_base + (reg & ~3)); else data = -1; /* Get request bytes from 32-bit word */ data = (data >> shift) & mask; dprintf("%s: read 0x%x\n", __func__, data); return 
(data); } static void qca955x_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct ar71xx_pci_softc *sc = device_get_softc(dev); dprintf("%s: tag (%x, %x, %x) reg %d(%d): %x\n", __func__, bus, slot, func, reg, bytes, data); if ((bus != 0) || (slot != 0) || (func != 0)) return; qca955x_pci_write(sc->sc_pci_reg_base, reg, data, bytes); } static void qca955x_pci_mask_irq(void *source) { uint32_t reg; struct ar71xx_pci_irq *pirq = source; struct ar71xx_pci_softc *sc = pirq->sc; /* XXX - Only one interrupt ? Only one device ? */ if (pirq->irq != AR71XX_PCI_IRQ_START) return; /* Update the interrupt mask reg */ reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK, reg & ~QCA955X_PCI_INTR_DEV0); /* Clear any pending interrupt */ reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_STATUS); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_STATUS, reg | QCA955X_PCI_INTR_DEV0); } static void qca955x_pci_unmask_irq(void *source) { uint32_t reg; struct ar71xx_pci_irq *pirq = source; struct ar71xx_pci_softc *sc = pirq->sc; if (pirq->irq != AR71XX_PCI_IRQ_START) return; /* Update the interrupt mask reg */ reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK, reg | QCA955X_PCI_INTR_DEV0); } static int qca955x_pci_setup(device_t dev) { struct ar71xx_pci_softc *sc = device_get_softc(dev); uint32_t reg; /* setup COMMAND register */ reg = PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN; qca955x_pci_write(sc->sc_pci_crp_base, PCIR_COMMAND, reg, 2); /* These are the memory/prefetch base/limit parameters */ qca955x_pci_write(sc->sc_pci_crp_base, 0x20, sc->sc_pci_membase_limit, 4); qca955x_pci_write(sc->sc_pci_crp_base, 0x24, sc->sc_pci_membase_limit, 4); reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET); if (reg != 0x7) { DELAY(100000); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET, 0); ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET); DELAY(100); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET, 4); ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET); DELAY(100000); } ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_APP, 0x1ffc1); /* Flush write */ (void) ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_APP); DELAY(1000); reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_RESET); if ((reg & QCA955X_PCI_RESET_LINK_UP) == 0) { device_printf(dev, "no PCIe controller found\n"); return (ENXIO); } return (0); } static int qca955x_pci_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int qca955x_pci_attach(device_t dev) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int unit = device_get_unit(dev); int rid = 0; /* Dirty; maybe these could all just be hints */ if (unit == 0) { sc->sc_pci_reg_base = QCA955X_PCI_CFG_BASE0; sc->sc_pci_crp_base = QCA955X_PCI_CRP_BASE0; sc->sc_pci_ctrl_base = QCA955X_PCI_CTRL_BASE0; sc->sc_pci_mem_base = QCA955X_PCI_MEM_BASE0; /* XXX verify */ sc->sc_pci_membase_limit = 0x11f01000; } else if (unit == 1) { sc->sc_pci_reg_base = QCA955X_PCI_CFG_BASE1; sc->sc_pci_crp_base = QCA955X_PCI_CRP_BASE1; sc->sc_pci_ctrl_base = QCA955X_PCI_CTRL_BASE1; sc->sc_pci_mem_base = QCA955X_PCI_MEM_BASE1; /* XXX verify */ sc->sc_pci_membase_limit = 0x12f01200; } else { device_printf(dev, "%s: invalid unit (%d)\n", __func__, unit); return (ENXIO); } sc->sc_mem_rman.rm_type = 
RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "qca955x PCI memory window"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, sc->sc_pci_mem_base, sc->sc_pci_mem_base + QCA955X_PCI_MEM_SIZE - 1) != 0) { panic("qca955x_pci_attach: failed to set up I/O rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "qca955x PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, AR71XX_PCI_IRQ_START, AR71XX_PCI_IRQ_END) != 0) panic("qca955x_pci_attach: failed to set up IRQ rman"); /* Disable interrupts */ ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_STATUS, 0); ATH_WRITE_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK, 0); /* Hook up our interrupt handler. */ if ((sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return (ENXIO); } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, qca955x_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return (ENXIO); } /* Reset PCIe core and PCIe PHY */ ar71xx_device_stop(QCA955X_RESET_PCIE); ar71xx_device_stop(QCA955X_RESET_PCIE_PHY); DELAY(100); ar71xx_device_start(QCA955X_RESET_PCIE_PHY); ar71xx_device_start(QCA955X_RESET_PCIE); if (qca955x_pci_setup(dev)) return (ENXIO); /* * Write initial base address. * * I'm not yet sure why this is required and/or why it isn't * initialised like this. The AR71xx PCI code initialises * the PCI windows for each device, but neither it or the * 724x PCI bridge modules explicitly initialise the BAR. * * So before this gets committed, have a chat with jhb@ or * someone else who knows PCI well and figure out whether * the initial BAR is supposed to be determined by /other/ * means. 
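 *
 * What the two writes below do: the PCIe memory window base is stored
 * into BAR(0) of the on-chip bridge at bus 0/slot 0/func 0, and the
 * bridge's command register is then set up for memory decoding and
 * bus mastering.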
*/ qca955x_pci_write_config(dev, 0, 0, 0, PCIR_BAR(0), sc->sc_pci_mem_base, 4); /* Fixup internal PCI bridge */ qca955x_pci_write_config(dev, 0, 0, 0, PCIR_COMMAND, PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_SERRESPEN | PCIM_CMD_BACKTOBACK | PCIM_CMD_PERRESPEN | PCIM_CMD_MWRICEN, 2); device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int qca955x_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct ar71xx_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int qca955x_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct ar71xx_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * qca955x_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int qca955x_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int res = (BUS_ACTIVATE_RESOURCE(device_get_parent(bus), child, type, rid, r)); if (!res) { switch(type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rman_set_bustag(r, ar71xx_bus_space_pcimem); break; } } return (res); } static int qca955x_pci_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct ar71xx_pci_softc *sc = device_get_softc(bus); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); event = sc->sc_eventstab[irq]; if (event == NULL) { sc->sc_pci_irq[irq].sc = sc; sc->sc_pci_irq[irq].irq = irq; error = intr_event_create(&event, (void *)&sc->sc_pci_irq[irq], 0, irq, qca955x_pci_mask_irq, qca955x_pci_unmask_irq, NULL, NULL, "pci intr%d:", irq); if (error == 0) { sc->sc_eventstab[irq] = event; sc->sc_intr_counter[irq] = mips_intrcnt_create(event->ie_name); } else return error; } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(sc->sc_intr_counter[irq], event->ie_fullname); qca955x_pci_unmask_irq(&sc->sc_pci_irq[irq]); return (0); } static int qca955x_pci_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { struct ar71xx_pci_softc *sc = device_get_softc(dev); int irq, result; irq = rman_get_start(ires); if (irq > AR71XX_PCI_IRQ_END) panic("%s: bad irq %d", __func__, irq); if (sc->sc_eventstab[irq] == NULL) panic("Trying to teardown unoccupied IRQ"); qca955x_pci_mask_irq(&sc->sc_pci_irq[irq]); result = intr_event_remove_handler(cookie); if (!result) sc->sc_eventstab[irq] = NULL; return (result); } static int qca955x_pci_intr(void *arg) { struct ar71xx_pci_softc *sc = arg; struct intr_event *event; uint32_t reg, irq, mask; /* There's only one PCIe DDR 
flush for both PCIe EPs */ ar71xx_device_flush_ddr(AR71XX_CPU_DDR_FLUSH_PCIE); reg = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_STATUS); mask = ATH_READ_REG(sc->sc_pci_ctrl_base + QCA955X_PCI_INTR_MASK); /* * Handle only unmasked interrupts */ reg &= mask; /* * XXX TODO: handle >1 PCIe end point! */ if (reg & QCA955X_PCI_INTR_DEV0) { irq = AR71XX_PCI_IRQ_START; event = sc->sc_eventstab[irq]; if (!event || TAILQ_EMPTY(&event->ie_handlers)) { printf("Stray IRQ %d\n", irq); return (FILTER_STRAY); } /* TODO: frame instead of NULL? */ intr_event_handle(event, NULL); mips_intrcnt_inc(sc->sc_intr_counter[irq]); } return (FILTER_HANDLED); } static int qca955x_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int qca955x_pci_route_interrupt(device_t pcib, device_t device, int pin) { return (pci_get_slot(device)); } static device_method_t qca955x_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, qca955x_pci_probe), DEVMETHOD(device_attach, qca955x_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, qca955x_pci_read_ivar), DEVMETHOD(bus_write_ivar, qca955x_pci_write_ivar), DEVMETHOD(bus_alloc_resource, qca955x_pci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, qca955x_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, qca955x_pci_setup_intr), DEVMETHOD(bus_teardown_intr, qca955x_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, qca955x_pci_maxslots), DEVMETHOD(pcib_read_config, qca955x_pci_read_config), DEVMETHOD(pcib_write_config, qca955x_pci_write_config), DEVMETHOD(pcib_route_interrupt, qca955x_pci_route_interrupt), DEVMETHOD_END }; static driver_t qca955x_pci_driver = { "pcib", qca955x_pci_methods, sizeof(struct ar71xx_pci_softc), }; static devclass_t qca955x_pci_devclass; DRIVER_MODULE(qca955x_pci, nexus, qca955x_pci_driver, qca955x_pci_devclass, 0, 0); DRIVER_MODULE(qca955x_pci, apb, qca955x_pci_driver, qca955x_pci_devclass, 0, 0); Index: head/sys/mips/cavium/cvmx_config.h =================================================================== --- head/sys/mips/cavium/cvmx_config.h (revision 295879) +++ head/sys/mips/cavium/cvmx_config.h (revision 295880) @@ -1,198 +1,197 @@ /***********************license start*************** * Copyright (c) 2003-2008 Cavium Networks (support@cavium.com). All rights * reserved. * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * * Neither the name of Cavium Networks nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. 
* * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS * OR WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH * RESPECT TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY * REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT * DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES * OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR * PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET * POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT * OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU. * * * For any questions regarding licensing please contact marketing@caviumnetworks.com * ***********************license end**************************************/ /* $FreeBSD$ */ #ifndef _CVMX_CONFIG_H #define _CVMX_CONFIG_H #include "opt_cvmx.h" #include #include #include #include #include #include -#include #include #define asm __asm #define CVMX_DONT_INCLUDE_CONFIG /* Define to enable the use of simple executive packet output functions. ** For packet I/O setup enable the helper functions below. */ #define CVMX_ENABLE_PKO_FUNCTIONS /* Define to enable the use of simple executive helper functions. These ** include many hardware setup functions. See cvmx-helper.[ch] for ** details. */ #define CVMX_ENABLE_HELPER_FUNCTIONS /* CVMX_HELPER_FIRST_MBUFF_SKIP is the number of bytes to reserve before ** the beginning of the packet. If necessary, override the default ** here. See the IPD section of the hardware manual for MBUFF SKIP ** details.*/ #define CVMX_HELPER_FIRST_MBUFF_SKIP 184 /* CVMX_HELPER_NOT_FIRST_MBUFF_SKIP is the number of bytes to reserve in each ** chained packet element. If necessary, override the default here */ #define CVMX_HELPER_NOT_FIRST_MBUFF_SKIP 0 /* CVMX_HELPER_ENABLE_BACK_PRESSURE controls whether back pressure is enabled ** for all input ports. This controls if IPD sends backpressure to all ports if ** Octeon's FPA pools don't have enough packet or work queue entries. Even when ** this is off, it is still possible to get backpressure from individual ** hardware ports. When configuring backpressure, also check ** CVMX_HELPER_DISABLE_*_BACKPRESSURE below. If necessary, override the default ** here */ #define CVMX_HELPER_ENABLE_BACK_PRESSURE 1 /* CVMX_HELPER_ENABLE_IPD controls if the IPD is enabled in the helper ** function. Once it is enabled the hardware starts accepting packets. You ** might want to skip the IPD enable if configuration changes are need ** from the default helper setup. If necessary, override the default here */ #define CVMX_HELPER_ENABLE_IPD 1 /* CVMX_HELPER_INPUT_TAG_TYPE selects the type of tag that the IPD assigns ** to incoming packets. 
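** (CVMX_POW_TAG_TYPE_ORDERED keeps packets of a flow in order while still
** letting several cores process them; an ATOMIC tag type would additionally
** serialize processing of work carrying the same tag value.)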
*/ #define CVMX_HELPER_INPUT_TAG_TYPE CVMX_POW_TAG_TYPE_ORDERED /* The following select which fields are used by the PIP to generate ** the tag on INPUT ** 0: don't include ** 1: include */ #define CVMX_HELPER_INPUT_TAG_IPV6_SRC_IP 0 #define CVMX_HELPER_INPUT_TAG_IPV6_DST_IP 0 #define CVMX_HELPER_INPUT_TAG_IPV6_SRC_PORT 0 #define CVMX_HELPER_INPUT_TAG_IPV6_DST_PORT 0 #define CVMX_HELPER_INPUT_TAG_IPV6_NEXT_HEADER 0 #define CVMX_HELPER_INPUT_TAG_IPV4_SRC_IP 0 #define CVMX_HELPER_INPUT_TAG_IPV4_DST_IP 0 #define CVMX_HELPER_INPUT_TAG_IPV4_SRC_PORT 0 #define CVMX_HELPER_INPUT_TAG_IPV4_DST_PORT 0 #define CVMX_HELPER_INPUT_TAG_IPV4_PROTOCOL 0 #define CVMX_HELPER_INPUT_TAG_INPUT_PORT 1 /* Select skip mode for input ports */ #define CVMX_HELPER_INPUT_PORT_SKIP_MODE CVMX_PIP_PORT_CFG_MODE_SKIPL2 /* Define the number of queues per output port */ #define CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE0 1 #define CVMX_HELPER_PKO_QUEUES_PER_PORT_INTERFACE1 1 /* Configure PKO to use per-core queues (PKO lockless operation). ** Please see the related SDK documentation for PKO that illustrates ** how to enable and configure this option. */ //#define CVMX_ENABLE_PKO_LOCKLESS_OPERATION 1 //#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0 8 //#define CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1 8 /* Force backpressure to be disabled. This overrides all other ** backpressure configuration */ #define CVMX_HELPER_DISABLE_RGMII_BACKPRESSURE 1 /* Disable the SPI4000's processing of backpressure packets and backpressure ** generation. When this is 1, the SPI4000 will not stop sending packets when ** receiving backpressure. It will also not generate backpressure packets when ** its internal FIFOs are full. */ #define CVMX_HELPER_DISABLE_SPI4000_BACKPRESSURE 1 /* CVMX_HELPER_SPI_TIMEOUT is used to determine how long the SPI initialization ** routines wait for SPI training. You can override the value using ** executive-config.h if necessary */ #define CVMX_HELPER_SPI_TIMEOUT 10 /* Select the number of low latency memory ports (interfaces) that ** will be configured. Valid values are 1 and 2. */ #define CVMX_LLM_CONFIG_NUM_PORTS 2 /* Enable the fix for PKI-100 errata ("Size field is 8 too large in WQE and next ** pointers"). If CVMX_ENABLE_LEN_M8_FIX is set to 0, the fix for this errata will ** not be enabled. 
** 0: Fix is not enabled ** 1: Fix is enabled, if supported by hardware */ #define CVMX_ENABLE_LEN_M8_FIX 1 #if defined(CVMX_ENABLE_HELPER_FUNCTIONS) && !defined(CVMX_ENABLE_PKO_FUNCTIONS) #define CVMX_ENABLE_PKO_FUNCTIONS #endif /* Enable debug and informational printfs */ #define CVMX_CONFIG_ENABLE_DEBUG_PRINTS 1 /************************* Config Specific Defines ************************/ #define CVMX_LLM_NUM_PORTS 1 #define CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 1 /**< PKO queues per port for interface 0 (ports 0-15) */ #define CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 1 /**< PKO queues per port for interface 1 (ports 16-31) */ #define CVMX_PKO_MAX_PORTS_INTERFACE0 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE0 /**< Limit on the number of PKO ports enabled for interface 0 */ #define CVMX_PKO_MAX_PORTS_INTERFACE1 CVMX_HELPER_PKO_MAX_PORTS_INTERFACE1 /**< Limit on the number of PKO ports enabled for interface 1 */ #define CVMX_PKO_QUEUES_PER_PORT_PCI 1 /**< PKO queues per port for PCI (ports 32-35) */ #define CVMX_PKO_QUEUES_PER_PORT_LOOP 1 /**< PKO queues per port for Loop devices (ports 36-39) */ /************************* FPA allocation *********************************/ /* Pool sizes in bytes, must be multiple of a cache line */ #define CVMX_FPA_POOL_0_SIZE (15 * CVMX_CACHE_LINE_SIZE) #define CVMX_FPA_POOL_1_SIZE (1 * CVMX_CACHE_LINE_SIZE) #define CVMX_FPA_POOL_2_SIZE (8 * CVMX_CACHE_LINE_SIZE) #define CVMX_FPA_POOL_3_SIZE (0 * CVMX_CACHE_LINE_SIZE) #define CVMX_FPA_POOL_4_SIZE (0 * CVMX_CACHE_LINE_SIZE) #define CVMX_FPA_POOL_5_SIZE (0 * CVMX_CACHE_LINE_SIZE) #define CVMX_FPA_POOL_6_SIZE (0 * CVMX_CACHE_LINE_SIZE) #define CVMX_FPA_POOL_7_SIZE (0 * CVMX_CACHE_LINE_SIZE) /* Pools in use */ #define CVMX_FPA_PACKET_POOL (0) /**< Packet buffers */ #define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE #define CVMX_FPA_WQE_POOL (1) /**< Work queue entrys */ #define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE #define CVMX_FPA_OUTPUT_BUFFER_POOL (2) /**< PKO queue command buffers */ #define CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE CVMX_FPA_POOL_2_SIZE /************************* FAU allocation ********************************/ #define CVMX_FAU_REG_END 2048 #define CVMX_SCR_SCRATCH 0 #endif /* !_CVMX_CONFIG_H */ Index: head/sys/mips/cavium/octopci.c =================================================================== --- head/sys/mips/cavium/octopci.c (revision 295879) +++ head/sys/mips/cavium/octopci.c (revision 295880) @@ -1,992 +1,991 @@ /*- * Copyright (c) 2010-2011 Juli Mallett * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include #include "pcib_if.h" #define NPI_WRITE(addr, value) cvmx_write64_uint32((addr) ^ 4, (value)) #define NPI_READ(addr) cvmx_read64_uint32((addr) ^ 4) struct octopci_softc { device_t sc_dev; unsigned sc_domain; unsigned sc_bus; bus_addr_t sc_io_base; unsigned sc_io_next; struct rman sc_io; bus_addr_t sc_mem1_base; unsigned sc_mem1_next; struct rman sc_mem1; }; static void octopci_identify(driver_t *, device_t); static int octopci_probe(device_t); static int octopci_attach(device_t); static int octopci_read_ivar(device_t, device_t, int, uintptr_t *); static struct resource *octopci_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int octopci_activate_resource(device_t, device_t, int, int, struct resource *); static int octopci_maxslots(device_t); static uint32_t octopci_read_config(device_t, u_int, u_int, u_int, u_int, int); static void octopci_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int octopci_route_interrupt(device_t, device_t, int); static unsigned octopci_init_bar(device_t, unsigned, unsigned, unsigned, unsigned, uint8_t *); static unsigned octopci_init_device(device_t, unsigned, unsigned, unsigned, unsigned); static unsigned octopci_init_bus(device_t, unsigned); static void octopci_init_pci(device_t); static uint64_t octopci_cs_addr(unsigned, unsigned, unsigned, unsigned); static void octopci_identify(driver_t *drv, device_t parent) { BUS_ADD_CHILD(parent, 0, "pcib", 0); if (octeon_has_feature(OCTEON_FEATURE_PCIE)) BUS_ADD_CHILD(parent, 0, "pcib", 1); } static int octopci_probe(device_t dev) { if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { device_set_desc(dev, "Cavium Octeon PCIe bridge"); return (0); } /* Check whether we are a PCI host. */ if ((cvmx_sysinfo_get()->bootloader_config_flags & CVMX_BOOTINFO_CFG_FLAG_PCI_HOST) == 0) return (ENXIO); if (device_get_unit(dev) != 0) return (ENXIO); device_set_desc(dev, "Cavium Octeon PCI bridge"); return (0); } static int octopci_attach(device_t dev) { struct octopci_softc *sc; unsigned subbus; int error; sc = device_get_softc(dev); sc->sc_dev = dev; if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { sc->sc_domain = device_get_unit(dev); error = cvmx_pcie_rc_initialize(sc->sc_domain); if (error != 0) { device_printf(dev, "Failed to put PCIe bus in host mode.\n"); return (ENXIO); } /* * In RC mode, the Simple Executive programs the first bus to * be numbered as bus 1, because some IDT bridges used in * Octeon systems object to being attached to bus 0. 
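 * (Consistent with this, octopci_read_config() below answers config
 * cycles for bus 0/slot 0/func 0 with all-ones in the PCIe case.)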
*/ sc->sc_bus = 1; sc->sc_io_base = CVMX_ADD_IO_SEG(cvmx_pcie_get_io_base_address(sc->sc_domain)); sc->sc_io.rm_descr = "Cavium Octeon PCIe I/O Ports"; sc->sc_mem1_base = CVMX_ADD_IO_SEG(cvmx_pcie_get_mem_base_address(sc->sc_domain)); sc->sc_mem1.rm_descr = "Cavium Octeon PCIe Memory"; } else { octopci_init_pci(dev); sc->sc_domain = 0; sc->sc_bus = 0; sc->sc_io_base = CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_PCI, CVMX_OCT_SUBDID_PCI_IO)); sc->sc_io.rm_descr = "Cavium Octeon PCI I/O Ports"; sc->sc_mem1_base = CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_PCI, CVMX_OCT_SUBDID_PCI_MEM1)); sc->sc_mem1.rm_descr = "Cavium Octeon PCI Memory"; } sc->sc_io.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_io); if (error != 0) return (error); error = rman_manage_region(&sc->sc_io, CVMX_OCT_PCI_IO_BASE, CVMX_OCT_PCI_IO_BASE + CVMX_OCT_PCI_IO_SIZE); if (error != 0) return (error); sc->sc_mem1.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_mem1); if (error != 0) return (error); error = rman_manage_region(&sc->sc_mem1, CVMX_OCT_PCI_MEM1_BASE, CVMX_OCT_PCI_MEM1_BASE + CVMX_OCT_PCI_MEM1_SIZE); if (error != 0) return (error); /* * Next offsets for resource allocation in octopci_init_bar. */ sc->sc_io_next = 0; sc->sc_mem1_next = 0; /* * Configure devices. */ octopci_write_config(dev, sc->sc_bus, 0, 0, PCIR_SUBBUS_1, 0xff, 1); subbus = octopci_init_bus(dev, sc->sc_bus); octopci_write_config(dev, sc->sc_bus, 0, 0, PCIR_SUBBUS_1, subbus, 1); device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int octopci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct octopci_softc *sc; sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = sc->sc_domain; return (0); case PCIB_IVAR_BUS: *result = sc->sc_bus; return (0); } return (ENOENT); } static struct resource * octopci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct octopci_softc *sc; struct resource *res; struct rman *rm; int error; sc = device_get_softc(bus); switch (type) { case SYS_RES_IRQ: res = bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags); if (res != NULL) return (res); return (NULL); case SYS_RES_MEMORY: rm = &sc->sc_mem1; break; case SYS_RES_IOPORT: rm = &sc->sc_io; break; default: return (NULL); } res = rman_reserve_resource(rm, start, end, count, flags, child); if (res == NULL) return (NULL); rman_set_rid(res, *rid); rman_set_bustag(res, octopci_bus_space); switch (type) { case SYS_RES_MEMORY: rman_set_bushandle(res, sc->sc_mem1_base + rman_get_start(res)); break; case SYS_RES_IOPORT: rman_set_bushandle(res, sc->sc_io_base + rman_get_start(res)); #if __mips_n64 rman_set_virtual(res, (void *)rman_get_bushandle(res)); #else /* * XXX * We can't access ports via a 32-bit pointer. 
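 * The port window sits at a full 64-bit address (sc_io_base), which a
 * 32-bit kernel pointer cannot represent, so no KVA is published for the
 * resource; consumers are expected to go through the bus_space tag and
 * handle set just above instead.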
*/ rman_set_virtual(res, NULL); #endif break; } if ((flags & RF_ACTIVE) != 0) { error = bus_activate_resource(child, type, *rid, res); if (error != 0) { rman_release_resource(res); return (NULL); } } return (res); } static int octopci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { bus_space_handle_t bh; int error; switch (type) { case SYS_RES_IRQ: error = bus_generic_activate_resource(bus, child, type, rid, res); if (error != 0) return (error); return (0); case SYS_RES_MEMORY: case SYS_RES_IOPORT: error = bus_space_map(rman_get_bustag(res), rman_get_bushandle(res), rman_get_size(res), 0, &bh); if (error != 0) return (error); rman_set_bushandle(res, bh); break; default: return (ENXIO); } error = rman_activate_resource(res); if (error != 0) return (error); return (0); } static int octopci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static uint32_t octopci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct octopci_softc *sc; uint64_t addr; uint32_t data; sc = device_get_softc(dev); if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { if (bus == 0 && slot == 0 && func == 0) return ((uint32_t)-1); switch (bytes) { case 4: return (cvmx_pcie_config_read32(sc->sc_domain, bus, slot, func, reg)); case 2: return (cvmx_pcie_config_read16(sc->sc_domain, bus, slot, func, reg)); case 1: return (cvmx_pcie_config_read8(sc->sc_domain, bus, slot, func, reg)); default: return ((uint32_t)-1); } } addr = octopci_cs_addr(bus, slot, func, reg); switch (bytes) { case 4: data = le32toh(cvmx_read64_uint32(addr)); return (data); case 2: data = le16toh(cvmx_read64_uint16(addr)); return (data); case 1: data = cvmx_read64_uint8(addr); return (data); default: return ((uint32_t)-1); } } static void octopci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct octopci_softc *sc; uint64_t addr; sc = device_get_softc(dev); if (octeon_has_feature(OCTEON_FEATURE_PCIE)) { switch (bytes) { case 4: cvmx_pcie_config_write32(sc->sc_domain, bus, slot, func, reg, data); return; case 2: cvmx_pcie_config_write16(sc->sc_domain, bus, slot, func, reg, data); return; case 1: cvmx_pcie_config_write8(sc->sc_domain, bus, slot, func, reg, data); return; default: return; } } addr = octopci_cs_addr(bus, slot, func, reg); switch (bytes) { case 4: cvmx_write64_uint32(addr, htole32(data)); return; case 2: cvmx_write64_uint16(addr, htole16(data)); return; case 1: cvmx_write64_uint8(addr, data); return; default: return; } } static int octopci_route_interrupt(device_t dev, device_t child, int pin) { struct octopci_softc *sc; unsigned bus, slot, func; unsigned irq; sc = device_get_softc(dev); if (octeon_has_feature(OCTEON_FEATURE_PCIE)) return (OCTEON_IRQ_PCI_INT0 + pin - 1); bus = pci_get_bus(child); slot = pci_get_slot(child); func = pci_get_function(child); /* * Board types we have to know at compile-time. */ #if defined(OCTEON_BOARD_CAPK_0100ND) if (bus == 0 && slot == 12 && func == 0) return (OCTEON_IRQ_PCI_INT2); #endif /* * For board types we can determine at runtime. 
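 * Boards with no entry in the switch below fall through to the generic
 * swizzle, irq = slot + pin - 3, folded onto the four shared PCI interrupt
 * lines; INTA (pin 1) of slot 4, for example, ends up on
 * OCTEON_IRQ_PCI_INT0 + 2.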
*/ switch (cvmx_sysinfo_get()->board_type) { #if defined(OCTEON_VENDOR_LANNER) case CVMX_BOARD_TYPE_CUST_LANNER_MR955: return (OCTEON_IRQ_PCI_INT0 + pin - 1); case CVMX_BOARD_TYPE_CUST_LANNER_MR320: if (slot < 32) { if (slot == 3 || slot == 9) irq = pin; else irq = pin - 1; return (OCTEON_IRQ_PCI_INT0 + (irq & 3)); } break; #endif default: break; } irq = slot + pin - 3; return (OCTEON_IRQ_PCI_INT0 + (irq & 3)); } static unsigned octopci_init_bar(device_t dev, unsigned b, unsigned s, unsigned f, unsigned barnum, uint8_t *commandp) { struct octopci_softc *sc; uint64_t bar; unsigned size; int barsize; sc = device_get_softc(dev); octopci_write_config(dev, b, s, f, PCIR_BAR(barnum), 0xffffffff, 4); bar = octopci_read_config(dev, b, s, f, PCIR_BAR(barnum), 4); if (bar == 0) { /* Bar not implemented; got to next bar. */ return (barnum + 1); } if (PCI_BAR_IO(bar)) { size = ~(bar & PCIM_BAR_IO_BASE) + 1; sc->sc_io_next = (sc->sc_io_next + size - 1) & ~(size - 1); if (sc->sc_io_next + size > CVMX_OCT_PCI_IO_SIZE) { device_printf(dev, "%02x.%02x:%02x: no ports for BAR%u.\n", b, s, f, barnum); return (barnum + 1); } octopci_write_config(dev, b, s, f, PCIR_BAR(barnum), CVMX_OCT_PCI_IO_BASE + sc->sc_io_next, 4); sc->sc_io_next += size; /* * Enable I/O ports. */ *commandp |= PCIM_CMD_PORTEN; return (barnum + 1); } else { if (PCIR_BAR(barnum) == PCIR_BIOS) { /* * ROM BAR is always 32-bit. */ barsize = 1; } else { switch (bar & PCIM_BAR_MEM_TYPE) { case PCIM_BAR_MEM_64: /* * XXX * High 32 bits are all zeroes for now. */ octopci_write_config(dev, b, s, f, PCIR_BAR(barnum + 1), 0, 4); barsize = 2; break; default: barsize = 1; break; } } size = ~(bar & (uint32_t)PCIM_BAR_MEM_BASE) + 1; sc->sc_mem1_next = (sc->sc_mem1_next + size - 1) & ~(size - 1); if (sc->sc_mem1_next + size > CVMX_OCT_PCI_MEM1_SIZE) { device_printf(dev, "%02x.%02x:%02x: no memory for BAR%u.\n", b, s, f, barnum); return (barnum + barsize); } octopci_write_config(dev, b, s, f, PCIR_BAR(barnum), CVMX_OCT_PCI_MEM1_BASE + sc->sc_mem1_next, 4); sc->sc_mem1_next += size; /* * Enable memory access. */ *commandp |= PCIM_CMD_MEMEN; return (barnum + barsize); } } static unsigned octopci_init_device(device_t dev, unsigned b, unsigned s, unsigned f, unsigned secbus) { unsigned barnum, bars; uint8_t brctl; uint8_t class, subclass; uint8_t command; uint8_t hdrtype; /* Read header type (again.) */ hdrtype = octopci_read_config(dev, b, s, f, PCIR_HDRTYPE, 1); /* * Disable memory and I/O while programming BARs. */ command = octopci_read_config(dev, b, s, f, PCIR_COMMAND, 1); command &= ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN); octopci_write_config(dev, b, s, f, PCIR_COMMAND, command, 1); DELAY(10000); /* Program BARs. */ switch (hdrtype & PCIM_HDRTYPE) { case PCIM_HDRTYPE_NORMAL: bars = 6; break; case PCIM_HDRTYPE_BRIDGE: bars = 2; break; case PCIM_HDRTYPE_CARDBUS: bars = 0; break; default: device_printf(dev, "%02x.%02x:%02x: invalid header type %#x\n", b, s, f, hdrtype); return (secbus); } barnum = 0; while (barnum < bars) barnum = octopci_init_bar(dev, b, s, f, barnum, &command); /* Enable bus mastering. */ command |= PCIM_CMD_BUSMASTEREN; /* Enable whatever facilities the BARs require. */ octopci_write_config(dev, b, s, f, PCIR_COMMAND, command, 1); DELAY(10000); /* * Set cache line size. On Octeon it should be 128 bytes, * but according to Linux some Intel bridges have trouble * with values over 64 bytes, so use 64 bytes. */ octopci_write_config(dev, b, s, f, PCIR_CACHELNSZ, 16, 1); /* Set latency timer. 
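 * A note on octopci_init_bar() above (added commentary, not in the original
 * source): it uses the standard BAR sizing recipe of writing all ones,
 * reading the register back, clearing the low type bits and taking the
 * two's complement of the rest.  An I/O BAR that reads back as 0xffffff01
 * therefore decodes to ~0xffffff00 + 1 = 0x100, i.e. 256 ports, and
 * sc_io_next is rounded up to that alignment before the new base is
 * programmed.  Similarly, PCIR_CACHELNSZ just above is in units of 32-bit
 * words, so the value 16 is the 64 bytes the comment asks for, and the
 * latency timer written next counts PCI clocks.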
*/ octopci_write_config(dev, b, s, f, PCIR_LATTIMER, 48, 1); /* Board-specific or device-specific fixups and workarounds. */ switch (cvmx_sysinfo_get()->board_type) { #if defined(OCTEON_VENDOR_LANNER) case CVMX_BOARD_TYPE_CUST_LANNER_MR955: if (b == 1 && s == 7 && f == 0) { bus_addr_t busaddr, unitbusaddr; uint32_t bar; uint32_t tmp; unsigned unit; /* * Set Tx DMA power. */ bar = octopci_read_config(dev, b, s, f, PCIR_BAR(3), 4); busaddr = CVMX_ADDR_DID(CVMX_FULL_DID(CVMX_OCT_DID_PCI, CVMX_OCT_SUBDID_PCI_MEM1)); busaddr += (bar & (uint32_t)PCIM_BAR_MEM_BASE); for (unit = 0; unit < 4; unit++) { unitbusaddr = busaddr + 0x430 + (unit << 8); tmp = le32toh(cvmx_read64_uint32(unitbusaddr)); tmp &= ~0x700; tmp |= 0x300; cvmx_write64_uint32(unitbusaddr, htole32(tmp)); } } break; #endif default: break; } /* Configure PCI-PCI bridges. */ class = octopci_read_config(dev, b, s, f, PCIR_CLASS, 1); if (class != PCIC_BRIDGE) return (secbus); subclass = octopci_read_config(dev, b, s, f, PCIR_SUBCLASS, 1); if (subclass != PCIS_BRIDGE_PCI) return (secbus); /* Enable memory and I/O access. */ command |= PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; octopci_write_config(dev, b, s, f, PCIR_COMMAND, command, 1); /* Enable errors and parity checking. Do a bus reset. */ brctl = octopci_read_config(dev, b, s, f, PCIR_BRIDGECTL_1, 1); brctl |= PCIB_BCR_PERR_ENABLE | PCIB_BCR_SERR_ENABLE; /* Perform a secondary bus reset. */ brctl |= PCIB_BCR_SECBUS_RESET; octopci_write_config(dev, b, s, f, PCIR_BRIDGECTL_1, brctl, 1); DELAY(100000); brctl &= ~PCIB_BCR_SECBUS_RESET; octopci_write_config(dev, b, s, f, PCIR_BRIDGECTL_1, brctl, 1); secbus++; /* Program memory and I/O ranges. */ octopci_write_config(dev, b, s, f, PCIR_MEMBASE_1, CVMX_OCT_PCI_MEM1_BASE >> 16, 2); octopci_write_config(dev, b, s, f, PCIR_MEMLIMIT_1, (CVMX_OCT_PCI_MEM1_BASE + CVMX_OCT_PCI_MEM1_SIZE - 1) >> 16, 2); octopci_write_config(dev, b, s, f, PCIR_IOBASEL_1, CVMX_OCT_PCI_IO_BASE >> 8, 1); octopci_write_config(dev, b, s, f, PCIR_IOBASEH_1, CVMX_OCT_PCI_IO_BASE >> 16, 2); octopci_write_config(dev, b, s, f, PCIR_IOLIMITL_1, (CVMX_OCT_PCI_IO_BASE + CVMX_OCT_PCI_IO_SIZE - 1) >> 8, 1); octopci_write_config(dev, b, s, f, PCIR_IOLIMITH_1, (CVMX_OCT_PCI_IO_BASE + CVMX_OCT_PCI_IO_SIZE - 1) >> 16, 2); /* Program prefetchable memory decoder. */ /* XXX */ /* Probe secondary/subordinate buses. */ octopci_write_config(dev, b, s, f, PCIR_PRIBUS_1, b, 1); octopci_write_config(dev, b, s, f, PCIR_SECBUS_1, secbus, 1); octopci_write_config(dev, b, s, f, PCIR_SUBBUS_1, 0xff, 1); /* Perform a secondary bus reset. */ brctl |= PCIB_BCR_SECBUS_RESET; octopci_write_config(dev, b, s, f, PCIR_BRIDGECTL_1, brctl, 1); DELAY(100000); brctl &= ~PCIB_BCR_SECBUS_RESET; octopci_write_config(dev, b, s, f, PCIR_BRIDGECTL_1, brctl, 1); /* Give the bus time to settle now before reading configspace. */ DELAY(100000); secbus = octopci_init_bus(dev, secbus); octopci_write_config(dev, b, s, f, PCIR_SUBBUS_1, secbus, 1); return (secbus); } static unsigned octopci_init_bus(device_t dev, unsigned b) { unsigned s, f; uint8_t hdrtype; unsigned secbus; secbus = b; for (s = 0; s <= PCI_SLOTMAX; s++) { for (f = 0; f <= PCI_FUNCMAX; f++) { hdrtype = octopci_read_config(dev, b, s, f, PCIR_HDRTYPE, 1); if (hdrtype == 0xff) { if (f == 0) break; /* Next slot. */ continue; /* Next function. */ } secbus = octopci_init_device(dev, b, s, f, secbus); if (f == 0 && (hdrtype & PCIM_MFDEV) == 0) break; /* Next slot. 
*/ } } return (secbus); } static uint64_t octopci_cs_addr(unsigned bus, unsigned slot, unsigned func, unsigned reg) { octeon_pci_config_space_address_t pci_addr; pci_addr.u64 = 0; pci_addr.s.upper = 2; pci_addr.s.io = 1; pci_addr.s.did = 3; pci_addr.s.subdid = CVMX_OCT_SUBDID_PCI_CFG; pci_addr.s.endian_swap = 1; pci_addr.s.bus = bus; pci_addr.s.dev = slot; pci_addr.s.func = func; pci_addr.s.reg = reg; return (pci_addr.u64); } static void octopci_init_pci(device_t dev) { cvmx_npi_mem_access_subid_t npi_mem_access_subid; cvmx_npi_pci_int_arb_cfg_t npi_pci_int_arb_cfg; cvmx_npi_ctl_status_t npi_ctl_status; cvmx_pci_ctl_status_2_t pci_ctl_status_2; cvmx_pci_cfg56_t pci_cfg56; cvmx_pci_cfg22_t pci_cfg22; cvmx_pci_cfg16_t pci_cfg16; cvmx_pci_cfg19_t pci_cfg19; cvmx_pci_cfg01_t pci_cfg01; unsigned i; /* * Reset the PCI bus. */ cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x1); cvmx_read_csr(CVMX_CIU_SOFT_PRST); DELAY(2000); npi_ctl_status.u64 = 0; npi_ctl_status.s.max_word = 1; npi_ctl_status.s.timer = 1; cvmx_write_csr(CVMX_NPI_CTL_STATUS, npi_ctl_status.u64); /* * Set host mode. */ switch (cvmx_sysinfo_get()->board_type) { #if defined(OCTEON_VENDOR_LANNER) case CVMX_BOARD_TYPE_CUST_LANNER_MR320: case CVMX_BOARD_TYPE_CUST_LANNER_MR955: /* 32-bit PCI-X */ cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x0); break; #endif default: /* 64-bit PCI-X */ cvmx_write_csr(CVMX_CIU_SOFT_PRST, 0x4); break; } cvmx_read_csr(CVMX_CIU_SOFT_PRST); DELAY(2000); /* * Enable BARs and configure big BAR mode. */ pci_ctl_status_2.u32 = 0; pci_ctl_status_2.s.bb1_hole = 5; /* 256MB hole in BAR1 */ pci_ctl_status_2.s.bb1_siz = 1; /* BAR1 is 2GB */ pci_ctl_status_2.s.bb_ca = 1; /* Bypass cache for big BAR */ pci_ctl_status_2.s.bb_es = 1; /* Do big BAR byte-swapping */ pci_ctl_status_2.s.bb1 = 1; /* BAR1 is big */ pci_ctl_status_2.s.bb0 = 1; /* BAR0 is big */ pci_ctl_status_2.s.bar2pres = 1; /* BAR2 present */ pci_ctl_status_2.s.pmo_amod = 1; /* Round-robin priority */ pci_ctl_status_2.s.tsr_hwm = 1; pci_ctl_status_2.s.bar2_enb = 1; /* Enable BAR2 */ pci_ctl_status_2.s.bar2_esx = 1; /* Do BAR2 byte-swapping */ pci_ctl_status_2.s.bar2_cax = 1; /* Bypass cache for BAR2 */ NPI_WRITE(CVMX_NPI_PCI_CTL_STATUS_2, pci_ctl_status_2.u32); DELAY(2000); pci_ctl_status_2.u32 = NPI_READ(CVMX_NPI_PCI_CTL_STATUS_2); device_printf(dev, "%u-bit PCI%s bus.\n", pci_ctl_status_2.s.ap_64ad ? 64 : 32, pci_ctl_status_2.s.ap_pcix ? "-X" : ""); /* * Set up transaction splitting, etc., parameters. */ pci_cfg19.u32 = 0; pci_cfg19.s.mrbcm = 1; if (pci_ctl_status_2.s.ap_pcix) { pci_cfg19.s.mdrrmc = 0; pci_cfg19.s.tdomc = 4; } else { pci_cfg19.s.mdrrmc = 2; pci_cfg19.s.tdomc = 1; } NPI_WRITE(CVMX_NPI_PCI_CFG19, pci_cfg19.u32); NPI_READ(CVMX_NPI_PCI_CFG19); /* * Set up PCI error handling and memory access. */ pci_cfg01.u32 = 0; pci_cfg01.s.fbbe = 1; pci_cfg01.s.see = 1; pci_cfg01.s.pee = 1; pci_cfg01.s.me = 1; pci_cfg01.s.msae = 1; if (pci_ctl_status_2.s.ap_pcix) { pci_cfg01.s.fbb = 0; } else { pci_cfg01.s.fbb = 1; } NPI_WRITE(CVMX_NPI_PCI_CFG01, pci_cfg01.u32); NPI_READ(CVMX_NPI_PCI_CFG01); /* * Enable the Octeon bus arbiter. */ npi_pci_int_arb_cfg.u64 = 0; npi_pci_int_arb_cfg.s.en = 1; cvmx_write_csr(CVMX_NPI_PCI_INT_ARB_CFG, npi_pci_int_arb_cfg.u64); /* * Disable master latency timer. */ pci_cfg16.u32 = 0; pci_cfg16.s.mltd = 1; NPI_WRITE(CVMX_NPI_PCI_CFG16, pci_cfg16.u32); NPI_READ(CVMX_NPI_PCI_CFG16); /* * Configure master arbiter. 
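 * Added note: the NPI_WRITE()/NPI_READ() wrappers used throughout this
 * function XOR the register address with 4, flipping address bit 2 so that
 * the 32-bit access lands on the other word of the 64-bit NPI register
 * slot; this appears to account for the word ordering of the 64-bit window
 * as seen from the core, but that rationale is an assumption, not something
 * stated in the source.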
*/ pci_cfg22.u32 = 0; pci_cfg22.s.flush = 1; pci_cfg22.s.mrv = 255; NPI_WRITE(CVMX_NPI_PCI_CFG22, pci_cfg22.u32); NPI_READ(CVMX_NPI_PCI_CFG22); /* * Set up PCI-X capabilities. */ if (pci_ctl_status_2.s.ap_pcix) { pci_cfg56.u32 = 0; pci_cfg56.s.most = 3; pci_cfg56.s.roe = 1; /* Enable relaxed ordering */ pci_cfg56.s.dpere = 1; pci_cfg56.s.ncp = 0xe8; pci_cfg56.s.pxcid = 7; NPI_WRITE(CVMX_NPI_PCI_CFG56, pci_cfg56.u32); NPI_READ(CVMX_NPI_PCI_CFG56); } NPI_WRITE(CVMX_NPI_PCI_READ_CMD_6, 0x22); NPI_READ(CVMX_NPI_PCI_READ_CMD_6); NPI_WRITE(CVMX_NPI_PCI_READ_CMD_C, 0x33); NPI_READ(CVMX_NPI_PCI_READ_CMD_C); NPI_WRITE(CVMX_NPI_PCI_READ_CMD_E, 0x33); NPI_READ(CVMX_NPI_PCI_READ_CMD_E); /* * Configure MEM1 sub-DID access. */ npi_mem_access_subid.u64 = 0; npi_mem_access_subid.s.esr = 1; /* Byte-swap on read */ npi_mem_access_subid.s.esw = 1; /* Byte-swap on write */ switch (cvmx_sysinfo_get()->board_type) { #if defined(OCTEON_VENDOR_LANNER) case CVMX_BOARD_TYPE_CUST_LANNER_MR955: npi_mem_access_subid.s.shortl = 1; break; #endif default: break; } cvmx_write_csr(CVMX_NPI_MEM_ACCESS_SUBID3, npi_mem_access_subid.u64); /* * Configure BAR2. Linux says this has to come first. */ NPI_WRITE(CVMX_NPI_PCI_CFG08, 0x00000000); NPI_READ(CVMX_NPI_PCI_CFG08); NPI_WRITE(CVMX_NPI_PCI_CFG09, 0x00000080); NPI_READ(CVMX_NPI_PCI_CFG09); /* * Disable BAR1 IndexX. */ for (i = 0; i < 32; i++) { NPI_WRITE(CVMX_NPI_PCI_BAR1_INDEXX(i), 0); NPI_READ(CVMX_NPI_PCI_BAR1_INDEXX(i)); } /* * Configure BAR0 and BAR1. */ NPI_WRITE(CVMX_NPI_PCI_CFG04, 0x00000000); NPI_READ(CVMX_NPI_PCI_CFG04); NPI_WRITE(CVMX_NPI_PCI_CFG05, 0x00000000); NPI_READ(CVMX_NPI_PCI_CFG05); NPI_WRITE(CVMX_NPI_PCI_CFG06, 0x80000000); NPI_READ(CVMX_NPI_PCI_CFG06); NPI_WRITE(CVMX_NPI_PCI_CFG07, 0x00000000); NPI_READ(CVMX_NPI_PCI_CFG07); /* * Clear PCI interrupts. */ cvmx_write_csr(CVMX_NPI_PCI_INT_SUM2, 0xffffffffffffffffull); } static device_method_t octopci_methods[] = { /* Device interface */ DEVMETHOD(device_identify, octopci_identify), DEVMETHOD(device_probe, octopci_probe), DEVMETHOD(device_attach, octopci_attach), /* Bus interface */ DEVMETHOD(bus_read_ivar, octopci_read_ivar), DEVMETHOD(bus_alloc_resource, octopci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource,octopci_activate_resource), DEVMETHOD(bus_deactivate_resource,bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_add_child, bus_generic_add_child), /* pcib interface */ DEVMETHOD(pcib_maxslots, octopci_maxslots), DEVMETHOD(pcib_read_config, octopci_read_config), DEVMETHOD(pcib_write_config, octopci_write_config), DEVMETHOD(pcib_route_interrupt, octopci_route_interrupt), DEVMETHOD_END }; static driver_t octopci_driver = { "pcib", octopci_methods, sizeof(struct octopci_softc), }; static devclass_t octopci_devclass; DRIVER_MODULE(octopci, ciu, octopci_driver, octopci_devclass, 0, 0); Index: head/sys/mips/idt/idtpci.c =================================================================== --- head/sys/mips/idt/idtpci.c (revision 295879) +++ head/sys/mips/idt/idtpci.c (revision 295880) @@ -1,557 +1,556 @@ /* $NetBSD: idtpci.c,v 1.1 2007/03/20 08:52:02 dyoung Exp $ */ /*- * Copyright (c) 2007 David Young. * Copyright (c) 2007 Oleskandr Tymoshenko. All rights reserved. * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * 3. The name of the author may not be used to endorse or promote * products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, * OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * OF SUCH DAMAGE. */ /*- * Copyright (c) 2006 Itronix Inc. * All rights reserved. * * Written by Garrett D'Amore for Itronix Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of Itronix Inc. may not be used to endorse * or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY ITRONIX INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ITRONIX INC. BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include "pcib_if.h" #include #ifdef IDTPCI_DEBUG int idtpci_debug = 1; #define IDTPCI_DPRINTF(__fmt, ...) \ do { \ if (idtpci_debug) \ printf((__fmt), __VA_ARGS__); \ } while (/*CONSTCOND*/0) #else /* !IDTPCI_DEBUG */ #define IDTPCI_DPRINTF(__fmt, ...) 
do { } while (/*CONSTCOND*/0) #endif /* IDTPCI_DEBUG */ #define IDTPCI_TAG_BUS_MASK 0x007f0000 #define IDTPCI_TAG_DEVICE_MASK 0x00007800 #define IDTPCI_TAG_FUNCTION_MASK 0x00000300 #define IDTPCI_TAG_REGISTER_MASK 0x0000007c #define IDTPCI_MAX_DEVICE #define REG_READ(o) *((volatile uint32_t *)MIPS_PHYS_TO_KSEG1(IDT_BASE_PCI + (o))) #define REG_WRITE(o,v) (REG_READ(o)) = (v) unsigned int korina_fixup[24] = { 0x00000157, 0x00000000, 0x00003c04, 0x00000008, 0x18800001, 0x18000001, 0x48000008, 0x00000000, 0x00000000, 0x00000000, 0x011d0214, 0x00000000, 0x00000000, 0x00000000, 0x38080101, 0x00008080, 0x00000d6e, 0x00000000, 0x00000051, 0x00000000, 0x00000055, 0x18000000, 0x00000000, 0x00000000 }; struct idtpci_softc { device_t sc_dev; int sc_busno; struct rman sc_mem_rman[2]; struct rman sc_io_rman[2]; struct rman sc_irq_rman; }; static uint32_t idtpci_make_addr(int bus, int slot, int func, int reg) { return 0x80000000 | (bus << 16) | (slot << 11) | (func << 8) | reg; } static int idtpci_probe(device_t dev) { return (0); } static int idtpci_attach(device_t dev) { int busno = 0; struct idtpci_softc *sc = device_get_softc(dev); unsigned int pci_data, force_endianess = 0; int i; bus_addr_t addr; sc->sc_dev = dev; sc->sc_busno = busno; /* TODO: Check for host mode */ /* Enabled PCI, IG mode, EAP mode */ REG_WRITE(IDT_PCI_CNTL, IDT_PCI_CNTL_IGM | IDT_PCI_CNTL_EAP | IDT_PCI_CNTL_EN); /* Wait while "Reset in progress bit" set */ while(1) { pci_data = REG_READ(IDT_PCI_STATUS); if((pci_data & IDT_PCI_STATUS_RIP) == 0) break; } /* Reset status register */ REG_WRITE(IDT_PCI_STATUS, 0); /* Mask interrupts related to status register */ REG_WRITE(IDT_PCI_STATUS_MASK, 0xffffffff); /* Disable PCI decoupled access */ REG_WRITE(IDT_PCI_DAC, 0); /* Zero status and mask DA interrupts */ REG_WRITE(IDT_PCI_DAS, 0); REG_WRITE(IDT_PCI_DASM, 0x7f); /* Init PCI messaging unit */ /* Disable messaging interrupts */ REG_WRITE(IDT_PCI_IIC, 0); REG_WRITE(IDT_PCI_IIM, 0xffffffff); REG_WRITE(IDT_PCI_OIC, 0); REG_WRITE(IDT_PCI_OIM, 0); #ifdef __MIPSEB__ force_endianess = IDT_PCI_LBA_FE; #endif /* LBA0 -- memory window */ REG_WRITE(IDT_PCI_LBA0, IDT_PCIMEM0_BASE); REG_WRITE(IDT_PCI_LBA0_MAP, IDT_PCIMEM0_BASE); REG_WRITE(IDT_PCI_LBA0_CNTL, IDT_PCI_LBA_SIZE_16MB | force_endianess); pci_data = REG_READ(IDT_PCI_LBA0_CNTL); /* LBA1 -- memory window */ REG_WRITE(IDT_PCI_LBA1, IDT_PCIMEM1_BASE); REG_WRITE(IDT_PCI_LBA1_MAP, IDT_PCIMEM1_BASE); REG_WRITE(IDT_PCI_LBA1_CNTL, IDT_PCI_LBA_SIZE_256MB | force_endianess); pci_data = REG_READ(IDT_PCI_LBA1_CNTL); /* LBA2 -- IO window */ REG_WRITE(IDT_PCI_LBA2, IDT_PCIMEM2_BASE); REG_WRITE(IDT_PCI_LBA2_MAP, IDT_PCIMEM2_BASE); REG_WRITE(IDT_PCI_LBA2_CNTL, IDT_PCI_LBA_SIZE_4MB | IDT_PCI_LBA_MSI | force_endianess); pci_data = REG_READ(IDT_PCI_LBA2_CNTL); /* LBA3 -- IO window */ REG_WRITE(IDT_PCI_LBA3, IDT_PCIMEM3_BASE); REG_WRITE(IDT_PCI_LBA3_MAP, IDT_PCIMEM3_BASE); REG_WRITE(IDT_PCI_LBA3_CNTL, IDT_PCI_LBA_SIZE_1MB | IDT_PCI_LBA_MSI | force_endianess); pci_data = REG_READ(IDT_PCI_LBA3_CNTL); pci_data = REG_READ(IDT_PCI_CNTL) & ~IDT_PCI_CNTL_TNR; REG_WRITE(IDT_PCI_CNTL, pci_data); pci_data = REG_READ(IDT_PCI_CNTL); /* Rewrite Target Control register with default values */ REG_WRITE(IDT_PCI_TC, (IDT_PCI_TC_DTIMER << 8) | IDT_PCI_TC_RTIMER); /* Perform Korina fixup */ addr = idtpci_make_addr(0, 0, 0, 4); for (i = 0; i < 24; i++) { REG_WRITE(IDT_PCI_CFG_ADDR, addr); REG_WRITE(IDT_PCI_CFG_DATA, korina_fixup[i]); __asm__ volatile ("sync"); REG_WRITE(IDT_PCI_CFG_ADDR, 0); REG_WRITE(IDT_PCI_CFG_DATA, 0); addr 
+= 4; } /* Use KSEG1 to access IO ports for it is uncached */ sc->sc_io_rman[0].rm_type = RMAN_ARRAY; sc->sc_io_rman[0].rm_descr = "IDTPCI I/O Ports window 1"; if (rman_init(&sc->sc_io_rman[0]) != 0 || rman_manage_region(&sc->sc_io_rman[0], IDT_PCIMEM2_BASE, IDT_PCIMEM2_BASE + IDT_PCIMEM2_SIZE - 1) != 0) { panic("idtpci_attach: failed to set up I/O rman"); } sc->sc_io_rman[1].rm_type = RMAN_ARRAY; sc->sc_io_rman[1].rm_descr = "IDTPCI I/O Ports window 2"; if (rman_init(&sc->sc_io_rman[1]) != 0 || rman_manage_region(&sc->sc_io_rman[1], IDT_PCIMEM3_BASE, IDT_PCIMEM3_BASE + IDT_PCIMEM3_SIZE - 1) != 0) { panic("idtpci_attach: failed to set up I/O rman"); } /* Use KSEG1 to access PCI memory for it is uncached */ sc->sc_mem_rman[0].rm_type = RMAN_ARRAY; sc->sc_mem_rman[0].rm_descr = "IDTPCI PCI Memory window 1"; if (rman_init(&sc->sc_mem_rman[0]) != 0 || rman_manage_region(&sc->sc_mem_rman[0], IDT_PCIMEM0_BASE, IDT_PCIMEM0_BASE + IDT_PCIMEM0_SIZE) != 0) { panic("idtpci_attach: failed to set up memory rman"); } sc->sc_mem_rman[1].rm_type = RMAN_ARRAY; sc->sc_mem_rman[1].rm_descr = "IDTPCI PCI Memory window 2"; if (rman_init(&sc->sc_mem_rman[1]) != 0 || rman_manage_region(&sc->sc_mem_rman[1], IDT_PCIMEM1_BASE, IDT_PCIMEM1_BASE + IDT_PCIMEM1_SIZE) != 0) { panic("idtpci_attach: failed to set up memory rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "IDTPCI PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, PCI_IRQ_BASE, PCI_IRQ_END) != 0) panic("idtpci_attach: failed to set up IRQ rman"); device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int idtpci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static uint32_t idtpci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { uint32_t data; uint32_t shift, mask; bus_addr_t addr; IDTPCI_DPRINTF("%s: tag (%x, %x, %x) reg %d(%d)\n", __func__, bus, slot, func, reg, bytes); addr = idtpci_make_addr(bus, slot, func, reg); REG_WRITE(IDT_PCI_CFG_ADDR, addr); data = REG_READ(IDT_PCI_CFG_DATA); switch (reg % 4) { case 3: shift = 24; break; case 2: shift = 16; break; case 1: shift = 8; break; default: shift = 0; break; } switch (bytes) { case 1: mask = 0xff; data = (data >> shift) & mask; break; case 2: mask = 0xffff; if (reg % 4 == 0) data = data & mask; else data = (data >> 16) & mask; break; case 4: break; default: panic("%s: wrong bytes count", __func__); break; } __asm__ volatile ("sync"); IDTPCI_DPRINTF("%s: read 0x%x\n", __func__, data); return (data); } static void idtpci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { bus_addr_t addr; uint32_t reg_data; uint32_t shift, mask; IDTPCI_DPRINTF("%s: tag (%x, %x, %x) reg %d(%d) data %08x\n", __func__, bus, slot, func, reg, bytes, data); if (bytes != 4) { reg_data = idtpci_read_config(dev, bus, slot, func, reg, 4); switch (reg % 4) { case 3: shift = 24; break; case 2: shift = 16; break; case 1: shift = 8; break; default: shift = 0; break; } switch (bytes) { case 1: mask = 0xff; data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 2: mask = 0xffff; if (reg % 4 == 0) data = (reg_data & ~mask) | data; else data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 4: break; default: panic("%s: wrong bytes count", __func__); break; } } addr = idtpci_make_addr(bus, slot, func, reg); REG_WRITE(IDT_PCI_CFG_ADDR, addr); REG_WRITE(IDT_PCI_CFG_DATA, data); __asm__ volatile ("sync"); REG_WRITE(IDT_PCI_CFG_ADDR, 0); 
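/*
 * Added illustration, not part of the original driver: idtpci_read_config()
 * and idtpci_write_config() emulate sub-word configuration cycles with
 * aligned 32-bit accesses.  The disabled fragment below shows what the read
 * path does for a one-byte read of PCIR_HDRTYPE (offset 0x0e): the
 * containing dword is fetched and the byte taken from bits 23:16, because
 * 0x0e % 4 == 2 selects a 16-bit shift.  Sub-word writes read-modify-write
 * the dword under the complementary mask, as above.
 */
#if 0
	uint32_t dword, hdrtype;

	/* Fetch the dword holding offset 0x0e, then pick out the byte. */
	dword = idtpci_read_config(dev, bus, slot, func, PCIR_HDRTYPE & ~3, 4);
	hdrtype = (dword >> 16) & 0xff;
#endif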
REG_WRITE(IDT_PCI_CFG_DATA, 0); } static int idtpci_route_interrupt(device_t pcib, device_t device, int pin) { static int idt_pci_table[2][12] = { { 0, 0, 2, 3, 2, 3, 0, 0, 0, 0, 0, 1 }, { 0, 0, 1, 3, 0, 2, 1, 3, 0, 2, 1, 3 } }; int dev, bus, irq; dev = pci_get_slot(device); bus = pci_get_bus(device); if (bootverbose) device_printf(pcib, "routing pin %d for %s\n", pin, device_get_nameunit(device)); if (bus >= 0 && bus <= 1 && dev >= 0 && dev <= 11) { irq = IP_IRQ(6, idt_pci_table[bus][dev] + 4); if (bootverbose) printf("idtpci: %d/%d/%d -> IRQ%d\n", pci_get_bus(device), dev, pci_get_function(device), irq); return (irq); } else printf("idtpci: no mapping for %d/%d/%d\n", pci_get_bus(device), dev, pci_get_function(device)); return (-1); } static int idtpci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct idtpci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int idtpci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct idtpci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * idtpci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct idtpci_softc *sc = device_get_softc(bus); struct resource *rv = NULL; struct rman *rm1, *rm2; switch (type) { case SYS_RES_IRQ: rm1 = &sc->sc_irq_rman; rm2 = NULL; break; case SYS_RES_MEMORY: rm1 = &sc->sc_mem_rman[0]; rm2 = &sc->sc_mem_rman[1]; break; case SYS_RES_IOPORT: rm1 = &sc->sc_io_rman[0]; rm2 = &sc->sc_io_rman[1]; break; default: return (NULL); } rv = rman_reserve_resource(rm1, start, end, count, flags, child); /* Try second window if it exists */ if ((rv == NULL) && (rm2 != NULL)) rv = rman_reserve_resource(rm2, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int idtpci_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { return (intr_event_remove_handler(cookie)); } static device_method_t idtpci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, idtpci_probe), DEVMETHOD(device_attach, idtpci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, idtpci_read_ivar), DEVMETHOD(bus_write_ivar, idtpci_write_ivar), DEVMETHOD(bus_alloc_resource, idtpci_alloc_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, idtpci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, idtpci_maxslots), DEVMETHOD(pcib_read_config, idtpci_read_config), DEVMETHOD(pcib_write_config, idtpci_write_config), DEVMETHOD(pcib_route_interrupt, idtpci_route_interrupt), DEVMETHOD_END }; static driver_t idtpci_driver = { "pcib", idtpci_methods, sizeof(struct idtpci_softc), }; static devclass_t idtpci_devclass; DRIVER_MODULE(idtpci, obio, idtpci_driver, idtpci_devclass, 0, 0); Index: head/sys/mips/malta/gt_pci.c 
=================================================================== --- head/sys/mips/malta/gt_pci.c (revision 295879) +++ head/sys/mips/malta/gt_pci.c (revision 295880) @@ -1,774 +1,773 @@ /* $NetBSD: gt_pci.c,v 1.4 2003/07/15 00:24:54 lukem Exp $ */ /*- * Copyright (c) 2001, 2002 Wasabi Systems, Inc. * All rights reserved. * * Written by Jason R. Thorpe for Wasabi Systems, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed for the NetBSD Project by * Wasabi Systems, Inc. * 4. The name of Wasabi Systems, Inc. may not be used to endorse * or promote products derived from this software without specific prior * written permission. * * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * PCI configuration support for gt I/O Processor chip. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include #include "pcib_if.h" #include #define ICU_LEN 16 /* number of ISA IRQs */ /* * XXX: These defines are from NetBSD's . Respective file * from FreeBSD src tree lacks some definitions. */ #define PIC_OCW1 1 #define PIC_OCW2 0 #define PIC_OCW3 0 #define OCW2_SELECT 0 #define OCW2_ILS(x) ((x) << 0) /* interrupt level select */ #define OCW3_POLL_IRQ(x) ((x) & 0x7f) #define OCW3_POLL_PENDING (1U << 7) /* * Galileo controller's registers are LE so convert to then * to/from native byte order. 
We rely on boot loader or emulator * to set "swap bytes" configuration correctly for us */ #define GT_PCI_DATA(v) htole32((v)) #define GT_HOST_DATA(v) le32toh((v)) struct gt_pci_softc; struct gt_pci_intr_cookie { int irq; struct gt_pci_softc *sc; }; struct gt_pci_softc { device_t sc_dev; bus_space_tag_t sc_st; bus_space_handle_t sc_ioh_icu1; bus_space_handle_t sc_ioh_icu2; bus_space_handle_t sc_ioh_elcr; int sc_busno; struct rman sc_mem_rman; struct rman sc_io_rman; struct rman sc_irq_rman; unsigned long sc_mem; bus_space_handle_t sc_io; struct resource *sc_irq; struct intr_event *sc_eventstab[ICU_LEN]; struct gt_pci_intr_cookie sc_intr_cookies[ICU_LEN]; uint16_t sc_imask; uint16_t sc_elcr; uint16_t sc_reserved; void *sc_ih; }; static void gt_pci_set_icus(struct gt_pci_softc *); static int gt_pci_intr(void *v); static int gt_pci_probe(device_t); static int gt_pci_attach(device_t); static int gt_pci_activate_resource(device_t, device_t, int, int, struct resource *); static int gt_pci_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); static int gt_pci_teardown_intr(device_t, device_t, struct resource *, void*); static int gt_pci_maxslots(device_t ); static int gt_pci_conf_setup(struct gt_pci_softc *, int, int, int, int, uint32_t *); static uint32_t gt_pci_read_config(device_t, u_int, u_int, u_int, u_int, int); static void gt_pci_write_config(device_t, u_int, u_int, u_int, u_int, uint32_t, int); static int gt_pci_route_interrupt(device_t pcib, device_t dev, int pin); static struct resource * gt_pci_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static void gt_pci_mask_irq(void *source) { struct gt_pci_intr_cookie *cookie = source; struct gt_pci_softc *sc = cookie->sc; int irq = cookie->irq; sc->sc_imask |= (1 << irq); sc->sc_elcr |= (1 << irq); gt_pci_set_icus(sc); } static void gt_pci_unmask_irq(void *source) { struct gt_pci_intr_cookie *cookie = source; struct gt_pci_softc *sc = cookie->sc; int irq = cookie->irq; /* Enable it, set trigger mode. */ sc->sc_imask &= ~(1 << irq); sc->sc_elcr &= ~(1 << irq); gt_pci_set_icus(sc); } static void gt_pci_set_icus(struct gt_pci_softc *sc) { /* Enable the cascade IRQ (2) if 8-15 is enabled. */ if ((sc->sc_imask & 0xff00) != 0xff00) sc->sc_imask &= ~(1U << 2); else sc->sc_imask |= (1U << 2); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW1, sc->sc_imask & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW1, (sc->sc_imask >> 8) & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 0, sc->sc_elcr & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 1, (sc->sc_elcr >> 8) & 0xff); } static int gt_pci_intr(void *v) { struct gt_pci_softc *sc = v; struct intr_event *event; int irq; for (;;) { bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW3, OCW3_SEL | OCW3_P); irq = bus_space_read_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW3); if ((irq & OCW3_POLL_PENDING) == 0) { return FILTER_HANDLED; } irq = OCW3_POLL_IRQ(irq); if (irq == 2) { bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW3, OCW3_SEL | OCW3_P); irq = bus_space_read_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW3); if (irq & OCW3_POLL_PENDING) irq = OCW3_POLL_IRQ(irq) + 8; else irq = 2; } event = sc->sc_eventstab[irq]; if (!event || TAILQ_EMPTY(&event->ie_handlers)) continue; /* TODO: frame instead of NULL? */ intr_event_handle(event, NULL); /* XXX: Log stray IRQs */ /* Send a specific EOI to the 8259. 
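 * For an interrupt that arrived through the slave (irq 8-15) this is a
 * two-step sequence: a specific EOI with level irq & 7 goes to the second
 * 8259, then a specific EOI for level 2, the cascade input, goes to the
 * first; irq 10, for instance, results in OCW2_ILS(2) on both controllers.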
*/ if (irq > 7) { bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, PIC_OCW2, OCW2_SELECT | OCW2_EOI | OCW2_SL | OCW2_ILS(irq & 7)); irq = 2; } bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, PIC_OCW2, OCW2_SELECT | OCW2_EOI | OCW2_SL | OCW2_ILS(irq)); } return FILTER_HANDLED; } static int gt_pci_probe(device_t dev) { device_set_desc(dev, "GT64120 PCI bridge"); return (0); } static int gt_pci_attach(device_t dev) { uint32_t busno; struct gt_pci_softc *sc = device_get_softc(dev); int rid; busno = 0; sc->sc_dev = dev; sc->sc_busno = busno; sc->sc_st = mips_bus_space_generic; /* Use KSEG1 to access IO ports for it is uncached */ sc->sc_io = MIPS_PHYS_TO_KSEG1(MALTA_PCI0_IO_BASE); sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "GT64120 PCI I/O Ports"; /* * First 256 bytes are ISA's registers: e.g. i8259's * So do not use them for general purpose PCI I/O window */ if (rman_init(&sc->sc_io_rman) != 0 || rman_manage_region(&sc->sc_io_rman, 0x100, 0xffff) != 0) { panic("gt_pci_attach: failed to set up I/O rman"); } /* Use KSEG1 to access PCI memory for it is uncached */ sc->sc_mem = MIPS_PHYS_TO_KSEG1(MALTA_PCIMEM1_BASE); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "GT64120 PCI Memory"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, sc->sc_mem, sc->sc_mem + MALTA_PCIMEM1_SIZE) != 0) { panic("gt_pci_attach: failed to set up memory rman"); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "GT64120 PCI IRQs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, 1, 31) != 0) panic("gt_pci_attach: failed to set up IRQ rman"); /* * Map the PIC/ELCR registers. */ #if 0 if (bus_space_map(sc->sc_st, 0x4d0, 2, 0, &sc->sc_ioh_elcr) != 0) device_printf(dev, "unable to map ELCR registers\n"); if (bus_space_map(sc->sc_st, IO_ICU1, 2, 0, &sc->sc_ioh_icu1) != 0) device_printf(dev, "unable to map ICU1 registers\n"); if (bus_space_map(sc->sc_st, IO_ICU2, 2, 0, &sc->sc_ioh_icu2) != 0) device_printf(dev, "unable to map ICU2 registers\n"); #else sc->sc_ioh_elcr = sc->sc_io + 0x4d0; sc->sc_ioh_icu1 = sc->sc_io + IO_ICU1; sc->sc_ioh_icu2 = sc->sc_io + IO_ICU2; #endif /* All interrupts default to "masked off". */ sc->sc_imask = 0xffff; /* All interrupts default to edge-triggered. */ sc->sc_elcr = 0; /* * Initialize the 8259s. 
*/ /* reset, program device, 4 bytes */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 0, ICW1_RESET | ICW1_IC4); /* * XXX: values from NetBSD's */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, 0/*XXX*/); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, 1 << 2); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, ICW4_8086); /* mask all interrupts */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 1, sc->sc_imask & 0xff); /* enable special mask mode */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 0, OCW3_SEL | OCW3_ESMM | OCW3_SMM); /* read IRR by default */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu1, 0, OCW3_SEL | OCW3_RR); /* reset, program device, 4 bytes */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 0, ICW1_RESET | ICW1_IC4); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, 0/*XXX*/); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, 1 << 2); bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, ICW4_8086); /* mask all interrupts */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 1, sc->sc_imask & 0xff); /* enable special mask mode */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 0, OCW3_SEL | OCW3_ESMM | OCW3_SMM); /* read IRR by default */ bus_space_write_1(sc->sc_st, sc->sc_ioh_icu2, 0, OCW3_SEL | OCW3_RR); /* * Default all interrupts to edge-triggered. */ bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 0, sc->sc_elcr & 0xff); bus_space_write_1(sc->sc_st, sc->sc_ioh_elcr, 1, (sc->sc_elcr >> 8) & 0xff); /* * Some ISA interrupts are reserved for devices that * we know are hard-wired to certain IRQs. */ sc->sc_reserved = (1U << 0) | /* timer */ (1U << 1) | /* keyboard controller (keyboard) */ (1U << 2) | /* PIC cascade */ (1U << 3) | /* COM 2 */ (1U << 4) | /* COM 1 */ (1U << 6) | /* floppy */ (1U << 7) | /* centronics */ (1U << 8) | /* RTC */ (1U << 9) | /* I2C */ (1U << 12) | /* keyboard controller (mouse) */ (1U << 14) | /* IDE primary */ (1U << 15); /* IDE secondary */ /* Hook up our interrupt handler. */ if ((sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, MALTA_SOUTHBRIDGE_INTR, MALTA_SOUTHBRIDGE_INTR, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "unable to allocate IRQ resource\n"); return ENXIO; } if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC, gt_pci_intr, NULL, sc, &sc->sc_ih))) { device_printf(dev, "WARNING: unable to register interrupt handler\n"); return ENXIO; } /* Initialize memory and i/o rmans. */ device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int gt_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static int gt_pci_conf_setup(struct gt_pci_softc *sc, int bus, int slot, int func, int reg, uint32_t *addr) { *addr = (bus << 16) | (slot << 11) | (func << 8) | reg; return (0); } static uint32_t gt_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct gt_pci_softc *sc = device_get_softc(dev); uint32_t data; uint32_t addr; uint32_t shift, mask; if (gt_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr)) return (uint32_t)(-1); /* Clear cause register bits. */ GT_REGVAL(GT_INTR_CAUSE) = GT_PCI_DATA(0); GT_REGVAL(GT_PCI0_CFG_ADDR) = GT_PCI_DATA((1U << 31) | addr); /* * Galileo system controller is special */ if ((bus == 0) && (slot == 0)) data = GT_PCI_DATA(GT_REGVAL(GT_PCI0_CFG_DATA)); else data = GT_REGVAL(GT_PCI0_CFG_DATA); /* Check for master abort. 
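 * Added note: gt_pci_conf_setup() above packs the address in the usual
 * configuration-mechanism layout, (bus << 16) | (slot << 11) | (func << 8)
 * | reg, and bit 31 is set when it is written to GT_PCI0_CFG_ADDR to
 * enable the cycle; bus 0, slot 9, function 0, register 0x3c, for example,
 * becomes 0x8000483c.  The cause-register check below turns a master or
 * target abort into an all-ones answer, the conventional "nothing there"
 * value.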
*/ if (GT_HOST_DATA(GT_REGVAL(GT_INTR_CAUSE)) & (GTIC_MASABORT0 | GTIC_TARABORT0)) data = (uint32_t) -1; switch(reg % 4) { case 3: shift = 24; break; case 2: shift = 16; break; case 1: shift = 8; break; default: shift = 0; break; } switch(bytes) { case 1: mask = 0xff; data = (data >> shift) & mask; break; case 2: mask = 0xffff; if(reg % 4 == 0) data = data & mask; else data = (data >> 16) & mask; break; case 4: break; default: panic("gt_pci_readconfig: wrong bytes count"); break; } #if 0 printf("PCICONF_READ(%02x:%02x.%02x[%04x] -> %02x(%d)\n", bus, slot, func, reg, data, bytes); #endif return (data); } static void gt_pci_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t data, int bytes) { struct gt_pci_softc *sc = device_get_softc(dev); uint32_t addr; uint32_t reg_data; uint32_t shift, mask; if(bytes != 4) { reg_data = gt_pci_read_config(dev, bus, slot, func, reg, 4); shift = 8 * (reg & 3); switch(bytes) { case 1: mask = 0xff; data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 2: mask = 0xffff; if(reg % 4 == 0) data = (reg_data & ~mask) | data; else data = (reg_data & ~ (mask << shift)) | (data << shift); break; case 4: break; default: panic("gt_pci_readconfig: wrong bytes count"); break; } } if (gt_pci_conf_setup(sc, bus, slot, func, reg & ~3, &addr)) return; /* The galileo has problems accessing device 31. */ if (bus == 0 && slot == 31) return; /* XXX: no support for bus > 0 yet */ if (bus > 0) return; /* Clear cause register bits. */ GT_REGVAL(GT_INTR_CAUSE) = GT_PCI_DATA(0); GT_REGVAL(GT_PCI0_CFG_ADDR) = GT_PCI_DATA((1U << 31) | addr); /* * Galileo system controller is special */ if ((bus == 0) && (slot == 0)) GT_REGVAL(GT_PCI0_CFG_DATA) = GT_PCI_DATA(data); else GT_REGVAL(GT_PCI0_CFG_DATA) = data; #if 0 printf("PCICONF_WRITE(%02x:%02x.%02x[%04x] -> %02x(%d)\n", bus, slot, func, reg, data, bytes); #endif } static int gt_pci_route_interrupt(device_t pcib, device_t dev, int pin) { int bus; int device; int func; /* struct gt_pci_softc *sc = device_get_softc(pcib); */ bus = pci_get_bus(dev); device = pci_get_slot(dev); func = pci_get_function(dev); /* * XXXMIPS: We need routing logic. This is just a stub . */ switch (device) { case 9: /* * PIIX4 IDE adapter. 
HW IRQ0 */ return 0; case 11: /* Ethernet */ return 10; default: device_printf(pcib, "no IRQ mapping for %d/%d/%d/%d\n", bus, device, func, pin); } return (0); } static int gt_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct gt_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int gt_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct gt_pci_softc * sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * gt_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct gt_pci_softc *sc = device_get_softc(bus); struct resource *rv = NULL; struct rman *rm; bus_space_handle_t bh = 0; switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; bh = sc->sc_mem; break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; bh = sc->sc_io; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (type != SYS_RES_IRQ) { bh += (rman_get_start(rv)); rman_set_bustag(rv, gt_pci_bus_space); rman_set_bushandle(rv, bh); if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } } return (rv); } static int gt_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { bus_space_handle_t p; int error; if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { error = bus_space_map(rman_get_bustag(r), rman_get_bushandle(r), rman_get_size(r), 0, &p); if (error) return (error); rman_set_bushandle(r, p); } return (rman_activate_resource(r)); } static int gt_pci_setup_intr(device_t dev, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct gt_pci_softc *sc = device_get_softc(dev); struct intr_event *event; int irq, error; irq = rman_get_start(ires); if (irq >= ICU_LEN || irq == 2) panic("%s: bad irq or type", __func__); event = sc->sc_eventstab[irq]; sc->sc_intr_cookies[irq].irq = irq; sc->sc_intr_cookies[irq].sc = sc; if (event == NULL) { error = intr_event_create(&event, (void *)&sc->sc_intr_cookies[irq], 0, irq, gt_pci_mask_irq, gt_pci_unmask_irq, NULL, NULL, "gt_pci intr%d:", irq); if (error) return 0; sc->sc_eventstab[irq] = event; } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); gt_pci_unmask_irq((void *)&sc->sc_intr_cookies[irq]); return 0; } static int gt_pci_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { struct gt_pci_softc *sc = device_get_softc(dev); int irq; irq = rman_get_start(res); gt_pci_mask_irq((void *)&sc->sc_intr_cookies[irq]); return (intr_event_remove_handler(cookie)); } static device_method_t gt_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gt_pci_probe), DEVMETHOD(device_attach, gt_pci_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, gt_read_ivar), DEVMETHOD(bus_write_ivar, gt_write_ivar), DEVMETHOD(bus_alloc_resource, gt_pci_alloc_resource), 
DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, gt_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_setup_intr, gt_pci_setup_intr), DEVMETHOD(bus_teardown_intr, gt_pci_teardown_intr), /* pcib interface */ DEVMETHOD(pcib_maxslots, gt_pci_maxslots), DEVMETHOD(pcib_read_config, gt_pci_read_config), DEVMETHOD(pcib_write_config, gt_pci_write_config), DEVMETHOD(pcib_route_interrupt, gt_pci_route_interrupt), DEVMETHOD_END }; static driver_t gt_pci_driver = { "pcib", gt_pci_methods, sizeof(struct gt_pci_softc), }; static devclass_t gt_pci_devclass; DRIVER_MODULE(gt_pci, gt, gt_pci_driver, gt_pci_devclass, 0, 0); Index: head/sys/mips/mips/minidump_machdep.c =================================================================== --- head/sys/mips/mips/minidump_machdep.c (revision 295879) +++ head/sys/mips/mips/minidump_machdep.c (revision 295880) @@ -1,341 +1,340 @@ /*- * Copyright (c) 2010 Oleksandr Tymoshenko * Copyright (c) 2008 Semihalf, Grzegorz Bernacki * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * from: FreeBSD: src/sys/arm/arm/minidump_machdep.c v214223 */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); /* * Don't touch the first SIZEOF_METADATA bytes on the dump device. This * is to protect us from metadata and to protect metadata from us. */ #define SIZEOF_METADATA (64*1024) uint32_t *vm_page_dump; int vm_page_dump_size; static struct kerneldumpheader kdh; static off_t dumplo; static off_t origdumplo; /* Handle chunked writes. 
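 * write_buffer() below streams the dump in chunks of at most di->maxiosize
 * bytes, printing a progress figure roughly every 4MB of output
 * (counter >> 22); PG2MB() rounds a page count up to whole megabytes,
 * 256 pages per megabyte with 4KB pages.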
*/ static uint64_t counter, progress; /* Just auxiliary bufffer */ static char tmpbuffer[PAGE_SIZE]; extern pd_entry_t *kernel_segmap; CTASSERT(sizeof(*vm_page_dump) == 4); static int is_dumpable(vm_paddr_t pa) { int i; for (i = 0; dump_avail[i] != 0 || dump_avail[i + 1] != 0; i += 2) { if (pa >= dump_avail[i] && pa < dump_avail[i + 1]) return (1); } return (0); } void dump_add_page(vm_paddr_t pa) { int idx, bit; pa >>= PAGE_SHIFT; idx = pa >> 5; /* 2^5 = 32 */ bit = pa & 31; atomic_set_int(&vm_page_dump[idx], 1ul << bit); } void dump_drop_page(vm_paddr_t pa) { int idx, bit; pa >>= PAGE_SHIFT; idx = pa >> 5; /* 2^5 = 32 */ bit = pa & 31; atomic_clear_int(&vm_page_dump[idx], 1ul << bit); } #define PG2MB(pgs) (((pgs) + (1 << 8) - 1) >> 8) static int write_buffer(struct dumperinfo *di, char *ptr, size_t sz) { size_t len; int error, c; u_int maxdumpsz; maxdumpsz = di->maxiosize; if (maxdumpsz == 0) /* seatbelt */ maxdumpsz = PAGE_SIZE; error = 0; while (sz) { len = min(maxdumpsz, sz); counter += len; progress -= len; if (counter >> 22) { printf(" %jd", PG2MB(progress >> PAGE_SHIFT)); counter &= (1<<22) - 1; } if (ptr) { error = dump_write(di, ptr, 0, dumplo, len); if (error) return (error); dumplo += len; ptr += len; sz -= len; } else { panic("pa is not supported"); } /* Check for user abort. */ c = cncheckc(); if (c == 0x03) return (ECANCELED); if (c != -1) printf(" (CTRL-C to abort) "); } return (0); } int minidumpsys(struct dumperinfo *di) { struct minidumphdr mdhdr; uint64_t dumpsize; uint32_t ptesize; uint32_t bits; vm_paddr_t pa; vm_offset_t prev_pte = 0; uint32_t count = 0; vm_offset_t va; pt_entry_t *pte; int i, bit, error; void *dump_va; /* Flush cache */ mips_dcache_wbinv_all(); counter = 0; /* Walk page table pages, set bits in vm_page_dump */ ptesize = 0; for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += NBPDR) { ptesize += PAGE_SIZE; pte = pmap_pte(kernel_pmap, va); KASSERT(pte != NULL, ("pte for %jx is NULL", (uintmax_t)va)); for (i = 0; i < NPTEPG; i++) { if (pte_test(&pte[i], PTE_V)) { pa = TLBLO_PTE_TO_PA(pte[i]); if (is_dumpable(pa)) dump_add_page(pa); } } } /* * Now mark pages from 0 to phys_avail[0], that's where kernel * and pages allocated by pmap_steal reside */ for (pa = 0; pa < phys_avail[0]; pa += PAGE_SIZE) { if (is_dumpable(pa)) dump_add_page(pa); } /* Calculate dump size. */ dumpsize = ptesize; dumpsize += round_page(msgbufp->msg_size); dumpsize += round_page(vm_page_dump_size); for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) { bits = vm_page_dump[i]; while (bits) { bit = ffs(bits) - 1; pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE; /* Clear out undumpable pages now if needed */ if (is_dumpable(pa)) dumpsize += PAGE_SIZE; else dump_drop_page(pa); bits &= ~(1ul << bit); } } dumpsize += PAGE_SIZE; /* Determine dump offset on device. 
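 * Added note: the bitmap maintained by dump_add_page() above keeps one bit
 * per physical page, idx = pfn >> 5 selecting the 32-bit word and
 * bit = pfn & 31 the position within it, so each word covers 32 pages
 * (128KB with 4KB pages).  The dump loops invert this as
 * pa = (i * 32 + bit) * PAGE_SIZE; physical address 0x123000, for
 * instance, is page frame 0x123 and lands in word 9, bit 3.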
*/ if (di->mediasize < SIZEOF_METADATA + dumpsize + sizeof(kdh) * 2) { error = ENOSPC; goto fail; } origdumplo = dumplo = di->mediaoffset + di->mediasize - dumpsize; dumplo -= sizeof(kdh) * 2; progress = dumpsize; /* Initialize mdhdr */ bzero(&mdhdr, sizeof(mdhdr)); strcpy(mdhdr.magic, MINIDUMP_MAGIC); mdhdr.version = MINIDUMP_VERSION; mdhdr.msgbufsize = msgbufp->msg_size; mdhdr.bitmapsize = vm_page_dump_size; mdhdr.ptesize = ptesize; mdhdr.kernbase = VM_MIN_KERNEL_ADDRESS; mkdumpheader(&kdh, KERNELDUMPMAGIC, KERNELDUMP_MIPS_VERSION, dumpsize, di->blocksize); printf("Physical memory: %ju MB\n", (uintmax_t)ptoa((uintmax_t)physmem) / 1048576); printf("Dumping %llu MB:", (long long)dumpsize >> 20); /* Dump leader */ error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh)); if (error) goto fail; dumplo += sizeof(kdh); /* Dump my header */ bzero(tmpbuffer, sizeof(tmpbuffer)); bcopy(&mdhdr, tmpbuffer, sizeof(mdhdr)); error = write_buffer(di, tmpbuffer, PAGE_SIZE); if (error) goto fail; /* Dump msgbuf up front */ error = write_buffer(di, (char *)msgbufp->msg_ptr, round_page(msgbufp->msg_size)); if (error) goto fail; /* Dump bitmap */ error = write_buffer(di, (char *)vm_page_dump, round_page(vm_page_dump_size)); if (error) goto fail; /* Dump kernel page table pages */ for (va = VM_MIN_KERNEL_ADDRESS; va < kernel_vm_end; va += NBPDR) { pte = pmap_pte(kernel_pmap, va); KASSERT(pte != NULL, ("pte for %jx is NULL", (uintmax_t)va)); if (!count) { prev_pte = (vm_offset_t)pte; count++; } else { if ((vm_offset_t)pte == (prev_pte + count * PAGE_SIZE)) count++; else { error = write_buffer(di, (char*)prev_pte, count * PAGE_SIZE); if (error) goto fail; count = 1; prev_pte = (vm_offset_t)pte; } } } if (count) { error = write_buffer(di, (char*)prev_pte, count * PAGE_SIZE); if (error) goto fail; count = 0; prev_pte = 0; } /* Dump memory chunks page by page*/ for (i = 0; i < vm_page_dump_size / sizeof(*vm_page_dump); i++) { bits = vm_page_dump[i]; while (bits) { bit = ffs(bits) - 1; pa = (((uint64_t)i * sizeof(*vm_page_dump) * NBBY) + bit) * PAGE_SIZE; dump_va = pmap_kenter_temporary(pa, 0); error = write_buffer(di, dump_va, PAGE_SIZE); if (error) goto fail; pmap_kenter_temporary_free(pa); bits &= ~(1ul << bit); } } /* Dump trailer */ error = dump_write(di, &kdh, 0, dumplo, sizeof(kdh)); if (error) goto fail; dumplo += sizeof(kdh); /* Signal completion, signoff and exit stage left. */ dump_write(di, NULL, 0, 0, 0); printf("\nDump complete\n"); return (0); fail: if (error < 0) error = -error; if (error == ECANCELED) printf("\nDump aborted\n"); else if (error == ENOSPC) printf("\nDump failed. Partition too small.\n"); else printf("\n** DUMP FAILED (ERROR %d) **\n", error); return (error); } Index: head/sys/mips/mips/nexus.c =================================================================== --- head/sys/mips/mips/nexus.c (revision 295879) +++ head/sys/mips/mips/nexus.c (revision 295880) @@ -1,588 +1,587 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. 
makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. * * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* * This code implements a `root nexus' for MIPS Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests and memory address space. */ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #ifdef MIPS_INTRNG #include #else #include #endif #ifdef FDT #include #include "ofw_bus_if.h" #endif #undef NEXUS_DEBUG #ifdef NEXUS_DEBUG #define dprintf printf #else #define dprintf(x, arg...) #endif /* NEXUS_DEBUG */ #define NUM_MIPS_IRQS 6 static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); struct nexus_device { struct resource_list nx_resources; }; #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) static struct rman irq_rman; static struct rman mem_rman; static struct resource * nexus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static device_t nexus_add_child(device_t, u_int, const char *, int); static int nexus_attach(device_t); static void nexus_delete_resource(device_t, device_t, int, int); static struct resource_list * nexus_get_reslist(device_t, device_t); static int nexus_get_resource(device_t, device_t, int, int, rman_res_t *, rman_res_t *); static int nexus_print_child(device_t, device_t); static int nexus_print_all_resources(device_t dev); static int nexus_probe(device_t); static int nexus_release_resource(device_t, device_t, int, int, struct resource *); static int nexus_set_resource(device_t, device_t, int, int, rman_res_t, rman_res_t); static int nexus_activate_resource(device_t, device_t, int, int, struct resource *); static int nexus_deactivate_resource(device_t, device_t, int, int, struct resource *); static void nexus_hinted_child(device_t, const char *, int); static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); static int nexus_teardown_intr(device_t, device_t, struct resource *, void *); #ifdef MIPS_INTRNG #ifdef SMP static int nexus_bind_intr(device_t, device_t, struct resource *, int); #endif #ifdef FDT static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr); #endif static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr); static int nexus_config_intr(device_t dev, 
int irq, enum intr_trigger trig, enum intr_polarity pol); #endif static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_probe), DEVMETHOD(device_attach, nexus_attach), /* Bus interface */ DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_delete_resource, nexus_delete_resource), DEVMETHOD(bus_get_resource, nexus_get_resource), DEVMETHOD(bus_get_resource_list, nexus_get_reslist), DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_release_resource, nexus_release_resource), DEVMETHOD(bus_set_resource, nexus_set_resource), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), DEVMETHOD(bus_activate_resource,nexus_activate_resource), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_hinted_child, nexus_hinted_child), #ifdef MIPS_INTRNG DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_describe_intr, nexus_describe_intr), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif #ifdef FDT DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), #endif #endif { 0, 0 } }; static driver_t nexus_driver = { "nexus", nexus_methods, 1 /* no softc */ }; static devclass_t nexus_devclass; static int nexus_probe(device_t dev) { device_set_desc(dev, "MIPS32 root nexus"); irq_rman.rm_start = 0; irq_rman.rm_end = NUM_MIPS_IRQS - 1; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Hardware IRQs"; if (rman_init(&irq_rman) != 0 || rman_manage_region(&irq_rman, 0, NUM_MIPS_IRQS - 1) != 0) { panic("%s: irq_rman", __func__); } mem_rman.rm_start = 0; mem_rman.rm_end = ~0ul; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "Memory addresses"; if (rman_init(&mem_rman) != 0 || rman_manage_region(&mem_rman, 0, ~0) != 0) { panic("%s: mem_rman", __func__); } return (0); } static int nexus_attach(device_t dev) { bus_generic_probe(dev); bus_enumerate_hinted_children(dev); bus_generic_attach(dev); return (0); } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += nexus_print_all_resources(child); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += printf(" on %s\n", device_get_nameunit(bus)); return (retval); } static int nexus_print_all_resources(device_t dev) { struct nexus_device *ndev = DEVTONX(dev); struct resource_list *rl = &ndev->nx_resources; int retval = 0; if (STAILQ_FIRST(rl)) retval += printf(" at"); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return (0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); if (child == NULL) { device_printf(bus, "failed to add child: %s%d\n", name, unit); return (0); } /* should we free this in nexus_child_detached? */ device_set_ivars(child, ndev); return (child); } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. * (Exceptions include footbridge.) 
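 * Requests for the "default" range are resolved from the child's
 * resource list, i.e. from values installed earlier through
 * bus_set_resource() (typically by nexus_hinted_child()); explicit
 * start/end requests go straight to the appropriate rman.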
*/ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct nexus_device *ndev = DEVTONX(child); struct resource *rv; struct resource_list_entry *rle; struct rman *rm; int isdefault, needactivate, passthrough; dprintf("%s: entry (%p, %p, %d, %p, %p, %p, %ld, %d)\n", __func__, bus, child, type, rid, (void *)(intptr_t)start, (void *)(intptr_t)end, count, flags); dprintf("%s: requested rid is %d\n", __func__, *rid); isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); needactivate = flags & RF_ACTIVE; passthrough = (device_get_parent(child) != bus); rle = NULL; /* * If this is an allocation of the "default" range for a given RID, * and we know what the resources for this device are (ie. they aren't * maintained by a child bus), then work out the start/end values. */ if (isdefault) { rle = resource_list_find(&ndev->nx_resources, type, *rid); if (rle == NULL) return (NULL); if (rle->res != NULL) { panic("%s: resource entry is busy", __func__); } start = rle->start; end = rle->end; count = rle->count; } switch (type) { case SYS_RES_IRQ: rm = &irq_rman; break; case SYS_RES_MEMORY: rm = &mem_rman; break; default: printf("%s: unknown resource type %d\n", __func__, type); return (0); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == 0) { printf("%s: could not reserve resource for %s\n", __func__, device_get_nameunit(child)); return (0); } rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { printf("%s: could not activate resource\n", __func__); rman_release_resource(rv); return (0); } } return (rv); } static struct resource_list * nexus_get_reslist(device_t dev, device_t child) { struct nexus_device *ndev = DEVTONX(child); return (&ndev->nx_resources); } static int nexus_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; struct resource_list_entry *rle; dprintf("%s: entry (%p, %p, %d, %d, %p, %ld)\n", __func__, dev, child, type, rid, (void *)(intptr_t)start, count); rle = resource_list_add(rl, type, rid, start, start + count - 1, count); if (rle == NULL) return (ENXIO); return (0); } static int nexus_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) return(ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return (0); } static void nexus_delete_resource(device_t dev, device_t child, int type, int rid) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; dprintf("%s: entry\n", __func__); resource_list_delete(rl, type, rid); } static int nexus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { dprintf("%s: entry\n", __func__); if (rman_get_flags(r) & RF_ACTIVE) { int error = bus_deactivate_resource(child, type, rid, r); if (error) return error; } return (rman_release_resource(r)); } static int nexus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { void *vaddr; vm_paddr_t paddr; vm_size_t psize; int err; /* * If this is a memory resource, use pmap_mapdev to map it. 
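 * SYS_RES_IOPORT gets the same treatment, since MIPS has no separate
 * I/O port space; the mapping is established with bus_space_map() on
 * mips_bus_space_generic and the resulting KVA is recorded as both the
 * resource's virtual address and its bus handle.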
*/ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { paddr = rman_get_start(r); psize = rman_get_size(r); rman_set_bustag(r, mips_bus_space_generic); err = bus_space_map(rman_get_bustag(r), paddr, psize, 0, (bus_space_handle_t *)&vaddr); if (err != 0) { rman_deactivate_resource(r); return (err); } rman_set_virtual(r, vaddr); rman_set_bushandle(r, (bus_space_handle_t)(uintptr_t)vaddr); } return (rman_activate_resource(r)); } static int nexus_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { bus_space_handle_t vaddr; bus_size_t psize; vaddr = rman_get_bushandle(r); if (type == SYS_RES_MEMORY && vaddr != 0) { psize = (bus_size_t)rman_get_size(r); bus_space_unmap(rman_get_bustag(r), vaddr, psize); rman_set_virtual(r, NULL); rman_set_bushandle(r, 0); } return (rman_deactivate_resource(r)); } static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { int irq; #ifdef MIPS_INTRNG for (irq = rman_get_start(res); irq <= rman_get_end(res); irq++) { intr_irq_add_handler(child, filt, intr, arg, irq, flags, cookiep); } #else register_t s; s = intr_disable(); irq = rman_get_start(res); if (irq >= NUM_MIPS_IRQS) { intr_restore(s); return (0); } cpu_establish_hardintr(device_get_nameunit(child), filt, intr, arg, irq, flags, cookiep); intr_restore(s); #endif return (0); } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { #ifdef MIPS_INTRNG return (intr_irq_remove_handler(child, rman_get_start(r), ih)); #else printf("Unimplemented %s at %s:%d\n", __func__, __FILE__, __LINE__); return (0); #endif } #ifdef MIPS_INTRNG static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { return (intr_irq_config(irq, trig, pol)); } static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { return (intr_irq_describe(rman_get_start(irq), cookie, descr)); } #ifdef SMP static int nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { return (intr_irq_bind(rman_get_start(irq), cpu)); } #endif #ifdef FDT static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *intr) { return (intr_fdt_map_irq(iparent, intr, icells)); } #endif #endif /* MIPS_INTRNG */ static void nexus_hinted_child(device_t bus, const char *dname, int dunit) { device_t child; long maddr; int msize; int order; int result; int irq; int mem_hints_count; if ((resource_int_value(dname, dunit, "order", &order)) != 0) order = 1000; child = BUS_ADD_CHILD(bus, order, dname, dunit); if (child == NULL) return; /* * Set hard-wired resources for hinted child using * specific RIDs. 
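 * A hinted child is described with the usual device.hints syntax; for
 * example (the device name and values below are purely illustrative):
 *
 *   hint.mydev.0.at="nexus0"
 *   hint.mydev.0.maddr=0x1f000000
 *   hint.mydev.0.msize=0x1000
 *   hint.mydev.0.irq=4
 *
 * maddr and msize (both must be present) become a SYS_RES_MEMORY entry
 * and irq a SYS_RES_IRQ entry, each at RID 0; an optional "order" hint
 * controls the attach order (default 1000).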
*/ mem_hints_count = 0; if (resource_long_value(dname, dunit, "maddr", &maddr) == 0) mem_hints_count++; if (resource_int_value(dname, dunit, "msize", &msize) == 0) mem_hints_count++; /* check if all info for mem resource has been provided */ if ((mem_hints_count > 0) && (mem_hints_count < 2)) { printf("Either maddr or msize hint is missing for %s%d\n", dname, dunit); } else if (mem_hints_count) { dprintf("%s: discovered hinted child %s at maddr %p(%d)\n", __func__, device_get_nameunit(child), (void *)(intptr_t)maddr, msize); result = bus_set_resource(child, SYS_RES_MEMORY, 0, maddr, msize); if (result != 0) { device_printf(bus, "warning: bus_set_resource() failed\n"); } } if (resource_int_value(dname, dunit, "irq", &irq) == 0) { result = bus_set_resource(child, SYS_RES_IRQ, 0, irq, 1); if (result != 0) device_printf(bus, "warning: bus_set_resource() failed\n"); } } EARLY_DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_EARLY); Index: head/sys/mips/nlm/xlp_simplebus.c =================================================================== --- head/sys/mips/nlm/xlp_simplebus.c (revision 295879) +++ head/sys/mips/nlm/xlp_simplebus.c (revision 295880) @@ -1,323 +1,322 @@ /*- * Copyright (c) 2015 Broadcom Corporation * (based on sys/dev/fdt/simplebus.c) * Copyright (c) 2013 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include #include #include #include /* flash memory region for chipselects */ #define GBU_MEM_BASE 0x16000000UL #define GBU_MEM_LIMIT 0x17ffffffUL /* * Device registers in pci ecfg memory region for devices without regular PCI BARs */ #define PCI_ECFG_BASE XLP_DEFAULT_IO_BASE #define PCI_ECFG_LIMIT (XLP_DEFAULT_IO_BASE + 0x0fffffff) /* * Bus interface. 
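 * Only the methods that need XLP-specific behaviour are overridden
 * here; everything else is inherited from the generic simplebus driver
 * via DEFINE_CLASS_1() below.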
*/ static int xlp_simplebus_probe(device_t dev); static struct resource *xlp_simplebus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int xlp_simplebus_activate_resource(device_t, device_t, int, int, struct resource *); static int xlp_simplebus_setup_intr(device_t, device_t, struct resource *, int, driver_filter_t *, driver_intr_t *, void *, void **); /* * ofw_bus interface */ static int xlp_simplebus_ofw_map_intr(device_t, device_t, phandle_t, int, pcell_t *); static devclass_t simplebus_devclass; static device_method_t xlp_simplebus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xlp_simplebus_probe), DEVMETHOD(bus_alloc_resource, xlp_simplebus_alloc_resource), DEVMETHOD(bus_activate_resource, xlp_simplebus_activate_resource), DEVMETHOD(bus_setup_intr, xlp_simplebus_setup_intr), DEVMETHOD(ofw_bus_map_intr, xlp_simplebus_ofw_map_intr), DEVMETHOD_END }; DEFINE_CLASS_1(simplebus, xlp_simplebus_driver, xlp_simplebus_methods, sizeof(struct simplebus_softc), simplebus_driver); DRIVER_MODULE(xlp_simplebus, ofwbus, xlp_simplebus_driver, simplebus_devclass, 0, 0); static struct rman irq_rman, port_rman, mem_rman, pci_ecfg_rman, gbu_rman; static void xlp_simplebus_init_resources(void) { irq_rman.rm_start = 0; irq_rman.rm_end = 255; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "PCI Mapped Interrupts"; if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, 255)) panic("xlp_simplebus_init_resources irq_rman"); port_rman.rm_start = 0; port_rman.rm_end = ~0ul; port_rman.rm_type = RMAN_ARRAY; port_rman.rm_descr = "I/O ports"; if (rman_init(&port_rman) || rman_manage_region(&port_rman, PCIE_IO_BASE, PCIE_IO_LIMIT)) panic("xlp_simplebus_init_resources port_rman"); mem_rman.rm_start = 0; mem_rman.rm_end = ~0ul; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, PCIE_MEM_BASE, PCIE_MEM_LIMIT)) panic("xlp_simplebus_init_resources mem_rman"); pci_ecfg_rman.rm_start = 0; pci_ecfg_rman.rm_end = ~0ul; pci_ecfg_rman.rm_type = RMAN_ARRAY; pci_ecfg_rman.rm_descr = "PCI ECFG IO"; if (rman_init(&pci_ecfg_rman) || rman_manage_region(&pci_ecfg_rman, PCI_ECFG_BASE, PCI_ECFG_LIMIT)) panic("xlp_simplebus_init_resources pci_ecfg_rman"); gbu_rman.rm_start = 0; gbu_rman.rm_end = ~0ul; gbu_rman.rm_type = RMAN_ARRAY; gbu_rman.rm_descr = "Flash region"; if (rman_init(&gbu_rman) || rman_manage_region(&gbu_rman, GBU_MEM_BASE, GBU_MEM_LIMIT)) panic("xlp_simplebus_init_resources gbu_rman"); } static int xlp_simplebus_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); /* * FDT data puts a "simple-bus" compatible string on many things that * have children but aren't really busses in our world. Without a * ranges property we will fail to attach, so just fail to probe too. 
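 * As an exception, a node whose device_type property is "soc" is
 * accepted even without the compatible/ranges pair (see the test
 * below).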
*/ if (!(ofw_bus_is_compatible(dev, "simple-bus") && ofw_bus_has_prop(dev, "ranges")) && (ofw_bus_get_type(dev) == NULL || strcmp(ofw_bus_get_type(dev), "soc") != 0)) return (ENXIO); xlp_simplebus_init_resources(); device_set_desc(dev, "XLP SoC bus"); return (BUS_PROBE_SPECIFIC); } static struct resource * xlp_simplebus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct rman *rm; struct resource *rv; struct resource_list_entry *rle; struct simplebus_softc *sc; struct simplebus_devinfo *di; bus_space_tag_t bustag; int j, isdefault, passthrough, needsactivate; passthrough = (device_get_parent(child) != bus); needsactivate = flags & RF_ACTIVE; sc = device_get_softc(bus); di = device_get_ivars(child); rle = NULL; bustag = NULL; if (!passthrough) { isdefault = RMAN_IS_DEFAULT_RANGE(start, end); if (isdefault) { rle = resource_list_find(&di->rl, type, *rid); if (rle == NULL) return (NULL); if (rle->res != NULL) panic("%s: resource entry is busy", __func__); start = rle->start; count = ulmax(count, rle->count); end = ulmax(rle->end, start + count - 1); } if (type == SYS_RES_MEMORY) { /* Remap through ranges property */ for (j = 0; j < sc->nranges; j++) { if (start >= sc->ranges[j].bus && end < sc->ranges[j].bus + sc->ranges[j].size) { start -= sc->ranges[j].bus; start += sc->ranges[j].host; end -= sc->ranges[j].bus; end += sc->ranges[j].host; break; } } if (j == sc->nranges && sc->nranges != 0) { if (bootverbose) device_printf(bus, "Could not map resource " "%#lx-%#lx\n", start, end); return (NULL); } } } switch (type) { case SYS_RES_IRQ: rm = &irq_rman; break; case SYS_RES_IOPORT: rm = &port_rman; bustag = rmi_bus_space; break; case SYS_RES_MEMORY: if (start >= GBU_MEM_BASE && end <= GBU_MEM_LIMIT) { rm = &gbu_rman; bustag = rmi_bus_space; } else if (start >= PCI_ECFG_BASE && end <= PCI_ECFG_LIMIT) { rm = &pci_ecfg_rman; bustag = rmi_uart_bus_space; } else if (start >= PCIE_MEM_BASE && end <= PCIE_MEM_LIMIT) { rm = &mem_rman; bustag = rmi_bus_space; } else { if (bootverbose) device_printf(bus, "Invalid MEM range" "%#lx-%#lx\n", start, end); return (NULL); } break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == 0) { device_printf(bus, "%s: could not reserve resource for %s\n", __func__, device_get_nameunit(child)); return (NULL); } rman_set_rid(rv, *rid); if (bustag != NULL) rman_set_bustag(rv, bustag); if (needsactivate) { if (bus_activate_resource(child, type, *rid, rv)) { device_printf(bus, "%s: could not activate resource\n", __func__); rman_release_resource(rv); return (NULL); } } return (rv); } static int xlp_simplebus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { void *vaddr; vm_paddr_t paddr; vm_size_t psize; /* * If this is a memory resource, use pmap_mapdev to map it. 
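 * I/O port resources are handled identically; the KVA returned by
 * pmap_mapdev() is recorded as both the resource's virtual address and
 * its bus handle, the bus tag having already been chosen at allocation
 * time.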
*/ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { paddr = rman_get_start(r); psize = rman_get_size(r); vaddr = pmap_mapdev(paddr, psize); rman_set_virtual(r, vaddr); rman_set_bushandle(r, (bus_space_handle_t)(uintptr_t)vaddr); } return (rman_activate_resource(r)); } static int xlp_simplebus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { register_t s; int irq; /* setup irq */ s = intr_disable(); irq = rman_get_start(res); cpu_establish_hardintr(device_get_nameunit(child), filt, intr, arg, irq, flags, cookiep); intr_restore(s); return (0); } static int xlp_simplebus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells, pcell_t *irq) { return ((int)irq[0]); } Index: head/sys/mips/rt305x/rt305x_pci.c =================================================================== --- head/sys/mips/rt305x/rt305x_pci.c (revision 295879) +++ head/sys/mips/rt305x/rt305x_pci.c (revision 295880) @@ -1,955 +1,954 @@ /*- * Copyright (c) 2015 Stanislav Galabov. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * This is based on the pci allocator code from sys/dev/arm/mv/: * * Copyright (c) 2008 MARVELL INTERNATIONAL LTD. * Copyright (c) 2010 The FreeBSD Foundation * Copyright (c) 2010-2012 Semihalf * All rights reserved. * * Developed by Semihalf. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include "pcib_if.h" #include #include #include struct mtx rt305x_pci_mtx; MTX_SYSINIT(rt305x_pci_mtx, &rt305x_pci_mtx, "rt305x PCI/PCIe mutex", MTX_SPIN); struct rt305x_pci_softc { device_t sc_dev; bus_space_tag_t sc_bst; bus_space_handle_t sc_bsh; int sc_busno; struct rman sc_mem_rman; struct rman sc_io_rman; struct rman sc_irq_rman; bus_addr_t sc_mem_base; bus_addr_t sc_mem_size; uint32_t sc_mem_map[(256*1024*1024) / (PCI_MIN_MEM_ALLOC * BITS_PER_UINT32)]; bus_addr_t sc_io_base; bus_addr_t sc_io_size; uint32_t sc_io_map[(16*1024*1024) / (PCI_MIN_IO_ALLOC * BITS_PER_UINT32)]; struct intr_event *sc_eventstab[RT305X_PCI_NIRQS]; mips_intrcnt_t sc_intr_counter[RT305X_PCI_NIRQS]; int pcie_link_status; }; static void rt305x_pci_phy_init(device_t); static void rt305x_pci_init(device_t); static int rt305x_pcib_init(device_t, int, int); static int rt305x_pci_intr(void *); static void rt305x_pci_dump_regs(device_t); static struct rt305x_pci_softc *rt_sc = NULL; static int rt305x_pci_probe(device_t dev) { return (BUS_PROBE_NOWILDCARD); } static int rt305x_pci_attach(device_t dev) { struct rt305x_pci_softc *sc = device_get_softc(dev); rt_sc = sc; sc->sc_dev = dev; sc->sc_mem_base = PCIE_MEM_BASE; sc->sc_mem_size = 0x10000000; sc->sc_io_base = PCIE_IO_BASE; sc->sc_io_size = 0x10000; sc->sc_bsh = MIPS_PHYS_TO_KSEG1(PCIE_BASE); sc->sc_bst = mips_bus_space_generic; sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "rt305x pci memory window"; if (rman_init(&sc->sc_mem_rman) != 0 || rman_manage_region(&sc->sc_mem_rman, sc->sc_mem_base, sc->sc_mem_base + sc->sc_mem_size - 1) != 0) { panic("%s: failed to set up memory rman", __FUNCTION__); } sc->sc_io_rman.rm_type = RMAN_ARRAY; sc->sc_io_rman.rm_descr = "rt305x pci io window"; if (rman_init(&sc->sc_io_rman) != 0 || rman_manage_region(&sc->sc_io_rman, sc->sc_io_base, sc->sc_io_base + sc->sc_io_size - 1) != 0) { panic("%s: failed to set up io rman", __FUNCTION__); } sc->sc_irq_rman.rm_type = RMAN_ARRAY; sc->sc_irq_rman.rm_descr = "rt305x pci irqs"; if (rman_init(&sc->sc_irq_rman) != 0 || rman_manage_region(&sc->sc_irq_rman, RT305X_PCIE0_IRQ, RT305X_PCIE0_IRQ) != 0) { panic("%s: failed to set up irq rman", __FUNCTION__); } cpu_establish_hardintr("pci", rt305x_pci_intr, NULL, sc, RT305X_PCI_INTR_PIN, INTR_TYPE_MISC | INTR_EXCL, NULL); rt305x_pci_phy_init(dev); rt305x_pci_init(dev); rt305x_pci_dump_regs(dev); rt305x_pcib_init(dev, 0, PCI_SLOTMAX); device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static int rt305x_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct rt305x_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_DOMAIN: *result = device_get_unit(dev); return (0); case PCIB_IVAR_BUS: *result = sc->sc_busno; return (0); } return (ENOENT); } static int rt305x_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t result) { struct rt305x_pci_softc *sc = device_get_softc(dev); switch (which) { case PCIB_IVAR_BUS: sc->sc_busno = result; return (0); } return (ENOENT); } static struct resource * rt305x_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct rt305x_pci_softc *sc = device_get_softc(bus); struct resource *rv; struct rman *rm; vm_offset_t va; 
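	/*
	 * Pick the rman backing this resource type.  Note that memory and
	 * I/O port resources are mapped here at allocation time
	 * (pmap_mapdev() for memory, a KSEG1 address for I/O ports) rather
	 * than in activate_resource(), which only calls
	 * rman_activate_resource().
	 */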
switch (type) { case SYS_RES_IRQ: rm = &sc->sc_irq_rman; break; case SYS_RES_IOPORT: rm = &sc->sc_io_rman; break; case SYS_RES_MEMORY: rm = &sc->sc_mem_rman; break; default: return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if (type != SYS_RES_IRQ) { if (type == SYS_RES_MEMORY) { va = (vm_offset_t)pmap_mapdev(start, count); } else if (type == SYS_RES_IOPORT){ va = (vm_offset_t)MIPS_PHYS_TO_KSEG1(start); } rman_set_bushandle(rv, va); rman_set_virtual(rv, (void *)va); rman_set_bustag(rv, mips_bus_space_generic); } if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return (NULL); } } return (rv); } static int rt305x_pci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { return rman_activate_resource(r); } static inline int rt305x_idx_to_irq(int idx) { return ((idx == 0) ? RT305X_PCIE0_IRQ : (idx == 1) ? RT305X_PCIE1_IRQ : (idx == 2) ? RT305X_PCIE2_IRQ : -1); } static inline int rt305x_irq_to_idx(int irq) { return ((irq == RT305X_PCIE0_IRQ) ? 0 : (irq == RT305X_PCIE1_IRQ) ? 1 : (irq == RT305X_PCIE2_IRQ) ? 2 : -1); } static void rt305x_pci_mask_irq(void *source) { RT_WRITE32(rt_sc, RT305X_PCI_PCIENA, RT_READ32(rt_sc, RT305X_PCI_PCIENA) & ~(1<<((int)source))); } static void rt305x_pci_unmask_irq(void *source) { RT_WRITE32(rt_sc, RT305X_PCI_PCIENA, RT_READ32(rt_sc, RT305X_PCI_PCIENA) | (1<<((int)source))); } static int rt305x_pci_setup_intr(device_t bus, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *handler, void *arg, void **cookiep) { struct rt305x_pci_softc *sc = device_get_softc(bus); struct intr_event *event; int irq, error, irqidx; irq = rman_get_start(ires); if ((irqidx = rt305x_irq_to_idx(irq)) == -1) panic("%s: bad irq %d", __FUNCTION__, irq); event = sc->sc_eventstab[irqidx]; if (event == NULL) { error = intr_event_create(&event, (void *)irq, 0, irq, rt305x_pci_mask_irq, rt305x_pci_unmask_irq, NULL, NULL, "pci intr%d:", irq); if (error == 0) { sc->sc_eventstab[irqidx] = event; sc->sc_intr_counter[irqidx] = mips_intrcnt_create(event->ie_name); } else return (error); } intr_event_add_handler(event, device_get_nameunit(child), filt, handler, arg, intr_priority(flags), flags, cookiep); mips_intrcnt_setname(sc->sc_intr_counter[irqidx], event->ie_fullname); rt305x_pci_unmask_irq((void*)irq); return (0); } static int rt305x_pci_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { struct rt305x_pci_softc *sc = device_get_softc(dev); int irq, result, irqidx; irq = rman_get_start(ires); if ((irqidx = rt305x_irq_to_idx(irq)) == -1) panic("%s: bad irq %d", __FUNCTION__, irq); if (sc->sc_eventstab[irqidx] == NULL) panic("Trying to teardown unoccupied IRQ"); rt305x_pci_mask_irq((void*)irq); result = intr_event_remove_handler(cookie); if (!result) sc->sc_eventstab[irqidx] = NULL; return (result); } static inline uint32_t rt305x_pci_make_addr(int bus, int slot, int func, int reg) { uint32_t addr; addr = (((reg & 0xf00) >> 8) << 24) | (bus << 16) | (slot << 11) | (func << 8) | (reg & 0xfc) | (1 << 31); return (addr); } static int rt305x_pci_maxslots(device_t dev) { return (PCI_SLOTMAX); } static uint32_t rt305x_pci_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int bytes) { struct rt305x_pci_softc *sc = device_get_softc(dev); uint32_t addr = 0, data = 0; if (bus == 0 && (sc->pcie_link_status & (1<pcie_link_status & (1<sc_mem_base); 
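	/*
	 * The writes below program the I/O window base and clear bit 1 of
	 * PCICFG; after a 500 ms delay, bit 0 of PCIE0_STATUS is sampled to
	 * record in pcie_link_status whether a PCIe link came up, and the
	 * host bridge BAR0/class setup follows.
	 */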
RT_WRITE32(sc, RT305X_PCI_IOBASE, sc->sc_io_base); RT_WRITE32(sc, RT305X_PCI_PCICFG, RT_READ32(sc, 0) & ~(1<<1)); DELAY(500000); if ((RT_READ32(sc, RT305X_PCI_PCIE0_STATUS) & 0x1) == 1) sc->pcie_link_status = 1; else sc->pcie_link_status = 0; RT_WRITE32(sc, RT305X_PCI_PCIE0_BAR0SETUP, 0x7FFF0001); RT_WRITE32(sc, RT305X_PCI_PCIE0_BAR1SETUP, 0x00000000); RT_WRITE32(sc, RT305X_PCI_PCIE0_IMBASEBAR0, 0x00000000); RT_WRITE32(sc, RT305X_PCI_PCIE0_CLASS, 0x06040001); tmp = rt305x_pci_read_config(dev, 0, 0, 0, 4, 4); rt305x_pci_write_config(dev, 0, 0, 0, 4, tmp | 0x7, 4); tmp = rt305x_pci_read_config(dev, 0, 0, 0, 0x70c, 4); tmp &= ~(0xff)<<8; tmp |= 0x50<<8; rt305x_pci_write_config(dev, 0, 0, 0, 0x70c, tmp, 4); tmp = rt305x_pci_read_config(dev, 0, 0, 0, 0x70c, 4); rt305x_pci_write_config(dev, 0, 0, 0, PCIR_BAR(0), 0, 4); } static inline uint32_t pcib_bit_get(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; return (map[n] & (1 << bit)); } static inline void pcib_bit_set(uint32_t *map, uint32_t bit) { uint32_t n = bit / BITS_PER_UINT32; bit = bit % BITS_PER_UINT32; map[n] |= (1 << bit); } static inline uint32_t pcib_map_check(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) if (pcib_bit_get(map, i)) return (0); return (1); } static inline void pcib_map_set(uint32_t *map, uint32_t start, uint32_t bits) { uint32_t i; for (i = start; i < start + bits; i++) pcib_bit_set(map, i); } static bus_addr_t pcib_alloc(device_t dev, uint32_t smask) { struct rt305x_pci_softc *sc = device_get_softc(dev); uint32_t bits, bits_limit, i, *map, min_alloc, size; bus_addr_t addr = 0; bus_addr_t base; if (smask & 1) { base = sc->sc_io_base; min_alloc = PCI_MIN_IO_ALLOC; bits_limit = sc->sc_io_size / min_alloc; map = sc->sc_io_map; smask &= ~0x3; } else { base = sc->sc_mem_base; min_alloc = PCI_MIN_MEM_ALLOC; bits_limit = sc->sc_mem_size / min_alloc; map = sc->sc_mem_map; smask &= ~0xF; } size = ~smask + 1; bits = size / min_alloc; for (i = 0; i + bits <= bits_limit; i+= bits) if (pcib_map_check(map, i, bits)) { pcib_map_set(map, i, bits); addr = base + (i * min_alloc); return (addr); } return (addr); } static int rt305x_pcib_init_bar(device_t dev, int bus, int slot, int func, int barno) { uint32_t addr, bar; int reg, width; reg = PCIR_BAR(barno); rt305x_pci_write_config(dev, bus, slot, func, reg, ~0, 4); bar = rt305x_pci_read_config(dev, bus, slot, func, reg, 4); if (bar == 0) return (1); /* Calculate BAR size: 64 or 32 bit (in 32-bit units) */ width = ((bar & 7) == 4) ? 2 : 1; addr = pcib_alloc(dev, bar); if (!addr) return (-1); if (bootverbose) printf("PCI %u:%u:%u: reg %x: smask=%08x: addr=%08x\n", bus, slot, func, reg, bar, addr); rt305x_pci_write_config(dev, bus, slot, func, reg, addr, 4); if (width == 2) rt305x_pci_write_config(dev, bus, slot, func, reg + 4, 0, 4); return (width); } static int rt305x_pcib_init_all_bars(device_t dev, int bus, int slot, int func, int hdrtype) { int maxbar, bar, i; maxbar = (hdrtype & PCIM_HDRTYPE) ? 
0 : 6; bar = 0; while (bar < maxbar) { i = rt305x_pcib_init_bar(dev, bus, slot, func, bar); bar += i; if (i < 0) { device_printf(dev, "PCI IO/Memory space exhausted\n"); return (ENOMEM); } } return (0); } static inline int rt305x_pci_slot_has_link(device_t dev, int slot) { struct rt305x_pci_softc *sc = device_get_softc(dev); return !!(sc->pcie_link_status & (1<sc_io_base; io_limit = io_base + sc->sc_io_size - 1; mem_base = sc->sc_mem_base; mem_limit = mem_base + sc->sc_mem_size - 1; rt305x_pci_write_config(dev, bus, slot, func, PCIR_IOBASEL_1, io_base >> 8, 1); rt305x_pci_write_config(dev, bus, slot, func, PCIR_IOBASEH_1, io_base >> 16, 2); rt305x_pci_write_config(dev, bus, slot, func, PCIR_IOLIMITL_1, io_limit >> 8, 1); rt305x_pci_write_config(dev, bus, slot, func, PCIR_IOLIMITH_1, io_limit >> 16, 2); rt305x_pci_write_config(dev, bus, slot, func, PCIR_MEMBASE_1, mem_base >> 16, 2); rt305x_pci_write_config(dev, bus, slot, func, PCIR_MEMLIMIT_1, mem_limit >> 16, 2); rt305x_pci_write_config(dev, bus, slot, func, PCIR_PMBASEL_1, 0x10, 2); rt305x_pci_write_config(dev, bus, slot, func, PCIR_PMBASEH_1, 0x0, 4); rt305x_pci_write_config(dev, bus, slot, func, PCIR_PMLIMITL_1, 0xF, 2); rt305x_pci_write_config(dev, bus, slot, func, PCIR_PMLIMITH_1, 0x0, 4); secbus = rt305x_pci_read_config(dev, bus, slot, func, PCIR_SECBUS_1, 1); if (secbus == 0) { rt305x_pci_write_config(dev, bus, slot, func, PCIR_SECBUS_1, ++cur_secbus, 1); rt305x_pci_write_config(dev, bus, slot, func, PCIR_SUBBUS_1, cur_secbus, 1); secbus = cur_secbus; } rt305x_pcib_init(dev, secbus, PCI_SLOTMAX); } static int rt305x_pcib_init(device_t dev, int bus, int maxslot) { int slot, func, maxfunc, error; uint8_t hdrtype, command, class, subclass; for (slot = 0; slot <= maxslot; slot++) { maxfunc = 0; for (func = 0; func <= maxfunc; func++) { hdrtype = rt305x_pci_read_config(dev, bus, slot, func, PCIR_HDRTYPE, 1); if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) continue; if (func == 0 && (hdrtype & PCIM_MFDEV)) maxfunc = PCI_FUNCMAX; command = rt305x_pci_read_config(dev, bus, slot, func, PCIR_COMMAND, 1); command &= ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN); rt305x_pci_write_config(dev, bus, slot, func, PCIR_COMMAND, command, 1); error = rt305x_pcib_init_all_bars(dev, bus, slot, func, hdrtype); if (error) return (error); command |= PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN | PCIM_CMD_PORTEN; rt305x_pci_write_config(dev, bus, slot, func, PCIR_COMMAND, command, 1); rt305x_pci_write_config(dev, bus, slot, func, PCIR_CACHELNSZ, 16, 1); class = rt305x_pci_read_config(dev, bus, slot, func, PCIR_CLASS, 1); subclass = rt305x_pci_read_config(dev, bus, slot, func, PCIR_SUBCLASS, 1); if (class != PCIC_BRIDGE || subclass != PCIS_BRIDGE_PCI) continue; rt305x_pcib_init_bridge(dev, bus, slot, func); } } return (0); } #define BUSY 0x80000000 #define WAITRETRY_MAX 10 #define WRITE_MODE (1<<23) #define DATA_SHIFT 0 #define ADDR_SHIFT 8 static int rt305x_wait_pci_phy_busy(struct rt305x_pci_softc *sc) { uint32_t reg_value = 0x0, retry = 0; while (1) { reg_value = RT_READ32(sc, RT305X_PCI_PHY0_CFG); if (reg_value & BUSY) DELAY(100000); else break; if (retry++ > WAITRETRY_MAX) { printf("PHY retry failed\n"); return (-1); } } return (0); } static uint32_t rt305x_pci_phy(struct rt305x_pci_softc *sc, char rwmode, uint32_t addr, uint32_t val) { uint32_t reg_value = 0x0; rt305x_wait_pci_phy_busy(sc); if (rwmode == 'w') { reg_value |= WRITE_MODE; reg_value |= (val) << DATA_SHIFT; } reg_value |= (addr) << ADDR_SHIFT; RT_WRITE32(sc, RT305X_PCI_PHY0_CFG, reg_value); DELAY(1000); 
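	/*
	 * Wait for the PHY access to complete; for a read, the result is
	 * then fetched back from RT305X_PCI_PHY0_CFG.
	 */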
rt305x_wait_pci_phy_busy(sc); if (rwmode == 'r') { reg_value = RT_READ32(sc, RT305X_PCI_PHY0_CFG); return (reg_value); } return (0); } static void rt305x_pci_phy_init(device_t dev) { struct rt305x_pci_softc *sc = device_get_softc(dev); uint32_t tmp; rt305x_pci_phy(sc, 'w', 0x00, 0x80); rt305x_pci_phy(sc, 'w', 0x01, 0x04); rt305x_pci_phy(sc, 'w', 0x68, 0x84); rt305x_sysctl_set(SYSCTL_RSTCTRL, rt305x_sysctl_get(SYSCTL_RSTCTRL) | (1<<26)); rt305x_sysctl_set(SYSCTL_CLKCFG1, rt305x_sysctl_get(SYSCTL_CLKCFG1) & ~(1<<26)); tmp = rt305x_sysctl_get(SYSCTL_PPLL_CFG1); tmp &= ~(1<<19); rt305x_sysctl_set(SYSCTL_PPLL_CFG1, tmp); tmp |= (1<<31); rt305x_sysctl_set(SYSCTL_PPLL_CFG1, tmp); } static int rt305x_pci_intr(void *arg) { struct rt305x_pci_softc *sc = arg; struct intr_event *event; uint32_t reg, irq, irqidx; reg = RT_READ32(sc, RT305X_PCI_PCIINT); for (irqidx = 0; irqidx < RT305X_PCI_NIRQS; irqidx++) { irq = rt305x_idx_to_irq(irqidx); if (reg & (1<sc_eventstab[irqidx]; if (!event || TAILQ_EMPTY(&event->ie_handlers)) { if (irq != 0) printf("Stray PCI IRQ %d\n", irq); continue; } intr_event_handle(event, NULL); mips_intrcnt_inc(sc->sc_intr_counter[irqidx]); } } return (FILTER_HANDLED); } Index: head/sys/mips/sibyte/sb_zbpci.c =================================================================== --- head/sys/mips/sibyte/sb_zbpci.c (revision 295879) +++ head/sys/mips/sibyte/sb_zbpci.c (revision 295880) @@ -1,543 +1,542 @@ /*- * Copyright (c) 2009 Neelkanth Natu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include "pcib_if.h" #include "sb_bus_space.h" #include "sb_scd.h" __FBSDID("$FreeBSD$"); static struct { vm_offset_t vaddr; vm_paddr_t paddr; } zbpci_config_space[MAXCPU]; static const vm_paddr_t CFG_PADDR_BASE = 0xFE000000; static const u_long PCI_IOSPACE_ADDR = 0xFC000000; static const u_long PCI_IOSPACE_SIZE = 0x02000000; #define PCI_MATCH_BYTE_LANES_START 0x40000000 #define PCI_MATCH_BYTE_LANES_END 0x5FFFFFFF #define PCI_MATCH_BYTE_LANES_SIZE 0x20000000 #define PCI_MATCH_BIT_LANES_MASK (1 << 29) #define PCI_MATCH_BIT_LANES_START 0x60000000 #define PCI_MATCH_BIT_LANES_END 0x7FFFFFFF #define PCI_MATCH_BIT_LANES_SIZE 0x20000000 static struct rman port_rman; static int zbpci_probe(device_t dev) { device_set_desc(dev, "Broadcom/Sibyte PCI I/O Bridge"); return (0); } static int zbpci_attach(device_t dev) { int n, rid, size; vm_offset_t va; struct resource *res; /* * Reserve the physical memory window used to map PCI I/O space. */ rid = 0; res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, PCI_IOSPACE_ADDR, PCI_IOSPACE_ADDR + PCI_IOSPACE_SIZE - 1, PCI_IOSPACE_SIZE, 0); if (res == NULL) panic("Cannot allocate resource for PCI I/O space mapping."); port_rman.rm_start = 0; port_rman.rm_end = PCI_IOSPACE_SIZE - 1; port_rman.rm_type = RMAN_ARRAY; port_rman.rm_descr = "PCI I/O ports"; if (rman_init(&port_rman) != 0 || rman_manage_region(&port_rman, 0, PCI_IOSPACE_SIZE - 1) != 0) panic("%s: port_rman", __func__); /* * Reserve the physical memory that is used to read/write to the * pci config space but don't activate it. We are using a page worth * of KVA as a window over this region. */ rid = 1; size = (PCI_BUSMAX + 1) * (PCI_SLOTMAX + 1) * (PCI_FUNCMAX + 1) * 256; res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, CFG_PADDR_BASE, CFG_PADDR_BASE + size - 1, size, 0); if (res == NULL) panic("Cannot allocate resource for config space accesses."); /* * Allocate the entire "match bit lanes" address space. */ #if _BYTE_ORDER == _BIG_ENDIAN rid = 2; res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, PCI_MATCH_BIT_LANES_START, PCI_MATCH_BIT_LANES_END, PCI_MATCH_BIT_LANES_SIZE, 0); if (res == NULL) panic("Cannot allocate resource for pci match bit lanes."); #endif /* _BYTE_ORDER ==_BIG_ENDIAN */ /* * Allocate KVA for accessing PCI config space. */ va = kva_alloc(PAGE_SIZE * mp_ncpus); if (va == 0) { device_printf(dev, "Cannot allocate virtual addresses for " "config space access.\n"); return (ENOMEM); } for (n = 0; n < mp_ncpus; ++n) zbpci_config_space[n].vaddr = va + n * PAGE_SIZE; /* * Sibyte has the PCI bus hierarchy rooted at bus 0 and HT-PCI * hierarchy rooted at bus 1. */ if (device_add_child(dev, "pci", 0) == NULL) panic("zbpci_attach: could not add pci bus 0.\n"); if (device_add_child(dev, "pci", 1) == NULL) panic("zbpci_attach: could not add pci bus 1.\n"); if (bootverbose) device_printf(dev, "attached.\n"); return (bus_generic_attach(dev)); } static struct resource * zbpci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct resource *res; /* * Handle PCI I/O port resources here and pass everything else to nexus. 
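 * Port resources are carved out of the private port_rman, whose range
 * is expressed as offsets from the start of the I/O window
 * (0 .. PCI_IOSPACE_SIZE - 1); the CPU mapping is only created later,
 * in zbpci_activate_resource(), by adding PCI_IOSPACE_ADDR to that
 * offset.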
*/ if (type != SYS_RES_IOPORT) { res = bus_generic_alloc_resource(bus, child, type, rid, start, end, count, flags); return (res); } res = rman_reserve_resource(&port_rman, start, end, count, flags, child); if (res == NULL) return (NULL); rman_set_rid(res, *rid); /* Activate the resource is requested */ if (flags & RF_ACTIVE) { if (bus_activate_resource(child, type, *rid, res) != 0) { rman_release_resource(res); return (NULL); } } return (res); } static int zbpci_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { int error; void *vaddr; u_long orig_paddr, paddr, psize; paddr = rman_get_start(res); psize = rman_get_size(res); orig_paddr = paddr; #if _BYTE_ORDER == _BIG_ENDIAN /* * The CFE allocates PCI memory resources that map to the * "match byte lanes" address space. This address space works * best for DMA transfers because it does not do any automatic * byte swaps when data crosses the pci-cpu interface. * * This also makes it sub-optimal for accesses to PCI device * registers because it exposes the little-endian nature of * the PCI bus to the big-endian CPU. The Sibyte has another * address window called the "match bit lanes" window which * automatically swaps bytes when data crosses the pci-cpu * interface. * * We "assume" that any bus_space memory accesses done by the * CPU to a PCI device are register/configuration accesses and * are done through the "match bit lanes" window. Any DMA * transfers will continue to be through the "match byte lanes" * window because the PCI BAR registers will not be changed. */ if (type == SYS_RES_MEMORY) { if (paddr >= PCI_MATCH_BYTE_LANES_START && paddr + psize - 1 <= PCI_MATCH_BYTE_LANES_END) { paddr |= PCI_MATCH_BIT_LANES_MASK; rman_set_start(res, paddr); rman_set_end(res, paddr + psize - 1); } } #endif if (type != SYS_RES_IOPORT) { error = bus_generic_activate_resource(bus, child, type, rid, res); #if _BYTE_ORDER == _BIG_ENDIAN if (type == SYS_RES_MEMORY) { rman_set_start(res, orig_paddr); rman_set_end(res, orig_paddr + psize - 1); } #endif return (error); } /* * Map the I/O space resource through the memory window starting * at PCI_IOSPACE_ADDR. */ vaddr = pmap_mapdev(paddr + PCI_IOSPACE_ADDR, psize); rman_set_virtual(res, vaddr); rman_set_bustag(res, mips_bus_space_generic); rman_set_bushandle(res, (bus_space_handle_t)vaddr); return (rman_activate_resource(res)); } static int zbpci_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { int error; if (type != SYS_RES_IOPORT) return (bus_generic_release_resource(bus, child, type, rid, r)); if (rman_get_flags(r) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, r); if (error) return (error); } return (rman_release_resource(r)); } static int zbpci_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { vm_offset_t va; if (type != SYS_RES_IOPORT) { return (bus_generic_deactivate_resource(bus, child, type, rid, r)); } va = (vm_offset_t)rman_get_virtual(r); pmap_unmapdev(va, rman_get_size(r)); return (rman_deactivate_resource(r)); } static int zbpci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { switch (which) { case PCIB_IVAR_DOMAIN: *result = 0; /* single PCI domain */ return (0); case PCIB_IVAR_BUS: *result = device_get_unit(child); /* PCI bus 0 or 1 */ return (0); default: return (ENOENT); } } /* * We rely on the CFE to have configured the intline correctly to point to * one of PCI-A/PCI-B/PCI-C/PCI-D in the interupt mapper. 
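 * Consequently zbpci_route_interrupt() simply returns PCI_INVALID_IRQ
 * and leaves the firmware-assigned intline untouched.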
*/ static int zbpci_route_interrupt(device_t pcib, device_t dev, int pin) { return (PCI_INVALID_IRQ); } /* * This function is expected to be called in a critical section since it * changes the per-cpu pci config space va-to-pa mappings. */ static vm_offset_t zbpci_config_space_va(int bus, int slot, int func, int reg, int bytes) { int cpu; vm_offset_t va_page; vm_paddr_t pa, pa_page; if (bus <= PCI_BUSMAX && slot <= PCI_SLOTMAX && func <= PCI_FUNCMAX && reg <= PCI_REGMAX && (bytes == 1 || bytes == 2 || bytes == 4) && ((reg & (bytes - 1)) == 0)) { cpu = PCPU_GET(cpuid); va_page = zbpci_config_space[cpu].vaddr; pa = CFG_PADDR_BASE | (bus << 16) | (slot << 11) | (func << 8) | reg; #if _BYTE_ORDER == _BIG_ENDIAN pa = pa ^ (4 - bytes); #endif pa_page = pa & ~(PAGE_SIZE - 1); if (zbpci_config_space[cpu].paddr != pa_page) { pmap_kremove(va_page); pmap_kenter_attr(va_page, pa_page, PTE_C_UNCACHED); zbpci_config_space[cpu].paddr = pa_page; } return (va_page + (pa - pa_page)); } else { return (0); } } static uint32_t zbpci_read_config(device_t dev, u_int b, u_int s, u_int f, u_int r, int w) { uint32_t data; vm_offset_t va; critical_enter(); va = zbpci_config_space_va(b, s, f, r, w); if (va == 0) { panic("zbpci_read_config: invalid %d/%d/%d[%d] %d\n", b, s, f, r, w); } switch (w) { case 4: data = *(uint32_t *)va; break; case 2: data = *(uint16_t *)va; break; case 1: data = *(uint8_t *)va; break; default: panic("zbpci_read_config: invalid width %d\n", w); } critical_exit(); return (data); } static void zbpci_write_config(device_t d, u_int b, u_int s, u_int f, u_int r, uint32_t data, int w) { vm_offset_t va; critical_enter(); va = zbpci_config_space_va(b, s, f, r, w); if (va == 0) { panic("zbpci_write_config: invalid %d/%d/%d[%d] %d/%d\n", b, s, f, r, data, w); } switch (w) { case 4: *(uint32_t *)va = data; break; case 2: *(uint16_t *)va = data; break; case 1: *(uint8_t *)va = data; break; default: panic("zbpci_write_config: invalid width %d\n", w); } critical_exit(); } static device_method_t zbpci_methods[] ={ /* Device interface */ DEVMETHOD(device_probe, zbpci_probe), DEVMETHOD(device_attach, zbpci_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, zbpci_read_ivar), DEVMETHOD(bus_write_ivar, bus_generic_write_ivar), DEVMETHOD(bus_alloc_resource, zbpci_alloc_resource), DEVMETHOD(bus_activate_resource, zbpci_activate_resource), DEVMETHOD(bus_deactivate_resource, zbpci_deactivate_resource), DEVMETHOD(bus_release_resource, zbpci_release_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_add_child, bus_generic_add_child), /* pcib interface */ DEVMETHOD(pcib_maxslots, pcib_maxslots), DEVMETHOD(pcib_read_config, zbpci_read_config), DEVMETHOD(pcib_write_config, zbpci_write_config), DEVMETHOD(pcib_route_interrupt, zbpci_route_interrupt), { 0, 0 } }; /* * The "zbpci" class inherits from the "pcib" base class. Therefore in * addition to drivers that belong to the "zbpci" class we will also * consider drivers belonging to the "pcib" when probing children of * "zbpci". 
*/ DEFINE_CLASS_1(zbpci, zbpci_driver, zbpci_methods, 0, pcib_driver); static devclass_t zbpci_devclass; DRIVER_MODULE(zbpci, zbbus, zbpci_driver, zbpci_devclass, 0, 0); /* * Big endian bus space routines */ #if _BYTE_ORDER == _BIG_ENDIAN /* * The CPU correctly deals with the big-endian to little-endian swap if * we are accessing 4 bytes at a time. However if we want to read 1 or 2 * bytes then we need to fudge the address generated by the CPU such that * it generates the right byte enables on the PCI bus. */ static bus_addr_t sb_match_bit_lane_addr(bus_addr_t addr, int bytes) { vm_offset_t pa; pa = vtophys(addr); if (pa >= PCI_MATCH_BIT_LANES_START && pa <= PCI_MATCH_BIT_LANES_END) return (addr ^ (4 - bytes)); else return (addr); } uint8_t sb_big_endian_read8(bus_addr_t addr) { bus_addr_t addr2; addr2 = sb_match_bit_lane_addr(addr, 1); return (readb(addr2)); } uint16_t sb_big_endian_read16(bus_addr_t addr) { bus_addr_t addr2; addr2 = sb_match_bit_lane_addr(addr, 2); return (readw(addr2)); } uint32_t sb_big_endian_read32(bus_addr_t addr) { bus_addr_t addr2; addr2 = sb_match_bit_lane_addr(addr, 4); return (readl(addr2)); } void sb_big_endian_write8(bus_addr_t addr, uint8_t val) { bus_addr_t addr2; addr2 = sb_match_bit_lane_addr(addr, 1); writeb(addr2, val); } void sb_big_endian_write16(bus_addr_t addr, uint16_t val) { bus_addr_t addr2; addr2 = sb_match_bit_lane_addr(addr, 2); writew(addr2, val); } void sb_big_endian_write32(bus_addr_t addr, uint32_t val) { bus_addr_t addr2; addr2 = sb_match_bit_lane_addr(addr, 4); writel(addr2, val); } #endif /* _BIG_ENDIAN */ Index: head/sys/powerpc/aim/slb.c =================================================================== --- head/sys/powerpc/aim/slb.c (revision 295879) +++ head/sys/powerpc/aim/slb.c (revision 295880) @@ -1,543 +1,542 @@ /*- * Copyright (c) 2010 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include uintptr_t moea64_get_unique_vsid(void); void moea64_release_vsid(uint64_t vsid); static void slb_zone_init(void *); static uma_zone_t slbt_zone; static uma_zone_t slb_cache_zone; int n_slbs = 64; SYSINIT(slb_zone_init, SI_SUB_KMEM, SI_ORDER_ANY, slb_zone_init, NULL); struct slbtnode { uint16_t ua_alloc; uint8_t ua_level; /* Only 36 bits needed for full 64-bit address space. */ uint64_t ua_base; union { struct slbtnode *ua_child[16]; struct slb slb_entries[16]; } u; }; /* * For a full 64-bit address space, there are 36 bits in play in an * esid, so 8 levels, with the leaf being at level 0. * * |3333|3322|2222|2222|1111|1111|11 | | | esid * |5432|1098|7654|3210|9876|5432|1098|7654|3210| bits * +----+----+----+----+----+----+----+----+----+-------- * | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | level */ #define UAD_ROOT_LEVEL 8 #define UAD_LEAF_LEVEL 0 static inline int esid2idx(uint64_t esid, int level) { int shift; shift = level * 4; return ((esid >> shift) & 0xF); } /* * The ua_base field should have 0 bits after the first 4*(level+1) * bits; i.e. only */ #define uad_baseok(ua) \ (esid2base(ua->ua_base, ua->ua_level) == ua->ua_base) static inline uint64_t esid2base(uint64_t esid, int level) { uint64_t mask; int shift; shift = (level + 1) * 4; mask = ~((1ULL << shift) - 1); return (esid & mask); } /* * Allocate a new leaf node for the specified esid/vmhandle from the * parent node. */ static struct slb * make_new_leaf(uint64_t esid, uint64_t slbv, struct slbtnode *parent) { struct slbtnode *child; struct slb *retval; int idx; idx = esid2idx(esid, parent->ua_level); KASSERT(parent->u.ua_child[idx] == NULL, ("Child already exists!")); /* unlock and M_WAITOK and loop? */ child = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO); KASSERT(child != NULL, ("unhandled NULL case")); child->ua_level = UAD_LEAF_LEVEL; child->ua_base = esid2base(esid, child->ua_level); idx = esid2idx(esid, child->ua_level); child->u.slb_entries[idx].slbv = slbv; child->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; setbit(&child->ua_alloc, idx); retval = &child->u.slb_entries[idx]; /* * The above stores must be visible before the next one, so * that a lockless searcher always sees a valid path through * the tree. */ powerpc_lwsync(); idx = esid2idx(esid, parent->ua_level); parent->u.ua_child[idx] = child; setbit(&parent->ua_alloc, idx); return (retval); } /* * Allocate a new intermediate node to fit between the parent and * esid. */ static struct slbtnode* make_intermediate(uint64_t esid, struct slbtnode *parent) { struct slbtnode *child, *inter; int idx, level; idx = esid2idx(esid, parent->ua_level); child = parent->u.ua_child[idx]; KASSERT(esid2base(esid, child->ua_level) != child->ua_base, ("No need for an intermediate node?")); /* * Find the level where the existing child and our new esid * meet. It must be lower than parent->ua_level or we would * have chosen a different index in parent. */ level = child->ua_level + 1; while (esid2base(esid, level) != esid2base(child->ua_base, level)) level++; KASSERT(level < parent->ua_level, ("Found splitting level %d for %09jx and %09jx, " "but it's the same as %p's", level, esid, child->ua_base, parent)); /* unlock and M_WAITOK and loop? */ inter = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO); KASSERT(inter != NULL, ("unhandled NULL case")); /* Set up intermediate node to point to child ... 
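 *
 * Illustrative arithmetic for esid2idx()/esid2base(), assuming a 36-bit
 * esid of 0x123456789:
 *
 *	esid2idx(0x123456789, 0)  == 0x9	    (lowest nibble)
 *	esid2idx(0x123456789, 3)  == 0x6	    ((esid >> 12) & 0xF)
 *	esid2base(0x123456789, 0) == 0x123456780    (low 4 bits masked)
 *	esid2base(0x123456789, 3) == 0x123450000    (low 16 bits masked)
 *
 * Two esids share an ancestor at the lowest level where their esid2base()
 * values coincide, which is exactly the splitting level found above.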
*/ inter->ua_level = level; inter->ua_base = esid2base(esid, inter->ua_level); idx = esid2idx(child->ua_base, inter->ua_level); inter->u.ua_child[idx] = child; setbit(&inter->ua_alloc, idx); powerpc_lwsync(); /* Set up parent to point to intermediate node ... */ idx = esid2idx(inter->ua_base, parent->ua_level); parent->u.ua_child[idx] = inter; setbit(&parent->ua_alloc, idx); return (inter); } uint64_t kernel_va_to_slbv(vm_offset_t va) { uint64_t slbv; /* Set kernel VSID to deterministic value */ slbv = (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)) << SLBV_VSID_SHIFT; /* Figure out if this is a large-page mapping */ if (hw_direct_map && va < VM_MIN_KERNEL_ADDRESS) { /* * XXX: If we have set up a direct map, assumes * all physical memory is mapped with large pages. */ if (mem_valid(va, 0) == 0) slbv |= SLBV_L; } return (slbv); } struct slb * user_va_to_slb_entry(pmap_t pm, vm_offset_t va) { uint64_t esid = va >> ADDR_SR_SHFT; struct slbtnode *ua; int idx; ua = pm->pm_slb_tree_root; for (;;) { KASSERT(uad_baseok(ua), ("uad base %016jx level %d bad!", ua->ua_base, ua->ua_level)); idx = esid2idx(esid, ua->ua_level); /* * This code is specific to ppc64 where a load is * atomic, so no need for atomic_load macro. */ if (ua->ua_level == UAD_LEAF_LEVEL) return ((ua->u.slb_entries[idx].slbe & SLBE_VALID) ? &ua->u.slb_entries[idx] : NULL); /* * The following accesses are implicitly ordered under the POWER * ISA by load dependencies (the store ordering is provided by * the powerpc_lwsync() calls elsewhere) and so are run without * barriers. */ ua = ua->u.ua_child[idx]; if (ua == NULL || esid2base(esid, ua->ua_level) != ua->ua_base) return (NULL); } return (NULL); } uint64_t va_to_vsid(pmap_t pm, vm_offset_t va) { struct slb *entry; /* Shortcut kernel case */ if (pm == kernel_pmap) return (KERNEL_VSID((uintptr_t)va >> ADDR_SR_SHFT)); /* * If there is no vsid for this VA, we need to add a new entry * to the PMAP's segment table. */ entry = user_va_to_slb_entry(pm, va); if (entry == NULL) return (allocate_user_vsid(pm, (uintptr_t)va >> ADDR_SR_SHFT, 0)); return ((entry->slbv & SLBV_VSID_MASK) >> SLBV_VSID_SHIFT); } uint64_t allocate_user_vsid(pmap_t pm, uint64_t esid, int large) { uint64_t vsid, slbv; struct slbtnode *ua, *next, *inter; struct slb *slb; int idx; KASSERT(pm != kernel_pmap, ("Attempting to allocate a kernel VSID")); PMAP_LOCK_ASSERT(pm, MA_OWNED); vsid = moea64_get_unique_vsid(); slbv = vsid << SLBV_VSID_SHIFT; if (large) slbv |= SLBV_L; ua = pm->pm_slb_tree_root; /* Descend to the correct leaf or NULL pointer. */ for (;;) { KASSERT(uad_baseok(ua), ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level)); idx = esid2idx(esid, ua->ua_level); if (ua->ua_level == UAD_LEAF_LEVEL) { ua->u.slb_entries[idx].slbv = slbv; eieio(); ua->u.slb_entries[idx].slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; setbit(&ua->ua_alloc, idx); slb = &ua->u.slb_entries[idx]; break; } next = ua->u.ua_child[idx]; if (next == NULL) { slb = make_new_leaf(esid, slbv, ua); break; } /* * Check if the next item down has an okay ua_base. * If not, we need to allocate an intermediate node. */ if (esid2base(esid, next->ua_level) != next->ua_base) { inter = make_intermediate(esid, ua); slb = make_new_leaf(esid, slbv, inter); break; } ua = next; } /* * Someone probably wants this soon, and it may be a wired * SLB mapping, so pre-spill this entry. 
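 *
 * Illustrative note: the leaf update above follows the lockless-reader
 * discipline used throughout this file,
 *
 *	entry->slbv = slbv;				(payload first)
 *	eieio();					(order the two stores)
 *	entry->slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;
 *
 * so a concurrent walker in user_va_to_slb_entry() should never observe
 * SLBE_VALID paired with a stale slbv.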
*/ eieio(); slb_insert_user(pm, slb); return (vsid); } void free_vsid(pmap_t pm, uint64_t esid, int large) { struct slbtnode *ua; int idx; PMAP_LOCK_ASSERT(pm, MA_OWNED); ua = pm->pm_slb_tree_root; /* Descend to the correct leaf. */ for (;;) { KASSERT(uad_baseok(ua), ("uad base %09jx level %d bad!", ua->ua_base, ua->ua_level)); idx = esid2idx(esid, ua->ua_level); if (ua->ua_level == UAD_LEAF_LEVEL) { ua->u.slb_entries[idx].slbv = 0; eieio(); ua->u.slb_entries[idx].slbe = 0; clrbit(&ua->ua_alloc, idx); return; } ua = ua->u.ua_child[idx]; if (ua == NULL || esid2base(esid, ua->ua_level) != ua->ua_base) { /* Perhaps just return instead of assert? */ KASSERT(0, ("Asked to remove an entry that was never inserted!")); return; } } } static void free_slb_tree_node(struct slbtnode *ua) { int idx; for (idx = 0; idx < 16; idx++) { if (ua->ua_level != UAD_LEAF_LEVEL) { if (ua->u.ua_child[idx] != NULL) free_slb_tree_node(ua->u.ua_child[idx]); } else { if (ua->u.slb_entries[idx].slbv != 0) moea64_release_vsid(ua->u.slb_entries[idx].slbv >> SLBV_VSID_SHIFT); } } uma_zfree(slbt_zone, ua); } void slb_free_tree(pmap_t pm) { free_slb_tree_node(pm->pm_slb_tree_root); } struct slbtnode * slb_alloc_tree(void) { struct slbtnode *root; root = uma_zalloc(slbt_zone, M_NOWAIT | M_ZERO); root->ua_level = UAD_ROOT_LEVEL; return (root); } /* Lock entries mapping kernel text and stacks */ void slb_insert_kernel(uint64_t slbe, uint64_t slbv) { struct slb *slbcache; int i; /* We don't want to be preempted while modifying the kernel map */ critical_enter(); slbcache = PCPU_GET(slb); /* Check for an unused slot, abusing the user slot as a full flag */ if (slbcache[USER_SLB_SLOT].slbe == 0) { for (i = 0; i < n_slbs; i++) { if (i == USER_SLB_SLOT) continue; if (!(slbcache[i].slbe & SLBE_VALID)) goto fillkernslb; } if (i == n_slbs) slbcache[USER_SLB_SLOT].slbe = 1; } i = mftb() % n_slbs; if (i == USER_SLB_SLOT) i = (i+1) % n_slbs; fillkernslb: KASSERT(i != USER_SLB_SLOT, ("Filling user SLB slot with a kernel mapping")); slbcache[i].slbv = slbv; slbcache[i].slbe = slbe | (uint64_t)i; /* If it is for this CPU, put it in the SLB right away */ if (pmap_bootstrapped) { /* slbie not required */ __asm __volatile ("slbmte %0, %1" :: "r"(slbcache[i].slbv), "r"(slbcache[i].slbe)); } critical_exit(); } void slb_insert_user(pmap_t pm, struct slb *slb) { int i; PMAP_LOCK_ASSERT(pm, MA_OWNED); if (pm->pm_slb_len < n_slbs) { i = pm->pm_slb_len; pm->pm_slb_len++; } else { i = mftb() % n_slbs; } /* Note that this replacement is atomic with respect to trap_subr */ pm->pm_slb[i] = slb; } static void * slb_uma_real_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait) { static vm_offset_t realmax = 0; void *va; vm_page_t m; int pflags; if (realmax == 0) realmax = platform_real_maxaddr(); *flags = UMA_SLAB_PRIV; pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED; for (;;) { m = vm_page_alloc_contig(NULL, 0, pflags, 1, 0, realmax, PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT); if (m == NULL) { if (wait & M_NOWAIT) return (NULL); VM_WAIT; } else break; } va = (void *) VM_PAGE_TO_PHYS(m); if (!hw_direct_map) pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m)); if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0) bzero(va, PAGE_SIZE); return (va); } static void slb_zone_init(void *dummy) { slbt_zone = uma_zcreate("SLB tree node", sizeof(struct slbtnode), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); slb_cache_zone = uma_zcreate("SLB cache", (n_slbs + 1)*sizeof(struct slb *), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); 
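	/*
	 * Illustrative note: slb_uma_real_alloc() above bounds its
	 * vm_page_alloc_contig() request by platform_real_maxaddr(), so when
	 * the check below switches both zones to that allocator the SLB tree
	 * nodes and per-pmap SLB caches are backed by pages that stay
	 * reachable with translation disabled (a plausible reading: the SLB
	 * fault path walks these structures in real mode).
	 */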
if (platform_real_maxaddr() != VM_MAX_ADDRESS) { uma_zone_set_allocf(slb_cache_zone, slb_uma_real_alloc); uma_zone_set_allocf(slbt_zone, slb_uma_real_alloc); } } struct slb ** slb_alloc_user_cache(void) { return (uma_zalloc(slb_cache_zone, M_ZERO)); } void slb_free_user_cache(struct slb **slb) { uma_zfree(slb_cache_zone, slb); } Index: head/sys/powerpc/ofw/ofw_real.c =================================================================== --- head/sys/powerpc/ofw/ofw_real.c (revision 295879) +++ head/sys/powerpc/ofw/ofw_real.c (revision 295880) @@ -1,1102 +1,1101 @@ /* $NetBSD: Locore.c,v 1.7 2000/08/20 07:04:59 tsubai Exp $ */ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (C) 2000 Benno Rice. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY Benno Rice ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include "ofw_if.h" static int ofw_real_init(ofw_t, void *openfirm); static int ofw_real_test(ofw_t, const char *name); static phandle_t ofw_real_peer(ofw_t, phandle_t node); static phandle_t ofw_real_child(ofw_t, phandle_t node); static phandle_t ofw_real_parent(ofw_t, phandle_t node); static phandle_t ofw_real_instance_to_package(ofw_t, ihandle_t instance); static ssize_t ofw_real_getproplen(ofw_t, phandle_t package, const char *propname); static ssize_t ofw_real_getprop(ofw_t, phandle_t package, const char *propname, void *buf, size_t buflen); static int ofw_real_nextprop(ofw_t, phandle_t package, const char *previous, char *buf, size_t); static int ofw_real_setprop(ofw_t, phandle_t package, const char *propname, const void *buf, size_t len); static ssize_t ofw_real_canon(ofw_t, const char *device, char *buf, size_t len); static phandle_t ofw_real_finddevice(ofw_t, const char *device); static ssize_t ofw_real_instance_to_path(ofw_t, ihandle_t instance, char *buf, size_t len); static ssize_t ofw_real_package_to_path(ofw_t, phandle_t package, char *buf, size_t len); static int ofw_real_call_method(ofw_t, ihandle_t instance, const char *method, int nargs, int nreturns, cell_t *args_and_returns); static int ofw_real_interpret(ofw_t ofw, const char *cmd, int nreturns, cell_t *returns); static ihandle_t ofw_real_open(ofw_t, const char *device); static void ofw_real_close(ofw_t, ihandle_t instance); static ssize_t ofw_real_read(ofw_t, ihandle_t instance, void *addr, size_t len); static ssize_t ofw_real_write(ofw_t, ihandle_t instance, const void *addr, size_t len); static int ofw_real_seek(ofw_t, ihandle_t instance, u_int64_t pos); static caddr_t ofw_real_claim(ofw_t, void *virt, size_t size, u_int align); static void ofw_real_release(ofw_t, void *virt, size_t size); static void ofw_real_enter(ofw_t); static void ofw_real_exit(ofw_t); static ofw_method_t ofw_real_methods[] = { OFWMETHOD(ofw_init, ofw_real_init), OFWMETHOD(ofw_peer, ofw_real_peer), OFWMETHOD(ofw_child, ofw_real_child), OFWMETHOD(ofw_parent, ofw_real_parent), OFWMETHOD(ofw_instance_to_package, ofw_real_instance_to_package), OFWMETHOD(ofw_getproplen, ofw_real_getproplen), OFWMETHOD(ofw_getprop, ofw_real_getprop), OFWMETHOD(ofw_nextprop, ofw_real_nextprop), OFWMETHOD(ofw_setprop, ofw_real_setprop), OFWMETHOD(ofw_canon, ofw_real_canon), OFWMETHOD(ofw_finddevice, ofw_real_finddevice), OFWMETHOD(ofw_instance_to_path, ofw_real_instance_to_path), OFWMETHOD(ofw_package_to_path, ofw_real_package_to_path), OFWMETHOD(ofw_test, ofw_real_test), OFWMETHOD(ofw_call_method, ofw_real_call_method), OFWMETHOD(ofw_interpret, ofw_real_interpret), OFWMETHOD(ofw_open, ofw_real_open), OFWMETHOD(ofw_close, ofw_real_close), OFWMETHOD(ofw_read, ofw_real_read), OFWMETHOD(ofw_write, ofw_real_write), OFWMETHOD(ofw_seek, ofw_real_seek), OFWMETHOD(ofw_claim, ofw_real_claim), OFWMETHOD(ofw_release, 
ofw_real_release), OFWMETHOD(ofw_enter, ofw_real_enter), OFWMETHOD(ofw_exit, ofw_real_exit), { 0, 0 } }; static ofw_def_t ofw_real = { OFW_STD_REAL, ofw_real_methods, 0 }; OFW_DEF(ofw_real); static ofw_def_t ofw_32bit = { OFW_STD_32BIT, ofw_real_methods, 0 }; OFW_DEF(ofw_32bit); static MALLOC_DEFINE(M_OFWREAL, "ofwreal", "Open Firmware Real Mode Bounce Page"); static int (*openfirmware)(void *); static vm_offset_t of_bounce_phys; static caddr_t of_bounce_virt; static off_t of_bounce_offset; static size_t of_bounce_size; static struct mtx of_bounce_mtx; extern int ofw_real_mode; /* * After the VM is up, allocate a wired, low memory bounce page. */ static void ofw_real_bounce_alloc(void *); SYSINIT(ofw_real_bounce_alloc, SI_SUB_KMEM, SI_ORDER_ANY, ofw_real_bounce_alloc, NULL); static void ofw_real_start(void) { mtx_lock(&of_bounce_mtx); of_bounce_offset = 0; } static void ofw_real_stop(void) { mtx_unlock(&of_bounce_mtx); } static void ofw_real_bounce_alloc(void *junk) { /* * Check that ofw_real is actually in use before allocating wads * of memory. Do this by checking if our mutex has been set up. */ if (!mtx_initialized(&of_bounce_mtx)) return; /* * Allocate a page of contiguous, wired physical memory that can * fit into a 32-bit address space and accessed from real mode. */ mtx_lock(&of_bounce_mtx); of_bounce_virt = contigmalloc(4 * PAGE_SIZE, M_OFWREAL, 0, 0, ulmin(platform_real_maxaddr(), BUS_SPACE_MAXADDR_32BIT), PAGE_SIZE, 4 * PAGE_SIZE); of_bounce_phys = vtophys(of_bounce_virt); of_bounce_size = 4 * PAGE_SIZE; /* * For virtual-mode OF, direct map this physical address so that * we have a 32-bit virtual address to give OF. */ if (!ofw_real_mode && !hw_direct_map) pmap_kenter(of_bounce_phys, of_bounce_phys); mtx_unlock(&of_bounce_mtx); } static cell_t ofw_real_map(const void *buf, size_t len) { static char emergency_buffer[255]; cell_t phys; mtx_assert(&of_bounce_mtx, MA_OWNED); if (of_bounce_virt == NULL) { /* * If we haven't set up the MMU, then buf is guaranteed * to be accessible to OF, because the only memory we * can use right now is memory mapped by firmware. */ if (!pmap_bootstrapped) return (cell_t)(uintptr_t)buf; /* * XXX: It is possible for us to get called before the VM has * come online, but after the MMU is up. We don't have the * bounce buffer yet, but can no longer presume a 1:1 mapping. * Copy into the emergency buffer, and reset at the end. */ of_bounce_virt = emergency_buffer; of_bounce_phys = (vm_offset_t)of_bounce_virt; of_bounce_size = sizeof(emergency_buffer); } /* * Make sure the bounce page offset satisfies any reasonable * alignment constraint. */ of_bounce_offset += sizeof(register_t) - (of_bounce_offset % sizeof(register_t)); if (of_bounce_offset + len > of_bounce_size) { panic("Oversize Open Firmware call!"); return 0; } if (buf != NULL) memcpy(of_bounce_virt + of_bounce_offset, buf, len); else return (0); phys = of_bounce_phys + of_bounce_offset; of_bounce_offset += len; return (phys); } static void ofw_real_unmap(cell_t physaddr, void *buf, size_t len) { mtx_assert(&of_bounce_mtx, MA_OWNED); if (of_bounce_virt == NULL) return; if (physaddr == 0) return; memcpy(buf,of_bounce_virt + (physaddr - of_bounce_phys),len); } /* Initialiser */ static int ofw_real_init(ofw_t ofw, void *openfirm) { openfirmware = (int (*)(void *))openfirm; mtx_init(&of_bounce_mtx, "OF Bounce Page", NULL, MTX_DEF); of_bounce_virt = NULL; return (0); } /* * Generic functions */ /* Test to see if a service exists. 
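 *
 * Illustrative arithmetic for the alignment step in ofw_real_map() above:
 *
 *	of_bounce_offset += sizeof(register_t) -
 *	    (of_bounce_offset % sizeof(register_t));
 *
 * with an 8-byte register_t, an offset of 13 becomes 13 + (8 - 5) = 16;
 * an already-aligned offset of 16 still advances to 24, i.e. the bounce
 * page gives up a few bytes rather than ever under-aligning an argument.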
*/ static int ofw_real_test(ofw_t ofw, const char *name) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t service; cell_t missing; } args; args.name = (cell_t)(uintptr_t)"test"; args.nargs = 1; args.nreturns = 1; ofw_real_start(); args.service = ofw_real_map(name, strlen(name) + 1); argsptr = ofw_real_map(&args, sizeof(args)); if (args.service == 0 || openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); return (args.missing); } /* * Device tree functions */ /* Return the next sibling of this node or 0. */ static phandle_t ofw_real_peer(ofw_t ofw, phandle_t node) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t node; cell_t next; } args; args.name = (cell_t)(uintptr_t)"peer"; args.nargs = 1; args.nreturns = 1; args.node = node; ofw_real_start(); argsptr = ofw_real_map(&args, sizeof(args)); if (openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); return (args.next); } /* Return the first child of this node or 0. */ static phandle_t ofw_real_child(ofw_t ofw, phandle_t node) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t node; cell_t child; } args; args.name = (cell_t)(uintptr_t)"child"; args.nargs = 1; args.nreturns = 1; args.node = node; ofw_real_start(); argsptr = ofw_real_map(&args, sizeof(args)); if (openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); return (args.child); } /* Return the parent of this node or 0. */ static phandle_t ofw_real_parent(ofw_t ofw, phandle_t node) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t node; cell_t parent; } args; args.name = (cell_t)(uintptr_t)"parent"; args.nargs = 1; args.nreturns = 1; args.node = node; ofw_real_start(); argsptr = ofw_real_map(&args, sizeof(args)); if (openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); return (args.parent); } /* Return the package handle that corresponds to an instance handle. */ static phandle_t ofw_real_instance_to_package(ofw_t ofw, ihandle_t instance) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t instance; cell_t package; } args; args.name = (cell_t)(uintptr_t)"instance-to-package"; args.nargs = 1; args.nreturns = 1; args.instance = instance; ofw_real_start(); argsptr = ofw_real_map(&args, sizeof(args)); if (openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); return (args.package); } /* Get the length of a property of a package. */ static ssize_t ofw_real_getproplen(ofw_t ofw, phandle_t package, const char *propname) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t package; cell_t propname; int32_t proplen; } args; args.name = (cell_t)(uintptr_t)"getproplen"; args.nargs = 2; args.nreturns = 1; ofw_real_start(); args.package = package; args.propname = ofw_real_map(propname, strlen(propname) + 1); argsptr = ofw_real_map(&args, sizeof(args)); if (args.propname == 0 || openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); return (args.proplen); } /* Get the value of a property of a package. 
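 *
 * A minimal sketch of the pattern shared by every wrapper in this file
 * (member names here are schematic, not the exact per-call structs):
 *
 *	struct {
 *		cell_t name;	  address of the service-name string
 *		cell_t nargs;	  number of argument cells that follow
 *		cell_t nreturns;  number of return cells after the arguments
 *		cell_t cells[];	  nargs inputs followed by nreturns outputs
 *	} args;
 *
 * Pointer arguments (property names, data buffers) are copied into the
 * low-memory bounce page with ofw_real_map(), the args block is mapped the
 * same way, openfirmware() is invoked on its physical address, and the
 * results are copied back out with ofw_real_unmap().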
*/ static ssize_t ofw_real_getprop(ofw_t ofw, phandle_t package, const char *propname, void *buf, size_t buflen) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t package; cell_t propname; cell_t buf; cell_t buflen; int32_t size; } args; args.name = (cell_t)(uintptr_t)"getprop"; args.nargs = 4; args.nreturns = 1; ofw_real_start(); args.package = package; args.propname = ofw_real_map(propname, strlen(propname) + 1); args.buf = ofw_real_map(buf, buflen); args.buflen = buflen; argsptr = ofw_real_map(&args, sizeof(args)); if (args.propname == 0 || args.buf == 0 || openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_unmap(args.buf, buf, buflen); ofw_real_stop(); return (args.size); } /* Get the next property of a package. */ static int ofw_real_nextprop(ofw_t ofw, phandle_t package, const char *previous, char *buf, size_t size) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t package; cell_t previous; cell_t buf; cell_t flag; } args; args.name = (cell_t)(uintptr_t)"nextprop"; args.nargs = 3; args.nreturns = 1; ofw_real_start(); args.package = package; args.previous = ofw_real_map(previous, (previous != NULL) ? (strlen(previous) + 1) : 0); args.buf = ofw_real_map(buf, size); argsptr = ofw_real_map(&args, sizeof(args)); if (args.buf == 0 || openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_unmap(args.buf, buf, size); ofw_real_stop(); return (args.flag); } /* Set the value of a property of a package. */ /* XXX Has a bug on FirePower */ static int ofw_real_setprop(ofw_t ofw, phandle_t package, const char *propname, const void *buf, size_t len) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t package; cell_t propname; cell_t buf; cell_t len; cell_t size; } args; args.name = (cell_t)(uintptr_t)"setprop"; args.nargs = 4; args.nreturns = 1; ofw_real_start(); args.package = package; args.propname = ofw_real_map(propname, strlen(propname) + 1); args.buf = ofw_real_map(buf, len); args.len = len; argsptr = ofw_real_map(&args, sizeof(args)); if (args.propname == 0 || args.buf == 0 || openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); return (args.size); } /* Convert a device specifier to a fully qualified pathname. */ static ssize_t ofw_real_canon(ofw_t ofw, const char *device, char *buf, size_t len) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t device; cell_t buf; cell_t len; int32_t size; } args; args.name = (cell_t)(uintptr_t)"canon"; args.nargs = 3; args.nreturns = 1; ofw_real_start(); args.device = ofw_real_map(device, strlen(device) + 1); args.buf = ofw_real_map(buf, len); args.len = len; argsptr = ofw_real_map(&args, sizeof(args)); if (args.device == 0 || args.buf == 0 || openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_unmap(args.buf, buf, len); ofw_real_stop(); return (args.size); } /* Return a package handle for the specified device. 
*/ static phandle_t ofw_real_finddevice(ofw_t ofw, const char *device) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t device; cell_t package; } args; args.name = (cell_t)(uintptr_t)"finddevice"; args.nargs = 1; args.nreturns = 1; ofw_real_start(); args.device = ofw_real_map(device, strlen(device) + 1); argsptr = ofw_real_map(&args, sizeof(args)); if (args.device == 0 || openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); return (args.package); } /* Return the fully qualified pathname corresponding to an instance. */ static ssize_t ofw_real_instance_to_path(ofw_t ofw, ihandle_t instance, char *buf, size_t len) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t instance; cell_t buf; cell_t len; int32_t size; } args; args.name = (cell_t)(uintptr_t)"instance-to-path"; args.nargs = 3; args.nreturns = 1; ofw_real_start(); args.instance = instance; args.buf = ofw_real_map(buf, len); args.len = len; argsptr = ofw_real_map(&args, sizeof(args)); if (args.buf == 0 || openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_unmap(args.buf, buf, len); ofw_real_stop(); return (args.size); } /* Return the fully qualified pathname corresponding to a package. */ static ssize_t ofw_real_package_to_path(ofw_t ofw, phandle_t package, char *buf, size_t len) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t package; cell_t buf; cell_t len; int32_t size; } args; args.name = (cell_t)(uintptr_t)"package-to-path"; args.nargs = 3; args.nreturns = 1; ofw_real_start(); args.package = package; args.buf = ofw_real_map(buf, len); args.len = len; argsptr = ofw_real_map(&args, sizeof(args)); if (args.buf == 0 || openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_unmap(args.buf, buf, len); ofw_real_stop(); return (args.size); } /* Call the method in the scope of a given instance. 
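 *
 * Illustrative note on the marshalling below: the caller's nargs cells are
 * copied into args_n_results[] in reverse order before the call, and the
 * nreturns result cells following the leading status cell are copied back
 * out, again reversed.  For example, with nargs = 2 the caller's
 * { a0, a1 } is stored as args_n_results[] = { a1, a0 }.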
*/ static int ofw_real_call_method(ofw_t ofw, ihandle_t instance, const char *method, int nargs, int nreturns, cell_t *args_and_returns) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t method; cell_t instance; cell_t args_n_results[12]; } args; cell_t *ap, *cp; int n; args.name = (cell_t)(uintptr_t)"call-method"; args.nargs = 2; args.nreturns = 1; if (nargs > 6) return (-1); ofw_real_start(); args.nargs = nargs + 2; args.nreturns = nreturns + 1; args.method = ofw_real_map(method, strlen(method) + 1); args.instance = instance; ap = args_and_returns; for (cp = args.args_n_results + (n = nargs); --n >= 0;) *--cp = *(ap++); argsptr = ofw_real_map(&args, sizeof(args)); if (args.method == 0 || openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); if (args.args_n_results[nargs]) return (args.args_n_results[nargs]); for (cp = args.args_n_results + nargs + (n = args.nreturns); --n > 0;) *(ap++) = *--cp; return (0); } static int ofw_real_interpret(ofw_t ofw, const char *cmd, int nreturns, cell_t *returns) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t slot[16]; } args; cell_t status; int i = 0, j = 0; args.name = (cell_t)(uintptr_t)"interpret"; args.nargs = 1; ofw_real_start(); args.nreturns = ++nreturns; args.slot[i++] = ofw_real_map(cmd, strlen(cmd) + 1); argsptr = ofw_real_map(&args, sizeof(args)); if (openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); status = args.slot[i++]; while (i < 1 + nreturns) returns[j++] = args.slot[i++]; return (status); } /* * Device I/O functions */ /* Open an instance for a device. */ static ihandle_t ofw_real_open(ofw_t ofw, const char *device) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t device; cell_t instance; } args; args.name = (cell_t)(uintptr_t)"open"; args.nargs = 1; args.nreturns = 1; ofw_real_start(); args.device = ofw_real_map(device, strlen(device) + 1); argsptr = ofw_real_map(&args, sizeof(args)); if (args.device == 0 || openfirmware((void *)argsptr) == -1 || args.instance == 0) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); return (args.instance); } /* Close an instance. */ static void ofw_real_close(ofw_t ofw, ihandle_t instance) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t instance; } args; args.name = (cell_t)(uintptr_t)"close"; args.nargs = 1; args.nreturns = 0; args.instance = instance; ofw_real_start(); argsptr = ofw_real_map(&args, sizeof(args)); openfirmware((void *)argsptr); ofw_real_stop(); } /* Read from an instance. */ static ssize_t ofw_real_read(ofw_t ofw, ihandle_t instance, void *addr, size_t len) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t instance; cell_t addr; cell_t len; int32_t actual; } args; args.name = (cell_t)(uintptr_t)"read"; args.nargs = 3; args.nreturns = 1; ofw_real_start(); args.instance = instance; args.addr = ofw_real_map(addr, len); args.len = len; argsptr = ofw_real_map(&args, sizeof(args)); if (args.addr == 0 || openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_unmap(args.addr, addr, len); ofw_real_stop(); return (args.actual); } /* Write to an instance. 
*/ static ssize_t ofw_real_write(ofw_t ofw, ihandle_t instance, const void *addr, size_t len) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t instance; cell_t addr; cell_t len; int32_t actual; } args; args.name = (cell_t)(uintptr_t)"write"; args.nargs = 3; args.nreturns = 1; ofw_real_start(); args.instance = instance; args.addr = ofw_real_map(addr, len); args.len = len; argsptr = ofw_real_map(&args, sizeof(args)); if (args.addr == 0 || openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); return (args.actual); } /* Seek to a position. */ static int ofw_real_seek(ofw_t ofw, ihandle_t instance, u_int64_t pos) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t instance; cell_t poshi; cell_t poslo; cell_t status; } args; args.name = (cell_t)(uintptr_t)"seek"; args.nargs = 3; args.nreturns = 1; args.instance = instance; args.poshi = pos >> 32; args.poslo = pos; ofw_real_start(); argsptr = ofw_real_map(&args, sizeof(args)); if (openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return (-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); return (args.status); } /* * Memory functions */ /* Claim an area of memory. */ static caddr_t ofw_real_claim(ofw_t ofw, void *virt, size_t size, u_int align) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t virt; cell_t size; cell_t align; cell_t baseaddr; } args; args.name = (cell_t)(uintptr_t)"claim"; args.nargs = 3; args.nreturns = 1; args.virt = (cell_t)(uintptr_t)virt; args.size = size; args.align = align; ofw_real_start(); argsptr = ofw_real_map(&args, sizeof(args)); if (openfirmware((void *)argsptr) == -1) { ofw_real_stop(); return ((void *)-1); } ofw_real_unmap(argsptr, &args, sizeof(args)); ofw_real_stop(); return ((void *)(uintptr_t)args.baseaddr); } /* Release an area of memory. */ static void ofw_real_release(ofw_t ofw, void *virt, size_t size) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; cell_t virt; cell_t size; } args; args.name = (cell_t)(uintptr_t)"release"; args.nargs = 2; args.nreturns = 0; args.virt = (cell_t)(uintptr_t)virt; args.size = size; ofw_real_start(); argsptr = ofw_real_map(&args, sizeof(args)); openfirmware((void *)argsptr); ofw_real_stop(); } /* * Control transfer functions */ /* Suspend and drop back to the Open Firmware interface. */ static void ofw_real_enter(ofw_t ofw) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; } args; args.name = (cell_t)(uintptr_t)"enter"; args.nargs = 0; args.nreturns = 0; ofw_real_start(); argsptr = ofw_real_map(&args, sizeof(args)); openfirmware((void *)argsptr); /* We may come back. */ ofw_real_stop(); } /* Shut down and drop back to the Open Firmware interface. */ static void ofw_real_exit(ofw_t ofw) { vm_offset_t argsptr; struct { cell_t name; cell_t nargs; cell_t nreturns; } args; args.name = (cell_t)(uintptr_t)"exit"; args.nargs = 0; args.nreturns = 0; ofw_real_start(); argsptr = ofw_real_map(&args, sizeof(args)); openfirmware((void *)argsptr); for (;;) /* just in case */ ; ofw_real_stop(); } Index: head/sys/powerpc/ofw/rtas.c =================================================================== --- head/sys/powerpc/ofw/rtas.c (revision 295879) +++ head/sys/powerpc/ofw/rtas.c (revision 295880) @@ -1,271 +1,270 @@ /*- * Copyright (c) 2011 Nathan Whitehorn * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include static MALLOC_DEFINE(M_RTAS, "rtas", "Run Time Abstraction Service"); static vm_offset_t rtas_bounce_phys; static caddr_t rtas_bounce_virt; static off_t rtas_bounce_offset; static size_t rtas_bounce_size; static uintptr_t rtas_private_data; static struct mtx rtas_mtx; static phandle_t rtas; /* From ofwcall.S */ int rtascall(vm_offset_t callbuffer, uintptr_t rtas_privdat); extern uintptr_t rtas_entry; extern register_t rtasmsr; /* * After the VM is up, allocate RTAS memory and instantiate it */ static void rtas_setup(void *); SYSINIT(rtas_setup, SI_SUB_KMEM, SI_ORDER_ANY, rtas_setup, NULL); static void rtas_setup(void *junk) { ihandle_t rtasi; cell_t rtas_size = 0, rtas_ptr; char path[31]; int result; rtas = OF_finddevice("/rtas"); if (rtas == -1) { rtas = 0; return; } OF_package_to_path(rtas, path, sizeof(path)); mtx_init(&rtas_mtx, "RTAS", NULL, MTX_SPIN); /* RTAS must be called with everything turned off in MSR */ rtasmsr = mfmsr(); rtasmsr &= ~(PSL_IR | PSL_DR | PSL_EE | PSL_SE); #ifdef __powerpc64__ rtasmsr &= ~PSL_SF; #endif /* * Allocate rtas_size + one page of contiguous, wired physical memory * that can fit into a 32-bit address space and accessed from real mode. * This is used both to bounce arguments and for RTAS private data. * * It must be 4KB-aligned and not cross a 256 MB boundary. */ OF_getencprop(rtas, "rtas-size", &rtas_size, sizeof(rtas_size)); rtas_size = round_page(rtas_size); rtas_bounce_virt = contigmalloc(rtas_size + PAGE_SIZE, M_RTAS, 0, 0, ulmin(platform_real_maxaddr(), BUS_SPACE_MAXADDR_32BIT), 4096, 256*1024*1024); rtas_private_data = vtophys(rtas_bounce_virt); rtas_bounce_virt += rtas_size; /* Actual bounce area */ rtas_bounce_phys = vtophys(rtas_bounce_virt); rtas_bounce_size = PAGE_SIZE; /* * Instantiate RTAS. We always use the 32-bit version. 
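 *
 * Illustrative note: the contigmalloc() call above encodes the RTAS
 * placement rules in its arguments: the region must lie below both
 * platform_real_maxaddr() and BUS_SPACE_MAXADDR_32BIT, be 4096-byte
 * aligned, and must not cross a 256 MB (256*1024*1024) boundary.  The
 * first rtas_size bytes become the RTAS private-data area and the
 * trailing page is reused as the bounce buffer for call arguments.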
*/ if (OF_hasprop(rtas, "linux,rtas-entry") && OF_hasprop(rtas, "linux,rtas-base")) { OF_getencprop(rtas, "linux,rtas-base", &rtas_ptr, sizeof(rtas_ptr)); rtas_private_data = rtas_ptr; OF_getencprop(rtas, "linux,rtas-entry", &rtas_ptr, sizeof(rtas_ptr)); } else { rtasi = OF_open(path); if (rtasi == 0) { rtas = 0; printf("Error initializing RTAS: could not open " "node\n"); return; } result = OF_call_method("instantiate-rtas", rtasi, 1, 1, (cell_t)rtas_private_data, &rtas_ptr); OF_close(rtasi); if (result != 0) { rtas = 0; rtas_ptr = 0; printf("Error initializing RTAS (%d)\n", result); return; } } rtas_entry = (uintptr_t)(rtas_ptr); } static cell_t rtas_real_map(const void *buf, size_t len) { cell_t phys; mtx_assert(&rtas_mtx, MA_OWNED); /* * Make sure the bounce page offset satisfies any reasonable * alignment constraint. */ rtas_bounce_offset += sizeof(register_t) - (rtas_bounce_offset % sizeof(register_t)); if (rtas_bounce_offset + len > rtas_bounce_size) { panic("Oversize RTAS call!"); return 0; } if (buf != NULL) memcpy(rtas_bounce_virt + rtas_bounce_offset, buf, len); else return (0); phys = rtas_bounce_phys + rtas_bounce_offset; rtas_bounce_offset += len; return (phys); } static void rtas_real_unmap(cell_t physaddr, void *buf, size_t len) { mtx_assert(&rtas_mtx, MA_OWNED); if (physaddr == 0) return; memcpy(buf, rtas_bounce_virt + (physaddr - rtas_bounce_phys), len); } /* Check if we have RTAS */ int rtas_exists(void) { return (rtas != 0); } /* Call an RTAS method by token */ int rtas_call_method(cell_t token, int nargs, int nreturns, ...) { vm_offset_t argsptr; jmp_buf env, *oldfaultbuf; va_list ap; struct { cell_t token; cell_t nargs; cell_t nreturns; cell_t args_n_results[12]; } args; int n, result; if (!rtas_exists() || nargs + nreturns > 12) return (-1); args.token = token; va_start(ap, nreturns); mtx_lock_spin(&rtas_mtx); rtas_bounce_offset = 0; args.nargs = nargs; args.nreturns = nreturns; for (n = 0; n < nargs; n++) args.args_n_results[n] = va_arg(ap, cell_t); argsptr = rtas_real_map(&args, sizeof(args)); /* Get rid of any stale machine checks that have been waiting. */ __asm __volatile ("sync; isync"); oldfaultbuf = curthread->td_pcb->pcb_onfault; curthread->td_pcb->pcb_onfault = &env; if (!setjmp(env)) { __asm __volatile ("sync"); result = rtascall(argsptr, rtas_private_data); __asm __volatile ("sync; isync"); } else { result = RTAS_HW_ERROR; } curthread->td_pcb->pcb_onfault = oldfaultbuf; __asm __volatile ("sync"); rtas_real_unmap(argsptr, &args, sizeof(args)); mtx_unlock_spin(&rtas_mtx); if (result < 0) return (result); for (n = nargs; n < nargs + nreturns; n++) *va_arg(ap, cell_t *) = args.args_n_results[n]; return (result); } /* Look up an RTAS token */ cell_t rtas_token_lookup(const char *method) { cell_t token; if (!rtas_exists()) return (-1); if (OF_getencprop(rtas, method, &token, sizeof(token)) == -1) return (-1); return (token); } Index: head/sys/powerpc/powermac/macgpio.c =================================================================== --- head/sys/powerpc/powermac/macgpio.c (revision 295879) +++ head/sys/powerpc/powermac/macgpio.c (revision 295880) @@ -1,404 +1,403 @@ /*- * Copyright 2008 by Nathan Whitehorn. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Driver for MacIO GPIO controller */ #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include /* * Macgpio softc */ struct macgpio_softc { phandle_t sc_node; struct resource *sc_gpios; int sc_gpios_rid; uint32_t sc_saved_gpio_levels[2]; uint32_t sc_saved_gpios[GPIO_COUNT]; uint32_t sc_saved_extint_gpios[GPIO_EXTINT_COUNT]; }; static MALLOC_DEFINE(M_MACGPIO, "macgpio", "macgpio device information"); static int macgpio_probe(device_t); static int macgpio_attach(device_t); static int macgpio_print_child(device_t dev, device_t child); static void macgpio_probe_nomatch(device_t, device_t); static struct resource *macgpio_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int macgpio_activate_resource(device_t, device_t, int, int, struct resource *); static int macgpio_deactivate_resource(device_t, device_t, int, int, struct resource *); static ofw_bus_get_devinfo_t macgpio_get_devinfo; static int macgpio_suspend(device_t dev); static int macgpio_resume(device_t dev); /* * Bus interface definition */ static device_method_t macgpio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, macgpio_probe), DEVMETHOD(device_attach, macgpio_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, macgpio_suspend), DEVMETHOD(device_resume, macgpio_resume), /* Bus interface */ DEVMETHOD(bus_print_child, macgpio_print_child), DEVMETHOD(bus_probe_nomatch, macgpio_probe_nomatch), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, macgpio_alloc_resource), DEVMETHOD(bus_activate_resource, macgpio_activate_resource), DEVMETHOD(bus_deactivate_resource, macgpio_deactivate_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, macgpio_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t macgpio_pci_driver = { "macgpio", macgpio_methods, sizeof(struct macgpio_softc) }; devclass_t macgpio_devclass; DRIVER_MODULE(macgpio, macio, macgpio_pci_driver, macgpio_devclass, 0, 0); struct 
macgpio_devinfo { struct ofw_bus_devinfo mdi_obdinfo; struct resource_list mdi_resources; int gpio_num; }; static int macgpio_probe(device_t dev) { const char *name; name = ofw_bus_get_name(dev); if (name && strcmp(name, "gpio") == 0) { device_set_desc(dev, "MacIO GPIO Controller"); return (0); } return (ENXIO); } /* * Scan Open Firmware child nodes, and attach these as children * of the macgpio bus */ static int macgpio_attach(device_t dev) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; phandle_t root, child, iparent; device_t cdev; uint32_t irq; sc = device_get_softc(dev); root = sc->sc_node = ofw_bus_get_node(dev); sc->sc_gpios = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_gpios_rid, RF_ACTIVE); /* * Iterate through the sub-devices */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_MACGPIO, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->mdi_obdinfo, child) != 0) { free(dinfo, M_MACGPIO); continue; } if (OF_getencprop(child, "reg", &dinfo->gpio_num, sizeof(dinfo->gpio_num)) != sizeof(dinfo->gpio_num)) { /* * Some early GPIO controllers don't provide GPIO * numbers for GPIOs designed only to provide * interrupt resources. We should still allow these * to attach, but with caution. */ dinfo->gpio_num = -1; } resource_list_init(&dinfo->mdi_resources); if (OF_getencprop(child, "interrupts", &irq, sizeof(irq)) == sizeof(irq)) { OF_searchencprop(child, "interrupt-parent", &iparent, sizeof(iparent)); resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, 0, MAP_IRQ(iparent, irq), MAP_IRQ(iparent, irq), 1); } /* Fix messed-up offsets */ if (dinfo->gpio_num > 0x50) dinfo->gpio_num -= 0x50; cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->mdi_obdinfo.obd_name); ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACGPIO); continue; } device_set_ivars(cdev, dinfo); } return (bus_generic_attach(dev)); } static int macgpio_print_child(device_t dev, device_t child) { struct macgpio_devinfo *dinfo; int retval = 0; dinfo = device_get_ivars(child); retval += bus_print_child_header(dev, child); if (dinfo->gpio_num >= GPIO_BASE) printf(" gpio %d", dinfo->gpio_num - GPIO_BASE); else if (dinfo->gpio_num >= GPIO_EXTINT_BASE) printf(" extint-gpio %d", dinfo->gpio_num - GPIO_EXTINT_BASE); else if (dinfo->gpio_num >= 0) printf(" addr 0x%02x", dinfo->gpio_num); /* should not happen */ resource_list_print_type(&dinfo->mdi_resources, "irq", SYS_RES_IRQ, "%ld"); retval += bus_print_child_footer(dev, child); return (retval); } static void macgpio_probe_nomatch(device_t dev, device_t child) { struct macgpio_devinfo *dinfo; const char *type; if (bootverbose) { dinfo = device_get_ivars(child); if ((type = ofw_bus_get_type(child)) == NULL) type = "(unknown)"; device_printf(dev, "<%s, %s>", type, ofw_bus_get_name(child)); if (dinfo->gpio_num >= 0) printf(" gpio %d",dinfo->gpio_num); resource_list_print_type(&dinfo->mdi_resources, "irq", SYS_RES_IRQ, "%ld"); printf(" (no driver attached)\n"); } } static struct resource * macgpio_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct macgpio_devinfo *dinfo; dinfo = device_get_ivars(child); if (type != SYS_RES_IRQ) return (NULL); return (resource_list_alloc(&dinfo->mdi_resources, bus, child, type, rid, start, end, count, flags)); } static int macgpio_activate_resource(device_t bus, device_t child, int type, int rid, struct 
resource *res) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; u_char val; sc = device_get_softc(bus); dinfo = device_get_ivars(child); if (type != SYS_RES_IRQ) return ENXIO; if (dinfo->gpio_num >= 0) { val = bus_read_1(sc->sc_gpios,dinfo->gpio_num); val |= 0x80; bus_write_1(sc->sc_gpios,dinfo->gpio_num,val); } return (bus_activate_resource(bus, type, rid, res)); } static int macgpio_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; u_char val; sc = device_get_softc(bus); dinfo = device_get_ivars(child); if (type != SYS_RES_IRQ) return ENXIO; if (dinfo->gpio_num >= 0) { val = bus_read_1(sc->sc_gpios,dinfo->gpio_num); val &= ~0x80; bus_write_1(sc->sc_gpios,dinfo->gpio_num,val); } return (bus_deactivate_resource(bus, type, rid, res)); } uint8_t macgpio_read(device_t dev) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; sc = device_get_softc(device_get_parent(dev)); dinfo = device_get_ivars(dev); if (dinfo->gpio_num < 0) return (0); return (bus_read_1(sc->sc_gpios,dinfo->gpio_num)); } void macgpio_write(device_t dev, uint8_t val) { struct macgpio_softc *sc; struct macgpio_devinfo *dinfo; sc = device_get_softc(device_get_parent(dev)); dinfo = device_get_ivars(dev); if (dinfo->gpio_num < 0) return; bus_write_1(sc->sc_gpios,dinfo->gpio_num,val); } static const struct ofw_bus_devinfo * macgpio_get_devinfo(device_t dev, device_t child) { struct macgpio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_obdinfo); } static int macgpio_suspend(device_t dev) { struct macgpio_softc *sc; int i; sc = device_get_softc(dev); sc->sc_saved_gpio_levels[0] = bus_read_4(sc->sc_gpios, GPIO_LEVELS_0); sc->sc_saved_gpio_levels[1] = bus_read_4(sc->sc_gpios, GPIO_LEVELS_1); for (i = 0; i < GPIO_COUNT; i++) sc->sc_saved_gpios[i] = bus_read_1(sc->sc_gpios, GPIO_BASE + i); for (i = 0; i < GPIO_EXTINT_COUNT; i++) sc->sc_saved_extint_gpios[i] = bus_read_1(sc->sc_gpios, GPIO_EXTINT_BASE + i); return (0); } static int macgpio_resume(device_t dev) { struct macgpio_softc *sc; int i; sc = device_get_softc(dev); bus_write_4(sc->sc_gpios, GPIO_LEVELS_0, sc->sc_saved_gpio_levels[0]); bus_write_4(sc->sc_gpios, GPIO_LEVELS_1, sc->sc_saved_gpio_levels[1]); for (i = 0; i < GPIO_COUNT; i++) bus_write_1(sc->sc_gpios, GPIO_BASE + i, sc->sc_saved_gpios[i]); for (i = 0; i < GPIO_EXTINT_COUNT; i++) bus_write_1(sc->sc_gpios, GPIO_EXTINT_BASE + i, sc->sc_saved_extint_gpios[i]); return (0); } Index: head/sys/powerpc/powermac/macio.c =================================================================== --- head/sys/powerpc/powermac/macio.c (revision 295879) +++ head/sys/powerpc/powermac/macio.c (revision 295880) @@ -1,704 +1,703 @@ /*- * Copyright 2002 by Peter Grehan. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Driver for KeyLargo/Pangea, the MacPPC south bridge ASIC. */ #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include /* * Macio softc */ struct macio_softc { phandle_t sc_node; vm_offset_t sc_base; vm_offset_t sc_size; struct rman sc_mem_rman; /* FCR registers */ int sc_memrid; struct resource *sc_memr; }; static MALLOC_DEFINE(M_MACIO, "macio", "macio device information"); static int macio_probe(device_t); static int macio_attach(device_t); static int macio_print_child(device_t dev, device_t child); static void macio_probe_nomatch(device_t, device_t); static struct resource *macio_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int macio_activate_resource(device_t, device_t, int, int, struct resource *); static int macio_deactivate_resource(device_t, device_t, int, int, struct resource *); static int macio_release_resource(device_t, device_t, int, int, struct resource *); static struct resource_list *macio_get_resource_list (device_t, device_t); static ofw_bus_get_devinfo_t macio_get_devinfo; /* * Bus interface definition */ static device_method_t macio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, macio_probe), DEVMETHOD(device_attach, macio_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, macio_print_child), DEVMETHOD(bus_probe_nomatch, macio_probe_nomatch), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, macio_alloc_resource), DEVMETHOD(bus_release_resource, macio_release_resource), DEVMETHOD(bus_activate_resource, macio_activate_resource), DEVMETHOD(bus_deactivate_resource, macio_deactivate_resource), DEVMETHOD(bus_get_resource_list, macio_get_resource_list), DEVMETHOD(bus_child_pnpinfo_str, ofw_bus_gen_child_pnpinfo_str), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, macio_get_devinfo), DEVMETHOD(ofw_bus_get_compat, ofw_bus_gen_get_compat), DEVMETHOD(ofw_bus_get_model, ofw_bus_gen_get_model), DEVMETHOD(ofw_bus_get_name, ofw_bus_gen_get_name), DEVMETHOD(ofw_bus_get_node, ofw_bus_gen_get_node), DEVMETHOD(ofw_bus_get_type, ofw_bus_gen_get_type), { 0, 0 } }; static driver_t macio_pci_driver = { "macio", macio_methods, sizeof(struct macio_softc) }; devclass_t macio_devclass; DRIVER_MODULE(macio, pci, macio_pci_driver, macio_devclass, 0, 0); /* * PCI ID search table */ static struct macio_pci_dev { u_int32_t mpd_devid; char *mpd_desc; } macio_pci_devlist[] = { { 0x0017106b, 
"Paddington I/O Controller" }, { 0x0022106b, "KeyLargo I/O Controller" }, { 0x0025106b, "Pangea I/O Controller" }, { 0x003e106b, "Intrepid I/O Controller" }, { 0x0041106b, "K2 KeyLargo I/O Controller" }, { 0x004f106b, "Shasta I/O Controller" }, { 0, NULL } }; /* * Devices to exclude from the probe * XXX some of these may be required in the future... */ #define MACIO_QUIRK_IGNORE 0x00000001 #define MACIO_QUIRK_CHILD_HAS_INTR 0x00000002 #define MACIO_QUIRK_USE_CHILD_REG 0x00000004 struct macio_quirk_entry { const char *mq_name; int mq_quirks; }; static struct macio_quirk_entry macio_quirks[] = { { "escc-legacy", MACIO_QUIRK_IGNORE }, { "timer", MACIO_QUIRK_IGNORE }, { "escc", MACIO_QUIRK_CHILD_HAS_INTR }, { "i2s", MACIO_QUIRK_CHILD_HAS_INTR | MACIO_QUIRK_USE_CHILD_REG }, { NULL, 0 } }; static int macio_get_quirks(const char *name) { struct macio_quirk_entry *mqe; for (mqe = macio_quirks; mqe->mq_name != NULL; mqe++) if (strcmp(name, mqe->mq_name) == 0) return (mqe->mq_quirks); return (0); } /* * Add an interrupt to the dev's resource list if present */ static void macio_add_intr(phandle_t devnode, struct macio_devinfo *dinfo) { phandle_t iparent; int *intr; int i, nintr; int icells; if (dinfo->mdi_ninterrupts >= 6) { printf("macio: device has more than 6 interrupts\n"); return; } nintr = OF_getprop_alloc(devnode, "interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) { nintr = OF_getprop_alloc(devnode, "AAPL,interrupts", sizeof(*intr), (void **)&intr); if (nintr == -1) return; } if (intr[0] == -1) return; if (OF_getprop(devnode, "interrupt-parent", &iparent, sizeof(iparent)) <= 0) panic("Interrupt but no interrupt parent!\n"); if (OF_getprop(OF_node_from_xref(iparent), "#interrupt-cells", &icells, sizeof(icells)) <= 0) icells = 1; for (i = 0; i < nintr; i+=icells) { u_int irq = MAP_IRQ(iparent, intr[i]); resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, dinfo->mdi_ninterrupts, irq, irq, 1); dinfo->mdi_interrupts[dinfo->mdi_ninterrupts] = irq; dinfo->mdi_ninterrupts++; } } static void macio_add_reg(phandle_t devnode, struct macio_devinfo *dinfo) { struct macio_reg *reg, *regp; phandle_t child; char buf[8]; int i, layout_id = 0, nreg, res; nreg = OF_getprop_alloc(devnode, "reg", sizeof(*reg), (void **)®); if (nreg == -1) return; /* * Some G5's have broken properties in the i2s-a area. If so we try * to fix it. Right now we know of two different cases, one for * sound layout-id 36 and the other one for sound layout-id 76. * What is missing is the base address for the memory addresses. * We take them from the parent node (i2s) and use the size * information from the child. 
*/ if (reg[0].mr_base == 0) { child = OF_child(devnode); while (child != 0) { res = OF_getprop(child, "name", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "sound") == 0) break; child = OF_peer(child); } res = OF_getprop(child, "layout-id", &layout_id, sizeof(layout_id)); if (res > 0 && (layout_id == 36 || layout_id == 76)) { res = OF_getprop_alloc(OF_parent(devnode), "reg", sizeof(*regp), (void **)®p); reg[0] = regp[0]; reg[1].mr_base = regp[1].mr_base; reg[2].mr_base = regp[1].mr_base + reg[1].mr_size; } } for (i = 0; i < nreg; i++) { resource_list_add(&dinfo->mdi_resources, SYS_RES_MEMORY, i, reg[i].mr_base, reg[i].mr_base + reg[i].mr_size, reg[i].mr_size); } } /* * PCI probe */ static int macio_probe(device_t dev) { int i; u_int32_t devid; devid = pci_get_devid(dev); for (i = 0; macio_pci_devlist[i].mpd_desc != NULL; i++) { if (devid == macio_pci_devlist[i].mpd_devid) { device_set_desc(dev, macio_pci_devlist[i].mpd_desc); return (0); } } return (ENXIO); } /* * PCI attach: scan Open Firmware child nodes, and attach these as children * of the macio bus */ static int macio_attach(device_t dev) { struct macio_softc *sc; struct macio_devinfo *dinfo; phandle_t root; phandle_t child; phandle_t subchild; device_t cdev; u_int reg[3]; char compat[32]; int error, quirks; sc = device_get_softc(dev); root = sc->sc_node = ofw_bus_get_node(dev); /* * Locate the device node and it's base address */ if (OF_getprop(root, "assigned-addresses", reg, sizeof(reg)) < (ssize_t)sizeof(reg)) { return (ENXIO); } /* Used later to see if we have to enable the I2S part. */ OF_getprop(root, "compatible", compat, sizeof(compat)); sc->sc_base = reg[2]; sc->sc_size = MACIO_REG_SIZE; sc->sc_memrid = PCIR_BAR(0); sc->sc_memr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_memrid, RF_ACTIVE); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "MacIO Device Memory"; error = rman_init(&sc->sc_mem_rman); if (error) { device_printf(dev, "rman_init() failed. error = %d\n", error); return (error); } error = rman_manage_region(&sc->sc_mem_rman, 0, sc->sc_size); if (error) { device_printf(dev, "rman_manage_region() failed. 
error = %d\n", error); return (error); } /* * Iterate through the sub-devices */ for (child = OF_child(root); child != 0; child = OF_peer(child)) { dinfo = malloc(sizeof(*dinfo), M_MACIO, M_WAITOK | M_ZERO); if (ofw_bus_gen_setup_devinfo(&dinfo->mdi_obdinfo, child) != 0) { free(dinfo, M_MACIO); continue; } quirks = macio_get_quirks(dinfo->mdi_obdinfo.obd_name); if ((quirks & MACIO_QUIRK_IGNORE) != 0) { ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACIO); continue; } resource_list_init(&dinfo->mdi_resources); dinfo->mdi_ninterrupts = 0; macio_add_intr(child, dinfo); if ((quirks & MACIO_QUIRK_USE_CHILD_REG) != 0) macio_add_reg(OF_child(child), dinfo); else macio_add_reg(child, dinfo); if ((quirks & MACIO_QUIRK_CHILD_HAS_INTR) != 0) for (subchild = OF_child(child); subchild != 0; subchild = OF_peer(subchild)) macio_add_intr(subchild, dinfo); cdev = device_add_child(dev, NULL, -1); if (cdev == NULL) { device_printf(dev, "<%s>: device_add_child failed\n", dinfo->mdi_obdinfo.obd_name); resource_list_free(&dinfo->mdi_resources); ofw_bus_gen_destroy_devinfo(&dinfo->mdi_obdinfo); free(dinfo, M_MACIO); continue; } device_set_ivars(cdev, dinfo); /* Set FCRs to enable some devices */ if (sc->sc_memr == NULL) continue; if (strcmp(ofw_bus_get_name(cdev), "bmac") == 0 || strcmp(ofw_bus_get_compat(cdev), "bmac+") == 0) { uint32_t fcr; fcr = bus_read_4(sc->sc_memr, HEATHROW_FCR); fcr |= FCR_ENET_ENABLE & ~FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); fcr |= FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); fcr &= ~FCR_ENET_RESET; bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); DELAY(50000); bus_write_4(sc->sc_memr, HEATHROW_FCR, fcr); } /* * Make sure the I2S0 and the I2S0_CLK are enabled. * On certain G5's they are not. 
*/ if ((strcmp(ofw_bus_get_name(cdev), "i2s") == 0) && (strcmp(compat, "K2-Keylargo") == 0)) { uint32_t fcr1; fcr1 = bus_read_4(sc->sc_memr, KEYLARGO_FCR1); fcr1 |= FCR1_I2S0_CLK_ENABLE | FCR1_I2S0_ENABLE; bus_write_4(sc->sc_memr, KEYLARGO_FCR1, fcr1); } } return (bus_generic_attach(dev)); } static int macio_print_child(device_t dev, device_t child) { struct macio_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->mdi_resources; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); retval += bus_print_child_footer(dev, child); return (retval); } static void macio_probe_nomatch(device_t dev, device_t child) { struct macio_devinfo *dinfo; struct resource_list *rl; const char *type; if (bootverbose) { dinfo = device_get_ivars(child); rl = &dinfo->mdi_resources; if ((type = ofw_bus_get_type(child)) == NULL) type = "(unknown)"; device_printf(dev, "<%s, %s>", type, ofw_bus_get_name(child)); resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx"); resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); printf(" (no driver attached)\n"); } } static struct resource * macio_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct macio_softc *sc; int needactivate; struct resource *rv; struct rman *rm; u_long adjstart, adjend, adjcount; struct macio_devinfo *dinfo; struct resource_list_entry *rle; sc = device_get_softc(bus); dinfo = device_get_ivars(child); needactivate = flags & RF_ACTIVE; flags &= ~RF_ACTIVE; switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rle = resource_list_find(&dinfo->mdi_resources, SYS_RES_MEMORY, *rid); if (rle == NULL) { device_printf(bus, "no rle for %s memory %d\n", device_get_nameunit(child), *rid); return (NULL); } if (start < rle->start) adjstart = rle->start; else if (start > rle->end) adjstart = rle->end; else adjstart = start; if (end < rle->start) adjend = rle->start; else if (end > rle->end) adjend = rle->end; else adjend = end; adjcount = adjend - adjstart; rm = &sc->sc_mem_rman; break; case SYS_RES_IRQ: /* Check for passthrough from subattachments like macgpio */ if (device_get_parent(child) != bus) return BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags); rle = resource_list_find(&dinfo->mdi_resources, SYS_RES_IRQ, *rid); if (rle == NULL) { if (dinfo->mdi_ninterrupts >= 6) { device_printf(bus, "%s has more than 6 interrupts\n", device_get_nameunit(child)); return (NULL); } resource_list_add(&dinfo->mdi_resources, SYS_RES_IRQ, dinfo->mdi_ninterrupts, start, start, 1); dinfo->mdi_interrupts[dinfo->mdi_ninterrupts] = start; dinfo->mdi_ninterrupts++; } return (resource_list_alloc(&dinfo->mdi_resources, bus, child, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } rv = rman_reserve_resource(rm, adjstart, adjend, adjcount, flags, child); if (rv == NULL) { device_printf(bus, "failed to reserve resource %#lx - %#lx (%#lx) for %s\n", adjstart, adjend, adjcount, device_get_nameunit(child)); return (NULL); } rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv) != 0) { device_printf(bus, "failed to activate resource for %s\n", device_get_nameunit(child)); rman_release_resource(rv); return (NULL); } } return (rv); } static 
int macio_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { if (rman_get_flags(res) & RF_ACTIVE) { int error = bus_deactivate_resource(child, type, rid, res); if (error) return error; } return (rman_release_resource(res)); } static int macio_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { struct macio_softc *sc; void *p; sc = device_get_softc(bus); if (type == SYS_RES_IRQ) return (bus_activate_resource(bus, type, rid, res)); if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { p = pmap_mapdev((vm_offset_t)rman_get_start(res) + sc->sc_base, (vm_size_t)rman_get_size(res)); if (p == NULL) return (ENOMEM); rman_set_virtual(res, p); rman_set_bustag(res, &bs_le_tag); rman_set_bushandle(res, (u_long)p); } return (rman_activate_resource(res)); } static int macio_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { /* * If this is a memory resource, unmap it. */ if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { u_int32_t psize; psize = rman_get_size(res); pmap_unmapdev((vm_offset_t)rman_get_virtual(res), psize); } return (rman_deactivate_resource(res)); } static struct resource_list * macio_get_resource_list (device_t dev, device_t child) { struct macio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_resources); } static const struct ofw_bus_devinfo * macio_get_devinfo(device_t dev, device_t child) { struct macio_devinfo *dinfo; dinfo = device_get_ivars(child); return (&dinfo->mdi_obdinfo); } int macio_enable_wireless(device_t dev, bool enable) { struct macio_softc *sc = device_get_softc(dev); uint32_t x; if (enable) { x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x |= 0x4; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* Enable card slot. */ bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0f, 5); DELAY(1000); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0f, 4); DELAY(1000); x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x &= ~0x80000000; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* out8(gpio + 0x10, 4); */ bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0b, 0); bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0a, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_EXTINT_GPIO_REG_BASE + 0x0d, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0d, 0x28); bus_write_1(sc->sc_memr, KEYLARGO_GPIO_BASE + 0x0e, 0x28); bus_write_4(sc->sc_memr, 0x1c000, 0); /* Initialize the card. */ bus_write_4(sc->sc_memr, 0x1a3e0, 0x41); x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x |= 0x80000000; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); } else { x = bus_read_4(sc->sc_memr, KEYLARGO_FCR2); x &= ~0x4; bus_write_4(sc->sc_memr, KEYLARGO_FCR2, x); /* out8(gpio + 0x10, 0); */ } return (0); } Index: head/sys/powerpc/powermac/platform_powermac.c =================================================================== --- head/sys/powerpc/powermac/platform_powermac.c (revision 295879) +++ head/sys/powerpc/powermac/platform_powermac.c (revision 295880) @@ -1,406 +1,405 @@ /*- * Copyright (c) 2008 Marcel Moolenaar * Copyright (c) 2009 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include /* For save_vec() */ #include #include #include /* For save_fpu() */ #include #include -#include #include #include #include #include #include #include "platform_if.h" extern void *ap_pcpu; static int powermac_probe(platform_t); static int powermac_attach(platform_t); void powermac_mem_regions(platform_t, struct mem_region *phys, int *physsz, struct mem_region *avail, int *availsz); static u_long powermac_timebase_freq(platform_t, struct cpuref *cpuref); static int powermac_smp_first_cpu(platform_t, struct cpuref *cpuref); static int powermac_smp_next_cpu(platform_t, struct cpuref *cpuref); static int powermac_smp_get_bsp(platform_t, struct cpuref *cpuref); static int powermac_smp_start_cpu(platform_t, struct pcpu *cpu); static void powermac_reset(platform_t); static void powermac_sleep(platform_t); static platform_method_t powermac_methods[] = { PLATFORMMETHOD(platform_probe, powermac_probe), PLATFORMMETHOD(platform_attach, powermac_attach), PLATFORMMETHOD(platform_mem_regions, powermac_mem_regions), PLATFORMMETHOD(platform_timebase_freq, powermac_timebase_freq), PLATFORMMETHOD(platform_smp_first_cpu, powermac_smp_first_cpu), PLATFORMMETHOD(platform_smp_next_cpu, powermac_smp_next_cpu), PLATFORMMETHOD(platform_smp_get_bsp, powermac_smp_get_bsp), PLATFORMMETHOD(platform_smp_start_cpu, powermac_smp_start_cpu), PLATFORMMETHOD(platform_reset, powermac_reset), PLATFORMMETHOD(platform_sleep, powermac_sleep), PLATFORMMETHOD_END }; static platform_def_t powermac_platform = { "powermac", powermac_methods, 0 }; PLATFORM_DEF(powermac_platform); static int powermac_probe(platform_t plat) { char compat[255]; ssize_t compatlen; char *curstr; phandle_t root; root = OF_peer(0); if (root == 0) return (ENXIO); compatlen = OF_getprop(root, "compatible", compat, sizeof(compat)); for (curstr = compat; curstr < compat + compatlen; curstr += strlen(curstr) + 1) { if (strncmp(curstr, "MacRISC", 7) == 0) return (BUS_PROBE_SPECIFIC); } return (ENXIO); } void powermac_mem_regions(platform_t plat, struct mem_region *phys, int *physsz, struct mem_region *avail, int *availsz) { phandle_t memory; cell_t memoryprop[PHYS_AVAIL_SZ * 2]; ssize_t propsize, i, j; int physacells = 1; memory = OF_finddevice("/memory"); if (memory == -1) memory = OF_finddevice("/memory@0"); /* "reg" has variable #address-cells, but #size-cells is always 1 */ OF_getprop(OF_parent(memory), "#address-cells", &physacells, sizeof(physacells)); propsize = OF_getprop(memory, "reg", memoryprop, sizeof(memoryprop)); 
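/*
 * Each "reg" entry is #address-cells address words followed by a single
 * size cell.  The loop below folds two address cells into one 64-bit
 * start address where needed and, on 32-bit kernels, skips any region
 * that starts above 4 GB.  For example, with #address-cells = 2 the
 * cells 0x1 0x0 0x80000000 describe a 2 GB region starting at
 * 0x100000000.
 */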
propsize /= sizeof(cell_t); for (i = 0, j = 0; i < propsize; i += physacells+1, j++) { phys[j].mr_start = memoryprop[i]; if (physacells == 2) { #ifndef __powerpc64__ /* On 32-bit PPC, ignore regions starting above 4 GB */ if (memoryprop[i] != 0) { j--; continue; } #else phys[j].mr_start <<= 32; #endif phys[j].mr_start |= memoryprop[i+1]; } phys[j].mr_size = memoryprop[i + physacells]; } *physsz = j; /* "available" always has #address-cells = 1 */ propsize = OF_getprop(memory, "available", memoryprop, sizeof(memoryprop)); if (propsize <= 0) { for (i = 0; i < *physsz; i++) { avail[i].mr_start = phys[i].mr_start; avail[i].mr_size = phys[i].mr_size; } *availsz = *physsz; } else { propsize /= sizeof(cell_t); for (i = 0, j = 0; i < propsize; i += 2, j++) { avail[j].mr_start = memoryprop[i]; avail[j].mr_size = memoryprop[i + 1]; } #ifdef __powerpc64__ /* Add in regions above 4 GB to the available list */ for (i = 0; i < *physsz; i++) { if (phys[i].mr_start > BUS_SPACE_MAXADDR_32BIT) { avail[j].mr_start = phys[i].mr_start; avail[j].mr_size = phys[i].mr_size; j++; } } #endif *availsz = j; } } static int powermac_attach(platform_t plat) { phandle_t rootnode; char model[32]; /* * Quiesce Open Firmware on PowerMac11,2 and 12,1. It is * necessary there to shut down a background thread doing fan * management, and is harmful on other machines (it will make OF * shut off power to various system components it had turned on). * * Note: we don't need to worry about which OF module we are * using since this is called only from very early boot, within * OF's boot context. */ rootnode = OF_finddevice("/"); if (OF_getprop(rootnode, "model", model, sizeof(model)) > 0) { if (strcmp(model, "PowerMac11,2") == 0 || strcmp(model, "PowerMac12,1") == 0) { ofw_quiesce(); } } return (0); } static u_long powermac_timebase_freq(platform_t plat, struct cpuref *cpuref) { phandle_t phandle; int32_t ticks = -1; phandle = cpuref->cr_hwref; OF_getprop(phandle, "timebase-frequency", &ticks, sizeof(ticks)); if (ticks <= 0) panic("Unable to determine timebase frequency!"); return (ticks); } static int powermac_smp_fill_cpuref(struct cpuref *cpuref, phandle_t cpu) { cell_t cpuid; int res; cpuref->cr_hwref = cpu; res = OF_getprop(cpu, "reg", &cpuid, sizeof(cpuid)); /* * psim doesn't have a reg property, so assume 0 as for the * uniprocessor case in the CHRP spec. 
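 * Whatever value is found, only its low byte becomes cr_cpuid.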
*/ if (res < 0) { cpuid = 0; } cpuref->cr_cpuid = cpuid & 0xff; return (0); } static int powermac_smp_first_cpu(platform_t plat, struct cpuref *cpuref) { char buf[8]; phandle_t cpu, dev, root; int res; root = OF_peer(0); dev = OF_child(root); while (dev != 0) { res = OF_getprop(dev, "name", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpus") == 0) break; dev = OF_peer(dev); } if (dev == 0) { /* * psim doesn't have a name property on the /cpus node, * but it can be found directly */ dev = OF_finddevice("/cpus"); if (dev == -1) return (ENOENT); } cpu = OF_child(dev); while (cpu != 0) { res = OF_getprop(cpu, "device_type", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpu") == 0) break; cpu = OF_peer(cpu); } if (cpu == 0) return (ENOENT); return (powermac_smp_fill_cpuref(cpuref, cpu)); } static int powermac_smp_next_cpu(platform_t plat, struct cpuref *cpuref) { char buf[8]; phandle_t cpu; int res; cpu = OF_peer(cpuref->cr_hwref); while (cpu != 0) { res = OF_getprop(cpu, "device_type", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpu") == 0) break; cpu = OF_peer(cpu); } if (cpu == 0) return (ENOENT); return (powermac_smp_fill_cpuref(cpuref, cpu)); } static int powermac_smp_get_bsp(platform_t plat, struct cpuref *cpuref) { ihandle_t inst; phandle_t bsp, chosen; int res; chosen = OF_finddevice("/chosen"); if (chosen == -1) return (ENXIO); res = OF_getprop(chosen, "cpu", &inst, sizeof(inst)); if (res < 0) return (ENXIO); bsp = OF_instance_to_package(inst); return (powermac_smp_fill_cpuref(cpuref, bsp)); } static int powermac_smp_start_cpu(platform_t plat, struct pcpu *pc) { #ifdef SMP phandle_t cpu; volatile uint8_t *rstvec; static volatile uint8_t *rstvec_virtbase = NULL; int res, reset, timeout; cpu = pc->pc_hwref; res = OF_getprop(cpu, "soft-reset", &reset, sizeof(reset)); if (res < 0) { reset = 0x58; switch (pc->pc_cpuid) { case 0: reset += 0x03; break; case 1: reset += 0x04; break; case 2: reset += 0x0f; break; case 3: reset += 0x10; break; default: return (ENXIO); } } ap_pcpu = pc; if (rstvec_virtbase == NULL) rstvec_virtbase = pmap_mapdev(0x80000000, PAGE_SIZE); rstvec = rstvec_virtbase + reset; *rstvec = 4; powerpc_sync(); (void)(*rstvec); powerpc_sync(); DELAY(1); *rstvec = 0; powerpc_sync(); (void)(*rstvec); powerpc_sync(); timeout = 10000; while (!pc->pc_awake && timeout--) DELAY(100); return ((pc->pc_awake) ? 0 : EBUSY); #else /* No SMP support */ return (ENXIO); #endif } static void powermac_reset(platform_t platform) { OF_reboot(); } void powermac_sleep(platform_t platform) { *(unsigned long *)0x80 = 0x100; cpu_sleep(); } Index: head/sys/powerpc/powerpc/genassym.c =================================================================== --- head/sys/powerpc/powerpc/genassym.c (revision 295879) +++ head/sys/powerpc/powerpc/genassym.c (revision 295880) @@ -1,263 +1,262 @@ /*- * Copyright (c) 1982, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. 
Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb)); ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap)); ASSYM(PC_TEMPSAVE, offsetof(struct pcpu, pc_tempsave)); ASSYM(PC_DISISAVE, offsetof(struct pcpu, pc_disisave)); ASSYM(PC_DBSAVE, offsetof(struct pcpu, pc_dbsave)); ASSYM(PC_RESTORE, offsetof(struct pcpu, pc_restore)); #if defined(BOOKE) ASSYM(PC_BOOKE_CRITSAVE, offsetof(struct pcpu, pc_booke_critsave)); ASSYM(PC_BOOKE_MCHKSAVE, offsetof(struct pcpu, pc_booke_mchksave)); ASSYM(PC_BOOKE_TLBSAVE, offsetof(struct pcpu, pc_booke_tlbsave)); ASSYM(PC_BOOKE_TLB_LEVEL, offsetof(struct pcpu, pc_booke_tlb_level)); ASSYM(PC_BOOKE_TLB_LOCK, offsetof(struct pcpu, pc_booke_tlb_lock)); #endif ASSYM(CPUSAVE_R27, CPUSAVE_R27*sizeof(register_t)); ASSYM(CPUSAVE_R28, CPUSAVE_R28*sizeof(register_t)); ASSYM(CPUSAVE_R29, CPUSAVE_R29*sizeof(register_t)); ASSYM(CPUSAVE_R30, CPUSAVE_R30*sizeof(register_t)); ASSYM(CPUSAVE_R31, CPUSAVE_R31*sizeof(register_t)); ASSYM(CPUSAVE_SRR0, CPUSAVE_SRR0*sizeof(register_t)); ASSYM(CPUSAVE_SRR1, CPUSAVE_SRR1*sizeof(register_t)); ASSYM(CPUSAVE_AIM_DAR, CPUSAVE_AIM_DAR*sizeof(register_t)); ASSYM(CPUSAVE_AIM_DSISR, CPUSAVE_AIM_DSISR*sizeof(register_t)); ASSYM(CPUSAVE_BOOKE_DEAR, CPUSAVE_BOOKE_DEAR*sizeof(register_t)); ASSYM(CPUSAVE_BOOKE_ESR, CPUSAVE_BOOKE_ESR*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_LR, TLBSAVE_BOOKE_LR*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_CR, TLBSAVE_BOOKE_CR*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_SRR0, TLBSAVE_BOOKE_SRR0*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_SRR1, TLBSAVE_BOOKE_SRR1*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R20, TLBSAVE_BOOKE_R20*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R21, TLBSAVE_BOOKE_R21*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R22, TLBSAVE_BOOKE_R22*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R23, TLBSAVE_BOOKE_R23*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R24, TLBSAVE_BOOKE_R24*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R25, TLBSAVE_BOOKE_R25*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R26, TLBSAVE_BOOKE_R26*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R27, TLBSAVE_BOOKE_R27*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R28, TLBSAVE_BOOKE_R28*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R29, TLBSAVE_BOOKE_R29*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R30, TLBSAVE_BOOKE_R30*sizeof(register_t)); ASSYM(TLBSAVE_BOOKE_R31, 
TLBSAVE_BOOKE_R31*sizeof(register_t)); ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock)); #if defined(AIM) ASSYM(USER_ADDR, USER_ADDR); #ifdef __powerpc64__ ASSYM(PC_KERNSLB, offsetof(struct pcpu, pc_slb)); ASSYM(PC_USERSLB, offsetof(struct pcpu, pc_userslb)); ASSYM(PC_SLBSAVE, offsetof(struct pcpu, pc_slbsave)); ASSYM(PC_SLBSTACK, offsetof(struct pcpu, pc_slbstack)); ASSYM(USER_SLB_SLOT, USER_SLB_SLOT); ASSYM(USER_SLB_SLBE, USER_SLB_SLBE); ASSYM(SEGMENT_MASK, SEGMENT_MASK); #else ASSYM(PM_SR, offsetof(struct pmap, pm_sr)); ASSYM(USER_SR, USER_SR); #endif #elif defined(BOOKE) ASSYM(PM_PDIR, offsetof(struct pmap, pm_pdir)); /* * With pte_t being a bitfield struct, these fields cannot be addressed via * offsetof(). */ ASSYM(PTE_RPN, 0); ASSYM(PTE_FLAGS, sizeof(uint32_t)); #if defined(BOOKE_E500) ASSYM(TLB0_ENTRY_SIZE, sizeof(struct tlb_entry)); #endif #endif #ifdef __powerpc64__ ASSYM(FSP, 48); #else ASSYM(FSP, 8); #endif ASSYM(FRAMELEN, FRAMELEN); ASSYM(FRAME_0, offsetof(struct trapframe, fixreg[0])); ASSYM(FRAME_1, offsetof(struct trapframe, fixreg[1])); ASSYM(FRAME_2, offsetof(struct trapframe, fixreg[2])); ASSYM(FRAME_3, offsetof(struct trapframe, fixreg[3])); ASSYM(FRAME_4, offsetof(struct trapframe, fixreg[4])); ASSYM(FRAME_5, offsetof(struct trapframe, fixreg[5])); ASSYM(FRAME_6, offsetof(struct trapframe, fixreg[6])); ASSYM(FRAME_7, offsetof(struct trapframe, fixreg[7])); ASSYM(FRAME_8, offsetof(struct trapframe, fixreg[8])); ASSYM(FRAME_9, offsetof(struct trapframe, fixreg[9])); ASSYM(FRAME_10, offsetof(struct trapframe, fixreg[10])); ASSYM(FRAME_11, offsetof(struct trapframe, fixreg[11])); ASSYM(FRAME_12, offsetof(struct trapframe, fixreg[12])); ASSYM(FRAME_13, offsetof(struct trapframe, fixreg[13])); ASSYM(FRAME_14, offsetof(struct trapframe, fixreg[14])); ASSYM(FRAME_15, offsetof(struct trapframe, fixreg[15])); ASSYM(FRAME_16, offsetof(struct trapframe, fixreg[16])); ASSYM(FRAME_17, offsetof(struct trapframe, fixreg[17])); ASSYM(FRAME_18, offsetof(struct trapframe, fixreg[18])); ASSYM(FRAME_19, offsetof(struct trapframe, fixreg[19])); ASSYM(FRAME_20, offsetof(struct trapframe, fixreg[20])); ASSYM(FRAME_21, offsetof(struct trapframe, fixreg[21])); ASSYM(FRAME_22, offsetof(struct trapframe, fixreg[22])); ASSYM(FRAME_23, offsetof(struct trapframe, fixreg[23])); ASSYM(FRAME_24, offsetof(struct trapframe, fixreg[24])); ASSYM(FRAME_25, offsetof(struct trapframe, fixreg[25])); ASSYM(FRAME_26, offsetof(struct trapframe, fixreg[26])); ASSYM(FRAME_27, offsetof(struct trapframe, fixreg[27])); ASSYM(FRAME_28, offsetof(struct trapframe, fixreg[28])); ASSYM(FRAME_29, offsetof(struct trapframe, fixreg[29])); ASSYM(FRAME_30, offsetof(struct trapframe, fixreg[30])); ASSYM(FRAME_31, offsetof(struct trapframe, fixreg[31])); ASSYM(FRAME_LR, offsetof(struct trapframe, lr)); ASSYM(FRAME_CR, offsetof(struct trapframe, cr)); ASSYM(FRAME_CTR, offsetof(struct trapframe, ctr)); ASSYM(FRAME_XER, offsetof(struct trapframe, xer)); ASSYM(FRAME_SRR0, offsetof(struct trapframe, srr0)); ASSYM(FRAME_SRR1, offsetof(struct trapframe, srr1)); ASSYM(FRAME_EXC, offsetof(struct trapframe, exc)); ASSYM(FRAME_AIM_DAR, offsetof(struct trapframe, dar)); ASSYM(FRAME_AIM_DSISR, offsetof(struct trapframe, cpu.aim.dsisr)); ASSYM(FRAME_BOOKE_DEAR, offsetof(struct trapframe, dar)); ASSYM(FRAME_BOOKE_ESR, offsetof(struct trapframe, cpu.booke.esr)); ASSYM(FRAME_BOOKE_DBCR0, offsetof(struct trapframe, cpu.booke.dbcr0)); ASSYM(CF_FUNC, offsetof(struct callframe, cf_func)); ASSYM(CF_ARG0, offsetof(struct callframe, cf_arg0)); 
ASSYM(CF_ARG1, offsetof(struct callframe, cf_arg1)); ASSYM(CF_SIZE, sizeof(struct callframe)); ASSYM(PCB_CONTEXT, offsetof(struct pcb, pcb_context)); ASSYM(PCB_CR, offsetof(struct pcb, pcb_cr)); ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp)); ASSYM(PCB_TOC, offsetof(struct pcb, pcb_toc)); ASSYM(PCB_LR, offsetof(struct pcb, pcb_lr)); ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags)); ASSYM(PCB_FPU, PCB_FPU); ASSYM(PCB_VEC, PCB_VEC); ASSYM(PCB_AIM_USR_VSID, offsetof(struct pcb, pcb_cpu.aim.usr_vsid)); ASSYM(PCB_BOOKE_DBCR0, offsetof(struct pcb, pcb_cpu.booke.dbcr0)); ASSYM(TD_LOCK, offsetof(struct thread, td_lock)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace)); ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TDF_ASTPENDING, TDF_ASTPENDING); ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); ASSYM(SF_UC, offsetof(struct sigframe, sf_uc)); ASSYM(KERNBASE, KERNBASE); ASSYM(MAXCOMLEN, MAXCOMLEN); ASSYM(PSL_DE, PSL_DE); ASSYM(PSL_DS, PSL_DS); ASSYM(PSL_IS, PSL_IS); ASSYM(PSL_CE, PSL_CE); ASSYM(PSL_UCLE, PSL_UCLE); ASSYM(PSL_WE, PSL_WE); ASSYM(PSL_UBLE, PSL_UBLE); #if defined(BOOKE_E500) ASSYM(PSL_KERNSET_INIT, PSL_KERNSET_INIT); #endif #if defined(AIM) && defined(__powerpc64__) ASSYM(PSL_SF, PSL_SF); ASSYM(PSL_HV, PSL_HV); #endif ASSYM(PSL_POW, PSL_POW); ASSYM(PSL_ILE, PSL_ILE); ASSYM(PSL_LE, PSL_LE); ASSYM(PSL_SE, PSL_SE); ASSYM(PSL_RI, PSL_RI); ASSYM(PSL_DR, PSL_DR); ASSYM(PSL_IP, PSL_IP); ASSYM(PSL_IR, PSL_IR); ASSYM(PSL_FE_DIS, PSL_FE_DIS); ASSYM(PSL_FE_NONREC, PSL_FE_NONREC); ASSYM(PSL_FE_PREC, PSL_FE_PREC); ASSYM(PSL_FE_REC, PSL_FE_REC); ASSYM(PSL_VEC, PSL_VEC); ASSYM(PSL_BE, PSL_BE); ASSYM(PSL_EE, PSL_EE); ASSYM(PSL_FE0, PSL_FE0); ASSYM(PSL_FE1, PSL_FE1); ASSYM(PSL_FP, PSL_FP); ASSYM(PSL_ME, PSL_ME); ASSYM(PSL_PR, PSL_PR); ASSYM(PSL_PMM, PSL_PMM); ASSYM(PSL_KERNSET, PSL_KERNSET); ASSYM(PSL_USERSET, PSL_USERSET); ASSYM(PSL_USERSTATIC, PSL_USERSTATIC); Index: head/sys/powerpc/powerpc/trap.c =================================================================== --- head/sys/powerpc/powerpc/trap.c (revision 295879) +++ head/sys/powerpc/powerpc/trap.c (revision 295880) @@ -1,827 +1,826 @@ /*- * Copyright (C) 1995, 1996 Wolfgang Solfrank. * Copyright (C) 1995, 1996 TooLs GmbH. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by TooLs GmbH. * 4. The name of TooLs GmbH may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include /* Below matches setjmp.S */ #define FAULTBUF_LR 21 #define FAULTBUF_R1 1 #define FAULTBUF_R2 2 #define FAULTBUF_CR 22 #define FAULTBUF_R14 3 static void trap_fatal(struct trapframe *frame); static void printtrap(u_int vector, struct trapframe *frame, int isfatal, int user); static int trap_pfault(struct trapframe *frame, int user); static int fix_unaligned(struct thread *td, struct trapframe *frame); static int handle_onfault(struct trapframe *frame); static void syscall(struct trapframe *frame); #ifdef __powerpc64__ void handle_kernel_slb_spill(int, register_t, register_t); static int handle_user_slb_spill(pmap_t pm, vm_offset_t addr); extern int n_slbs; #endif struct powerpc_exception { u_int vector; char *name; }; #ifdef KDTRACE_HOOKS #include int (*dtrace_invop_jump_addr)(struct trapframe *); #endif static struct powerpc_exception powerpc_exceptions[] = { { EXC_CRIT, "critical input" }, { EXC_RST, "system reset" }, { EXC_MCHK, "machine check" }, { EXC_DSI, "data storage interrupt" }, { EXC_DSE, "data segment exception" }, { EXC_ISI, "instruction storage interrupt" }, { EXC_ISE, "instruction segment exception" }, { EXC_EXI, "external interrupt" }, { EXC_ALI, "alignment" }, { EXC_PGM, "program" }, { EXC_FPU, "floating-point unavailable" }, { EXC_APU, "auxiliary proc unavailable" }, { EXC_DECR, "decrementer" }, { EXC_FIT, "fixed-interval timer" }, { EXC_WDOG, "watchdog timer" }, { EXC_SC, "system call" }, { EXC_TRC, "trace" }, { EXC_FPA, "floating-point assist" }, { EXC_DEBUG, "debug" }, { EXC_PERF, "performance monitoring" }, { EXC_VEC, "altivec unavailable" }, { EXC_VSX, "vsx unavailable" }, { EXC_ITMISS, "instruction tlb miss" }, { EXC_DLMISS, "data load tlb miss" }, { EXC_DSMISS, "data store tlb miss" }, { EXC_BPT, "instruction breakpoint" }, { EXC_SMI, "system management" }, { EXC_VECAST_G4, "altivec assist" }, { EXC_THRM, "thermal management" }, { EXC_RUNMODETRC, "run mode/trace" }, { EXC_LAST, NULL } }; static const char * trapname(u_int vector) { struct powerpc_exception *pe; for (pe = powerpc_exceptions; pe->vector != EXC_LAST; pe++) { if (pe->vector == vector) return (pe->name); } return ("unknown"); } void trap(struct trapframe *frame) { struct thread *td; struct proc *p; #ifdef KDTRACE_HOOKS uint32_t inst; #endif int sig, type, user; u_int ucode; ksiginfo_t ksi; PCPU_INC(cnt.v_trap); td = curthread; p = td->td_proc; type = ucode = frame->exc; sig = 0; user = frame->srr1 & PSL_PR; CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name, trapname(type), user ? "user" : "kernel"); #ifdef KDTRACE_HOOKS /* * A trap can occur while DTrace executes a probe. 
Before * executing the probe, DTrace blocks re-scheduling and sets * a flag in its per-cpu flags to indicate that it doesn't * want to fault. On returning from the probe, the no-fault * flag is cleared and finally re-scheduling is enabled. * * If the DTrace kernel module has registered a trap handler, * call it and if it returns non-zero, assume that it has * handled the trap and modified the trap frame so that this * function can return normally. */ if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0) return; #endif if (user) { td->td_pticks = 0; td->td_frame = frame; if (td->td_cowgen != p->p_cowgen) thread_cow_update(td); /* User Mode Traps */ switch (type) { case EXC_RUNMODETRC: case EXC_TRC: frame->srr1 &= ~PSL_SE; sig = SIGTRAP; ucode = TRAP_TRACE; break; #ifdef __powerpc64__ case EXC_ISE: case EXC_DSE: if (handle_user_slb_spill(&p->p_vmspace->vm_pmap, (type == EXC_ISE) ? frame->srr0 : frame->dar) != 0){ sig = SIGSEGV; ucode = SEGV_MAPERR; } break; #endif case EXC_DSI: case EXC_ISI: sig = trap_pfault(frame, 1); if (sig == SIGSEGV) ucode = SEGV_MAPERR; break; case EXC_SC: syscall(frame); break; case EXC_FPU: KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU, ("FPU already enabled for thread")); enable_fpu(td); break; case EXC_VEC: KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC, ("Altivec already enabled for thread")); enable_vec(td); break; case EXC_VSX: KASSERT((td->td_pcb->pcb_flags & PCB_VSX) != PCB_VSX, ("VSX already enabled for thread")); if (!(td->td_pcb->pcb_flags & PCB_VEC)) enable_vec(td); if (!(td->td_pcb->pcb_flags & PCB_FPU)) save_fpu(td); td->td_pcb->pcb_flags |= PCB_VSX; enable_fpu(td); break; case EXC_VECAST_E: case EXC_VECAST_G4: case EXC_VECAST_G5: /* * We get a VPU assist exception for IEEE mode * vector operations on denormalized floats. * Emulating this is a giant pain, so for now, * just switch off IEEE mode and treat them as * zero. */ save_vec(td); td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ; enable_vec(td); break; case EXC_ALI: if (fix_unaligned(td, frame) != 0) { sig = SIGBUS; ucode = BUS_ADRALN; } else frame->srr0 += 4; break; case EXC_DEBUG: /* Single stepping */ mtspr(SPR_DBSR, mfspr(SPR_DBSR)); frame->srr1 &= ~PSL_DE; frame->cpu.booke.dbcr0 &= ~(DBCR0_IDM || DBCR0_IC); sig = SIGTRAP; ucode = TRAP_TRACE; break; case EXC_PGM: /* Identify the trap reason */ #ifdef AIM if (frame->srr1 & EXC_PGM_TRAP) { #else if (frame->cpu.booke.esr & ESR_PTR) { #endif #ifdef KDTRACE_HOOKS inst = fuword32((const void *)frame->srr0); if (inst == 0x0FFFDDDD && dtrace_pid_probe_ptr != NULL) { struct reg regs; fill_regs(td, ®s); (*dtrace_pid_probe_ptr)(®s); break; } #endif sig = SIGTRAP; ucode = TRAP_BRKPT; } else { sig = ppc_instr_emulate(frame, td->td_pcb); if (sig == SIGILL) { if (frame->srr1 & EXC_PGM_PRIV) ucode = ILL_PRVOPC; else if (frame->srr1 & EXC_PGM_ILLEGAL) ucode = ILL_ILLOPC; } else if (sig == SIGFPE) ucode = FPE_FLTINV; /* Punt for now, invalid operation. */ } break; case EXC_MCHK: /* * Note that this may not be recoverable for the user * process, depending on the type of machine check, * but it at least prevents the kernel from dying. 
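 * The trap is converted below into SIGBUS with code BUS_OBJERR.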
*/ sig = SIGBUS; ucode = BUS_OBJERR; break; default: trap_fatal(frame); } } else { /* Kernel Mode Traps */ KASSERT(cold || td->td_ucred != NULL, ("kernel trap doesn't have ucred")); switch (type) { #ifdef KDTRACE_HOOKS case EXC_PGM: if (frame->srr1 & EXC_PGM_TRAP) { if (*(uint32_t *)frame->srr0 == EXC_DTRACE) { if (dtrace_invop_jump_addr != NULL) { dtrace_invop_jump_addr(frame); return; } } } break; #endif #ifdef __powerpc64__ case EXC_DSE: if ((frame->dar & SEGMENT_MASK) == USER_ADDR) { __asm __volatile ("slbmte %0, %1" :: "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE)); return; } break; #endif case EXC_DSI: if (trap_pfault(frame, 0) == 0) return; break; case EXC_MCHK: if (handle_onfault(frame)) return; break; default: break; } trap_fatal(frame); } if (sig != 0) { if (p->p_sysent->sv_transtrap != NULL) sig = (p->p_sysent->sv_transtrap)(sig, type); ksiginfo_init_trap(&ksi); ksi.ksi_signo = sig; ksi.ksi_code = (int) ucode; /* XXX, not POSIX */ /* ksi.ksi_addr = ? */ ksi.ksi_trapno = type; trapsignal(td, &ksi); } userret(td, frame); } static void trap_fatal(struct trapframe *frame) { printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR)); #ifdef KDB if ((debugger_on_panic || kdb_active) && kdb_trap(frame->exc, 0, frame)) return; #endif panic("%s trap", trapname(frame->exc)); } static void printtrap(u_int vector, struct trapframe *frame, int isfatal, int user) { uint16_t ver; #ifdef BOOKE vm_paddr_t pa; #endif printf("\n"); printf("%s %s trap:\n", isfatal ? "fatal" : "handled", user ? "user" : "kernel"); printf("\n"); printf(" exception = 0x%x (%s)\n", vector, trapname(vector)); switch (vector) { case EXC_DSE: case EXC_DSI: case EXC_DTMISS: printf(" virtual address = 0x%" PRIxPTR "\n", frame->dar); #ifdef AIM printf(" dsisr = 0x%lx\n", (u_long)frame->cpu.aim.dsisr); #endif break; case EXC_ISE: case EXC_ISI: case EXC_ITMISS: printf(" virtual address = 0x%" PRIxPTR "\n", frame->srr0); break; case EXC_MCHK: ver = mfpvr() >> 16; #if defined(AIM) if (MPC745X_P(ver)) printf(" msssr0 = 0x%lx\n", (u_long)mfspr(SPR_MSSSR0)); #elif defined(BOOKE) pa = mfspr(SPR_MCARU); pa = (pa << 32) | mfspr(SPR_MCAR); printf(" mcsr = 0x%lx\n", (u_long)mfspr(SPR_MCSR)); printf(" mcar = 0x%jx\n", (uintmax_t)pa); #endif break; } #ifdef BOOKE printf(" esr = 0x%" PRIxPTR "\n", frame->cpu.booke.esr); #endif printf(" srr0 = 0x%" PRIxPTR "\n", frame->srr0); printf(" srr1 = 0x%lx\n", (u_long)frame->srr1); printf(" lr = 0x%" PRIxPTR "\n", frame->lr); printf(" curthread = %p\n", curthread); if (curthread != NULL) printf(" pid = %d, comm = %s\n", curthread->td_proc->p_pid, curthread->td_name); printf("\n"); } /* * Handles a fatal fault when we have onfault state to recover. Returns * non-zero if there was onfault recovery state available. 
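 * Recovery reloads SRR0 from the saved LR and restores r1, r2, CR and
 * r14-r31 from the jmp_buf, forcing r3 to 1 so that the earlier
 * setjmp() in the faulting routine appears to return a second time.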
*/ static int handle_onfault(struct trapframe *frame) { struct thread *td; jmp_buf *fb; td = curthread; fb = td->td_pcb->pcb_onfault; if (fb != NULL) { frame->srr0 = (*fb)->_jb[FAULTBUF_LR]; frame->fixreg[1] = (*fb)->_jb[FAULTBUF_R1]; frame->fixreg[2] = (*fb)->_jb[FAULTBUF_R2]; frame->fixreg[3] = 1; frame->cr = (*fb)->_jb[FAULTBUF_CR]; bcopy(&(*fb)->_jb[FAULTBUF_R14], &frame->fixreg[14], 18 * sizeof(register_t)); td->td_pcb->pcb_onfault = NULL; /* Returns twice, not thrice */ return (1); } return (0); } int cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa) { struct proc *p; struct trapframe *frame; caddr_t params; size_t argsz; int error, n, i; p = td->td_proc; frame = td->td_frame; sa->code = frame->fixreg[0]; params = (caddr_t)(frame->fixreg + FIRSTARG); n = NARGREG; if (sa->code == SYS_syscall) { /* * code is first argument, * followed by actual args. */ sa->code = *(register_t *) params; params += sizeof(register_t); n -= 1; } else if (sa->code == SYS___syscall) { /* * Like syscall, but code is a quad, * so as to maintain quad alignment * for the rest of the args. */ if (SV_PROC_FLAG(p, SV_ILP32)) { params += sizeof(register_t); sa->code = *(register_t *) params; params += sizeof(register_t); n -= 2; } else { sa->code = *(register_t *) params; params += sizeof(register_t); n -= 1; } } if (p->p_sysent->sv_mask) sa->code &= p->p_sysent->sv_mask; if (sa->code >= p->p_sysent->sv_size) sa->callp = &p->p_sysent->sv_table[0]; else sa->callp = &p->p_sysent->sv_table[sa->code]; sa->narg = sa->callp->sy_narg; if (SV_PROC_FLAG(p, SV_ILP32)) { argsz = sizeof(uint32_t); for (i = 0; i < n; i++) sa->args[i] = ((u_register_t *)(params))[i] & 0xffffffff; } else { argsz = sizeof(uint64_t); for (i = 0; i < n; i++) sa->args[i] = ((u_register_t *)(params))[i]; } if (sa->narg > n) error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n, (sa->narg - n) * argsz); else error = 0; #ifdef __powerpc64__ if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) { /* Expand the size of arguments copied from the stack */ for (i = sa->narg; i >= n; i--) sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n]; } #endif if (error == 0) { td->td_retval[0] = 0; td->td_retval[1] = frame->fixreg[FIRSTARG + 1]; } return (error); } #include "../../kern/subr_syscall.c" void syscall(struct trapframe *frame) { struct thread *td; struct syscall_args sa; int error; td = curthread; td->td_frame = frame; #ifdef __powerpc64__ /* * Speculatively restore last user SLB segment, which we know is * invalid already, since we are likely to do copyin()/copyout(). */ __asm __volatile ("slbmte %0, %1; isync" :: "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE)); #endif error = syscallenter(td, &sa); syscallret(td, error, &sa); } #ifdef __powerpc64__ /* Handle kernel SLB faults -- runs in real mode, all seat belts off */ void handle_kernel_slb_spill(int type, register_t dar, register_t srr0) { struct slb *slbcache; uint64_t slbe, slbv; uint64_t esid, addr; int i; addr = (type == EXC_ISE) ? 
srr0 : dar; slbcache = PCPU_GET(slb); esid = (uintptr_t)addr >> ADDR_SR_SHFT; slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID; /* See if the hardware flushed this somehow (can happen in LPARs) */ for (i = 0; i < n_slbs; i++) if (slbcache[i].slbe == (slbe | (uint64_t)i)) return; /* Not in the map, needs to actually be added */ slbv = kernel_va_to_slbv(addr); if (slbcache[USER_SLB_SLOT].slbe == 0) { for (i = 0; i < n_slbs; i++) { if (i == USER_SLB_SLOT) continue; if (!(slbcache[i].slbe & SLBE_VALID)) goto fillkernslb; } if (i == n_slbs) slbcache[USER_SLB_SLOT].slbe = 1; } /* Sacrifice a random SLB entry that is not the user entry */ i = mftb() % n_slbs; if (i == USER_SLB_SLOT) i = (i+1) % n_slbs; fillkernslb: /* Write new entry */ slbcache[i].slbv = slbv; slbcache[i].slbe = slbe | (uint64_t)i; /* Trap handler will restore from cache on exit */ } static int handle_user_slb_spill(pmap_t pm, vm_offset_t addr) { struct slb *user_entry; uint64_t esid; int i; esid = (uintptr_t)addr >> ADDR_SR_SHFT; PMAP_LOCK(pm); user_entry = user_va_to_slb_entry(pm, addr); if (user_entry == NULL) { /* allocate_vsid auto-spills it */ (void)allocate_user_vsid(pm, esid, 0); } else { /* * Check that another CPU has not already mapped this. * XXX: Per-thread SLB caches would be better. */ for (i = 0; i < pm->pm_slb_len; i++) if (pm->pm_slb[i] == user_entry) break; if (i == pm->pm_slb_len) slb_insert_user(pm, user_entry); } PMAP_UNLOCK(pm); return (0); } #endif static int trap_pfault(struct trapframe *frame, int user) { vm_offset_t eva, va; struct thread *td; struct proc *p; vm_map_t map; vm_prot_t ftype; int rv; #ifdef AIM register_t user_sr; #endif td = curthread; p = td->td_proc; if (frame->exc == EXC_ISI) { eva = frame->srr0; ftype = VM_PROT_EXECUTE; if (frame->srr1 & SRR1_ISI_PFAULT) ftype |= VM_PROT_READ; } else { eva = frame->dar; #ifdef BOOKE if (frame->cpu.booke.esr & ESR_ST) #else if (frame->cpu.aim.dsisr & DSISR_STORE) #endif ftype = VM_PROT_WRITE; else ftype = VM_PROT_READ; } if (user) { KASSERT(p->p_vmspace != NULL, ("trap_pfault: vmspace NULL")); map = &p->p_vmspace->vm_map; } else { #ifdef BOOKE if (eva < VM_MAXUSER_ADDRESS) { #else if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) { #endif map = &p->p_vmspace->vm_map; #ifdef AIM user_sr = td->td_pcb->pcb_cpu.aim.usr_segm; eva &= ADDR_PIDX | ADDR_POFF; eva |= user_sr << ADDR_SR_SHFT; #endif } else { map = kernel_map; } } va = trunc_page(eva); /* Fault in the page. */ rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL); /* * XXXDTRACE: add dtrace_doubletrap_func here? */ if (rv == KERN_SUCCESS) return (0); if (!user && handle_onfault(frame)) return (0); return (SIGSEGV); } /* * For now, this only deals with the particular unaligned access case * that gcc tends to generate. Eventually it should handle all of the * possibilities that can happen on a 32-bit PowerPC in big-endian mode. */ static int fix_unaligned(struct thread *td, struct trapframe *frame) { struct thread *fputhread; int indicator, reg; double *fpr; indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr); switch (indicator) { case EXC_ALI_LFD: case EXC_ALI_STFD: reg = EXC_ALI_RST(frame->cpu.aim.dsisr); fpr = &td->td_pcb->pcb_fpu.fpr[reg].fpr; fputhread = PCPU_GET(fputhread); /* Juggle the FPU to ensure that we've initialized * the FPRs, and that their current state is in * the PCB. 
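 * Once the FPU state has been synced into the PCB, the misaligned
 * lfd/stfd is emulated by copying the double directly between user
 * memory and the saved FPR.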
*/ if (fputhread != td) { if (fputhread) save_fpu(fputhread); enable_fpu(td); } save_fpu(td); if (indicator == EXC_ALI_LFD) { if (copyin((void *)frame->dar, fpr, sizeof(double)) != 0) return (-1); enable_fpu(td); } else { if (copyout(fpr, (void *)frame->dar, sizeof(double)) != 0) return (-1); } return (0); break; } return (-1); } #ifdef KDB int db_trap_glue(struct trapframe *); /* Called from trap_subr.S */ int db_trap_glue(struct trapframe *frame) { if (!(frame->srr1 & PSL_PR) && (frame->exc == EXC_TRC || frame->exc == EXC_RUNMODETRC #ifdef AIM || (frame->exc == EXC_PGM && (frame->srr1 & EXC_PGM_TRAP)) #else || (frame->exc == EXC_DEBUG) #endif || frame->exc == EXC_BPT || frame->exc == EXC_DSI)) { int type = frame->exc; /* Ignore DTrace traps. */ if (*(uint32_t *)frame->srr0 == EXC_DTRACE) return (0); #ifdef AIM if (type == EXC_PGM && (frame->srr1 & EXC_PGM_TRAP)) { #else if (frame->cpu.booke.esr & ESR_PTR) { #endif type = T_BREAKPOINT; } return (kdb_trap(type, 0, frame)); } return (0); } #endif Index: head/sys/powerpc/ps3/if_glc.c =================================================================== --- head/sys/powerpc/ps3/if_glc.c (revision 295879) +++ head/sys/powerpc/ps3/if_glc.c (revision 295880) @@ -1,961 +1,960 @@ /*- * Copyright (C) 2010 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include "ps3bus.h" #include "ps3-hvcall.h" #include "if_glcreg.h" static int glc_probe(device_t); static int glc_attach(device_t); static void glc_init(void *xsc); static void glc_start(struct ifnet *ifp); static int glc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); static void glc_set_multicast(struct glc_softc *sc); static int glc_add_rxbuf(struct glc_softc *sc, int idx); static int glc_add_rxbuf_dma(struct glc_softc *sc, int idx); static int glc_encap(struct glc_softc *sc, struct mbuf **m_head, bus_addr_t *pktdesc); static int glc_intr_filter(void *xsc); static void glc_intr(void *xsc); static void glc_tick(void *xsc); static void glc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr); static int glc_media_change(struct ifnet *ifp); static MALLOC_DEFINE(M_GLC, "gelic", "PS3 GELIC ethernet"); static device_method_t glc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, glc_probe), DEVMETHOD(device_attach, glc_attach), { 0, 0 } }; static driver_t glc_driver = { "glc", glc_methods, sizeof(struct glc_softc) }; static devclass_t glc_devclass; DRIVER_MODULE(glc, ps3bus, glc_driver, glc_devclass, 0, 0); static int glc_probe(device_t dev) { if (ps3bus_get_bustype(dev) != PS3_BUSTYPE_SYSBUS || ps3bus_get_devtype(dev) != PS3_DEVTYPE_GELIC) return (ENXIO); device_set_desc(dev, "Playstation 3 GELIC Network Controller"); return (BUS_PROBE_SPECIFIC); } static void glc_getphys(void *xaddr, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; *(bus_addr_t *)xaddr = segs[0].ds_addr; } static int glc_attach(device_t dev) { struct glc_softc *sc; struct glc_txsoft *txs; uint64_t mac64, val, junk; int i, err; sc = device_get_softc(dev); sc->sc_bus = ps3bus_get_bus(dev); sc->sc_dev = ps3bus_get_device(dev); sc->sc_self = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); sc->next_txdma_slot = 0; sc->bsy_txdma_slots = 0; sc->sc_next_rxdma_slot = 0; sc->first_used_txdma_slot = -1; /* * Shut down existing tasks. 
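 * Both the transmit and receive DMA engines are stopped through the
 * lv1 hypervisor calls below before the interface is configured.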
*/ lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0); lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0); sc->sc_ifp = if_alloc(IFT_ETHER); sc->sc_ifp->if_softc = sc; /* * Get MAC address and VLAN id */ lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_MAC_ADDRESS, 0, 0, 0, &mac64, &junk); memcpy(sc->sc_enaddr, &((uint8_t *)&mac64)[2], sizeof(sc->sc_enaddr)); sc->sc_tx_vlan = sc->sc_rx_vlan = -1; err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID, GELIC_VLAN_TX_ETHERNET, 0, 0, &val, &junk); if (err == 0) sc->sc_tx_vlan = val; err = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_VLAN_ID, GELIC_VLAN_RX_ETHERNET, 0, 0, &val, &junk); if (err == 0) sc->sc_rx_vlan = val; /* * Set up interrupt handler */ sc->sc_irqid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqid, RF_ACTIVE); if (sc->sc_irq == NULL) { device_printf(dev, "Could not allocate IRQ!\n"); mtx_destroy(&sc->sc_mtx); return (ENXIO); } bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE | INTR_ENTROPY, glc_intr_filter, glc_intr, sc, &sc->sc_irqctx); sc->sc_hwirq_status = (uint64_t *)contigmalloc(8, M_GLC, M_ZERO, 0, BUS_SPACE_MAXADDR_32BIT, 8, PAGE_SIZE); lv1_net_set_interrupt_status_indicator(sc->sc_bus, sc->sc_dev, vtophys(sc->sc_hwirq_status), 0); lv1_net_set_interrupt_mask(sc->sc_bus, sc->sc_dev, GELIC_INT_RXDONE | GELIC_INT_RXFRAME | GELIC_INT_PHY | GELIC_INT_TX_CHAIN_END, 0); /* * Set up DMA. */ err = bus_dma_tag_create(bus_get_dma_tag(dev), 32, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 129*sizeof(struct glc_dmadesc), 1, 128*sizeof(struct glc_dmadesc), 0, NULL,NULL, &sc->sc_dmadesc_tag); err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_txdmadesc, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_txdmadesc_map); err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map, sc->sc_txdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys, &sc->sc_txdmadesc_phys, 0); err = bus_dmamem_alloc(sc->sc_dmadesc_tag, (void **)&sc->sc_rxdmadesc, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_rxdmadesc_map); err = bus_dmamap_load(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map, sc->sc_rxdmadesc, 128*sizeof(struct glc_dmadesc), glc_getphys, &sc->sc_rxdmadesc_phys, 0); err = bus_dma_tag_create(bus_get_dma_tag(dev), 128, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,NULL, &sc->sc_rxdma_tag); err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 16, BUS_SPACE_MAXSIZE_32BIT, 0, NULL,NULL, &sc->sc_txdma_tag); /* init transmit descriptors */ STAILQ_INIT(&sc->sc_txfreeq); STAILQ_INIT(&sc->sc_txdirtyq); /* create TX DMA maps */ err = ENOMEM; for (i = 0; i < GLC_MAX_TX_PACKETS; i++) { txs = &sc->sc_txsoft[i]; txs->txs_mbuf = NULL; err = bus_dmamap_create(sc->sc_txdma_tag, 0, &txs->txs_dmamap); if (err) { device_printf(dev, "unable to create TX DMA map %d, error = %d\n", i, err); } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); } /* Create the receive buffer DMA maps. 
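 * One DMA map is created per receive slot; mbufs are attached to the
 * slots later by glc_add_rxbuf().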
*/ for (i = 0; i < GLC_MAX_RX_PACKETS; i++) { err = bus_dmamap_create(sc->sc_rxdma_tag, 0, &sc->sc_rxsoft[i].rxs_dmamap); if (err) { device_printf(dev, "unable to create RX DMA map %d, error = %d\n", i, err); } sc->sc_rxsoft[i].rxs_mbuf = NULL; } /* * Attach to network stack */ if_initname(sc->sc_ifp, device_get_name(dev), device_get_unit(dev)); sc->sc_ifp->if_mtu = ETHERMTU; sc->sc_ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; sc->sc_ifp->if_hwassist = CSUM_TCP | CSUM_UDP; sc->sc_ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_RXCSUM; sc->sc_ifp->if_capenable = IFCAP_HWCSUM | IFCAP_RXCSUM; sc->sc_ifp->if_start = glc_start; sc->sc_ifp->if_ioctl = glc_ioctl; sc->sc_ifp->if_init = glc_init; ifmedia_init(&sc->sc_media, IFM_IMASK, glc_media_change, glc_media_status); ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T, 0, NULL); ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO); IFQ_SET_MAXLEN(&sc->sc_ifp->if_snd, GLC_MAX_TX_PACKETS); sc->sc_ifp->if_snd.ifq_drv_maxlen = GLC_MAX_TX_PACKETS; IFQ_SET_READY(&sc->sc_ifp->if_snd); ether_ifattach(sc->sc_ifp, sc->sc_enaddr); sc->sc_ifp->if_hwassist = 0; return (0); mtx_destroy(&sc->sc_mtx); if_free(sc->sc_ifp); return (ENXIO); } static void glc_init_locked(struct glc_softc *sc) { int i, error; struct glc_rxsoft *rxs; struct glc_txsoft *txs; mtx_assert(&sc->sc_mtx, MA_OWNED); lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0); lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0); glc_set_multicast(sc); for (i = 0; i < GLC_MAX_RX_PACKETS; i++) { rxs = &sc->sc_rxsoft[i]; rxs->rxs_desc_slot = i; if (rxs->rxs_mbuf == NULL) { glc_add_rxbuf(sc, i); if (rxs->rxs_mbuf == NULL) { rxs->rxs_desc_slot = -1; break; } } glc_add_rxbuf_dma(sc, i); bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map, BUS_DMASYNC_PREREAD); } /* Clear TX dirty queue */ while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap); if (txs->txs_mbuf != NULL) { m_freem(txs->txs_mbuf); txs->txs_mbuf = NULL; } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); } sc->first_used_txdma_slot = -1; sc->bsy_txdma_slots = 0; error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev, sc->sc_rxsoft[0].rxs_desc, 0); if (error != 0) device_printf(sc->sc_self, "lv1_net_start_rx_dma error: %d\n", error); sc->sc_ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->sc_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->sc_ifpflags = sc->sc_ifp->if_flags; sc->sc_wdog_timer = 0; callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc); } static void glc_stop(void *xsc) { struct glc_softc *sc = xsc; mtx_assert(&sc->sc_mtx, MA_OWNED); lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0); lv1_net_stop_rx_dma(sc->sc_bus, sc->sc_dev, 0); } static void glc_init(void *xsc) { struct glc_softc *sc = xsc; mtx_lock(&sc->sc_mtx); glc_init_locked(sc); mtx_unlock(&sc->sc_mtx); } static void glc_tick(void *xsc) { struct glc_softc *sc = xsc; mtx_assert(&sc->sc_mtx, MA_OWNED); /* * XXX: Sometimes the RX queue gets stuck. Poke it periodically until * we figure out why. This will fail harmlessly if the RX queue is * already running. 
*/ lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev, sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0); if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) { callout_reset(&sc->sc_tick_ch, hz, glc_tick, sc); return; } /* Problems */ device_printf(sc->sc_self, "device timeout\n"); glc_init_locked(sc); } static void glc_start_locked(struct ifnet *ifp) { struct glc_softc *sc = ifp->if_softc; bus_addr_t first, pktdesc; int kickstart = 0; int error; struct mbuf *mb_head; mtx_assert(&sc->sc_mtx, MA_OWNED); first = 0; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (STAILQ_EMPTY(&sc->sc_txdirtyq)) kickstart = 1; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { IFQ_DRV_DEQUEUE(&ifp->if_snd, mb_head); if (mb_head == NULL) break; /* Check if the ring buffer is full */ if (sc->bsy_txdma_slots > 125) { /* Put the packet back and stop */ ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, mb_head); break; } BPF_MTAP(ifp, mb_head); if (sc->sc_tx_vlan >= 0) mb_head = ether_vlanencap(mb_head, sc->sc_tx_vlan); if (glc_encap(sc, &mb_head, &pktdesc)) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } if (first == 0) first = pktdesc; } if (kickstart && first != 0) { error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev, first, 0); if (error != 0) device_printf(sc->sc_self, "lv1_net_start_tx_dma error: %d\n", error); sc->sc_wdog_timer = 5; } } static void glc_start(struct ifnet *ifp) { struct glc_softc *sc = ifp->if_softc; mtx_lock(&sc->sc_mtx); glc_start_locked(ifp); mtx_unlock(&sc->sc_mtx); } static int glc_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct glc_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int err = 0; switch (cmd) { case SIOCSIFFLAGS: mtx_lock(&sc->sc_mtx); if ((ifp->if_flags & IFF_UP) != 0) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0 && ((ifp->if_flags ^ sc->sc_ifpflags) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) glc_set_multicast(sc); else glc_init_locked(sc); } else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) glc_stop(sc); sc->sc_ifpflags = ifp->if_flags; mtx_unlock(&sc->sc_mtx); break; case SIOCADDMULTI: case SIOCDELMULTI: mtx_lock(&sc->sc_mtx); glc_set_multicast(sc); mtx_unlock(&sc->sc_mtx); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: err = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd); break; default: err = ether_ioctl(ifp, cmd, data); break; } return (err); } static void glc_set_multicast(struct glc_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct ifmultiaddr *inm; uint64_t addr; int naddrs; /* Clear multicast filter */ lv1_net_remove_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1); /* Add broadcast */ lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, 0xffffffffffffL, 0); if ((ifp->if_flags & IFF_ALLMULTI) != 0) { lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1); } else { if_maddr_rlock(ifp); naddrs = 1; /* Include broadcast */ TAILQ_FOREACH(inm, &ifp->if_multiaddrs, ifma_link) { if (inm->ifma_addr->sa_family != AF_LINK) continue; addr = 0; memcpy(&((uint8_t *)(&addr))[2], LLADDR((struct sockaddr_dl *)inm->ifma_addr), ETHER_ADDR_LEN); lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, addr, 0); /* * Filter can only hold 32 addresses, so fall back to * the IFF_ALLMULTI case if we have too many. 
*/ if (++naddrs >= 32) { lv1_net_add_multicast_address(sc->sc_bus, sc->sc_dev, 0, 1); break; } } if_maddr_runlock(ifp); } } static int glc_add_rxbuf(struct glc_softc *sc, int idx) { struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx]; struct mbuf *m; bus_dma_segment_t segs[1]; int error, nsegs; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; if (rxs->rxs_mbuf != NULL) { bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_rxdma_tag, rxs->rxs_dmamap); } error = bus_dmamap_load_mbuf_sg(sc->sc_rxdma_tag, rxs->rxs_dmamap, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_self, "cannot load RS DMA map %d, error = %d\n", idx, error); m_freem(m); return (error); } /* If nsegs is wrong then the stack is corrupt. */ KASSERT(nsegs == 1, ("%s: too many DMA segments (%d)", __func__, nsegs)); rxs->rxs_mbuf = m; rxs->segment = segs[0]; bus_dmamap_sync(sc->sc_rxdma_tag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD); return (0); } static int glc_add_rxbuf_dma(struct glc_softc *sc, int idx) { struct glc_rxsoft *rxs = &sc->sc_rxsoft[idx]; bzero(&sc->sc_rxdmadesc[idx], sizeof(sc->sc_rxdmadesc[idx])); sc->sc_rxdmadesc[idx].paddr = rxs->segment.ds_addr; sc->sc_rxdmadesc[idx].len = rxs->segment.ds_len; sc->sc_rxdmadesc[idx].next = sc->sc_rxdmadesc_phys + ((idx + 1) % GLC_MAX_RX_PACKETS)*sizeof(sc->sc_rxdmadesc[idx]); sc->sc_rxdmadesc[idx].cmd_stat = GELIC_DESCR_OWNED; rxs->rxs_desc_slot = idx; rxs->rxs_desc = sc->sc_rxdmadesc_phys + idx*sizeof(struct glc_dmadesc); return (0); } static int glc_encap(struct glc_softc *sc, struct mbuf **m_head, bus_addr_t *pktdesc) { bus_dma_segment_t segs[16]; struct glc_txsoft *txs; struct mbuf *m; bus_addr_t firstslotphys; int i, idx, nsegs, nsegs_max; int err = 0; /* Max number of segments is the number of free DMA slots */ nsegs_max = 128 - sc->bsy_txdma_slots; if (nsegs_max > 16 || sc->first_used_txdma_slot < 0) nsegs_max = 16; /* Get a work queue entry. */ if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { /* Ran out of descriptors. 
*/ return (ENOBUFS); } nsegs = 0; for (m = *m_head; m != NULL; m = m->m_next) nsegs++; if (nsegs > nsegs_max) { m = m_collapse(*m_head, M_NOWAIT, nsegs_max); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; } err = bus_dmamap_load_mbuf_sg(sc->sc_txdma_tag, txs->txs_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT); if (err != 0) { m_freem(*m_head); *m_head = NULL; return (err); } KASSERT(nsegs <= 128 - sc->bsy_txdma_slots, ("GLC: Mapped too many (%d) DMA segments with %d available", nsegs, 128 - sc->bsy_txdma_slots)); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } txs->txs_ndescs = nsegs; txs->txs_firstdesc = sc->next_txdma_slot; idx = txs->txs_firstdesc; firstslotphys = sc->sc_txdmadesc_phys + txs->txs_firstdesc*sizeof(struct glc_dmadesc); for (i = 0; i < nsegs; i++) { bzero(&sc->sc_txdmadesc[idx], sizeof(sc->sc_txdmadesc[idx])); sc->sc_txdmadesc[idx].paddr = segs[i].ds_addr; sc->sc_txdmadesc[idx].len = segs[i].ds_len; sc->sc_txdmadesc[idx].next = sc->sc_txdmadesc_phys + ((idx + 1) % GLC_MAX_TX_PACKETS)*sizeof(struct glc_dmadesc); sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_NOIPSEC; if (i+1 == nsegs) { txs->txs_lastdesc = idx; sc->sc_txdmadesc[idx].next = 0; sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_LAST; } if ((*m_head)->m_pkthdr.csum_flags & CSUM_TCP) sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_TCP; if ((*m_head)->m_pkthdr.csum_flags & CSUM_UDP) sc->sc_txdmadesc[idx].cmd_stat |= GELIC_CMDSTAT_CSUM_UDP; sc->sc_txdmadesc[idx].cmd_stat |= GELIC_DESCR_OWNED; idx = (idx + 1) % GLC_MAX_TX_PACKETS; } sc->next_txdma_slot = idx; sc->bsy_txdma_slots += nsegs; if (txs->txs_firstdesc != 0) idx = txs->txs_firstdesc - 1; else idx = GLC_MAX_TX_PACKETS - 1; if (sc->first_used_txdma_slot < 0) sc->first_used_txdma_slot = txs->txs_firstdesc; bus_dmamap_sync(sc->sc_txdma_tag, txs->txs_dmamap, BUS_DMASYNC_PREWRITE); sc->sc_txdmadesc[idx].next = firstslotphys; STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); txs->txs_mbuf = *m_head; *pktdesc = firstslotphys; return (0); } static void glc_rxintr(struct glc_softc *sc) { int i, restart_rxdma, error; struct mbuf *m; struct ifnet *ifp = sc->sc_ifp; bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map, BUS_DMASYNC_POSTREAD); restart_rxdma = 0; while ((sc->sc_rxdmadesc[sc->sc_next_rxdma_slot].cmd_stat & GELIC_DESCR_OWNED) == 0) { i = sc->sc_next_rxdma_slot; sc->sc_next_rxdma_slot++; if (sc->sc_next_rxdma_slot >= GLC_MAX_RX_PACKETS) sc->sc_next_rxdma_slot = 0; if (sc->sc_rxdmadesc[i].cmd_stat & GELIC_CMDSTAT_CHAIN_END) restart_rxdma = 1; if (sc->sc_rxdmadesc[i].rxerror & GELIC_RXERRORS) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto requeue; } m = sc->sc_rxsoft[i].rxs_mbuf; if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_IPCSUM) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED | CSUM_IP_VALID; } if (sc->sc_rxdmadesc[i].data_stat & GELIC_RX_TCPUDPCSUM) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } if (glc_add_rxbuf(sc, i)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); goto requeue; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = ifp; m->m_len = sc->sc_rxdmadesc[i].valid_size; m->m_pkthdr.len = m->m_len; /* * Remove VLAN tag. Even on early firmwares that do not allow * multiple VLANs, the VLAN tag is still in place here. 
*/ m_adj(m, 2); mtx_unlock(&sc->sc_mtx); (*ifp->if_input)(ifp, m); mtx_lock(&sc->sc_mtx); requeue: glc_add_rxbuf_dma(sc, i); } bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_rxdmadesc_map, BUS_DMASYNC_PREWRITE); if (restart_rxdma) { error = lv1_net_start_rx_dma(sc->sc_bus, sc->sc_dev, sc->sc_rxsoft[sc->sc_next_rxdma_slot].rxs_desc, 0); if (error != 0) device_printf(sc->sc_self, "lv1_net_start_rx_dma error: %d\n", error); } } static void glc_txintr(struct glc_softc *sc) { struct ifnet *ifp = sc->sc_ifp; struct glc_txsoft *txs; int progress = 0, kickstart = 0, error; bus_dmamap_sync(sc->sc_dmadesc_tag, sc->sc_txdmadesc_map, BUS_DMASYNC_POSTREAD); while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat & GELIC_DESCR_OWNED) break; STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); bus_dmamap_unload(sc->sc_txdma_tag, txs->txs_dmamap); sc->bsy_txdma_slots -= txs->txs_ndescs; if (txs->txs_mbuf != NULL) { m_freem(txs->txs_mbuf); txs->txs_mbuf = NULL; } if ((sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat & 0xf0000000) != 0) { lv1_net_stop_tx_dma(sc->sc_bus, sc->sc_dev, 0); kickstart = 1; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } if (sc->sc_txdmadesc[txs->txs_lastdesc].cmd_stat & GELIC_CMDSTAT_CHAIN_END) kickstart = 1; STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); progress = 1; } if (txs != NULL) sc->first_used_txdma_slot = txs->txs_firstdesc; else sc->first_used_txdma_slot = -1; if (kickstart || txs != NULL) { /* Speculatively (or necessarily) start the TX queue again */ error = lv1_net_start_tx_dma(sc->sc_bus, sc->sc_dev, sc->sc_txdmadesc_phys + txs->txs_firstdesc*sizeof(struct glc_dmadesc), 0); if (error != 0) device_printf(sc->sc_self, "lv1_net_start_tx_dma error: %d\n", error); } if (progress) { /* * We freed some descriptors, so reset IFF_DRV_OACTIVE * and restart. */ ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->sc_wdog_timer = STAILQ_EMPTY(&sc->sc_txdirtyq) ? 0 : 5; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && !IFQ_DRV_IS_EMPTY(&ifp->if_snd)) glc_start_locked(ifp); } } static int glc_intr_filter(void *xsc) { struct glc_softc *sc = xsc; powerpc_sync(); atomic_set_64(&sc->sc_interrupt_status, *sc->sc_hwirq_status); return (FILTER_SCHEDULE_THREAD); } static void glc_intr(void *xsc) { struct glc_softc *sc = xsc; uint64_t status, linkstat, junk; mtx_lock(&sc->sc_mtx); status = atomic_readandclear_64(&sc->sc_interrupt_status); if (status == 0) { mtx_unlock(&sc->sc_mtx); return; } if (status & (GELIC_INT_RXDONE | GELIC_INT_RXFRAME)) glc_rxintr(sc); if (status & (GELIC_INT_TXDONE | GELIC_INT_TX_CHAIN_END)) glc_txintr(sc); if (status & GELIC_INT_PHY) { lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS, GELIC_VLAN_TX_ETHERNET, 0, 0, &linkstat, &junk); linkstat = (linkstat & GELIC_LINK_UP) ? 
LINK_STATE_UP : LINK_STATE_DOWN; if (linkstat != sc->sc_ifp->if_link_state) if_link_state_change(sc->sc_ifp, linkstat); } mtx_unlock(&sc->sc_mtx); } static void glc_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct glc_softc *sc = ifp->if_softc; uint64_t status, junk; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_GET_LINK_STATUS, GELIC_VLAN_TX_ETHERNET, 0, 0, &status, &junk); if (status & GELIC_LINK_UP) ifmr->ifm_status |= IFM_ACTIVE; if (status & GELIC_SPEED_10) ifmr->ifm_active |= IFM_10_T; else if (status & GELIC_SPEED_100) ifmr->ifm_active |= IFM_100_TX; else if (status & GELIC_SPEED_1000) ifmr->ifm_active |= IFM_1000_T; if (status & GELIC_FULL_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } static int glc_media_change(struct ifnet *ifp) { struct glc_softc *sc = ifp->if_softc; uint64_t mode, junk; int result; if (IFM_TYPE(sc->sc_media.ifm_media) != IFM_ETHER) return (EINVAL); switch (IFM_SUBTYPE(sc->sc_media.ifm_media)) { case IFM_AUTO: mode = GELIC_AUTO_NEG; break; case IFM_10_T: mode = GELIC_SPEED_10; break; case IFM_100_TX: mode = GELIC_SPEED_100; break; case IFM_1000_T: mode = GELIC_SPEED_1000 | GELIC_FULL_DUPLEX; break; default: return (EINVAL); } if (IFM_OPTIONS(sc->sc_media.ifm_media) & IFM_FDX) mode |= GELIC_FULL_DUPLEX; result = lv1_net_control(sc->sc_bus, sc->sc_dev, GELIC_SET_LINK_MODE, GELIC_VLAN_TX_ETHERNET, mode, 0, &junk, &junk); return (result ? EIO : 0); } Index: head/sys/powerpc/ps3/platform_ps3.c =================================================================== --- head/sys/powerpc/ps3/platform_ps3.c (revision 295879) +++ head/sys/powerpc/ps3/platform_ps3.c (revision 295880) @@ -1,270 +1,269 @@ /*- * Copyright (c) 2010 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include "platform_if.h" #include "ps3-hvcall.h" #ifdef SMP extern void *ap_pcpu; #endif static int ps3_probe(platform_t); static int ps3_attach(platform_t); static void ps3_mem_regions(platform_t, struct mem_region *phys, int *physsz, struct mem_region *avail, int *availsz); static vm_offset_t ps3_real_maxaddr(platform_t); static u_long ps3_timebase_freq(platform_t, struct cpuref *cpuref); #ifdef SMP static int ps3_smp_first_cpu(platform_t, struct cpuref *cpuref); static int ps3_smp_next_cpu(platform_t, struct cpuref *cpuref); static int ps3_smp_get_bsp(platform_t, struct cpuref *cpuref); static int ps3_smp_start_cpu(platform_t, struct pcpu *cpu); static struct cpu_group *ps3_smp_topo(platform_t); #endif static void ps3_reset(platform_t); static void ps3_cpu_idle(sbintime_t); static platform_method_t ps3_methods[] = { PLATFORMMETHOD(platform_probe, ps3_probe), PLATFORMMETHOD(platform_attach, ps3_attach), PLATFORMMETHOD(platform_mem_regions, ps3_mem_regions), PLATFORMMETHOD(platform_real_maxaddr, ps3_real_maxaddr), PLATFORMMETHOD(platform_timebase_freq, ps3_timebase_freq), #ifdef SMP PLATFORMMETHOD(platform_smp_first_cpu, ps3_smp_first_cpu), PLATFORMMETHOD(platform_smp_next_cpu, ps3_smp_next_cpu), PLATFORMMETHOD(platform_smp_get_bsp, ps3_smp_get_bsp), PLATFORMMETHOD(platform_smp_start_cpu, ps3_smp_start_cpu), PLATFORMMETHOD(platform_smp_topo, ps3_smp_topo), #endif PLATFORMMETHOD(platform_reset, ps3_reset), PLATFORMMETHOD_END }; static platform_def_t ps3_platform = { "ps3", ps3_methods, 0 }; PLATFORM_DEF(ps3_platform); static int ps3_probe(platform_t plat) { phandle_t root; char compatible[64]; root = OF_finddevice("/"); if (OF_getprop(root, "compatible", compatible, sizeof(compatible)) <= 0) return (BUS_PROBE_NOWILDCARD); if (strncmp(compatible, "sony,ps3", sizeof(compatible)) != 0) return (BUS_PROBE_NOWILDCARD); return (BUS_PROBE_SPECIFIC); } static int ps3_attach(platform_t plat) { pmap_mmu_install("mmu_ps3", BUS_PROBE_SPECIFIC); cpu_idle_hook = ps3_cpu_idle; /* Set a breakpoint to make NULL an invalid address */ lv1_set_dabr(0x7 /* read and write, MMU on */, 2 /* kernel accesses */); return (0); } void ps3_mem_regions(platform_t plat, struct mem_region *phys, int *physsz, struct mem_region *avail_regions, int *availsz) { uint64_t lpar_id, junk, ppe_id; /* Get real mode memory region */ avail_regions[0].mr_start = 0; lv1_get_logical_partition_id(&lpar_id); lv1_get_logical_ppe_id(&ppe_id); lv1_get_repository_node_value(lpar_id, lv1_repository_string("bi") >> 32, lv1_repository_string("pu"), ppe_id, lv1_repository_string("rm_size"), &avail_regions[0].mr_size, &junk); /* Now get extended memory region */ lv1_get_repository_node_value(lpar_id, lv1_repository_string("bi") >> 32, lv1_repository_string("rgntotal"), 0, 0, &avail_regions[1].mr_size, &junk); /* Convert to maximum amount we can allocate in 16 MB pages */ avail_regions[1].mr_size -= avail_regions[0].mr_size; avail_regions[1].mr_size -= avail_regions[1].mr_size % (16*1024*1024); /* Allocate extended memory region */ lv1_allocate_memory(avail_regions[1].mr_size, 24 /* 16 MB pages */, 0, 0x04 /* any address */, &avail_regions[1].mr_start, &junk); *availsz = 2; if (phys != NULL) { memcpy(phys, avail_regions, sizeof(*phys)*2); *physsz = 2; } } static u_long ps3_timebase_freq(platform_t plat, struct cpuref *cpuref) { uint64_t 
ticks, node_id, junk; lv1_get_repository_node_value(PS3_LPAR_ID_PME, lv1_repository_string("be") >> 32, 0, 0, 0, &node_id, &junk); lv1_get_repository_node_value(PS3_LPAR_ID_PME, lv1_repository_string("be") >> 32, node_id, lv1_repository_string("clock"), 0, &ticks, &junk); return (ticks); } #ifdef SMP static int ps3_smp_first_cpu(platform_t plat, struct cpuref *cpuref) { cpuref->cr_cpuid = 0; cpuref->cr_hwref = cpuref->cr_cpuid; return (0); } static int ps3_smp_next_cpu(platform_t plat, struct cpuref *cpuref) { if (cpuref->cr_cpuid >= 1) return (ENOENT); cpuref->cr_cpuid++; cpuref->cr_hwref = cpuref->cr_cpuid; return (0); } static int ps3_smp_get_bsp(platform_t plat, struct cpuref *cpuref) { cpuref->cr_cpuid = 0; cpuref->cr_hwref = cpuref->cr_cpuid; return (0); } static int ps3_smp_start_cpu(platform_t plat, struct pcpu *pc) { /* loader(8) is spinning on 0x40 == 0 right now */ uint32_t *secondary_spin_sem = (uint32_t *)(0x40); int timeout; if (pc->pc_hwref != 1) return (ENXIO); ap_pcpu = pc; *secondary_spin_sem = 1; powerpc_sync(); DELAY(1); timeout = 10000; while (!pc->pc_awake && timeout--) DELAY(100); return ((pc->pc_awake) ? 0 : EBUSY); } static struct cpu_group * ps3_smp_topo(platform_t plat) { return (smp_topo_1level(CG_SHARE_L1, 2, CG_FLAG_SMT)); } #endif static void ps3_reset(platform_t plat) { lv1_panic(1); } static vm_offset_t ps3_real_maxaddr(platform_t plat) { struct mem_region *phys, *avail; int nphys, navail; mem_regions(&phys, &nphys, &avail, &navail); return (phys[0].mr_start + phys[0].mr_size); } static void ps3_cpu_idle(sbintime_t sbt) { lv1_pause(0); } Index: head/sys/powerpc/ps3/ps3_syscons.c =================================================================== --- head/sys/powerpc/ps3/ps3_syscons.c (revision 295879) +++ head/sys/powerpc/ps3/ps3_syscons.c (revision 295880) @@ -1,197 +1,196 @@ /*- * Copyright (c) 2011-2014 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include "ps3-hvcall.h" #define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_MODE_SET 0x0100 #define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC 0x0101 #define L1GPU_DISPLAY_SYNC_HSYNC 1 #define L1GPU_DISPLAY_SYNC_VSYNC 2 #define L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP 0x0102 static vd_init_t ps3fb_init; static vd_probe_t ps3fb_probe; void ps3fb_remap(void); struct ps3fb_softc { struct fb_info fb_info; uint64_t sc_fbhandle; uint64_t sc_fbcontext; uint64_t sc_dma_control; uint64_t sc_driver_info; uint64_t sc_reports; uint64_t sc_reports_size; }; static struct vt_driver vt_ps3fb_driver = { .vd_name = "ps3fb", .vd_probe = ps3fb_probe, .vd_init = ps3fb_init, .vd_blank = vt_fb_blank, .vd_bitblt_text = vt_fb_bitblt_text, .vd_bitblt_bmp = vt_fb_bitblt_bitmap, .vd_drawrect = vt_fb_drawrect, .vd_setpixel = vt_fb_setpixel, .vd_fb_ioctl = vt_fb_ioctl, .vd_fb_mmap = vt_fb_mmap, /* Better than VGA, but still generic driver. */ .vd_priority = VD_PRIORITY_GENERIC + 1, }; VT_DRIVER_DECLARE(vt_ps3fb, vt_ps3fb_driver); static struct ps3fb_softc ps3fb_softc; static int ps3fb_probe(struct vt_device *vd) { struct ps3fb_softc *sc; int disable; char compatible[64]; phandle_t root; disable = 0; TUNABLE_INT_FETCH("hw.syscons.disable", &disable); if (disable != 0) return (0); sc = &ps3fb_softc; TUNABLE_STR_FETCH("hw.platform", compatible, sizeof(compatible)); if (strcmp(compatible, "ps3") == 0) return (CN_INTERNAL); root = OF_finddevice("/"); if (OF_getprop(root, "compatible", compatible, sizeof(compatible)) <= 0) return (CN_DEAD); if (strncmp(compatible, "sony,ps3", sizeof(compatible)) != 0) return (CN_DEAD); return (CN_INTERNAL); } void ps3fb_remap(void) { struct ps3fb_softc *sc; vm_offset_t va, fb_paddr; sc = &ps3fb_softc; lv1_gpu_close(); lv1_gpu_open(0); lv1_gpu_context_attribute(0, L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_MODE_SET, 0,0,0,0); lv1_gpu_context_attribute(0, L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_MODE_SET, 0,0,1,0); lv1_gpu_context_attribute(0, L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC, 0,L1GPU_DISPLAY_SYNC_VSYNC,0,0); lv1_gpu_context_attribute(0, L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_SYNC, 1,L1GPU_DISPLAY_SYNC_VSYNC,0,0); lv1_gpu_memory_allocate(roundup2(sc->fb_info.fb_size, 1024*1024), 0, 0, 0, 0, &sc->sc_fbhandle, &fb_paddr); lv1_gpu_context_allocate(sc->sc_fbhandle, 0, &sc->sc_fbcontext, &sc->sc_dma_control, &sc->sc_driver_info, &sc->sc_reports, &sc->sc_reports_size); lv1_gpu_context_attribute(sc->sc_fbcontext, L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP, 0, 0, 0, 0); lv1_gpu_context_attribute(sc->sc_fbcontext, L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP, 1, 0, 0, 0); sc->fb_info.fb_pbase = fb_paddr; for (va = 0; va < sc->fb_info.fb_size; va += PAGE_SIZE) pmap_kenter_attr(0x10000000 + va, fb_paddr + va, VM_MEMATTR_WRITE_COMBINING); sc->fb_info.fb_flags &= ~FB_FLAG_NOWRITE; } static int ps3fb_init(struct vt_device *vd) { struct ps3fb_softc *sc; /* Init softc */ vd->vd_softc = sc = &ps3fb_softc; /* XXX: get from HV repository */ sc->fb_info.fb_depth = 32; sc->fb_info.fb_height = 480; sc->fb_info.fb_width = 720; TUNABLE_INT_FETCH("hw.ps3fb.height", &sc->fb_info.fb_height); TUNABLE_INT_FETCH("hw.ps3fb.width", &sc->fb_info.fb_width); sc->fb_info.fb_stride = sc->fb_info.fb_width*4; sc->fb_info.fb_size = sc->fb_info.fb_height * sc->fb_info.fb_stride; sc->fb_info.fb_bpp = sc->fb_info.fb_stride / sc->fb_info.fb_width * 8; /* * Arbitrarily choose address for the framebuffer */ 
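/*
 * Note: this fixed kernel VA base is the same range that ps3fb_remap()
 * later backs with pmap_kenter_attr() mappings of the hypervisor
 * framebuffer; until that happens, the FB_FLAG_NOWRITE flag set just
 * below keeps the console from writing through the unmapped address.
 */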
sc->fb_info.fb_vbase = 0x10000000; sc->fb_info.fb_flags |= FB_FLAG_NOWRITE; /* Not available yet */ sc->fb_info.fb_cmsize = 16; /* 32-bit VGA palette */ vt_generate_cons_palette(sc->fb_info.fb_cmap, COLOR_FORMAT_RGB, 255, 0, 255, 8, 255, 16); /* Set correct graphics context */ lv1_gpu_context_attribute(sc->sc_fbcontext, L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP, 0, 0, 0, 0); lv1_gpu_context_attribute(sc->sc_fbcontext, L1GPU_CONTEXT_ATTRIBUTE_DISPLAY_FLIP, 1, 0, 0, 0); vt_fb_init(vd); return (CN_INTERNAL); } Index: head/sys/powerpc/ps3/ps3bus.c =================================================================== --- head/sys/powerpc/ps3/ps3bus.c (revision 295879) +++ head/sys/powerpc/ps3/ps3bus.c (revision 295880) @@ -1,764 +1,763 @@ /*- * Copyright (C) 2010 Nathan Whitehorn * Copyright (C) 2011 glevand (geoffrey.levand@mail.ru) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include "ps3bus.h" #include "ps3-hvcall.h" #include "iommu_if.h" #include "clock_if.h" static void ps3bus_identify(driver_t *, device_t); static int ps3bus_probe(device_t); static int ps3bus_attach(device_t); static int ps3bus_print_child(device_t dev, device_t child); static int ps3bus_read_ivar(device_t bus, device_t child, int which, uintptr_t *result); static struct resource *ps3bus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); static int ps3bus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res); static bus_dma_tag_t ps3bus_get_dma_tag(device_t dev, device_t child); static int ps3_iommu_map(device_t dev, bus_dma_segment_t *segs, int *nsegs, bus_addr_t min, bus_addr_t max, bus_size_t alignment, bus_addr_t boundary, void *cookie); static int ps3_iommu_unmap(device_t dev, bus_dma_segment_t *segs, int nsegs, void *cookie); static int ps3_gettime(device_t dev, struct timespec *ts); static int ps3_settime(device_t dev, struct timespec *ts); struct ps3bus_devinfo { int bus; int dev; uint64_t bustype; uint64_t devtype; int busidx; int devidx; struct resource_list resources; bus_dma_tag_t dma_tag; struct mtx iommu_mtx; bus_addr_t dma_base[4]; }; static MALLOC_DEFINE(M_PS3BUS, "ps3bus", "PS3 system bus device information"); enum ps3bus_irq_type { SB_IRQ = 2, OHCI_IRQ = 3, EHCI_IRQ = 4, }; enum ps3bus_reg_type { OHCI_REG = 3, EHCI_REG = 4, }; static device_method_t ps3bus_methods[] = { /* Device interface */ DEVMETHOD(device_identify, ps3bus_identify), DEVMETHOD(device_probe, ps3bus_probe), DEVMETHOD(device_attach, ps3bus_attach), /* Bus interface */ DEVMETHOD(bus_add_child, bus_generic_add_child), DEVMETHOD(bus_get_dma_tag, ps3bus_get_dma_tag), DEVMETHOD(bus_print_child, ps3bus_print_child), DEVMETHOD(bus_read_ivar, ps3bus_read_ivar), DEVMETHOD(bus_alloc_resource, ps3bus_alloc_resource), DEVMETHOD(bus_activate_resource, ps3bus_activate_resource), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), /* IOMMU interface */ DEVMETHOD(iommu_map, ps3_iommu_map), DEVMETHOD(iommu_unmap, ps3_iommu_unmap), /* Clock interface */ DEVMETHOD(clock_gettime, ps3_gettime), DEVMETHOD(clock_settime, ps3_settime), DEVMETHOD_END }; struct ps3bus_softc { struct rman sc_mem_rman; struct rman sc_intr_rman; struct mem_region *regions; int rcount; }; static driver_t ps3bus_driver = { "ps3bus", ps3bus_methods, sizeof(struct ps3bus_softc) }; static devclass_t ps3bus_devclass; DRIVER_MODULE(ps3bus, nexus, ps3bus_driver, ps3bus_devclass, 0, 0); static void ps3bus_identify(driver_t *driver, device_t parent) { if (strcmp(installed_platform(), "ps3") != 0) return; if (device_find_child(parent, "ps3bus", -1) == NULL) BUS_ADD_CHILD(parent, 0, "ps3bus", 0); } static int ps3bus_probe(device_t dev) { /* Do not attach to any OF nodes that may be present */ device_set_desc(dev, "Playstation 3 System Bus"); return (BUS_PROBE_NOWILDCARD); } static void ps3bus_resources_init(struct rman *rm, int bus_index, int dev_index, struct ps3bus_devinfo *dinfo) { uint64_t irq_type, irq, outlet; uint64_t reg_type, paddr, len; uint64_t ppe, junk; int i, result; int thread; resource_list_init(&dinfo->resources); lv1_get_logical_ppe_id(&ppe); thread = 32 - fls(mfctrl()); /* Scan for interrupts */ for 
(i = 0; i < 10; i++) { result = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("intr") | i, 0, &irq_type, &irq); if (result != 0) break; switch (irq_type) { case SB_IRQ: lv1_construct_event_receive_port(&outlet); lv1_connect_irq_plug_ext(ppe, thread, outlet, outlet, 0); lv1_connect_interrupt_event_receive_port(dinfo->bus, dinfo->dev, outlet, irq); break; case OHCI_IRQ: case EHCI_IRQ: lv1_construct_io_irq_outlet(irq, &outlet); lv1_connect_irq_plug_ext(ppe, thread, outlet, outlet, 0); break; default: printf("Unknown IRQ type %ld for device %d.%d\n", irq_type, dinfo->bus, dinfo->dev); break; } resource_list_add(&dinfo->resources, SYS_RES_IRQ, i, outlet, outlet, 1); } /* Scan for registers */ for (i = 0; i < 10; i++) { result = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("reg") | i, lv1_repository_string("type"), &reg_type, &junk); if (result != 0) break; result = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("reg") | i, lv1_repository_string("data"), &paddr, &len); result = lv1_map_device_mmio_region(dinfo->bus, dinfo->dev, paddr, len, 12 /* log_2(4 KB) */, &paddr); if (result != 0) { printf("Mapping registers failed for device " "%d.%d (%ld.%ld): %d\n", dinfo->bus, dinfo->dev, dinfo->bustype, dinfo->devtype, result); continue; } rman_manage_region(rm, paddr, paddr + len - 1); resource_list_add(&dinfo->resources, SYS_RES_MEMORY, i, paddr, paddr + len, len); } } static void ps3bus_resources_init_by_type(struct rman *rm, int bus_index, int dev_index, uint64_t irq_type, uint64_t reg_type, struct ps3bus_devinfo *dinfo) { uint64_t _irq_type, irq, outlet; uint64_t _reg_type, paddr, len; uint64_t ppe, junk; int i, result; int thread; resource_list_init(&dinfo->resources); lv1_get_logical_ppe_id(&ppe); thread = 32 - fls(mfctrl()); /* Scan for interrupts */ for (i = 0; i < 10; i++) { result = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("intr") | i, 0, &_irq_type, &irq); if (result != 0) break; if (_irq_type != irq_type) continue; lv1_construct_io_irq_outlet(irq, &outlet); lv1_connect_irq_plug_ext(ppe, thread, outlet, outlet, 0); resource_list_add(&dinfo->resources, SYS_RES_IRQ, i, outlet, outlet, 1); } /* Scan for registers */ for (i = 0; i < 10; i++) { result = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("reg") | i, lv1_repository_string("type"), &_reg_type, &junk); if (result != 0) break; if (_reg_type != reg_type) continue; result = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("reg") | i, lv1_repository_string("data"), &paddr, &len); result = lv1_map_device_mmio_region(dinfo->bus, dinfo->dev, paddr, len, 12 /* log_2(4 KB) */, &paddr); if (result != 0) { printf("Mapping registers failed for device " "%d.%d (%ld.%ld): %d\n", dinfo->bus, dinfo->dev, dinfo->bustype, dinfo->devtype, result); break; } rman_manage_region(rm, paddr, paddr + len - 1); resource_list_add(&dinfo->resources, SYS_RES_MEMORY, i, paddr, paddr + len,
len); } } static int ps3bus_attach(device_t self) { struct ps3bus_softc *sc; struct ps3bus_devinfo *dinfo; int bus_index, dev_index, result; uint64_t bustype, bus, devs; uint64_t dev, devtype; uint64_t junk; device_t cdev; sc = device_get_softc(self); sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "PS3Bus Memory Mapped I/O"; sc->sc_intr_rman.rm_type = RMAN_ARRAY; sc->sc_intr_rman.rm_descr = "PS3Bus Interrupts"; rman_init(&sc->sc_mem_rman); rman_init(&sc->sc_intr_rman); rman_manage_region(&sc->sc_intr_rman, 0, ~0); /* Get memory regions for DMA */ mem_regions(&sc->regions, &sc->rcount, &sc->regions, &sc->rcount); /* * Probe all the PS3's buses. */ for (bus_index = 0; bus_index < 5; bus_index++) { result = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("type"), 0, 0, &bustype, &junk); if (result != 0) continue; result = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("id"), 0, 0, &bus, &junk); if (result != 0) continue; result = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("num_dev"), 0, 0, &devs, &junk); for (dev_index = 0; dev_index < devs; dev_index++) { result = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("type"), 0, &devtype, &junk); if (result != 0) continue; result = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("id"), 0, &dev, &junk); if (result != 0) continue; switch (devtype) { case PS3_DEVTYPE_USB: /* USB device has OHCI and EHCI USB host controllers */ lv1_open_device(bus, dev, 0); /* OHCI host controller */ dinfo = malloc(sizeof(*dinfo), M_PS3BUS, M_WAITOK | M_ZERO); dinfo->bus = bus; dinfo->dev = dev; dinfo->bustype = bustype; dinfo->devtype = devtype; dinfo->busidx = bus_index; dinfo->devidx = dev_index; ps3bus_resources_init_by_type(&sc->sc_mem_rman, bus_index, dev_index, OHCI_IRQ, OHCI_REG, dinfo); cdev = device_add_child(self, "ohci", -1); if (cdev == NULL) { device_printf(self, "device_add_child failed\n"); free(dinfo, M_PS3BUS); continue; } mtx_init(&dinfo->iommu_mtx, "iommu", NULL, MTX_DEF); device_set_ivars(cdev, dinfo); /* EHCI host controller */ dinfo = malloc(sizeof(*dinfo), M_PS3BUS, M_WAITOK | M_ZERO); dinfo->bus = bus; dinfo->dev = dev; dinfo->bustype = bustype; dinfo->devtype = devtype; dinfo->busidx = bus_index; dinfo->devidx = dev_index; ps3bus_resources_init_by_type(&sc->sc_mem_rman, bus_index, dev_index, EHCI_IRQ, EHCI_REG, dinfo); cdev = device_add_child(self, "ehci", -1); if (cdev == NULL) { device_printf(self, "device_add_child failed\n"); free(dinfo, M_PS3BUS); continue; } mtx_init(&dinfo->iommu_mtx, "iommu", NULL, MTX_DEF); device_set_ivars(cdev, dinfo); break; default: dinfo = malloc(sizeof(*dinfo), M_PS3BUS, M_WAITOK | M_ZERO); dinfo->bus = bus; dinfo->dev = dev; dinfo->bustype = bustype; dinfo->devtype = devtype; dinfo->busidx = bus_index; dinfo->devidx = dev_index; if (dinfo->bustype == PS3_BUSTYPE_SYSBUS || dinfo->bustype == PS3_BUSTYPE_STORAGE) lv1_open_device(bus, dev, 0); ps3bus_resources_init(&sc->sc_mem_rman, bus_index, dev_index, dinfo); cdev = device_add_child(self, NULL, -1); if (cdev == NULL) { device_printf(self, "device_add_child failed\n"); free(dinfo, M_PS3BUS); continue; } 
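/*
 * Each child carries its own IOMMU mutex and devinfo ivars; the actual
 * DMA window is created lazily by ps3bus_get_dma_tag() the first time
 * the child asks for a DMA tag.
 */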
mtx_init(&dinfo->iommu_mtx, "iommu", NULL, MTX_DEF); device_set_ivars(cdev, dinfo); } } } clock_register(self, 1000); return (bus_generic_attach(self)); } static int ps3bus_print_child(device_t dev, device_t child) { struct ps3bus_devinfo *dinfo = device_get_ivars(child); int retval = 0; retval += bus_print_child_header(dev, child); retval += resource_list_print_type(&dinfo->resources, "mem", SYS_RES_MEMORY, "%#lx"); retval += resource_list_print_type(&dinfo->resources, "irq", SYS_RES_IRQ, "%ld"); retval += bus_print_child_footer(dev, child); return (retval); } static int ps3bus_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct ps3bus_devinfo *dinfo = device_get_ivars(child); switch (which) { case PS3BUS_IVAR_BUS: *result = dinfo->bus; break; case PS3BUS_IVAR_DEVICE: *result = dinfo->dev; break; case PS3BUS_IVAR_BUSTYPE: *result = dinfo->bustype; break; case PS3BUS_IVAR_DEVTYPE: *result = dinfo->devtype; break; case PS3BUS_IVAR_BUSIDX: *result = dinfo->busidx; break; case PS3BUS_IVAR_DEVIDX: *result = dinfo->devidx; break; default: return (EINVAL); } return (0); } static struct resource * ps3bus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct ps3bus_devinfo *dinfo; struct ps3bus_softc *sc; int needactivate; struct resource *rv; struct rman *rm; rman_res_t adjstart, adjend, adjcount; struct resource_list_entry *rle; sc = device_get_softc(bus); dinfo = device_get_ivars(child); needactivate = flags & RF_ACTIVE; flags &= ~RF_ACTIVE; switch (type) { case SYS_RES_MEMORY: rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, *rid); if (rle == NULL) { device_printf(bus, "no rle for %s memory %d\n", device_get_nameunit(child), *rid); return (NULL); } if (start < rle->start) adjstart = rle->start; else if (start > rle->end) adjstart = rle->end; else adjstart = start; if (end < rle->start) adjend = rle->start; else if (end > rle->end) adjend = rle->end; else adjend = end; adjcount = adjend - adjstart; rm = &sc->sc_mem_rman; break; case SYS_RES_IRQ: rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, *rid); rm = &sc->sc_intr_rman; adjstart = rle->start; adjcount = ulmax(count, rle->count); adjend = ulmax(rle->end, rle->start + adjcount - 1); break; default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } rv = rman_reserve_resource(rm, adjstart, adjend, adjcount, flags, child); if (rv == NULL) { device_printf(bus, "failed to reserve resource %#lx - %#lx (%#lx)" " for %s\n", adjstart, adjend, adjcount, device_get_nameunit(child)); return (NULL); } rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv) != 0) { device_printf(bus, "failed to activate resource for %s\n", device_get_nameunit(child)); rman_release_resource(rv); return (NULL); } } return (rv); } static int ps3bus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { void *p; if (type == SYS_RES_IRQ) return (bus_activate_resource(bus, type, rid, res)); if (type == SYS_RES_MEMORY) { vm_offset_t start; start = (vm_offset_t) rman_get_start(res); if (bootverbose) printf("ps3 mapdev: start %zx, len %ld\n", start, rman_get_size(res)); p = pmap_mapdev(start, (vm_size_t) rman_get_size(res)); if (p == NULL) return (ENOMEM); rman_set_virtual(res, p); rman_set_bustag(res, &bs_be_tag); rman_set_bushandle(res, (rman_res_t)p); } return (rman_activate_resource(res)); } static bus_dma_tag_t 
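/*
 * Builds the per-device DMA tag on first use: hypervisor DMA regions are
 * allocated and mapped once per child, and the IOMMU map/unmap methods
 * are attached for everything except storage devices (see the note
 * further down in this function).
 */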
ps3bus_get_dma_tag(device_t dev, device_t child) { struct ps3bus_devinfo *dinfo = device_get_ivars(child); struct ps3bus_softc *sc = device_get_softc(dev); int i, err, flags, pagesize; if (dinfo->bustype != PS3_BUSTYPE_SYSBUS && dinfo->bustype != PS3_BUSTYPE_STORAGE) return (bus_get_dma_tag(dev)); mtx_lock(&dinfo->iommu_mtx); if (dinfo->dma_tag != NULL) { mtx_unlock(&dinfo->iommu_mtx); return (dinfo->dma_tag); } flags = 0; /* 32-bit mode */ if (dinfo->bustype == PS3_BUSTYPE_SYSBUS && dinfo->devtype == PS3_DEVTYPE_USB) flags = 2; /* 8-bit mode */ pagesize = 24; /* log_2(16 MB) */ if (dinfo->bustype == PS3_BUSTYPE_STORAGE) pagesize = 12; /* 4 KB */ for (i = 0; i < sc->rcount; i++) { err = lv1_allocate_device_dma_region(dinfo->bus, dinfo->dev, sc->regions[i].mr_size, pagesize, flags, &dinfo->dma_base[i]); if (err != 0) { device_printf(child, "could not allocate DMA region %d: %d\n", i, err); goto fail; } err = lv1_map_device_dma_region(dinfo->bus, dinfo->dev, sc->regions[i].mr_start, dinfo->dma_base[i], sc->regions[i].mr_size, 0xf800000000000800UL /* Cell Handbook Figure 7.3.4.1 */); if (err != 0) { device_printf(child, "could not map DMA region %d: %d\n", i, err); goto fail; } } err = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 0, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &dinfo->dma_tag); /* * Note: storage devices have IOMMU mappings set up by the hypervisor, * but use physical, non-translated addresses. The above IOMMU * initialization is necessary for the hypervisor to be able to set up * the mappings, but actual DMA mappings should not use the IOMMU * routines. */ if (dinfo->bustype != PS3_BUSTYPE_STORAGE) bus_dma_tag_set_iommu(dinfo->dma_tag, dev, dinfo); fail: mtx_unlock(&dinfo->iommu_mtx); if (err) return (NULL); return (dinfo->dma_tag); } static int ps3_iommu_map(device_t dev, bus_dma_segment_t *segs, int *nsegs, bus_addr_t min, bus_addr_t max, bus_size_t alignment, bus_addr_t boundary, void *cookie) { struct ps3bus_devinfo *dinfo = cookie; struct ps3bus_softc *sc = device_get_softc(dev); int i, j; for (i = 0; i < *nsegs; i++) { for (j = 0; j < sc->rcount; j++) { if (segs[i].ds_addr >= sc->regions[j].mr_start && segs[i].ds_addr < sc->regions[j].mr_start + sc->regions[j].mr_size) break; } KASSERT(j < sc->rcount, ("Trying to map address %#lx not in physical memory", segs[i].ds_addr)); segs[i].ds_addr = dinfo->dma_base[j] + (segs[i].ds_addr - sc->regions[j].mr_start); } return (0); } static int ps3_iommu_unmap(device_t dev, bus_dma_segment_t *segs, int nsegs, void *cookie) { return (0); } #define Y2K 946684800 static int ps3_gettime(device_t dev, struct timespec *ts) { uint64_t rtc, tb; int result; result = lv1_get_rtc(&rtc, &tb); if (result) return (result); ts->tv_sec = rtc + Y2K; ts->tv_nsec = 0; return (0); } static int ps3_settime(device_t dev, struct timespec *ts) { return (-1); } Index: head/sys/powerpc/ps3/ps3cdrom.c =================================================================== --- head/sys/powerpc/ps3/ps3cdrom.c (revision 295879) +++ head/sys/powerpc/ps3/ps3cdrom.c (revision 295880) @@ -1,710 +1,709 @@ /*- * Copyright (C) 2010 Nathan Whitehorn * Copyright (C) 2011 glevand * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include #include #include #include "ps3bus.h" #include "ps3-hvcall.h" #define PS3CDROM_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), "ps3cdrom", \ MTX_DEF) #define PS3CDROM_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define PS3CDROM_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define PS3CDROM_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define PS3CDROM_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); #define PS3CDROM_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); #define PS3CDROM_MAX_XFERS 3 #define LV1_STORAGE_SEND_ATAPI_COMMAND 0x01 struct ps3cdrom_softc; struct ps3cdrom_xfer { TAILQ_ENTRY(ps3cdrom_xfer) x_queue; struct ps3cdrom_softc *x_sc; union ccb *x_ccb; bus_dmamap_t x_dmamap; uint64_t x_tag; }; TAILQ_HEAD(ps3cdrom_xferq, ps3cdrom_xfer); struct ps3cdrom_softc { device_t sc_dev; struct mtx sc_mtx; uint64_t sc_blksize; uint64_t sc_nblocks; int sc_irqid; struct resource *sc_irq; void *sc_irqctx; bus_dma_tag_t sc_dmatag; struct cam_sim *sc_sim; struct cam_path *sc_path; struct ps3cdrom_xfer sc_xfer[PS3CDROM_MAX_XFERS]; struct ps3cdrom_xferq sc_active_xferq; struct ps3cdrom_xferq sc_free_xferq; }; enum lv1_ata_proto { NON_DATA_PROTO = 0x00, PIO_DATA_IN_PROTO = 0x01, PIO_DATA_OUT_PROTO = 0x02, DMA_PROTO = 0x03 }; enum lv1_ata_in_out { DIR_WRITE = 0x00, DIR_READ = 0x01 }; struct lv1_atapi_cmd { uint8_t pkt[32]; uint32_t pktlen; uint32_t nblocks; uint32_t blksize; uint32_t proto; /* enum lv1_ata_proto */ uint32_t in_out; /* enum lv1_ata_in_out */ uint64_t buf; uint32_t arglen; }; static void ps3cdrom_action(struct cam_sim *sim, union ccb *ccb); static void ps3cdrom_poll(struct cam_sim *sim); static void ps3cdrom_async(void *callback_arg, u_int32_t code, struct cam_path* path, void *arg); static void ps3cdrom_intr(void *arg); static void ps3cdrom_transfer(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static int ps3cdrom_decode_lv1_status(uint64_t status, u_int8_t *sense_key, u_int8_t *asc, u_int8_t *ascq); static int ps3cdrom_probe(device_t dev) { if (ps3bus_get_bustype(dev) != PS3_BUSTYPE_STORAGE || ps3bus_get_devtype(dev) != PS3_DEVTYPE_CDROM) return (ENXIO); device_set_desc(dev, "Playstation 3 
CDROM"); return (BUS_PROBE_SPECIFIC); } static int ps3cdrom_attach(device_t dev) { struct ps3cdrom_softc *sc = device_get_softc(dev); struct cam_devq *devq; struct ps3cdrom_xfer *xp; struct ccb_setasync csa; int i, err; sc->sc_dev = dev; PS3CDROM_LOCK_INIT(sc); /* Setup interrupt handler */ sc->sc_irqid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqid, RF_ACTIVE); if (!sc->sc_irq) { device_printf(dev, "Could not allocate IRQ\n"); err = ENXIO; goto fail_destroy_lock; } err = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_CAM | INTR_MPSAFE | INTR_ENTROPY, NULL, ps3cdrom_intr, sc, &sc->sc_irqctx); if (err) { device_printf(dev, "Could not setup IRQ\n"); err = ENXIO; goto fail_release_intr; } /* Setup DMA */ err = bus_dma_tag_create(bus_get_dma_tag(dev), 4096, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_UNRESTRICTED, 1, PAGE_SIZE, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmatag); if (err) { device_printf(dev, "Could not create DMA tag\n"); err = ENXIO; goto fail_teardown_intr; } /* Setup transfer queues */ TAILQ_INIT(&sc->sc_active_xferq); TAILQ_INIT(&sc->sc_free_xferq); for (i = 0; i < PS3CDROM_MAX_XFERS; i++) { xp = &sc->sc_xfer[i]; xp->x_sc = sc; err = bus_dmamap_create(sc->sc_dmatag, BUS_DMA_COHERENT, &xp->x_dmamap); if (err) { device_printf(dev, "Could not create DMA map (%d)\n", err); goto fail_destroy_dmamap; } TAILQ_INSERT_TAIL(&sc->sc_free_xferq, xp, x_queue); } /* Setup CAM */ devq = cam_simq_alloc(PS3CDROM_MAX_XFERS - 1); if (!devq) { device_printf(dev, "Could not allocate SIM queue\n"); err = ENOMEM; goto fail_destroy_dmatag; } sc->sc_sim = cam_sim_alloc(ps3cdrom_action, ps3cdrom_poll, "ps3cdrom", sc, device_get_unit(dev), &sc->sc_mtx, PS3CDROM_MAX_XFERS - 1, 0, devq); if (!sc->sc_sim) { device_printf(dev, "Could not allocate SIM\n"); cam_simq_free(devq); err = ENOMEM; goto fail_destroy_dmatag; } /* Setup XPT */ PS3CDROM_LOCK(sc); err = xpt_bus_register(sc->sc_sim, dev, 0); if (err != CAM_SUCCESS) { device_printf(dev, "Could not register XPT bus\n"); err = ENXIO; PS3CDROM_UNLOCK(sc); goto fail_free_sim; } err = xpt_create_path(&sc->sc_path, NULL, cam_sim_path(sc->sc_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); if (err != CAM_REQ_CMP) { device_printf(dev, "Could not create XPT path\n"); err = ENOMEM; PS3CDROM_UNLOCK(sc); goto fail_unregister_xpt_bus; } xpt_setup_ccb(&csa.ccb_h, sc->sc_path, 5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_LOST_DEVICE; csa.callback = ps3cdrom_async; csa.callback_arg = sc->sc_sim; xpt_action((union ccb *) &csa); CAM_DEBUG(sc->sc_path, CAM_DEBUG_TRACE, ("registered SIM for ps3cdrom%d\n", device_get_unit(dev))); PS3CDROM_UNLOCK(sc); return (BUS_PROBE_SPECIFIC); fail_unregister_xpt_bus: xpt_bus_deregister(cam_sim_path(sc->sc_sim)); fail_free_sim: cam_sim_free(sc->sc_sim, TRUE); fail_destroy_dmamap: while ((xp = TAILQ_FIRST(&sc->sc_free_xferq))) { TAILQ_REMOVE(&sc->sc_free_xferq, xp, x_queue); bus_dmamap_destroy(sc->sc_dmatag, xp->x_dmamap); } fail_destroy_dmatag: bus_dma_tag_destroy(sc->sc_dmatag); fail_teardown_intr: bus_teardown_intr(dev, sc->sc_irq, sc->sc_irqctx); fail_release_intr: bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqid, sc->sc_irq); fail_destroy_lock: PS3CDROM_LOCK_DESTROY(sc); return (err); } static int ps3cdrom_detach(device_t dev) { struct ps3cdrom_softc *sc = device_get_softc(dev); int i; xpt_async(AC_LOST_DEVICE, sc->sc_path, NULL); xpt_free_path(sc->sc_path); xpt_bus_deregister(cam_sim_path(sc->sc_sim)); cam_sim_free(sc->sc_sim, TRUE); for (i = 0; i < PS3CDROM_MAX_XFERS; i++) 
bus_dmamap_destroy(sc->sc_dmatag, sc->sc_xfer[i].x_dmamap); bus_dma_tag_destroy(sc->sc_dmatag); bus_teardown_intr(dev, sc->sc_irq, sc->sc_irqctx); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqid, sc->sc_irq); PS3CDROM_LOCK_DESTROY(sc); return (0); } static void ps3cdrom_action(struct cam_sim *sim, union ccb *ccb) { struct ps3cdrom_softc *sc = (struct ps3cdrom_softc *)cam_sim_softc(sim); device_t dev = sc->sc_dev; struct ps3cdrom_xfer *xp; int err; PS3CDROM_ASSERT_LOCKED(sc); CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("function code 0x%02x\n", ccb->ccb_h.func_code)); switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; if(ccb->ccb_h.target_id > 0) { ccb->ccb_h.status = CAM_TID_INVALID; break; } if(ccb->ccb_h.target_lun > 0) { ccb->ccb_h.status = CAM_LUN_INVALID; break; } xp = TAILQ_FIRST(&sc->sc_free_xferq); KASSERT(xp != NULL, ("no free transfers")); xp->x_ccb = ccb; TAILQ_REMOVE(&sc->sc_free_xferq, xp, x_queue); err = bus_dmamap_load_ccb(sc->sc_dmatag, xp->x_dmamap, ccb, ps3cdrom_transfer, xp, 0); if (err && err != EINPROGRESS) { device_printf(dev, "Could not load DMA map (%d)\n", err); xp->x_ccb = NULL; TAILQ_INSERT_TAIL(&sc->sc_free_xferq, xp, x_queue); ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; break; } return; case XPT_SET_TRAN_SETTINGS: ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; cts->proto_specific.valid = 0; cts->xport_specific.valid = 0; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_RESET_BUS: case XPT_RESET_DEV: ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, 1); break; case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = 0; cpi->target_sprt = 0; cpi->hba_inquiry = PI_SDTR_ABLE; cpi->hba_misc = PIM_NOBUSRESET | PIM_SEQSCAN | PIM_NO_6_BYTE; cpi->hba_eng_cnt = 0; bzero(cpi->vuhba_flags, sizeof(cpi->vuhba_flags)); cpi->max_target = 0; cpi->max_lun = 0; cpi->initiator_id = 7; cpi->bus_id = cam_sim_bus(sim); cpi->unit_number = cam_sim_unit(sim); cpi->base_transfer_speed = 150000; strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strncpy(cpi->hba_vid, "Sony", HBA_IDLEN); strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->maxio = PAGE_SIZE; cpi->ccb_h.status = CAM_REQ_CMP; break; } default: CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("unsupported function code 0x%02x\n", ccb->ccb_h.func_code)); ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static void ps3cdrom_poll(struct cam_sim *sim) { ps3cdrom_intr(cam_sim_softc(sim)); } static void ps3cdrom_async(void *callback_arg, u_int32_t code, struct cam_path* path, void *arg) { switch (code) { case AC_LOST_DEVICE: xpt_print_path(path); break; default: break; } } static void ps3cdrom_intr(void *arg) { struct ps3cdrom_softc *sc = (struct ps3cdrom_softc *) arg; device_t dev = sc->sc_dev; uint64_t devid = ps3bus_get_device(dev); struct ps3cdrom_xfer *xp; union ccb *ccb; u_int8_t *cdb, sense_key, asc, ascq; uint64_t tag, status; if (lv1_storage_get_async_status(devid, &tag, &status) != 0) return; PS3CDROM_LOCK(sc); /* Find transfer with the returned tag */ TAILQ_FOREACH(xp, &sc->sc_active_xferq, x_queue) { if (xp->x_tag == tag) break; } if (xp) { ccb = 
xp->x_ccb; cdb = (ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ATAPI command 0x%02x tag 0x%016lx completed (0x%016lx)\n", cdb[0], tag, status)); if (!status) { ccb->csio.scsi_status = SCSI_STATUS_OK; ccb->csio.resid = 0; ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; if (!ps3cdrom_decode_lv1_status(status, &sense_key, &asc, &ascq)) { CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("sense key 0x%02x asc 0x%02x ascq 0x%02x\n", sense_key, asc, ascq)); scsi_set_sense_data(&ccb->csio.sense_data, /*sense_format*/ SSD_TYPE_NONE, /*current_error*/ 1, sense_key, asc, ascq, SSD_ELEM_NONE); ccb->csio.sense_len = SSD_FULL_SIZE; ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) ccb->csio.resid = ccb->csio.dxfer_len; } if (ccb->ccb_h.flags & CAM_DIR_IN) bus_dmamap_sync(sc->sc_dmatag, xp->x_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dmatag, xp->x_dmamap); xp->x_ccb = NULL; TAILQ_REMOVE(&sc->sc_active_xferq, xp, x_queue); TAILQ_INSERT_TAIL(&sc->sc_free_xferq, xp, x_queue); xpt_done(ccb); } else { device_printf(dev, "Could not find transfer with tag 0x%016lx\n", tag); } PS3CDROM_UNLOCK(sc); } static void ps3cdrom_transfer(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct ps3cdrom_xfer *xp = (struct ps3cdrom_xfer *) arg; struct ps3cdrom_softc *sc = xp->x_sc; device_t dev = sc->sc_dev; uint64_t devid = ps3bus_get_device(dev); union ccb *ccb = xp->x_ccb; u_int8_t *cdb; uint64_t start_sector, block_count; int err; KASSERT(nsegs == 1 || nsegs == 0, ("ps3cdrom_transfer: invalid number of DMA segments %d", nsegs)); KASSERT(error == 0, ("ps3cdrom_transfer: DMA error %d", error)); PS3CDROM_ASSERT_LOCKED(sc); if (error) { device_printf(dev, "Could not load DMA map (%d)\n", error); xp->x_ccb = NULL; TAILQ_INSERT_TAIL(&sc->sc_free_xferq, xp, x_queue); ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; xpt_done(ccb); return; } cdb = (ccb->ccb_h.flags & CAM_CDB_POINTER) ? ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ATAPI command 0x%02x cdb_len %d dxfer_len %d\n ", cdb[0], ccb->csio.cdb_len, ccb->csio.dxfer_len)); switch (cdb[0]) { case READ_10: KASSERT(nsegs == 1, ("ps3cdrom_transfer: no data to read")); start_sector = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; block_count = (cdb[7] << 8) | cdb[8]; err = lv1_storage_read(devid, 0 /* region id */, start_sector, block_count, 0 /* flags */, segs[0].ds_addr, &xp->x_tag); bus_dmamap_sync(sc->sc_dmatag, xp->x_dmamap, BUS_DMASYNC_POSTREAD); break; case WRITE_10: KASSERT(nsegs == 1, ("ps3cdrom_transfer: no data to write")); start_sector = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; block_count = (cdb[7] << 8) | cdb[8]; bus_dmamap_sync(sc->sc_dmatag, xp->x_dmamap, BUS_DMASYNC_PREWRITE); err = lv1_storage_write(devid, 0 /* region id */, start_sector, block_count, 0 /* flags */, segs[0].ds_addr, &xp->x_tag); break; default: { struct lv1_atapi_cmd atapi_cmd; bzero(&atapi_cmd, sizeof(atapi_cmd)); atapi_cmd.pktlen = 12; bcopy(cdb, atapi_cmd.pkt, ccb->csio.cdb_len); if (ccb->ccb_h.flags & CAM_DIR_IN) { atapi_cmd.in_out = DIR_READ; atapi_cmd.proto = (ccb->csio.dxfer_len >= 2048) ? DMA_PROTO : PIO_DATA_IN_PROTO; } else if (ccb->ccb_h.flags & CAM_DIR_OUT) { atapi_cmd.in_out = DIR_WRITE; atapi_cmd.proto = (ccb->csio.dxfer_len >= 2048) ? 
DMA_PROTO : PIO_DATA_OUT_PROTO; } else { atapi_cmd.proto = NON_DATA_PROTO; } atapi_cmd.nblocks = atapi_cmd.arglen = (nsegs == 0) ? 0 : segs[0].ds_len; atapi_cmd.blksize = 1; atapi_cmd.buf = (nsegs == 0) ? 0 : segs[0].ds_addr; if (ccb->ccb_h.flags & CAM_DIR_OUT) bus_dmamap_sync(sc->sc_dmatag, xp->x_dmamap, BUS_DMASYNC_PREWRITE); err = lv1_storage_send_device_command(devid, LV1_STORAGE_SEND_ATAPI_COMMAND, vtophys(&atapi_cmd), sizeof(atapi_cmd), atapi_cmd.buf, atapi_cmd.arglen, &xp->x_tag); break; } } if (err) { device_printf(dev, "ATAPI command 0x%02x failed (%d)\n", cdb[0], err); bus_dmamap_unload(sc->sc_dmatag, xp->x_dmamap); xp->x_ccb = NULL; TAILQ_INSERT_TAIL(&sc->sc_free_xferq, xp, x_queue); bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); /* Invalid field in parameter list */ scsi_set_sense_data(&ccb->csio.sense_data, /*sense_format*/ SSD_TYPE_NONE, /*current_error*/ 1, /*sense_key*/ SSD_KEY_ILLEGAL_REQUEST, /*asc*/ 0x26, /*ascq*/ 0x00, SSD_ELEM_NONE); ccb->csio.sense_len = SSD_FULL_SIZE; ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; xpt_done(ccb); } else { CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ATAPI command 0x%02x tag 0x%016lx submitted\n ", cdb[0], xp->x_tag)); TAILQ_INSERT_TAIL(&sc->sc_active_xferq, xp, x_queue); ccb->ccb_h.status |= CAM_SIM_QUEUED; } } static int ps3cdrom_decode_lv1_status(uint64_t status, u_int8_t *sense_key, u_int8_t *asc, u_int8_t *ascq) { if (((status >> 24) & 0xff) != SCSI_STATUS_CHECK_COND) return -1; *sense_key = (status >> 16) & 0xff; *asc = (status >> 8) & 0xff; *ascq = status & 0xff; return (0); } static device_method_t ps3cdrom_methods[] = { DEVMETHOD(device_probe, ps3cdrom_probe), DEVMETHOD(device_attach, ps3cdrom_attach), DEVMETHOD(device_detach, ps3cdrom_detach), {0, 0}, }; static driver_t ps3cdrom_driver = { "ps3cdrom", ps3cdrom_methods, sizeof(struct ps3cdrom_softc), }; static devclass_t ps3cdrom_devclass; DRIVER_MODULE(ps3cdrom, ps3bus, ps3cdrom_driver, ps3cdrom_devclass, 0, 0); MODULE_DEPEND(ps3cdrom, cam, 1, 1, 1); Index: head/sys/powerpc/ps3/ps3disk.c =================================================================== --- head/sys/powerpc/ps3/ps3disk.c (revision 295879) +++ head/sys/powerpc/ps3/ps3disk.c (revision 295880) @@ -1,703 +1,702 @@ /*- * Copyright (C) 2011 glevand (geoffrey.levand@mail.ru) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include "ps3bus.h" #include "ps3-hvcall.h" #define PS3DISK_LOCK_INIT(_sc) \ mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), "ps3disk", MTX_DEF) #define PS3DISK_LOCK_DESTROY(_sc) mtx_destroy(&_sc->sc_mtx); #define PS3DISK_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) #define PS3DISK_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) #define PS3DISK_ASSERT_LOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_OWNED); #define PS3DISK_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED); #define LV1_STORAGE_ATA_HDDOUT 0x23 static SYSCTL_NODE(_hw, OID_AUTO, ps3disk, CTLFLAG_RD, 0, "PS3 Disk driver parameters"); #ifdef PS3DISK_DEBUG static int ps3disk_debug = 0; SYSCTL_INT(_hw_ps3disk, OID_AUTO, debug, CTLFLAG_RW, &ps3disk_debug, 0, "control debugging printfs"); TUNABLE_INT("hw.ps3disk.debug", &ps3disk_debug); enum { PS3DISK_DEBUG_INTR = 0x00000001, PS3DISK_DEBUG_TASK = 0x00000002, PS3DISK_DEBUG_READ = 0x00000004, PS3DISK_DEBUG_WRITE = 0x00000008, PS3DISK_DEBUG_FLUSH = 0x00000010, PS3DISK_DEBUG_ANY = 0xffffffff }; #define DPRINTF(sc, m, fmt, ...) \ do { \ if (sc->sc_debug & (m)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, m, fmt, ...) 
#endif struct ps3disk_region { uint64_t r_id; uint64_t r_start; uint64_t r_size; uint64_t r_flags; }; struct ps3disk_softc { device_t sc_dev; struct mtx sc_mtx; uint64_t sc_blksize; uint64_t sc_nblocks; uint64_t sc_nregs; struct ps3disk_region *sc_reg; int sc_irqid; struct resource *sc_irq; void *sc_irqctx; struct disk **sc_disk; struct bio_queue_head sc_bioq; struct bio_queue_head sc_deferredq; struct proc *sc_task; bus_dma_tag_t sc_dmatag; int sc_running; int sc_debug; }; static int ps3disk_open(struct disk *dp); static int ps3disk_close(struct disk *dp); static void ps3disk_strategy(struct bio *bp); static void ps3disk_task(void *arg); static void ps3disk_intr(void *arg); static int ps3disk_get_disk_geometry(struct ps3disk_softc *sc); static int ps3disk_enum_regions(struct ps3disk_softc *sc); static void ps3disk_transfer(void *arg, bus_dma_segment_t *segs, int nsegs, int error); static void ps3disk_sysctlattach(struct ps3disk_softc *sc); static MALLOC_DEFINE(M_PS3DISK, "ps3disk", "PS3 Disk"); static int ps3disk_probe(device_t dev) { if (ps3bus_get_bustype(dev) != PS3_BUSTYPE_STORAGE || ps3bus_get_devtype(dev) != PS3_DEVTYPE_DISK) return (ENXIO); device_set_desc(dev, "Playstation 3 Disk"); return (BUS_PROBE_SPECIFIC); } static int ps3disk_attach(device_t dev) { struct ps3disk_softc *sc; struct disk *d; intmax_t mb; uint64_t junk; char unit; int i, err; sc = device_get_softc(dev); sc->sc_dev = dev; PS3DISK_LOCK_INIT(sc); err = ps3disk_get_disk_geometry(sc); if (err) { device_printf(dev, "Could not get disk geometry\n"); err = ENXIO; goto fail_destroy_lock; } device_printf(dev, "block size %lu total blocks %lu\n", sc->sc_blksize, sc->sc_nblocks); err = ps3disk_enum_regions(sc); if (err) { device_printf(dev, "Could not enumerate disk regions\n"); err = ENXIO; goto fail_destroy_lock; } device_printf(dev, "Found %lu regions\n", sc->sc_nregs); if (!sc->sc_nregs) { err = ENXIO; goto fail_destroy_lock; } /* Setup interrupt handler */ sc->sc_irqid = 0; sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irqid, RF_ACTIVE); if (!sc->sc_irq) { device_printf(dev, "Could not allocate IRQ\n"); err = ENXIO; goto fail_free_regions; } err = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_BIO | INTR_MPSAFE | INTR_ENTROPY, NULL, ps3disk_intr, sc, &sc->sc_irqctx); if (err) { device_printf(dev, "Could not setup IRQ\n"); err = ENXIO; goto fail_release_intr; } /* Setup DMA */ err = bus_dma_tag_create(bus_get_dma_tag(dev), 4096, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_UNRESTRICTED, 1, PAGE_SIZE, 0, busdma_lock_mutex, &sc->sc_mtx, &sc->sc_dmatag); if (err) { device_printf(dev, "Could not create DMA tag\n"); err = ENXIO; goto fail_teardown_intr; } /* Setup disks */ sc->sc_disk = malloc(sc->sc_nregs * sizeof(struct disk *), M_PS3DISK, M_ZERO | M_WAITOK); if (!sc->sc_disk) { device_printf(dev, "Could not allocate disk(s)\n"); err = ENOMEM; goto fail_teardown_intr; } for (i = 0; i < sc->sc_nregs; i++) { struct ps3disk_region *rp = &sc->sc_reg[i]; d = sc->sc_disk[i] = disk_alloc(); d->d_open = ps3disk_open; d->d_close = ps3disk_close; d->d_strategy = ps3disk_strategy; d->d_name = "ps3disk"; d->d_drv1 = sc; d->d_maxsize = PAGE_SIZE; d->d_sectorsize = sc->sc_blksize; d->d_unit = i; d->d_mediasize = sc->sc_reg[i].r_size * sc->sc_blksize; d->d_flags |= DISKFLAG_CANFLUSHCACHE; mb = d->d_mediasize >> 20; unit = 'M'; if (mb >= 10240) { unit = 'G'; mb /= 1024; } /* Test to see if we can read this region */ err = lv1_storage_read(ps3bus_get_device(dev), d->d_unit, 0, 0, rp->r_flags, 0, &junk); 
device_printf(dev, "region %d %ju%cB%s\n", i, mb, unit, (err == LV1_DENIED_BY_POLICY) ? " (hypervisor protected)" : ""); if (err != LV1_DENIED_BY_POLICY) disk_create(d, DISK_VERSION); } err = 0; bioq_init(&sc->sc_bioq); bioq_init(&sc->sc_deferredq); kproc_create(&ps3disk_task, sc, &sc->sc_task, 0, 0, "ps3disk"); ps3disk_sysctlattach(sc); sc->sc_running = 1; return (0); fail_teardown_intr: bus_teardown_intr(dev, sc->sc_irq, sc->sc_irqctx); fail_release_intr: bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqid, sc->sc_irq); fail_free_regions: free(sc->sc_reg, M_PS3DISK); fail_destroy_lock: PS3DISK_LOCK_DESTROY(sc); return (err); } static int ps3disk_detach(device_t dev) { struct ps3disk_softc *sc = device_get_softc(dev); int i; for (i = 0; i < sc->sc_nregs; i++) disk_destroy(sc->sc_disk[i]); bus_dma_tag_destroy(sc->sc_dmatag); bus_teardown_intr(dev, sc->sc_irq, sc->sc_irqctx); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irqid, sc->sc_irq); free(sc->sc_disk, M_PS3DISK); free(sc->sc_reg, M_PS3DISK); PS3DISK_LOCK_DESTROY(sc); return (0); } static int ps3disk_open(struct disk *dp) { return (0); } static int ps3disk_close(struct disk *dp) { return (0); } /* Process deferred blocks */ static void ps3disk_task(void *arg) { struct ps3disk_softc *sc = (struct ps3disk_softc *) arg; struct bio *bp; while (1) { kproc_suspend_check(sc->sc_task); tsleep(&sc->sc_deferredq, PRIBIO, "ps3disk", 10); PS3DISK_LOCK(sc); bp = bioq_takefirst(&sc->sc_deferredq); PS3DISK_UNLOCK(sc); if (bp == NULL) continue; if (bp->bio_driver1 != NULL) { bus_dmamap_unload(sc->sc_dmatag, (bus_dmamap_t) bp->bio_driver1); bus_dmamap_destroy(sc->sc_dmatag, (bus_dmamap_t) bp->bio_driver1); } ps3disk_strategy(bp); } kproc_exit(0); } static void ps3disk_strategy(struct bio *bp) { struct ps3disk_softc *sc = (struct ps3disk_softc *)bp->bio_disk->d_drv1; int err; if (sc == NULL) { bp->bio_flags |= BIO_ERROR; bp->bio_error = EINVAL; biodone(bp); return; } PS3DISK_LOCK(sc); bp->bio_resid = bp->bio_bcount; bioq_insert_tail(&sc->sc_bioq, bp); DPRINTF(sc, PS3DISK_DEBUG_TASK, "%s: bio_cmd 0x%02x\n", __func__, bp->bio_cmd); err = 0; if (bp->bio_cmd == BIO_FLUSH) { bp->bio_driver1 = 0; err = lv1_storage_send_device_command( ps3bus_get_device(sc->sc_dev), LV1_STORAGE_ATA_HDDOUT, 0, 0, 0, 0, (uint64_t *)&bp->bio_driver2); if (err == LV1_BUSY) err = EAGAIN; } else if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) { if (bp->bio_bcount % sc->sc_blksize != 0) { err = EINVAL; } else { bus_dmamap_create(sc->sc_dmatag, BUS_DMA_COHERENT, (bus_dmamap_t *)(&bp->bio_driver1)); err = bus_dmamap_load(sc->sc_dmatag, (bus_dmamap_t)(bp->bio_driver1), bp->bio_data, bp->bio_bcount, ps3disk_transfer, bp, 0); if (err == EINPROGRESS) err = 0; } } else { err = EINVAL; } if (err == EAGAIN) { bioq_remove(&sc->sc_bioq, bp); bioq_insert_tail(&sc->sc_deferredq, bp); } else if (err != 0) { bp->bio_error = err; bp->bio_flags |= BIO_ERROR; bioq_remove(&sc->sc_bioq, bp); disk_err(bp, "hard error", -1, 1); biodone(bp); } PS3DISK_UNLOCK(sc); } static void ps3disk_intr(void *arg) { struct ps3disk_softc *sc = (struct ps3disk_softc *) arg; device_t dev = sc->sc_dev; uint64_t devid = ps3bus_get_device(dev); struct bio *bp; uint64_t tag, status; if (lv1_storage_get_async_status(devid, &tag, &status) != 0) return; PS3DISK_LOCK(sc); DPRINTF(sc, PS3DISK_DEBUG_INTR, "%s: tag 0x%016lx " "status 0x%016lx\n", __func__, tag, status); /* Locate the matching request */ TAILQ_FOREACH(bp, &sc->sc_bioq.queue, bio_queue) { if ((uint64_t)bp->bio_driver2 != tag) continue; if (status != 0) { 
device_printf(sc->sc_dev, "%s error (%#lx)\n", (bp->bio_cmd == BIO_READ) ? "Read" : "Write", status); bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; } else { bp->bio_error = 0; bp->bio_resid = 0; bp->bio_flags |= BIO_DONE; } if (bp->bio_driver1 != NULL) { if (bp->bio_cmd == BIO_READ) bus_dmamap_sync(sc->sc_dmatag, (bus_dmamap_t) bp->bio_driver1, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_dmatag, (bus_dmamap_t) bp->bio_driver1); bus_dmamap_destroy(sc->sc_dmatag, (bus_dmamap_t) bp->bio_driver1); } bioq_remove(&sc->sc_bioq, bp); biodone(bp); break; } if (bioq_first(&sc->sc_deferredq) != NULL) wakeup(&sc->sc_deferredq); PS3DISK_UNLOCK(sc); } static int ps3disk_get_disk_geometry(struct ps3disk_softc *sc) { device_t dev = sc->sc_dev; uint64_t bus_index = ps3bus_get_busidx(dev); uint64_t dev_index = ps3bus_get_devidx(dev); uint64_t junk; int err; err = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("blk_size"), 0, &sc->sc_blksize, &junk); if (err) { device_printf(dev, "Could not get block size (0x%08x)\n", err); return (ENXIO); } err = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("n_blocks"), 0, &sc->sc_nblocks, &junk); if (err) { device_printf(dev, "Could not get total number of blocks " "(0x%08x)\n", err); err = ENXIO; } return (err); } static int ps3disk_enum_regions(struct ps3disk_softc *sc) { device_t dev = sc->sc_dev; uint64_t bus_index = ps3bus_get_busidx(dev); uint64_t dev_index = ps3bus_get_devidx(dev); uint64_t junk; int i, err; /* Read number of regions */ err = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("n_regs"), 0, &sc->sc_nregs, &junk); if (err) { device_printf(dev, "Could not get number of regions (0x%08x)\n", err); err = ENXIO; goto fail; } if (!sc->sc_nregs) return 0; sc->sc_reg = malloc(sc->sc_nregs * sizeof(struct ps3disk_region), M_PS3DISK, M_ZERO | M_WAITOK); if (!sc->sc_reg) { err = ENOMEM; goto fail; } /* Setup regions */ for (i = 0; i < sc->sc_nregs; i++) { err = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("region") | i, lv1_repository_string("id"), &sc->sc_reg[i].r_id, &junk); if (err) { device_printf(dev, "Could not get region id (0x%08x)\n", err); err = ENXIO; goto fail; } err = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("region") | i, lv1_repository_string("start"), &sc->sc_reg[i].r_start, &junk); if (err) { device_printf(dev, "Could not get region start " "(0x%08x)\n", err); err = ENXIO; goto fail; } err = lv1_get_repository_node_value(PS3_LPAR_ID_PME, (lv1_repository_string("bus") >> 32) | bus_index, lv1_repository_string("dev") | dev_index, lv1_repository_string("region") | i, lv1_repository_string("size"), &sc->sc_reg[i].r_size, &junk); if (err) { device_printf(dev, "Could not get region size " "(0x%08x)\n", err); err = ENXIO; goto fail; } if (i == 0) sc->sc_reg[i].r_flags = 0x2; else sc->sc_reg[i].r_flags = 0; } return (0); fail: sc->sc_nregs = 0; if (sc->sc_reg) free(sc->sc_reg, M_PS3DISK); return (err); } static void ps3disk_transfer(void *arg, bus_dma_segment_t *segs, int nsegs, 
int error) { struct bio *bp = (struct bio *)(arg); struct ps3disk_softc *sc = (struct ps3disk_softc *)bp->bio_disk->d_drv1; struct ps3disk_region *rp = &sc->sc_reg[bp->bio_disk->d_unit]; uint64_t devid = ps3bus_get_device(sc->sc_dev); uint64_t block; int i, err; /* Locks already held by busdma */ PS3DISK_ASSERT_LOCKED(sc); if (error) { bp->bio_error = error; bp->bio_flags |= BIO_ERROR; bioq_remove(&sc->sc_bioq, bp); biodone(bp); return; } block = bp->bio_pblkno; for (i = 0; i < nsegs; i++) { KASSERT((segs[i].ds_len % sc->sc_blksize) == 0, ("DMA fragments not blocksize multiples")); if (bp->bio_cmd == BIO_READ) { err = lv1_storage_read(devid, rp->r_id, block, segs[i].ds_len/sc->sc_blksize, rp->r_flags, segs[i].ds_addr, (uint64_t *)&bp->bio_driver2); } else { bus_dmamap_sync(sc->sc_dmatag, (bus_dmamap_t)bp->bio_driver1, BUS_DMASYNC_PREWRITE); err = lv1_storage_write(devid, rp->r_id, block, segs[i].ds_len/sc->sc_blksize, rp->r_flags, segs[i].ds_addr, (uint64_t *)&bp->bio_driver2); } if (err) { if (err == LV1_BUSY) { bioq_remove(&sc->sc_bioq, bp); bioq_insert_tail(&sc->sc_deferredq, bp); } else { bus_dmamap_unload(sc->sc_dmatag, (bus_dmamap_t) bp->bio_driver1); bus_dmamap_destroy(sc->sc_dmatag, (bus_dmamap_t) bp->bio_driver1); device_printf(sc->sc_dev, "Could not read " "sectors (0x%08x)\n", err); bp->bio_error = EINVAL; bp->bio_flags |= BIO_ERROR; bioq_remove(&sc->sc_bioq, bp); biodone(bp); } break; } DPRINTF(sc, PS3DISK_DEBUG_READ, "%s: tag 0x%016lx\n", __func__, sc->sc_bounce_tag); } } #ifdef PS3DISK_DEBUG static int ps3disk_sysctl_debug(SYSCTL_HANDLER_ARGS) { struct ps3disk_softc *sc = arg1; int debug, error; debug = sc->sc_debug; error = sysctl_handle_int(oidp, &debug, 0, req); if (error || !req->newptr) return error; sc->sc_debug = debug; return 0; } #endif static void ps3disk_sysctlattach(struct ps3disk_softc *sc) { #ifdef PS3DISK_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); sc->sc_debug = ps3disk_debug; SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLTYPE_INT | CTLFLAG_RW, sc, 0, ps3disk_sysctl_debug, "I", "control debugging printfs"); #endif } static device_method_t ps3disk_methods[] = { DEVMETHOD(device_probe, ps3disk_probe), DEVMETHOD(device_attach, ps3disk_attach), DEVMETHOD(device_detach, ps3disk_detach), {0, 0}, }; static driver_t ps3disk_driver = { "ps3disk", ps3disk_methods, sizeof(struct ps3disk_softc), }; static devclass_t ps3disk_devclass; DRIVER_MODULE(ps3disk, ps3bus, ps3disk_driver, ps3disk_devclass, 0, 0); Index: head/sys/powerpc/pseries/platform_chrp.c =================================================================== --- head/sys/powerpc/pseries/platform_chrp.c (revision 295879) +++ head/sys/powerpc/pseries/platform_chrp.c (revision 295880) @@ -1,517 +1,516 @@ /*- * Copyright (c) 2008 Marcel Moolenaar * Copyright (c) 2009 Nathan Whitehorn * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include "platform_if.h" #ifdef SMP extern void *ap_pcpu; #endif #ifdef __powerpc64__ static uint8_t splpar_vpa[MAXCPU][640] __aligned(128); /* XXX: dpcpu */ #endif static vm_offset_t realmaxaddr = VM_MAX_ADDRESS; static int chrp_probe(platform_t); static int chrp_attach(platform_t); void chrp_mem_regions(platform_t, struct mem_region *phys, int *physsz, struct mem_region *avail, int *availsz); static vm_offset_t chrp_real_maxaddr(platform_t); static u_long chrp_timebase_freq(platform_t, struct cpuref *cpuref); static int chrp_smp_first_cpu(platform_t, struct cpuref *cpuref); static int chrp_smp_next_cpu(platform_t, struct cpuref *cpuref); static int chrp_smp_get_bsp(platform_t, struct cpuref *cpuref); static void chrp_smp_ap_init(platform_t); #ifdef SMP static int chrp_smp_start_cpu(platform_t, struct pcpu *cpu); static struct cpu_group *chrp_smp_topo(platform_t plat); #endif static void chrp_reset(platform_t); #ifdef __powerpc64__ #include "phyp-hvcall.h" static void phyp_cpu_idle(sbintime_t sbt); #endif static platform_method_t chrp_methods[] = { PLATFORMMETHOD(platform_probe, chrp_probe), PLATFORMMETHOD(platform_attach, chrp_attach), PLATFORMMETHOD(platform_mem_regions, chrp_mem_regions), PLATFORMMETHOD(platform_real_maxaddr, chrp_real_maxaddr), PLATFORMMETHOD(platform_timebase_freq, chrp_timebase_freq), PLATFORMMETHOD(platform_smp_ap_init, chrp_smp_ap_init), PLATFORMMETHOD(platform_smp_first_cpu, chrp_smp_first_cpu), PLATFORMMETHOD(platform_smp_next_cpu, chrp_smp_next_cpu), PLATFORMMETHOD(platform_smp_get_bsp, chrp_smp_get_bsp), #ifdef SMP PLATFORMMETHOD(platform_smp_start_cpu, chrp_smp_start_cpu), PLATFORMMETHOD(platform_smp_topo, chrp_smp_topo), #endif PLATFORMMETHOD(platform_reset, chrp_reset), { 0, 0 } }; static platform_def_t chrp_platform = { "chrp", chrp_methods, 0 }; PLATFORM_DEF(chrp_platform); static int chrp_probe(platform_t plat) { if (OF_finddevice("/memory") != -1 || OF_finddevice("/memory@0") != -1) return (BUS_PROBE_GENERIC); return (ENXIO); } static int chrp_attach(platform_t plat) { #ifdef __powerpc64__ int i; /* XXX: check for /rtas/ibm,hypertas-functions? 
*/ if (!(mfmsr() & PSL_HV)) { struct mem_region *phys, *avail; int nphys, navail; mem_regions(&phys, &nphys, &avail, &navail); realmaxaddr = phys[0].mr_size; pmap_mmu_install("mmu_phyp", BUS_PROBE_SPECIFIC); cpu_idle_hook = phyp_cpu_idle; /* Set up important VPA fields */ for (i = 0; i < MAXCPU; i++) { bzero(splpar_vpa[i], sizeof(splpar_vpa)); /* First two: VPA size */ splpar_vpa[i][4] = (uint8_t)((sizeof(splpar_vpa[i]) >> 8) & 0xff); splpar_vpa[i][5] = (uint8_t)(sizeof(splpar_vpa[i]) & 0xff); splpar_vpa[i][0xba] = 1; /* Maintain FPRs */ splpar_vpa[i][0xbb] = 1; /* Maintain PMCs */ splpar_vpa[i][0xfc] = 0xff; /* Maintain full SLB */ splpar_vpa[i][0xfd] = 0xff; splpar_vpa[i][0xff] = 1; /* Maintain Altivec */ } mb(); /* Set up hypervisor CPU stuff */ chrp_smp_ap_init(plat); } #endif /* Some systems (e.g. QEMU) need Open Firmware to stand down */ ofw_quiesce(); return (0); } static int parse_drconf_memory(struct mem_region *ofmem, int *msz, struct mem_region *ofavail, int *asz) { phandle_t phandle; vm_offset_t base; int i, idx, len, lasz, lmsz, res; uint32_t flags, lmb_size[2]; uint32_t *dmem; lmsz = *msz; lasz = *asz; phandle = OF_finddevice("/ibm,dynamic-reconfiguration-memory"); if (phandle == -1) /* No drconf node, return. */ return (0); res = OF_getencprop(phandle, "ibm,lmb-size", lmb_size, sizeof(lmb_size)); if (res == -1) return (0); printf("Logical Memory Block size: %d MB\n", lmb_size[1] >> 20); /* Parse the /ibm,dynamic-memory. The first position gives the # of entries. The next two words reflect the address of the memory block. The next four words are the DRC index, reserved, list index and flags. (see PAPR C.6.6.2 ibm,dynamic-reconfiguration-memory) #el Addr DRC-idx res list-idx flags ------------------------------------------------- | 4 | 8 | 4 | 4 | 4 | 4 |.... ------------------------------------------------- */ len = OF_getproplen(phandle, "ibm,dynamic-memory"); if (len > 0) { /* We have to use a variable length array on the stack since we have very limited stack space. */ cell_t arr[len/sizeof(cell_t)]; res = OF_getencprop(phandle, "ibm,dynamic-memory", arr, sizeof(arr)); if (res == -1) return (0); /* Number of elements */ idx = arr[0]; /* First address, in arr[1], arr[2]*/ dmem = &arr[1]; for (i = 0; i < idx; i++) { base = ((uint64_t)dmem[0] << 32) + dmem[1]; dmem += 4; flags = dmem[1]; /* Use region only if available and not reserved. */ if ((flags & 0x8) && !(flags & 0x80)) { ofmem[lmsz].mr_start = base; ofmem[lmsz].mr_size = (vm_size_t)lmb_size[1]; ofavail[lasz].mr_start = base; ofavail[lasz].mr_size = (vm_size_t)lmb_size[1]; lmsz++; lasz++; } dmem += 2; } } *msz = lmsz; *asz = lasz; return (1); } void chrp_mem_regions(platform_t plat, struct mem_region *phys, int *physsz, struct mem_region *avail, int *availsz) { vm_offset_t maxphysaddr; int i; ofw_mem_regions(phys, physsz, avail, availsz); parse_drconf_memory(phys, physsz, avail, availsz); /* * On some firmwares (SLOF), some memory may be marked available that * doesn't actually exist. This manifests as an extension of the last * available segment past the end of physical memory, so truncate that * one. 
*/ maxphysaddr = 0; for (i = 0; i < *physsz; i++) if (phys[i].mr_start + phys[i].mr_size > maxphysaddr) maxphysaddr = phys[i].mr_start + phys[i].mr_size; for (i = 0; i < *availsz; i++) if (avail[i].mr_start + avail[i].mr_size > maxphysaddr) avail[i].mr_size = maxphysaddr - avail[i].mr_start; } static vm_offset_t chrp_real_maxaddr(platform_t plat) { return (realmaxaddr); } static u_long chrp_timebase_freq(platform_t plat, struct cpuref *cpuref) { phandle_t phandle; int32_t ticks = -1; phandle = cpuref->cr_hwref; OF_getencprop(phandle, "timebase-frequency", &ticks, sizeof(ticks)); if (ticks <= 0) panic("Unable to determine timebase frequency!"); return (ticks); } static int chrp_smp_first_cpu(platform_t plat, struct cpuref *cpuref) { char buf[8]; phandle_t cpu, dev, root; int res, cpuid; root = OF_peer(0); dev = OF_child(root); while (dev != 0) { res = OF_getprop(dev, "name", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpus") == 0) break; dev = OF_peer(dev); } if (dev == 0) { /* * psim doesn't have a name property on the /cpus node, * but it can be found directly */ dev = OF_finddevice("/cpus"); if (dev == 0) return (ENOENT); } cpu = OF_child(dev); while (cpu != 0) { res = OF_getprop(cpu, "device_type", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpu") == 0) break; cpu = OF_peer(cpu); } if (cpu == 0) return (ENOENT); cpuref->cr_hwref = cpu; res = OF_getencprop(cpu, "ibm,ppc-interrupt-server#s", &cpuid, sizeof(cpuid)); if (res <= 0) res = OF_getencprop(cpu, "reg", &cpuid, sizeof(cpuid)); if (res <= 0) cpuid = 0; cpuref->cr_cpuid = cpuid; return (0); } static int chrp_smp_next_cpu(platform_t plat, struct cpuref *cpuref) { char buf[8]; phandle_t cpu; int i, res, cpuid; /* Check for whether it should be the next thread */ res = OF_getproplen(cpuref->cr_hwref, "ibm,ppc-interrupt-server#s"); if (res > 0) { cell_t interrupt_servers[res/sizeof(cell_t)]; OF_getencprop(cpuref->cr_hwref, "ibm,ppc-interrupt-server#s", interrupt_servers, res); for (i = 0; i < res/sizeof(cell_t) - 1; i++) { if (interrupt_servers[i] == cpuref->cr_cpuid) { cpuref->cr_cpuid = interrupt_servers[i+1]; return (0); } } } /* Next CPU core/package */ cpu = OF_peer(cpuref->cr_hwref); while (cpu != 0) { res = OF_getprop(cpu, "device_type", buf, sizeof(buf)); if (res > 0 && strcmp(buf, "cpu") == 0) break; cpu = OF_peer(cpu); } if (cpu == 0) return (ENOENT); cpuref->cr_hwref = cpu; res = OF_getencprop(cpu, "ibm,ppc-interrupt-server#s", &cpuid, sizeof(cpuid)); if (res <= 0) res = OF_getencprop(cpu, "reg", &cpuid, sizeof(cpuid)); if (res <= 0) cpuid = 0; cpuref->cr_cpuid = cpuid; return (0); } static int chrp_smp_get_bsp(platform_t plat, struct cpuref *cpuref) { ihandle_t inst; phandle_t bsp, chosen; int res, cpuid; chosen = OF_finddevice("/chosen"); if (chosen == 0) return (ENXIO); res = OF_getencprop(chosen, "cpu", &inst, sizeof(inst)); if (res < 0) return (ENXIO); bsp = OF_instance_to_package(inst); /* Pick the primary thread. Can it be any other? 
*/ cpuref->cr_hwref = bsp; res = OF_getencprop(bsp, "ibm,ppc-interrupt-server#s", &cpuid, sizeof(cpuid)); if (res <= 0) res = OF_getencprop(bsp, "reg", &cpuid, sizeof(cpuid)); if (res <= 0) cpuid = 0; cpuref->cr_cpuid = cpuid; return (0); } #ifdef SMP static int chrp_smp_start_cpu(platform_t plat, struct pcpu *pc) { cell_t start_cpu; int result, err, timeout; if (!rtas_exists()) { printf("RTAS uninitialized: unable to start AP %d\n", pc->pc_cpuid); return (ENXIO); } start_cpu = rtas_token_lookup("start-cpu"); if (start_cpu == -1) { printf("RTAS unknown method: unable to start AP %d\n", pc->pc_cpuid); return (ENXIO); } ap_pcpu = pc; powerpc_sync(); result = rtas_call_method(start_cpu, 3, 1, pc->pc_cpuid, EXC_RST, pc, &err); if (result < 0 || err != 0) { printf("RTAS error (%d/%d): unable to start AP %d\n", result, err, pc->pc_cpuid); return (ENXIO); } timeout = 10000; while (!pc->pc_awake && timeout--) DELAY(100); return ((pc->pc_awake) ? 0 : EBUSY); } static struct cpu_group * chrp_smp_topo(platform_t plat) { struct pcpu *pc, *last_pc; int i, ncores, ncpus; ncores = ncpus = 0; last_pc = NULL; for (i = 0; i <= mp_maxid; i++) { pc = pcpu_find(i); if (pc == NULL) continue; if (last_pc == NULL || pc->pc_hwref != last_pc->pc_hwref) ncores++; last_pc = pc; ncpus++; } if (ncpus % ncores != 0) { printf("WARNING: Irregular SMP topology. Performance may be " "suboptimal (%d CPUS, %d cores)\n", ncpus, ncores); return (smp_topo_none()); } /* Don't do anything fancier for non-threaded SMP */ if (ncpus == ncores) return (smp_topo_none()); return (smp_topo_1level(CG_SHARE_L1, ncpus / ncores, CG_FLAG_SMT)); } #endif static void chrp_reset(platform_t platform) { OF_reboot(); } #ifdef __powerpc64__ static void phyp_cpu_idle(sbintime_t sbt) { phyp_hcall(H_CEDE); } static void chrp_smp_ap_init(platform_t platform) { if (!(mfmsr() & PSL_HV)) { /* Register VPA */ phyp_hcall(H_REGISTER_VPA, 1UL, PCPU_GET(cpuid), splpar_vpa[PCPU_GET(cpuid)]); /* Set interrupt priority */ phyp_hcall(H_CPPR, 0xff); } } #else static void chrp_smp_ap_init(platform_t platform) { } #endif Index: head/sys/powerpc/psim/iobus.c =================================================================== --- head/sys/powerpc/psim/iobus.c (revision 295879) +++ head/sys/powerpc/psim/iobus.c (revision 295880) @@ -1,414 +1,413 @@ /*- * Copyright 2002 by Peter Grehan. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * PSIM 'iobus' local bus. Should be set up in the device tree like: * * /iobus@0x80000000/name psim-iobus * * Code borrowed from various nexus.c and uninorth.c :-) */ #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include struct iobus_softc { phandle_t sc_node; vm_offset_t sc_addr; vm_offset_t sc_size; struct rman sc_mem_rman; }; static MALLOC_DEFINE(M_IOBUS, "iobus", "iobus device information"); static int iobus_probe(device_t); static int iobus_attach(device_t); static int iobus_print_child(device_t dev, device_t child); static void iobus_probe_nomatch(device_t, device_t); static int iobus_read_ivar(device_t, device_t, int, uintptr_t *); static int iobus_write_ivar(device_t, device_t, int, uintptr_t); static struct resource *iobus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int iobus_activate_resource(device_t, device_t, int, int, struct resource *); static int iobus_deactivate_resource(device_t, device_t, int, int, struct resource *); static int iobus_release_resource(device_t, device_t, int, int, struct resource *); /* * Bus interface definition */ static device_method_t iobus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, iobus_probe), DEVMETHOD(device_attach, iobus_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, iobus_print_child), DEVMETHOD(bus_probe_nomatch, iobus_probe_nomatch), DEVMETHOD(bus_read_ivar, iobus_read_ivar), DEVMETHOD(bus_write_ivar, iobus_write_ivar), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_alloc_resource, iobus_alloc_resource), DEVMETHOD(bus_release_resource, iobus_release_resource), DEVMETHOD(bus_activate_resource, iobus_activate_resource), DEVMETHOD(bus_deactivate_resource, iobus_deactivate_resource), { 0, 0 } }; static driver_t iobus_driver = { "iobus", iobus_methods, sizeof(struct iobus_softc) }; devclass_t iobus_devclass; DRIVER_MODULE(iobus, ofwbus, iobus_driver, iobus_devclass, 0, 0); static int iobus_probe(device_t dev) { const char *type = ofw_bus_get_name(dev); if (strcmp(type, "psim-iobus") != 0) return (ENXIO); device_set_desc(dev, "PSIM local bus"); return (0); } /* * Add interrupt/addr range to the dev's resource list if present */ static void iobus_add_intr(phandle_t devnode, struct iobus_devinfo *dinfo) { u_int intr = -1; if (OF_getprop(devnode, "interrupt", &intr, sizeof(intr)) != -1) { resource_list_add(&dinfo->id_resources, SYS_RES_IRQ, 0, intr, intr, 1); } dinfo->id_interrupt = intr; } static void iobus_add_reg(phandle_t devnode, struct iobus_devinfo *dinfo, vm_offset_t iobus_off) { u_int size; int i; size = OF_getprop(devnode, "reg", dinfo->id_reg,sizeof(dinfo->id_reg)); if (size != -1) { 
dinfo->id_nregs = size / (sizeof(dinfo->id_reg[0])); for (i = 0; i < dinfo->id_nregs; i+= 3) { /* * Scale the absolute addresses back to iobus * relative offsets. This is to better simulate * macio */ dinfo->id_reg[i+1] -= iobus_off; resource_list_add(&dinfo->id_resources, SYS_RES_MEMORY, 0, dinfo->id_reg[i+1], dinfo->id_reg[i+1] + dinfo->id_reg[i+2], dinfo->id_reg[i+2]); } } } static int iobus_attach(device_t dev) { struct iobus_softc *sc; struct iobus_devinfo *dinfo; phandle_t root; phandle_t child; device_t cdev; char *name; u_int reg[2]; int size; sc = device_get_softc(dev); sc->sc_node = ofw_bus_get_node(dev); /* * Find the base addr/size of the iobus, and initialize the * resource manager */ size = OF_getprop(sc->sc_node, "reg", reg, sizeof(reg)); if (size == sizeof(reg)) { sc->sc_addr = reg[0]; sc->sc_size = reg[1]; } else { return (ENXIO); } sc->sc_mem_rman.rm_type = RMAN_ARRAY; sc->sc_mem_rman.rm_descr = "IOBus Device Memory"; if (rman_init(&sc->sc_mem_rman) != 0) { device_printf(dev, "failed to init mem range resources\n"); return (ENXIO); } rman_manage_region(&sc->sc_mem_rman, 0, sc->sc_size); /* * Iterate through the sub-devices */ root = sc->sc_node; for (child = OF_child(root); child != 0; child = OF_peer(child)) { OF_getprop_alloc(child, "name", 1, (void **)&name); cdev = device_add_child(dev, NULL, -1); if (cdev != NULL) { dinfo = malloc(sizeof(*dinfo), M_IOBUS, M_WAITOK); memset(dinfo, 0, sizeof(*dinfo)); resource_list_init(&dinfo->id_resources); dinfo->id_node = child; dinfo->id_name = name; iobus_add_intr(child, dinfo); iobus_add_reg(child, dinfo, sc->sc_addr); device_set_ivars(cdev, dinfo); } else { free(name, M_OFWPROP); } } return (bus_generic_attach(dev)); } static int iobus_print_child(device_t dev, device_t child) { struct iobus_devinfo *dinfo; struct resource_list *rl; int retval = 0; dinfo = device_get_ivars(child); rl = &dinfo->id_resources; retval += bus_print_child_header(dev, child); retval += printf(" offset 0x%x", dinfo->id_reg[1]); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); retval += bus_print_child_footer(dev, child); return (retval); } static void iobus_probe_nomatch(device_t dev, device_t child) { } static int iobus_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct iobus_devinfo *dinfo; if ((dinfo = device_get_ivars(child)) == 0) return (ENOENT); switch (which) { case IOBUS_IVAR_NODE: *result = dinfo->id_node; break; case IOBUS_IVAR_NAME: *result = (uintptr_t)dinfo->id_name; break; case IOBUS_IVAR_NREGS: *result = dinfo->id_nregs; break; case IOBUS_IVAR_REGS: *result = (uintptr_t)dinfo->id_reg; break; default: return (ENOENT); } return (0); } static int iobus_write_ivar(device_t dev, device_t child, int which, uintptr_t value) { return (EINVAL); } static struct resource * iobus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct iobus_softc *sc; int needactivate; struct resource *rv; struct rman *rm; sc = device_get_softc(bus); needactivate = flags & RF_ACTIVE; flags &= ~RF_ACTIVE; switch (type) { case SYS_RES_MEMORY: case SYS_RES_IOPORT: rm = &sc->sc_mem_rman; break; case SYS_RES_IRQ: return (bus_alloc_resource(bus, type, rid, start, end, count, flags)); default: device_printf(bus, "unknown resource request from %s\n", device_get_nameunit(child)); return (NULL); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == NULL) { device_printf(bus, "failed to reserve resource for %s\n", 
device_get_nameunit(child)); return (NULL); } rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv) != 0) { device_printf(bus, "failed to activate resource for %s\n", device_get_nameunit(child)); rman_release_resource(rv); return (NULL); } } return (rv); } static int iobus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { if (rman_get_flags(res) & RF_ACTIVE) { int error = bus_deactivate_resource(child, type, rid, res); if (error) return error; } return (rman_release_resource(res)); } static int iobus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { struct iobus_softc *sc; void *p; sc = device_get_softc(bus); if (type == SYS_RES_IRQ) return (bus_activate_resource(bus, type, rid, res)); if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { p = pmap_mapdev((vm_offset_t)rman_get_start(res) + sc->sc_addr, (vm_size_t)rman_get_size(res)); if (p == NULL) return (ENOMEM); rman_set_virtual(res, p); rman_set_bustag(res, &bs_le_tag); rman_set_bushandle(res, (u_long)p); } return (rman_activate_resource(res)); } static int iobus_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { /* * If this is a memory resource, unmap it. */ if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { u_int32_t psize; psize = rman_get_size(res); pmap_unmapdev((vm_offset_t)rman_get_virtual(res), psize); } return (rman_deactivate_resource(res)); } Index: head/sys/sparc64/pci/fire.c =================================================================== --- head/sys/sparc64/pci/fire.c (revision 295879) +++ head/sys/sparc64/pci/fire.c (revision 295880) @@ -1,1874 +1,1873 @@ /*- * Copyright (c) 1999, 2000 Matthew R. Green * Copyright (c) 2001 - 2003 by Thomas Moestl * Copyright (c) 2009 by Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: NetBSD: psycho.c,v 1.39 2001/10/07 20:30:41 eeh Exp * from: FreeBSD: psycho.c 183152 2008-09-18 19:45:22Z marius */ #include __FBSDID("$FreeBSD$"); /* * Driver for `Fire' JBus to PCI Express and `Oberon' Uranus to PCI Express * bridges */ #include "opt_fire.h" #include "opt_ofw_pci.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #include #include #include "pcib_if.h" struct fire_msiqarg; static const struct fire_desc *fire_get_desc(device_t dev); static void fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map, bus_dmasync_op_t op); static int fire_get_intrmap(struct fire_softc *sc, u_int ino, bus_addr_t *intrmapptr, bus_addr_t *intrclrptr); static void fire_intr_assign(void *arg); static void fire_intr_clear(void *arg); static void fire_intr_disable(void *arg); static void fire_intr_enable(void *arg); static int fire_intr_register(struct fire_softc *sc, u_int ino); static inline void fire_msiq_common(struct intr_vector *iv, struct fire_msiqarg *fmqa); static void fire_msiq_filter(void *cookie); static void fire_msiq_handler(void *cookie); static void fire_set_intr(struct fire_softc *sc, u_int index, u_int ino, driver_filter_t handler, void *arg); static timecounter_get_t fire_get_timecount; /* Interrupt handlers */ static driver_filter_t fire_dmc_pec; static driver_filter_t fire_pcie; static driver_filter_t fire_xcb; /* * Methods */ static pcib_alloc_msi_t fire_alloc_msi; static pcib_alloc_msix_t fire_alloc_msix; static bus_alloc_resource_t fire_alloc_resource; static device_attach_t fire_attach; static pcib_map_msi_t fire_map_msi; static pcib_maxslots_t fire_maxslots; static device_probe_t fire_probe; static pcib_read_config_t fire_read_config; static pcib_release_msi_t fire_release_msi; static pcib_release_msix_t fire_release_msix; static pcib_route_interrupt_t fire_route_interrupt; static bus_setup_intr_t fire_setup_intr; static bus_teardown_intr_t fire_teardown_intr; static pcib_write_config_t fire_write_config; static device_method_t fire_methods[] = { /* Device interface */ DEVMETHOD(device_probe, fire_probe), DEVMETHOD(device_attach, fire_attach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_read_ivar, ofw_pci_read_ivar), DEVMETHOD(bus_setup_intr, fire_setup_intr), DEVMETHOD(bus_teardown_intr, fire_teardown_intr), DEVMETHOD(bus_alloc_resource, fire_alloc_resource), DEVMETHOD(bus_activate_resource, ofw_pci_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, ofw_pci_adjust_resource), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_get_dma_tag, ofw_pci_get_dma_tag), /* pcib interface */ DEVMETHOD(pcib_maxslots, fire_maxslots), DEVMETHOD(pcib_read_config, fire_read_config), DEVMETHOD(pcib_write_config, fire_write_config), DEVMETHOD(pcib_route_interrupt, fire_route_interrupt), DEVMETHOD(pcib_alloc_msi, fire_alloc_msi), DEVMETHOD(pcib_release_msi, fire_release_msi), DEVMETHOD(pcib_alloc_msix, fire_alloc_msix), DEVMETHOD(pcib_release_msix, fire_release_msix), DEVMETHOD(pcib_map_msi, fire_map_msi), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_node, ofw_pci_get_node), DEVMETHOD_END }; static devclass_t fire_devclass; 
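/*
 * Illustrative sketch, not from this tree or this commit: every driver
 * touched in this revision (ps3cdrom, ps3disk, iobus, fire) registers with
 * newbus the same way -- a kobj method table, a driver_t wrapping it, and a
 * DRIVER_MODULE() (or, as fire(4) does just below, DEFINE_CLASS_0() plus
 * EARLY_DRIVER_MODULE()) attaching it to a parent bus.  The "foo"/"foobus"
 * identifiers here are placeholders, not names from this source tree.
 */
#if 0	/* sketch only, never compiled */
static device_method_t foo_methods[] = {
	DEVMETHOD(device_probe,		foo_probe),
	DEVMETHOD(device_attach,	foo_attach),
	DEVMETHOD(device_detach,	foo_detach),
	DEVMETHOD_END
};

static driver_t foo_driver = {
	"foo",				/* device name prefix */
	foo_methods,			/* kobj method table */
	sizeof(struct foo_softc),	/* per-instance softc size */
};

static devclass_t foo_devclass;
DRIVER_MODULE(foo, foobus, foo_driver, foo_devclass, 0, 0);
#endif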
DEFINE_CLASS_0(pcib, fire_driver, fire_methods, sizeof(struct fire_softc)); EARLY_DRIVER_MODULE(fire, nexus, fire_driver, fire_devclass, 0, 0, BUS_PASS_BUS); MODULE_DEPEND(fire, nexus, 1, 1, 1); static const struct intr_controller fire_ic = { fire_intr_enable, fire_intr_disable, fire_intr_assign, fire_intr_clear }; struct fire_icarg { struct fire_softc *fica_sc; bus_addr_t fica_map; bus_addr_t fica_clr; }; static const struct intr_controller fire_msiqc_filter = { fire_intr_enable, fire_intr_disable, fire_intr_assign, NULL }; struct fire_msiqarg { struct fire_icarg fmqa_fica; struct mtx fmqa_mtx; struct fo_msiq_record *fmqa_base; uint64_t fmqa_head; uint64_t fmqa_tail; uint32_t fmqa_msiq; uint32_t fmqa_msi; }; #define FIRE_PERF_CNT_QLTY 100 #define FIRE_SPC_BARRIER(spc, sc, offs, len, flags) \ bus_barrier((sc)->sc_mem_res[(spc)], (offs), (len), (flags)) #define FIRE_SPC_READ_8(spc, sc, offs) \ bus_read_8((sc)->sc_mem_res[(spc)], (offs)) #define FIRE_SPC_WRITE_8(spc, sc, offs, v) \ bus_write_8((sc)->sc_mem_res[(spc)], (offs), (v)) #ifndef FIRE_DEBUG #define FIRE_SPC_SET(spc, sc, offs, reg, v) \ FIRE_SPC_WRITE_8((spc), (sc), (offs), (v)) #else #define FIRE_SPC_SET(spc, sc, offs, reg, v) do { \ device_printf((sc)->sc_dev, reg " 0x%016llx -> 0x%016llx\n", \ (unsigned long long)FIRE_SPC_READ_8((spc), (sc), (offs)), \ (unsigned long long)(v)); \ FIRE_SPC_WRITE_8((spc), (sc), (offs), (v)); \ } while (0) #endif #define FIRE_PCI_BARRIER(sc, offs, len, flags) \ FIRE_SPC_BARRIER(FIRE_PCI, (sc), (offs), len, flags) #define FIRE_PCI_READ_8(sc, offs) \ FIRE_SPC_READ_8(FIRE_PCI, (sc), (offs)) #define FIRE_PCI_WRITE_8(sc, offs, v) \ FIRE_SPC_WRITE_8(FIRE_PCI, (sc), (offs), (v)) #define FIRE_CTRL_BARRIER(sc, offs, len, flags) \ FIRE_SPC_BARRIER(FIRE_CTRL, (sc), (offs), len, flags) #define FIRE_CTRL_READ_8(sc, offs) \ FIRE_SPC_READ_8(FIRE_CTRL, (sc), (offs)) #define FIRE_CTRL_WRITE_8(sc, offs, v) \ FIRE_SPC_WRITE_8(FIRE_CTRL, (sc), (offs), (v)) #define FIRE_PCI_SET(sc, offs, v) \ FIRE_SPC_SET(FIRE_PCI, (sc), (offs), # offs, (v)) #define FIRE_CTRL_SET(sc, offs, v) \ FIRE_SPC_SET(FIRE_CTRL, (sc), (offs), # offs, (v)) struct fire_desc { const char *fd_string; int fd_mode; const char *fd_name; }; static const struct fire_desc fire_compats[] = { { "pciex108e,80f0", FIRE_MODE_FIRE, "Fire" }, #if 0 { "pciex108e,80f8", FIRE_MODE_OBERON, "Oberon" }, #endif { NULL, 0, NULL } }; static const struct fire_desc * fire_get_desc(device_t dev) { const struct fire_desc *desc; const char *compat; compat = ofw_bus_get_compat(dev); if (compat == NULL) return (NULL); for (desc = fire_compats; desc->fd_string != NULL; desc++) if (strcmp(desc->fd_string, compat) == 0) return (desc); return (NULL); } static int fire_probe(device_t dev) { const char *dtype; dtype = ofw_bus_get_type(dev); if (dtype != NULL && strcmp(dtype, OFW_TYPE_PCIE) == 0 && fire_get_desc(dev) != NULL) { device_set_desc(dev, "Sun Host-PCIe bridge"); return (BUS_PROBE_GENERIC); } return (ENXIO); } static int fire_attach(device_t dev) { struct fire_softc *sc; const struct fire_desc *desc; struct ofw_pci_msi_ranges msi_ranges; struct ofw_pci_msi_addr_ranges msi_addr_ranges; struct ofw_pci_msi_eq_to_devino msi_eq_to_devino; struct fire_msiqarg *fmqa; struct timecounter *tc; bus_dma_tag_t dmat; uint64_t ino_bitmap, val; phandle_t node; uint32_t prop, prop_array[2]; int i, j, mode; u_int lw; uint16_t mps; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); desc = fire_get_desc(dev); mode = desc->fd_mode; sc->sc_dev = dev; sc->sc_mode = mode; sc->sc_flags = 0; 
mtx_init(&sc->sc_msi_mtx, "msi_mtx", NULL, MTX_DEF); mtx_init(&sc->sc_pcib_mtx, "pcib_mtx", NULL, MTX_SPIN); /* * Fire and Oberon have two register banks: * (0) per-PBM PCI Express configuration and status registers * (1) (shared) Fire/Oberon controller configuration and status * registers */ for (i = 0; i < FIRE_NREG; i++) { j = i; sc->sc_mem_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &j, RF_ACTIVE); if (sc->sc_mem_res[i] == NULL) panic("%s: could not allocate register bank %d", __func__, i); } if (OF_getprop(node, "portid", &sc->sc_ign, sizeof(sc->sc_ign)) == -1) panic("%s: could not determine IGN", __func__); if (OF_getprop(node, "module-revision#", &prop, sizeof(prop)) == -1) panic("%s: could not determine module-revision", __func__); device_printf(dev, "%s, module-revision %d, IGN %#x\n", desc->fd_name, prop, sc->sc_ign); /* * Hunt through all the interrupt mapping regs and register * the interrupt controller for our interrupt vectors. We do * this early in order to be able to catch stray interrupts. */ i = OF_getprop(node, "ino-bitmap", (void *)prop_array, sizeof(prop_array)); if (i == -1) panic("%s: could not get ino-bitmap", __func__); ino_bitmap = ((uint64_t)prop_array[1] << 32) | prop_array[0]; for (i = 0; i <= FO_MAX_INO; i++) { if ((ino_bitmap & (1ULL << i)) == 0) continue; j = fire_intr_register(sc, i); if (j != 0) device_printf(dev, "could not register interrupt " "controller for INO %d (%d)\n", i, j); } /* JBC/UBC module initialization */ FIRE_CTRL_SET(sc, FO_XBC_ERR_LOG_EN, ~0ULL); FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL); /* not enabled by OpenSolaris */ FIRE_CTRL_SET(sc, FO_XBC_INT_EN, ~0ULL); if (sc->sc_mode == FIRE_MODE_FIRE) { FIRE_CTRL_SET(sc, FIRE_JBUS_PAR_CTRL, FIRE_JBUS_PAR_CTRL_P_EN); FIRE_CTRL_SET(sc, FIRE_JBC_FATAL_RST_EN, ((1ULL << FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_SHFT) & FIRE_JBC_FATAL_RST_EN_SPARE_P_INT_MASK) | FIRE_JBC_FATAL_RST_EN_MB_PEA_P_INT | FIRE_JBC_FATAL_RST_EN_CPE_P_INT | FIRE_JBC_FATAL_RST_EN_APE_P_INT | FIRE_JBC_FATAL_RST_EN_PIO_CPE_INT | FIRE_JBC_FATAL_RST_EN_JTCEEW_P_INT | FIRE_JBC_FATAL_RST_EN_JTCEEI_P_INT | FIRE_JBC_FATAL_RST_EN_JTCEER_P_INT); FIRE_CTRL_SET(sc, FIRE_JBC_CORE_BLOCK_INT_EN, ~0ULL); } /* TLU initialization */ FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_STAT_CLR, FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK); /* not enabled by OpenSolaris */ FIRE_PCI_SET(sc, FO_PCI_TLU_OEVENT_INT_EN, FO_PCI_TLU_OEVENT_S_MASK | FO_PCI_TLU_OEVENT_P_MASK); FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_STAT_CLR, FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK); /* not enabled by OpenSolaris */ FIRE_PCI_SET(sc, FO_PCI_TLU_UERR_INT_EN, FO_PCI_TLU_UERR_INT_S_MASK | FO_PCI_TLU_UERR_INT_P_MASK); FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_STAT_CLR, FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK); /* not enabled by OpenSolaris */ FIRE_PCI_SET(sc, FO_PCI_TLU_CERR_INT_EN, FO_PCI_TLU_CERR_INT_S_MASK | FO_PCI_TLU_CERR_INT_P_MASK); val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) | ((FO_PCI_TLU_CTRL_L0S_TIM_DFLT << FO_PCI_TLU_CTRL_L0S_TIM_SHFT) & FO_PCI_TLU_CTRL_L0S_TIM_MASK) | ((FO_PCI_TLU_CTRL_CFG_DFLT << FO_PCI_TLU_CTRL_CFG_SHFT) & FO_PCI_TLU_CTRL_CFG_MASK); if (sc->sc_mode == FIRE_MODE_OBERON) val &= ~FO_PCI_TLU_CTRL_NWPR_EN; val |= FO_PCI_TLU_CTRL_CFG_REMAIN_DETECT_QUIET; FIRE_PCI_SET(sc, FO_PCI_TLU_CTRL, val); FIRE_PCI_SET(sc, FO_PCI_TLU_DEV_CTRL, 0); FIRE_PCI_SET(sc, FO_PCI_TLU_LNK_CTRL, FO_PCI_TLU_LNK_CTRL_CLK); /* DLU/LPU initialization */ if (sc->sc_mode == FIRE_MODE_OBERON) FIRE_PCI_SET(sc, FO_PCI_LPU_INT_MASK, 0); else FIRE_PCI_SET(sc, 
FO_PCI_LPU_RST, 0); FIRE_PCI_SET(sc, FO_PCI_LPU_LNK_LYR_CFG, FO_PCI_LPU_LNK_LYR_CFG_VC0_EN); FIRE_PCI_SET(sc, FO_PCI_LPU_FLW_CTRL_UPDT_CTRL, FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_NP_EN | FO_PCI_LPU_FLW_CTRL_UPDT_CTRL_FC0_P_EN); if (sc->sc_mode == FIRE_MODE_OBERON) FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS, (OBERON_PCI_LPU_TXLNK_RPLY_TMR_THRS_DFLT << FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) & FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK); else { switch ((FIRE_PCI_READ_8(sc, FO_PCI_TLU_LNK_STAT) & FO_PCI_TLU_LNK_STAT_WDTH_MASK) >> FO_PCI_TLU_LNK_STAT_WDTH_SHFT) { case 1: lw = 0; break; case 4: lw = 1; break; case 8: lw = 2; break; case 16: lw = 3; break; default: lw = 0; } mps = (FIRE_PCI_READ_8(sc, FO_PCI_TLU_CTRL) & FO_PCI_TLU_CTRL_CFG_MPS_MASK) >> FO_PCI_TLU_CTRL_CFG_MPS_SHFT; i = sizeof(fire_freq_nak_tmr_thrs) / sizeof(*fire_freq_nak_tmr_thrs); if (mps >= i) mps = i - 1; FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS, (fire_freq_nak_tmr_thrs[mps][lw] << FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_SHFT) & FO_PCI_LPU_TXLNK_FREQ_LAT_TMR_THRS_MASK); FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RPLY_TMR_THRS, (fire_rply_tmr_thrs[mps][lw] << FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_SHFT) & FO_PCI_LPU_TXLNK_RPLY_TMR_THRS_MASK); FIRE_PCI_SET(sc, FO_PCI_LPU_TXLNK_RTR_FIFO_PTR, ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_DFLT << FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_SHFT) & FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_TL_MASK) | ((FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_DFLT << FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_SHFT) & FO_PCI_LPU_TXLNK_RTR_FIFO_PTR_HD_MASK)); FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG2, (FO_PCI_LPU_LTSSM_CFG2_12_TO_DFLT << FO_PCI_LPU_LTSSM_CFG2_12_TO_SHFT) & FO_PCI_LPU_LTSSM_CFG2_12_TO_MASK); FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG3, (FO_PCI_LPU_LTSSM_CFG3_2_TO_DFLT << FO_PCI_LPU_LTSSM_CFG3_2_TO_SHFT) & FO_PCI_LPU_LTSSM_CFG3_2_TO_MASK); FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG4, ((FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_DFLT << FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_SHFT) & FO_PCI_LPU_LTSSM_CFG4_DATA_RATE_MASK) | ((FO_PCI_LPU_LTSSM_CFG4_N_FTS_DFLT << FO_PCI_LPU_LTSSM_CFG4_N_FTS_SHFT) & FO_PCI_LPU_LTSSM_CFG4_N_FTS_MASK)); FIRE_PCI_SET(sc, FO_PCI_LPU_LTSSM_CFG5, 0); } /* ILU initialization */ FIRE_PCI_SET(sc, FO_PCI_ILU_ERR_STAT_CLR, ~0ULL); /* not enabled by OpenSolaris */ FIRE_PCI_SET(sc, FO_PCI_ILU_INT_EN, ~0ULL); /* IMU initialization */ FIRE_PCI_SET(sc, FO_PCI_IMU_ERR_STAT_CLR, ~0ULL); FIRE_PCI_SET(sc, FO_PCI_IMU_INT_EN, FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_EN) & ~(FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_S | FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_S | FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_S | FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P | FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P | FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P)); /* MMU initialization */ FIRE_PCI_SET(sc, FO_PCI_MMU_ERR_STAT_CLR, FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK); /* not enabled by OpenSolaris */ FIRE_PCI_SET(sc, FO_PCI_MMU_INT_EN, FO_PCI_MMU_ERR_INT_S_MASK | FO_PCI_MMU_ERR_INT_P_MASK); /* DMC initialization */ FIRE_PCI_SET(sc, FO_PCI_DMC_CORE_BLOCK_INT_EN, ~0ULL); FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTA, 0); FIRE_PCI_SET(sc, FO_PCI_DMC_DBG_SEL_PORTB, 0); /* PEC initialization */ FIRE_PCI_SET(sc, FO_PCI_PEC_CORE_BLOCK_INT_EN, ~0ULL); /* Establish handlers for interesting interrupts. 
*/ if ((ino_bitmap & (1ULL << FO_DMC_PEC_INO)) != 0) fire_set_intr(sc, 1, FO_DMC_PEC_INO, fire_dmc_pec, sc); if ((ino_bitmap & (1ULL << FO_XCB_INO)) != 0) fire_set_intr(sc, 0, FO_XCB_INO, fire_xcb, sc); /* MSI/MSI-X support */ if (OF_getprop(node, "#msi", &sc->sc_msi_count, sizeof(sc->sc_msi_count)) == -1) panic("%s: could not determine MSI count", __func__); if (OF_getprop(node, "msi-ranges", &msi_ranges, sizeof(msi_ranges)) == -1) sc->sc_msi_first = 0; else sc->sc_msi_first = msi_ranges.first; if (OF_getprop(node, "msi-data-mask", &sc->sc_msi_data_mask, sizeof(sc->sc_msi_data_mask)) == -1) panic("%s: could not determine MSI data mask", __func__); if (OF_getprop(node, "msix-data-width", &sc->sc_msix_data_width, sizeof(sc->sc_msix_data_width)) > 0) sc->sc_flags |= FIRE_MSIX; if (OF_getprop(node, "msi-address-ranges", &msi_addr_ranges, sizeof(msi_addr_ranges)) == -1) panic("%s: could not determine MSI address ranges", __func__); sc->sc_msi_addr32 = OFW_PCI_MSI_ADDR_RANGE_32(&msi_addr_ranges); sc->sc_msi_addr64 = OFW_PCI_MSI_ADDR_RANGE_64(&msi_addr_ranges); if (OF_getprop(node, "#msi-eqs", &sc->sc_msiq_count, sizeof(sc->sc_msiq_count)) == -1) panic("%s: could not determine MSI event queue count", __func__); if (OF_getprop(node, "msi-eq-size", &sc->sc_msiq_size, sizeof(sc->sc_msiq_size)) == -1) panic("%s: could not determine MSI event queue size", __func__); if (OF_getprop(node, "msi-eq-to-devino", &msi_eq_to_devino, sizeof(msi_eq_to_devino)) == -1 && OF_getprop(node, "msi-eq-devino", &msi_eq_to_devino, sizeof(msi_eq_to_devino)) == -1) { sc->sc_msiq_first = 0; sc->sc_msiq_ino_first = FO_EQ_FIRST_INO; } else { sc->sc_msiq_first = msi_eq_to_devino.eq_first; sc->sc_msiq_ino_first = msi_eq_to_devino.devino_first; } if (sc->sc_msiq_ino_first < FO_EQ_FIRST_INO || sc->sc_msiq_ino_first + sc->sc_msiq_count - 1 > FO_EQ_LAST_INO) panic("%s: event queues exceed INO range", __func__); sc->sc_msi_bitmap = malloc(roundup2(sc->sc_msi_count, NBBY) / NBBY, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->sc_msi_bitmap == NULL) panic("%s: could not malloc MSI bitmap", __func__); sc->sc_msi_msiq_table = malloc(sc->sc_msi_count * sizeof(*sc->sc_msi_msiq_table), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->sc_msi_msiq_table == NULL) panic("%s: could not malloc MSI-MSI event queue table", __func__); sc->sc_msiq_bitmap = malloc(roundup2(sc->sc_msiq_count, NBBY) / NBBY, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->sc_msiq_bitmap == NULL) panic("%s: could not malloc MSI event queue bitmap", __func__); j = FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * sc->sc_msiq_count; sc->sc_msiq = contigmalloc(j, M_DEVBUF, M_NOWAIT, 0, ~0UL, FO_EQ_ALIGNMENT, 0); if (sc->sc_msiq == NULL) panic("%s: could not contigmalloc MSI event queue", __func__); memset(sc->sc_msiq, 0, j); FIRE_PCI_SET(sc, FO_PCI_EQ_BASE_ADDR, FO_PCI_EQ_BASE_ADDR_BYPASS | (pmap_kextract((vm_offset_t)sc->sc_msiq) & FO_PCI_EQ_BASE_ADDR_MASK)); for (i = 0; i < sc->sc_msi_count; i++) { j = (i + sc->sc_msi_first) << 3; FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + j, FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + j) & ~FO_PCI_MSI_MAP_V); } for (i = 0; i < sc->sc_msiq_count; i++) { j = i + sc->sc_msiq_ino_first; if ((ino_bitmap & (1ULL << j)) == 0) { mtx_lock(&sc->sc_msi_mtx); setbit(sc->sc_msiq_bitmap, i); mtx_unlock(&sc->sc_msi_mtx); } fmqa = intr_vectors[INTMAP_VEC(sc->sc_ign, j)].iv_icarg; mtx_init(&fmqa->fmqa_mtx, "msiq_mtx", NULL, MTX_SPIN); fmqa->fmqa_base = (struct fo_msiq_record *)((caddr_t)sc->sc_msiq + (FO_EQ_RECORD_SIZE * FO_EQ_NRECORDS * i)); j = i + sc->sc_msiq_first; fmqa->fmqa_msiq = j; j <<= 
3; fmqa->fmqa_head = FO_PCI_EQ_HD_BASE + j; fmqa->fmqa_tail = FO_PCI_EQ_TL_BASE + j; FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + j, FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I | FO_PCI_EQ_CTRL_CLR_DIS); FIRE_PCI_WRITE_8(sc, fmqa->fmqa_tail, (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK); FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK); } FIRE_PCI_SET(sc, FO_PCI_MSI_32_BIT_ADDR, sc->sc_msi_addr32 & FO_PCI_MSI_32_BIT_ADDR_MASK); FIRE_PCI_SET(sc, FO_PCI_MSI_64_BIT_ADDR, sc->sc_msi_addr64 & FO_PCI_MSI_64_BIT_ADDR_MASK); /* * Establish a handler for interesting PCIe messages and disable * uninteresting ones. */ mtx_lock(&sc->sc_msi_mtx); for (i = 0; i < sc->sc_msiq_count; i++) { if (isclr(sc->sc_msiq_bitmap, i) != 0) { j = i; break; } } if (i == sc->sc_msiq_count) { mtx_unlock(&sc->sc_msi_mtx); panic("%s: no spare event queue for PCIe messages", __func__); } setbit(sc->sc_msiq_bitmap, j); mtx_unlock(&sc->sc_msi_mtx); i = INTMAP_VEC(sc->sc_ign, j + sc->sc_msiq_ino_first); if (bus_set_resource(dev, SYS_RES_IRQ, 2, i, 1) != 0) panic("%s: failed to add interrupt for PCIe messages", __func__); fire_set_intr(sc, 2, INTINO(i), fire_pcie, intr_vectors[i].iv_icarg); j += sc->sc_msiq_first; /* * "Please note that setting the EQNUM field to a value larger than * 35 will yield unpredictable results." */ if (j > 35) panic("%s: invalid queue for PCIe messages (%d)", __func__, j); FIRE_PCI_SET(sc, FO_PCI_ERR_COR, FO_PCI_ERR_PME_V | ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK)); FIRE_PCI_SET(sc, FO_PCI_ERR_NONFATAL, FO_PCI_ERR_PME_V | ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK)); FIRE_PCI_SET(sc, FO_PCI_ERR_FATAL, FO_PCI_ERR_PME_V | ((j << FO_PCI_ERR_PME_EQNUM_SHFT) & FO_PCI_ERR_PME_EQNUM_MASK)); FIRE_PCI_SET(sc, FO_PCI_PM_PME, 0); FIRE_PCI_SET(sc, FO_PCI_PME_TO_ACK, 0); FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (j << 3), FO_PCI_EQ_CTRL_SET_EN); #define TC_COUNTER_MAX_MASK 0xffffffff /* * Set up JBC/UBC performance counter 0 in bus cycle counting * mode as timecounter. */ if (device_get_unit(dev) == 0) { FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT0, 0); FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT1, 0); FIRE_CTRL_SET(sc, FO_XBC_PRF_CNT_SEL, (FO_XBC_PRF_CNT_NONE << FO_XBC_PRF_CNT_CNT1_SHFT) | (FO_XBC_PRF_CNT_XB_CLK << FO_XBC_PRF_CNT_CNT0_SHFT)); tc = malloc(sizeof(*tc), M_DEVBUF, M_NOWAIT | M_ZERO); if (tc == NULL) panic("%s: could not malloc timecounter", __func__); tc->tc_get_timecount = fire_get_timecount; tc->tc_counter_mask = TC_COUNTER_MAX_MASK; if (OF_getprop(OF_peer(0), "clock-frequency", &prop, sizeof(prop)) == -1) panic("%s: could not determine clock frequency", __func__); tc->tc_frequency = prop; tc->tc_name = strdup(device_get_nameunit(dev), M_DEVBUF); tc->tc_priv = sc; /* * Due to initial problems with the JBus-driven performance * counters not advancing, which might be firmware-dependent, * ensure that it actually works. */ if (fire_get_timecount(tc) - fire_get_timecount(tc) != 0) tc->tc_quality = FIRE_PERF_CNT_QLTY; else tc->tc_quality = -FIRE_PERF_CNT_QLTY; tc_init(tc); } /* * Set up the IOMMU. Both Fire and Oberon have one per PBM, but * neither has a streaming buffer.
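 * (A short outline of the code that follows, added for orientation: the
 * generic iommu_dma_methods are copied, the state is flagged IOMMU_FIRE
 * (plus IOMMU_FLUSH_CACHE on Oberon), both streaming-buffer slots are
 * cleared and iommu_init() then allocates and programs the TSB.)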
*/ memcpy(&sc->sc_dma_methods, &iommu_dma_methods, sizeof(sc->sc_dma_methods)); sc->sc_is.is_flags = IOMMU_FIRE | IOMMU_PRESERVE_PROM; if (sc->sc_mode == FIRE_MODE_OBERON) { sc->sc_is.is_flags |= IOMMU_FLUSH_CACHE; sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(OBERON_IOMMU_BITS); } else { sc->sc_dma_methods.dm_dmamap_sync = fire_dmamap_sync; sc->sc_is.is_pmaxaddr = IOMMU_MAXADDR(FIRE_IOMMU_BITS); } sc->sc_is.is_sb[0] = sc->sc_is.is_sb[1] = 0; /* Punch in our copies. */ sc->sc_is.is_bustag = rman_get_bustag(sc->sc_mem_res[FIRE_PCI]); sc->sc_is.is_bushandle = rman_get_bushandle(sc->sc_mem_res[FIRE_PCI]); sc->sc_is.is_iommu = FO_PCI_MMU; val = FIRE_PCI_READ_8(sc, FO_PCI_MMU + IMR_CTL); iommu_init(device_get_nameunit(dev), &sc->sc_is, 7, -1, 0); #ifdef FIRE_DEBUG device_printf(dev, "FO_PCI_MMU + IMR_CTL 0x%016llx -> 0x%016llx\n", (long long unsigned)val, (long long unsigned)sc->sc_is.is_cr); #endif /* Create our DMA tag. */ if (bus_dma_tag_create(bus_get_dma_tag(dev), 8, 0x100000000, sc->sc_is.is_pmaxaddr, ~0, NULL, NULL, sc->sc_is.is_pmaxaddr, 0xff, 0xffffffff, 0, NULL, NULL, &dmat) != 0) panic("%s: could not create PCI DMA tag", __func__); dmat->dt_cookie = &sc->sc_is; dmat->dt_mt = &sc->sc_dma_methods; if (ofw_pci_attach_common(dev, dmat, FO_IO_SIZE, FO_MEM_SIZE) != 0) panic("%s: ofw_pci_attach_common() failed", __func__); #define FIRE_SYSCTL_ADD_UINT(name, arg, desc) \ SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), \ SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, \ (name), CTLFLAG_RD, (arg), 0, (desc)) FIRE_SYSCTL_ADD_UINT("ilu_err", &sc->sc_stats_ilu_err, "ILU unknown errors"); FIRE_SYSCTL_ADD_UINT("jbc_ce_async", &sc->sc_stats_jbc_ce_async, "JBC correctable errors"); FIRE_SYSCTL_ADD_UINT("jbc_unsol_int", &sc->sc_stats_jbc_unsol_int, "JBC unsolicited interrupt ACK/NACK errors"); FIRE_SYSCTL_ADD_UINT("jbc_unsol_rd", &sc->sc_stats_jbc_unsol_rd, "JBC unsolicited read response errors"); FIRE_SYSCTL_ADD_UINT("mmu_err", &sc->sc_stats_mmu_err, "MMU errors"); FIRE_SYSCTL_ADD_UINT("tlu_ce", &sc->sc_stats_tlu_ce, "DLU/TLU correctable errors"); FIRE_SYSCTL_ADD_UINT("tlu_oe_non_fatal", &sc->sc_stats_tlu_oe_non_fatal, "DLU/TLU other event non-fatal errors summary"), FIRE_SYSCTL_ADD_UINT("tlu_oe_rx_err", &sc->sc_stats_tlu_oe_rx_err, "DLU/TLU receive other event errors"), FIRE_SYSCTL_ADD_UINT("tlu_oe_tx_err", &sc->sc_stats_tlu_oe_tx_err, "DLU/TLU transmit other event errors"), FIRE_SYSCTL_ADD_UINT("ubc_dmardue", &sc->sc_stats_ubc_dmardue, "UBC DMARDUE errors"); #undef FIRE_SYSCTL_ADD_UINT device_add_child(dev, "pci", -1); return (bus_generic_attach(dev)); } static void fire_set_intr(struct fire_softc *sc, u_int index, u_int ino, driver_filter_t handler, void *arg) { u_long vec; int rid; rid = index; sc->sc_irq_res[index] = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->sc_irq_res[index] == NULL || INTINO(vec = rman_get_start(sc->sc_irq_res[index])) != ino || INTIGN(vec) != sc->sc_ign || intr_vectors[vec].iv_ic != &fire_ic || bus_setup_intr(sc->sc_dev, sc->sc_irq_res[index], INTR_TYPE_MISC | INTR_BRIDGE, handler, NULL, arg, &sc->sc_ihand[index]) != 0) panic("%s: failed to set up interrupt %d", __func__, index); } static int fire_intr_register(struct fire_softc *sc, u_int ino) { struct fire_icarg *fica; bus_addr_t intrclr, intrmap; int error; if (fire_get_intrmap(sc, ino, &intrmap, &intrclr) == 0) return (ENXIO); fica = malloc((ino >= FO_EQ_FIRST_INO && ino <= FO_EQ_LAST_INO) ?
sizeof(struct fire_msiqarg) : sizeof(struct fire_icarg), M_DEVBUF, M_NOWAIT | M_ZERO); if (fica == NULL) return (ENOMEM); fica->fica_sc = sc; fica->fica_map = intrmap; fica->fica_clr = intrclr; error = (intr_controller_register(INTMAP_VEC(sc->sc_ign, ino), &fire_ic, fica)); if (error != 0) free(fica, M_DEVBUF); return (error); } static int fire_get_intrmap(struct fire_softc *sc, u_int ino, bus_addr_t *intrmapptr, bus_addr_t *intrclrptr) { if (ino > FO_MAX_INO) { device_printf(sc->sc_dev, "out of range INO %d requested\n", ino); return (0); } ino <<= 3; if (intrmapptr != NULL) *intrmapptr = FO_PCI_INT_MAP_BASE + ino; if (intrclrptr != NULL) *intrclrptr = FO_PCI_INT_CLR_BASE + ino; return (1); } /* * Interrupt handlers */ static int fire_dmc_pec(void *arg) { struct fire_softc *sc; device_t dev; uint64_t cestat, dmcstat, ilustat, imustat, mcstat, mmustat, mmutfar; uint64_t mmutfsr, oestat, pecstat, uestat, val; u_int fatal, oenfatal; fatal = 0; sc = arg; dev = sc->sc_dev; mtx_lock_spin(&sc->sc_pcib_mtx); mcstat = FIRE_PCI_READ_8(sc, FO_PCI_MULTI_CORE_ERR_STAT); if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_DMC) != 0) { dmcstat = FIRE_PCI_READ_8(sc, FO_PCI_DMC_CORE_BLOCK_ERR_STAT); if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_IMU) != 0) { imustat = FIRE_PCI_READ_8(sc, FO_PCI_IMU_INT_STAT); device_printf(dev, "IMU error %#llx\n", (unsigned long long)imustat); if ((imustat & FO_PCI_IMU_ERR_INT_EQ_NOT_EN_P) != 0) { fatal = 1; val = FIRE_PCI_READ_8(sc, FO_PCI_IMU_SCS_ERR_LOG); device_printf(dev, "SCS error log %#llx\n", (unsigned long long)val); } if ((imustat & FO_PCI_IMU_ERR_INT_EQ_OVER_P) != 0) { fatal = 1; val = FIRE_PCI_READ_8(sc, FO_PCI_IMU_EQS_ERR_LOG); device_printf(dev, "EQS error log %#llx\n", (unsigned long long)val); } if ((imustat & (FO_PCI_IMU_ERR_INT_MSI_MAL_ERR_P | FO_PCI_IMU_ERR_INT_MSI_PAR_ERR_P | FO_PCI_IMU_ERR_INT_PMEACK_MES_NOT_EN_P | FO_PCI_IMU_ERR_INT_PMPME_MES_NOT_EN_P | FO_PCI_IMU_ERR_INT_FATAL_MES_NOT_EN_P | FO_PCI_IMU_ERR_INT_NFATAL_MES_NOT_EN_P | FO_PCI_IMU_ERR_INT_COR_MES_NOT_EN_P | FO_PCI_IMU_ERR_INT_MSI_NOT_EN_P)) != 0) { fatal = 1; val = FIRE_PCI_READ_8(sc, FO_PCI_IMU_RDS_ERR_LOG); device_printf(dev, "RDS error log %#llx\n", (unsigned long long)val); } } if ((dmcstat & FO_PCI_DMC_CORE_BLOCK_INT_EN_MMU) != 0) { fatal = 1; mmustat = FIRE_PCI_READ_8(sc, FO_PCI_MMU_INT_STAT); mmutfar = FIRE_PCI_READ_8(sc, FO_PCI_MMU_TRANS_FAULT_ADDR); mmutfsr = FIRE_PCI_READ_8(sc, FO_PCI_MMU_TRANS_FAULT_STAT); if ((mmustat & (FO_PCI_MMU_ERR_INT_TBW_DPE_P | FO_PCI_MMU_ERR_INT_TBW_ERR_P | FO_PCI_MMU_ERR_INT_TBW_UDE_P | FO_PCI_MMU_ERR_INT_TBW_DME_P | FO_PCI_MMU_ERR_INT_TTC_CAE_P | FIRE_PCI_MMU_ERR_INT_TTC_DPE_P | OBERON_PCI_MMU_ERR_INT_TTC_DUE_P | FO_PCI_MMU_ERR_INT_TRN_ERR_P)) != 0) fatal = 1; else { sc->sc_stats_mmu_err++; FIRE_PCI_WRITE_8(sc, FO_PCI_MMU_ERR_STAT_CLR, mmustat); } device_printf(dev, "MMU error %#llx: TFAR %#llx TFSR %#llx\n", (unsigned long long)mmustat, (unsigned long long)mmutfar, (unsigned long long)mmutfsr); } } if ((mcstat & FO_PCI_MULTI_CORE_ERR_STAT_PEC) != 0) { pecstat = FIRE_PCI_READ_8(sc, FO_PCI_PEC_CORE_BLOCK_INT_STAT); if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_UERR) != 0) { fatal = 1; uestat = FIRE_PCI_READ_8(sc, FO_PCI_TLU_UERR_INT_STAT); device_printf(dev, "DLU/TLU uncorrectable error %#llx\n", (unsigned long long)uestat); if ((uestat & (FO_PCI_TLU_UERR_INT_UR_P | OBERON_PCI_TLU_UERR_INT_POIS_P | FO_PCI_TLU_UERR_INT_MFP_P | FO_PCI_TLU_UERR_INT_ROF_P | FO_PCI_TLU_UERR_INT_UC_P | FIRE_PCI_TLU_UERR_INT_PP_P | OBERON_PCI_TLU_UERR_INT_POIS_P)) != 0) { val = 
FIRE_PCI_READ_8(sc, FO_PCI_TLU_RX_UERR_HDR1_LOG); device_printf(dev, "receive header log %#llx\n", (unsigned long long)val); val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_RX_UERR_HDR2_LOG); device_printf(dev, "receive header log 2 %#llx\n", (unsigned long long)val); } if ((uestat & FO_PCI_TLU_UERR_INT_CTO_P) != 0) { val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_TX_UERR_HDR1_LOG); device_printf(dev, "transmit header log %#llx\n", (unsigned long long)val); val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_TX_UERR_HDR2_LOG); device_printf(dev, "transmit header log 2 %#llx\n", (unsigned long long)val); } if ((uestat & FO_PCI_TLU_UERR_INT_DLP_P) != 0) { val = FIRE_PCI_READ_8(sc, FO_PCI_LPU_LNK_LYR_INT_STAT); device_printf(dev, "link layer interrupt and status %#llx\n", (unsigned long long)val); } if ((uestat & FO_PCI_TLU_UERR_INT_TE_P) != 0) { val = FIRE_PCI_READ_8(sc, FO_PCI_LPU_PHY_LYR_INT_STAT); device_printf(dev, "phy layer interrupt and status %#llx\n", (unsigned long long)val); } } if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_CERR) != 0) { sc->sc_stats_tlu_ce++; cestat = FIRE_PCI_READ_8(sc, FO_PCI_TLU_CERR_INT_STAT); device_printf(dev, "DLU/TLU correctable error %#llx\n", (unsigned long long)cestat); val = FIRE_PCI_READ_8(sc, FO_PCI_LPU_LNK_LYR_INT_STAT); device_printf(dev, "link layer interrupt and status %#llx\n", (unsigned long long)val); if ((cestat & FO_PCI_TLU_CERR_INT_RE_P) != 0) { FIRE_PCI_WRITE_8(sc, FO_PCI_LPU_LNK_LYR_INT_STAT, val); val = FIRE_PCI_READ_8(sc, FO_PCI_LPU_PHY_LYR_INT_STAT); device_printf(dev, "phy layer interrupt and status %#llx\n", (unsigned long long)val); } FIRE_PCI_WRITE_8(sc, FO_PCI_TLU_CERR_STAT_CLR, cestat); } if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_OEVENT) != 0) { oenfatal = 0; oestat = FIRE_PCI_READ_8(sc, FO_PCI_TLU_OEVENT_INT_STAT); device_printf(dev, "DLU/TLU other event %#llx\n", (unsigned long long)oestat); if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P | FO_PCI_TLU_OEVENT_MRC_P | FO_PCI_TLU_OEVENT_WUC_P | FO_PCI_TLU_OEVENT_RUC_P | FO_PCI_TLU_OEVENT_CRS_P)) != 0) { val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_RX_OEVENT_HDR1_LOG); device_printf(dev, "receive header log %#llx\n", (unsigned long long)val); val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_RX_OEVENT_HDR2_LOG); device_printf(dev, "receive header log 2 %#llx\n", (unsigned long long)val); if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P | FO_PCI_TLU_OEVENT_MRC_P | FO_PCI_TLU_OEVENT_WUC_P | FO_PCI_TLU_OEVENT_RUC_P)) != 0) fatal = 1; else { sc->sc_stats_tlu_oe_rx_err++; oenfatal = 1; } } if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P | FO_PCI_TLU_OEVENT_CTO_P | FO_PCI_TLU_OEVENT_WUC_P | FO_PCI_TLU_OEVENT_RUC_P)) != 0) { val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_TX_OEVENT_HDR1_LOG); device_printf(dev, "transmit header log %#llx\n", (unsigned long long)val); val = FIRE_PCI_READ_8(sc, FO_PCI_TLU_TX_OEVENT_HDR2_LOG); device_printf(dev, "transmit header log 2 %#llx\n", (unsigned long long)val); if ((oestat & (FO_PCI_TLU_OEVENT_MFC_P | FO_PCI_TLU_OEVENT_CTO_P | FO_PCI_TLU_OEVENT_WUC_P | FO_PCI_TLU_OEVENT_RUC_P)) != 0) fatal = 1; else { sc->sc_stats_tlu_oe_tx_err++; oenfatal = 1; } } if ((oestat & (FO_PCI_TLU_OEVENT_ERO_P | FO_PCI_TLU_OEVENT_EMP_P | FO_PCI_TLU_OEVENT_EPE_P | FIRE_PCI_TLU_OEVENT_ERP_P | OBERON_PCI_TLU_OEVENT_ERBU_P | FIRE_PCI_TLU_OEVENT_EIP_P | OBERON_PCI_TLU_OEVENT_EIUE_P)) != 0) { fatal = 1; val = FIRE_PCI_READ_8(sc, FO_PCI_LPU_LNK_LYR_INT_STAT); device_printf(dev, "link layer interrupt and status %#llx\n", (unsigned long long)val); } if ((oestat & (FO_PCI_TLU_OEVENT_IIP_P | FO_PCI_TLU_OEVENT_EDP_P | FIRE_PCI_TLU_OEVENT_EHP_P | 
OBERON_PCI_TLU_OEVENT_TLUEITMO_S | FO_PCI_TLU_OEVENT_ERU_P)) != 0) fatal = 1; if ((oestat & (FO_PCI_TLU_OEVENT_NFP_P | FO_PCI_TLU_OEVENT_LWC_P | FO_PCI_TLU_OEVENT_LIN_P | FO_PCI_TLU_OEVENT_LRS_P | FO_PCI_TLU_OEVENT_LDN_P | FO_PCI_TLU_OEVENT_LUP_P)) != 0) oenfatal = 1; if (oenfatal != 0) { sc->sc_stats_tlu_oe_non_fatal++; FIRE_PCI_WRITE_8(sc, FO_PCI_TLU_OEVENT_STAT_CLR, oestat); if ((oestat & FO_PCI_TLU_OEVENT_LIN_P) != 0) FIRE_PCI_WRITE_8(sc, FO_PCI_LPU_LNK_LYR_INT_STAT, FIRE_PCI_READ_8(sc, FO_PCI_LPU_LNK_LYR_INT_STAT)); } } if ((pecstat & FO_PCI_PEC_CORE_BLOCK_INT_STAT_ILU) != 0) { ilustat = FIRE_PCI_READ_8(sc, FO_PCI_ILU_INT_STAT); device_printf(dev, "ILU error %#llx\n", (unsigned long long)ilustat); if ((ilustat & (FIRE_PCI_ILU_ERR_INT_IHB_PE_P | FIRE_PCI_ILU_ERR_INT_IHB_PE_P)) != 0) fatal = 1; else { sc->sc_stats_ilu_err++; FIRE_PCI_WRITE_8(sc, FO_PCI_ILU_INT_STAT, ilustat); } } } mtx_unlock_spin(&sc->sc_pcib_mtx); if (fatal != 0) panic("%s: fatal DMC/PEC error", device_get_nameunit(sc->sc_dev)); return (FILTER_HANDLED); } static int fire_xcb(void *arg) { struct fire_softc *sc; device_t dev; uint64_t errstat, intstat, val; u_int fatal; fatal = 0; sc = arg; dev = sc->sc_dev; mtx_lock_spin(&sc->sc_pcib_mtx); if (sc->sc_mode == FIRE_MODE_OBERON) { intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT); device_printf(dev, "UBC error: interrupt status %#llx\n", (unsigned long long)intstat); if ((intstat & ~(OBERON_UBC_ERR_INT_DMARDUEB_P | OBERON_UBC_ERR_INT_DMARDUEA_P)) != 0) fatal = 1; else sc->sc_stats_ubc_dmardue++; if (fatal != 0) { mtx_unlock_spin(&sc->sc_pcib_mtx); panic("%s: fatal UBC core block error", device_get_nameunit(sc->sc_dev)); } else { FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL); mtx_unlock_spin(&sc->sc_pcib_mtx); } } else { errstat = FIRE_CTRL_READ_8(sc, FIRE_JBC_CORE_BLOCK_ERR_STAT); if ((errstat & (FIRE_JBC_CORE_BLOCK_ERR_STAT_MERGE | FIRE_JBC_CORE_BLOCK_ERR_STAT_JBCINT | FIRE_JBC_CORE_BLOCK_ERR_STAT_DMCINT)) != 0) { intstat = FIRE_CTRL_READ_8(sc, FO_XBC_INT_STAT); device_printf(dev, "JBC interrupt status %#llx\n", (unsigned long long)intstat); if ((intstat & FIRE_JBC_ERR_INT_EBUS_TO_P) != 0) { val = FIRE_CTRL_READ_8(sc, FIRE_JBC_CSR_ERR_LOG); device_printf(dev, "CSR error log %#llx\n", (unsigned long long)val); } if ((intstat & (FIRE_JBC_ERR_INT_UNSOL_RD_P | FIRE_JBC_ERR_INT_UNSOL_INT_P)) != 0) { if ((intstat & FIRE_JBC_ERR_INT_UNSOL_RD_P) != 0) sc->sc_stats_jbc_unsol_rd++; if ((intstat & FIRE_JBC_ERR_INT_UNSOL_INT_P) != 0) sc->sc_stats_jbc_unsol_int++; val = FIRE_CTRL_READ_8(sc, FIRE_DMCINT_IDC_ERR_LOG); device_printf(dev, "DMCINT IDC error log %#llx\n", (unsigned long long)val); } if ((intstat & (FIRE_JBC_ERR_INT_MB_PER_P | FIRE_JBC_ERR_INT_MB_PEW_P)) != 0) { fatal = 1; val = FIRE_CTRL_READ_8(sc, FIRE_MERGE_TRANS_ERR_LOG); device_printf(dev, "merge transaction error log %#llx\n", (unsigned long long)val); } if ((intstat & FIRE_JBC_ERR_INT_IJP_P) != 0) { fatal = 1; val = FIRE_CTRL_READ_8(sc, FIRE_JBCINT_OTRANS_ERR_LOG); device_printf(dev, "JBCINT out transaction error log " "%#llx\n", (unsigned long long)val); val = FIRE_CTRL_READ_8(sc, FIRE_JBCINT_OTRANS_ERR_LOG2); device_printf(dev, "JBCINT out transaction error log 2 " "%#llx\n", (unsigned long long)val); } if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P | FIRE_JBC_ERR_INT_CE_ASYN_P | FIRE_JBC_ERR_INT_JTE_P | FIRE_JBC_ERR_INT_JBE_P | FIRE_JBC_ERR_INT_JUE_P | FIRE_JBC_ERR_INT_ICISE_P | FIRE_JBC_ERR_INT_WR_DPE_P | FIRE_JBC_ERR_INT_RD_DPE_P | FIRE_JBC_ERR_INT_ILL_BMW_P | FIRE_JBC_ERR_INT_ILL_BMR_P | FIRE_JBC_ERR_INT_BJC_P)) != 
0) { if ((intstat & (FIRE_JBC_ERR_INT_UE_ASYN_P | FIRE_JBC_ERR_INT_JTE_P | FIRE_JBC_ERR_INT_JBE_P | FIRE_JBC_ERR_INT_JUE_P | FIRE_JBC_ERR_INT_ICISE_P | FIRE_JBC_ERR_INT_WR_DPE_P | FIRE_JBC_ERR_INT_RD_DPE_P | FIRE_JBC_ERR_INT_ILL_BMW_P | FIRE_JBC_ERR_INT_ILL_BMR_P | FIRE_JBC_ERR_INT_BJC_P)) != 0) fatal = 1; else sc->sc_stats_jbc_ce_async++; val = FIRE_CTRL_READ_8(sc, FIRE_JBCINT_ITRANS_ERR_LOG); device_printf(dev, "JBCINT in transaction error log %#llx\n", (unsigned long long)val); val = FIRE_CTRL_READ_8(sc, FIRE_JBCINT_ITRANS_ERR_LOG2); device_printf(dev, "JBCINT in transaction error log 2 " "%#llx\n", (unsigned long long)val); } if ((intstat & (FIRE_JBC_ERR_INT_PIO_UNMAP_RD_P | FIRE_JBC_ERR_INT_ILL_ACC_RD_P | FIRE_JBC_ERR_INT_PIO_UNMAP_P | FIRE_JBC_ERR_INT_PIO_DPE_P | FIRE_JBC_ERR_INT_PIO_CPE_P | FIRE_JBC_ERR_INT_ILL_ACC_P)) != 0) { fatal = 1; val = FIRE_CTRL_READ_8(sc, FIRE_JBC_CSR_ERR_LOG); device_printf(dev, "DMCINT ODCD error log %#llx\n", (unsigned long long)val); } if ((intstat & (FIRE_JBC_ERR_INT_MB_PEA_P | FIRE_JBC_ERR_INT_CPE_P | FIRE_JBC_ERR_INT_APE_P | FIRE_JBC_ERR_INT_PIO_CPE_P | FIRE_JBC_ERR_INT_JTCEEW_P | FIRE_JBC_ERR_INT_JTCEEI_P | FIRE_JBC_ERR_INT_JTCEER_P)) != 0) { fatal = 1; val = FIRE_CTRL_READ_8(sc, FIRE_FATAL_ERR_LOG); device_printf(dev, "fatal error log %#llx\n", (unsigned long long)val); val = FIRE_CTRL_READ_8(sc, FIRE_FATAL_ERR_LOG2); device_printf(dev, "fatal error log 2 " "%#llx\n", (unsigned long long)val); } if (fatal != 0) { mtx_unlock_spin(&sc->sc_pcib_mtx); panic("%s: fatal JBC core block error", device_get_nameunit(sc->sc_dev)); } else { FIRE_CTRL_SET(sc, FO_XBC_ERR_STAT_CLR, ~0ULL); mtx_unlock_spin(&sc->sc_pcib_mtx); } } else { mtx_unlock_spin(&sc->sc_pcib_mtx); panic("%s: unknown JCB core block error status %#llx", device_get_nameunit(sc->sc_dev), (unsigned long long)errstat); } } return (FILTER_HANDLED); } static int fire_pcie(void *arg) { struct fire_msiqarg *fmqa; struct fire_softc *sc; struct fo_msiq_record *qrec; device_t dev; uint64_t word0; u_int head, msg, msiq; fmqa = arg; sc = fmqa->fmqa_fica.fica_sc; dev = sc->sc_dev; msiq = fmqa->fmqa_msiq; mtx_lock_spin(&fmqa->fmqa_mtx); head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >> FO_PCI_EQ_HD_SHFT; qrec = &fmqa->fmqa_base[head]; word0 = qrec->fomqr_word0; for (;;) { KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSG) != 0, ("%s: received non-PCIe message in event queue %d " "(word0 %#llx)", device_get_nameunit(dev), msiq, (unsigned long long)word0)); msg = (word0 & FO_MQR_WORD0_DATA0_MASK) >> FO_MQR_WORD0_DATA0_SHFT; #define PCIE_MSG_CODE_ERR_COR 0x30 #define PCIE_MSG_CODE_ERR_NONFATAL 0x31 #define PCIE_MSG_CODE_ERR_FATAL 0x33 if (msg == PCIE_MSG_CODE_ERR_COR) device_printf(dev, "correctable PCIe error\n"); else if (msg == PCIE_MSG_CODE_ERR_NONFATAL || msg == PCIE_MSG_CODE_ERR_FATAL) panic("%s: %sfatal PCIe error", device_get_nameunit(dev), msg == PCIE_MSG_CODE_ERR_NONFATAL ? 
"non-" : ""); else panic("%s: received unknown PCIe message %#x", device_get_nameunit(dev), msg); qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK; head = (head + 1) % sc->sc_msiq_size; qrec = &fmqa->fmqa_base[head]; word0 = qrec->fomqr_word0; if (__predict_true((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0)) break; } FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) << FO_PCI_EQ_HD_SHFT); if ((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) & FO_PCI_EQ_TL_OVERR) != 0) { device_printf(dev, "event queue %d overflow\n", msiq); msiq <<= 3; FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq, FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) | FO_PCI_EQ_CTRL_CLR_COVERR); } mtx_unlock_spin(&fmqa->fmqa_mtx); return (FILTER_HANDLED); } static int fire_maxslots(device_t dev) { return (1); } static uint32_t fire_read_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, int width) { return (ofw_pci_read_config_common(dev, PCIE_REGMAX, FO_CONF_OFF(bus, slot, func, reg), bus, slot, func, reg, width)); } static void fire_write_config(device_t dev, u_int bus, u_int slot, u_int func, u_int reg, uint32_t val, int width) { ofw_pci_write_config_common(dev, PCIE_REGMAX, FO_CONF_OFF(bus, slot, func, reg), bus, slot, func, reg, val, width); } static int fire_route_interrupt(device_t bridge, device_t dev, int pin) { ofw_pci_intr_t mintr; mintr = ofw_pci_route_interrupt_common(bridge, dev, pin); if (!PCI_INTERRUPT_VALID(mintr)) device_printf(bridge, "could not route pin %d for device %d.%d\n", pin, pci_get_slot(dev), pci_get_function(dev)); return (mintr); } static void fire_dmamap_sync(bus_dma_tag_t dt __unused, bus_dmamap_t map, bus_dmasync_op_t op) { if ((map->dm_flags & DMF_LOADED) == 0) return; if ((op & BUS_DMASYNC_POSTREAD) != 0) ofw_pci_dmamap_sync_stst_order_common(); else if ((op & BUS_DMASYNC_PREWRITE) != 0) membar(Sync); } static void fire_intr_enable(void *arg) { struct intr_vector *iv; struct fire_icarg *fica; struct fire_softc *sc; struct pcpu *pc; uint64_t mr; u_int ctrl, i; iv = arg; fica = iv->iv_icarg; sc = fica->fica_sc; mr = FO_PCI_IMAP_V; if (sc->sc_mode == FIRE_MODE_OBERON) mr |= (iv->iv_mid << OBERON_PCI_IMAP_T_DESTID_SHFT) & OBERON_PCI_IMAP_T_DESTID_MASK; else mr |= (iv->iv_mid << FIRE_PCI_IMAP_T_JPID_SHFT) & FIRE_PCI_IMAP_T_JPID_MASK; /* * Given that all mondos for the same target are required to use the * same interrupt controller we just use the CPU ID for indexing the * latter. 
*/ ctrl = 0; for (i = 0; i < mp_ncpus; ++i) { pc = pcpu_find(i); if (pc == NULL || iv->iv_mid != pc->pc_mid) continue; ctrl = pc->pc_cpuid % 4; break; } mr |= (1ULL << ctrl) << FO_PCI_IMAP_INT_CTRL_NUM_SHFT & FO_PCI_IMAP_INT_CTRL_NUM_MASK; FIRE_PCI_WRITE_8(sc, fica->fica_map, mr); } static void fire_intr_disable(void *arg) { struct intr_vector *iv; struct fire_icarg *fica; struct fire_softc *sc; iv = arg; fica = iv->iv_icarg; sc = fica->fica_sc; FIRE_PCI_WRITE_8(sc, fica->fica_map, FIRE_PCI_READ_8(sc, fica->fica_map) & ~FO_PCI_IMAP_V); } static void fire_intr_assign(void *arg) { struct intr_vector *iv; struct fire_icarg *fica; struct fire_softc *sc; uint64_t mr; iv = arg; fica = iv->iv_icarg; sc = fica->fica_sc; mr = FIRE_PCI_READ_8(sc, fica->fica_map); if ((mr & FO_PCI_IMAP_V) != 0) { FIRE_PCI_WRITE_8(sc, fica->fica_map, mr & ~FO_PCI_IMAP_V); FIRE_PCI_BARRIER(sc, fica->fica_map, 8, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } while (FIRE_PCI_READ_8(sc, fica->fica_clr) != INTCLR_IDLE) ; if ((mr & FO_PCI_IMAP_V) != 0) fire_intr_enable(arg); } static void fire_intr_clear(void *arg) { struct intr_vector *iv; struct fire_icarg *fica; iv = arg; fica = iv->iv_icarg; FIRE_PCI_WRITE_8(fica->fica_sc, fica->fica_clr, INTCLR_IDLE); } /* * Given that the event queue implementation matches our current MD and MI * interrupt frameworks like square pegs fit into round holes we are generous * and use one event queue per MSI for now, which limits us to 35 MSIs/MSI-Xs * per Host-PCIe-bridge (we use one event queue for the PCIe error messages). * This seems tolerable as long as most devices just use one MSI/MSI-X anyway. * Adding knowledge about MSIs/MSI-Xs to the MD interrupt code should allow us * to decouple the 1:1 mapping at the cost of no longer being able to bind * MSIs/MSI-Xs to specific CPUs as we currently have no reliable way to * quiesce a device while we move its MSIs/MSI-Xs to another event queue. 
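 * As a rough sketch of the resulting 1:1 association (an illustration
 * only; the firmware properties involved are machine-dependent):
 *
 *	MSI n       -> event queue sc_msi_msiq_table[n - sc_msi_first]
 *	event queue -> INO sc_msiq_ino_first + queue
 *	INO         -> interrupt vector INTMAP_VEC(sc_ign, INO)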
*/ static int fire_alloc_msi(device_t dev, device_t child, int count, int maxcount __unused, int *irqs) { struct fire_softc *sc; u_int i, j, msiqrun; if (powerof2(count) == 0 || count > 32) return (EINVAL); sc = device_get_softc(dev); mtx_lock(&sc->sc_msi_mtx); msiqrun = 0; for (i = 0; i < sc->sc_msiq_count; i++) { for (j = i; j < i + count; j++) { if (isclr(sc->sc_msiq_bitmap, j) == 0) break; } if (j == i + count) { msiqrun = i; break; } } if (i == sc->sc_msiq_count) { mtx_unlock(&sc->sc_msi_mtx); return (ENXIO); } for (i = 0; i + count < sc->sc_msi_count; i += count) { for (j = i; j < i + count; j++) if (isclr(sc->sc_msi_bitmap, j) == 0) break; if (j == i + count) { for (j = 0; j < count; j++) { setbit(sc->sc_msiq_bitmap, msiqrun + j); setbit(sc->sc_msi_bitmap, i + j); sc->sc_msi_msiq_table[i + j] = msiqrun + j; irqs[j] = sc->sc_msi_first + i + j; } mtx_unlock(&sc->sc_msi_mtx); return (0); } } mtx_unlock(&sc->sc_msi_mtx); return (ENXIO); } static int fire_release_msi(device_t dev, device_t child, int count, int *irqs) { struct fire_softc *sc; u_int i; sc = device_get_softc(dev); mtx_lock(&sc->sc_msi_mtx); for (i = 0; i < count; i++) { clrbit(sc->sc_msiq_bitmap, sc->sc_msi_msiq_table[irqs[i] - sc->sc_msi_first]); clrbit(sc->sc_msi_bitmap, irqs[i] - sc->sc_msi_first); } mtx_unlock(&sc->sc_msi_mtx); return (0); } static int fire_alloc_msix(device_t dev, device_t child, int *irq) { struct fire_softc *sc; int i, msiq; sc = device_get_softc(dev); if ((sc->sc_flags & FIRE_MSIX) == 0) return (ENXIO); mtx_lock(&sc->sc_msi_mtx); msiq = 0; for (i = 0; i < sc->sc_msiq_count; i++) { if (isclr(sc->sc_msiq_bitmap, i) != 0) { msiq = i; break; } } if (i == sc->sc_msiq_count) { mtx_unlock(&sc->sc_msi_mtx); return (ENXIO); } for (i = sc->sc_msi_count - 1; i >= 0; i--) { if (isclr(sc->sc_msi_bitmap, i) != 0) { setbit(sc->sc_msiq_bitmap, msiq); setbit(sc->sc_msi_bitmap, i); sc->sc_msi_msiq_table[i] = msiq; *irq = sc->sc_msi_first + i; mtx_unlock(&sc->sc_msi_mtx); return (0); } } mtx_unlock(&sc->sc_msi_mtx); return (ENXIO); } static int fire_release_msix(device_t dev, device_t child, int irq) { struct fire_softc *sc; sc = device_get_softc(dev); if ((sc->sc_flags & FIRE_MSIX) == 0) return (ENXIO); mtx_lock(&sc->sc_msi_mtx); clrbit(sc->sc_msiq_bitmap, sc->sc_msi_msiq_table[irq - sc->sc_msi_first]); clrbit(sc->sc_msi_bitmap, irq - sc->sc_msi_first); mtx_unlock(&sc->sc_msi_mtx); return (0); } static int fire_map_msi(device_t dev, device_t child, int irq, uint64_t *addr, uint32_t *data) { struct fire_softc *sc; struct pci_devinfo *dinfo; sc = device_get_softc(dev); dinfo = device_get_ivars(child); if (dinfo->cfg.msi.msi_alloc > 0) { if ((irq & ~sc->sc_msi_data_mask) != 0) { device_printf(dev, "invalid MSI 0x%x\n", irq); return (EINVAL); } } else { if ((sc->sc_flags & FIRE_MSIX) == 0) return (ENXIO); if (fls(irq) > sc->sc_msix_data_width) { device_printf(dev, "invalid MSI-X 0x%x\n", irq); return (EINVAL); } } if (dinfo->cfg.msi.msi_alloc > 0 && (dinfo->cfg.msi.msi_ctrl & PCIM_MSICTRL_64BIT) == 0) *addr = sc->sc_msi_addr32; else *addr = sc->sc_msi_addr64; *data = irq; return (0); } static void fire_msiq_handler(void *cookie) { struct intr_vector *iv; struct fire_msiqarg *fmqa; iv = cookie; fmqa = iv->iv_icarg; /* * Note that since fire_intr_clear() will clear the event queue * interrupt after the handler associated with the MSI [sic] has * been executed we have to protect the access to the event queue as * otherwise nested event queue interrupts cause corruption of the * event queue on MP machines. 
Obviously especially when abandoning * the 1:1 mapping it would be better to not clear the event queue * interrupt after each handler invocation but only once when the * outstanding MSIs have been processed but unfortunately that * doesn't work well and leads to interrupt storms with controllers/ * drivers which don't mask interrupts while the handler is executed. * Maybe delaying clearing the MSI until after the handler has been * executed could be used to work around this but that's not the * intended usage and might in turn cause lost MSIs. */ mtx_lock_spin(&fmqa->fmqa_mtx); fire_msiq_common(iv, fmqa); mtx_unlock_spin(&fmqa->fmqa_mtx); } static void fire_msiq_filter(void *cookie) { struct intr_vector *iv; struct fire_msiqarg *fmqa; iv = cookie; fmqa = iv->iv_icarg; /* * For filters we don't use fire_intr_clear() since it would clear * the event queue interrupt while we're still processing the event * queue as filters and associated post-filter handler are executed * directly, which in turn would lead to lost MSIs. So we clear the * event queue interrupt only once after processing the event queue. * Given that this still guarantees the filters to not be executed * concurrently and no other CPU can clear the event queue interrupt * while the event queue is still processed, we don't even need to * interlock the access to the event queue in this case. */ critical_enter(); fire_msiq_common(iv, fmqa); FIRE_PCI_WRITE_8(fmqa->fmqa_fica.fica_sc, fmqa->fmqa_fica.fica_clr, INTCLR_IDLE); critical_exit(); } static inline void fire_msiq_common(struct intr_vector *iv, struct fire_msiqarg *fmqa) { struct fire_softc *sc; struct fo_msiq_record *qrec; device_t dev; uint64_t word0; u_int head, msi, msiq; sc = fmqa->fmqa_fica.fica_sc; dev = sc->sc_dev; msiq = fmqa->fmqa_msiq; head = (FIRE_PCI_READ_8(sc, fmqa->fmqa_head) & FO_PCI_EQ_HD_MASK) >> FO_PCI_EQ_HD_SHFT; qrec = &fmqa->fmqa_base[head]; word0 = qrec->fomqr_word0; for (;;) { if (__predict_false((word0 & FO_MQR_WORD0_FMT_TYPE_MASK) == 0)) break; KASSERT((word0 & FO_MQR_WORD0_FMT_TYPE_MSI64) != 0 || (word0 & FO_MQR_WORD0_FMT_TYPE_MSI32) != 0, ("%s: received non-MSI/MSI-X message in event queue %d " "(word0 %#llx)", device_get_nameunit(dev), msiq, (unsigned long long)word0)); msi = (word0 & FO_MQR_WORD0_DATA0_MASK) >> FO_MQR_WORD0_DATA0_SHFT; /* * Sanity check the MSI/MSI-X as long as we use a 1:1 mapping. 
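 * (With the 1:1 mapping every record in this queue must carry the MSI
 * recorded in fmqa_msi by fire_setup_intr(); the KASSERT below relies
 * on that.)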
*/ KASSERT(msi == fmqa->fmqa_msi, ("%s: received non-matching MSI/MSI-X in event queue %d " "(%d versus %d)", device_get_nameunit(dev), msiq, msi, fmqa->fmqa_msi)); FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + (msi << 3), FO_PCI_MSI_CLR_EQWR_N); if (__predict_false(intr_event_handle(iv->iv_event, NULL) != 0)) printf("stray MSI/MSI-X in event queue %d\n", msiq); qrec->fomqr_word0 &= ~FO_MQR_WORD0_FMT_TYPE_MASK; head = (head + 1) % sc->sc_msiq_size; qrec = &fmqa->fmqa_base[head]; word0 = qrec->fomqr_word0; } FIRE_PCI_WRITE_8(sc, fmqa->fmqa_head, (head & FO_PCI_EQ_HD_MASK) << FO_PCI_EQ_HD_SHFT); if (__predict_false((FIRE_PCI_READ_8(sc, fmqa->fmqa_tail) & FO_PCI_EQ_TL_OVERR) != 0)) { device_printf(dev, "event queue %d overflow\n", msiq); msiq <<= 3; FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq, FIRE_PCI_READ_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq) | FO_PCI_EQ_CTRL_CLR_COVERR); } } static int fire_setup_intr(device_t dev, device_t child, struct resource *ires, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep) { struct fire_softc *sc; struct fire_msiqarg *fmqa; u_long vec; int error; u_int msi, msiq; sc = device_get_softc(dev); /* * XXX this assumes that a device only has one INTx, while in fact * Cassini+ and Saturn can use all four the firmware has assigned * to them, but so does pci(4). */ if (rman_get_rid(ires) != 0) { msi = rman_get_start(ires); msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first]; vec = INTMAP_VEC(sc->sc_ign, sc->sc_msiq_ino_first + msiq); msiq += sc->sc_msiq_first; if (intr_vectors[vec].iv_ic != &fire_ic) { device_printf(dev, "invalid interrupt controller for vector 0x%lx\n", vec); return (EINVAL); } /* * The MD interrupt code needs the vector rather than the MSI. */ rman_set_start(ires, vec); rman_set_end(ires, vec); error = bus_generic_setup_intr(dev, child, ires, flags, filt, intr, arg, cookiep); rman_set_start(ires, msi); rman_set_end(ires, msi); if (error != 0) return (error); fmqa = intr_vectors[vec].iv_icarg; /* * XXX inject our event queue handler. */ if (filt != NULL) { intr_vectors[vec].iv_func = fire_msiq_filter; intr_vectors[vec].iv_ic = &fire_msiqc_filter; /* * Ensure the event queue interrupt is cleared; it * might have triggered before. Given we supply NULL * as ic_clear, inthand_add() won't do this for us. */ FIRE_PCI_WRITE_8(sc, fmqa->fmqa_fica.fica_clr, INTCLR_IDLE); } else intr_vectors[vec].iv_func = fire_msiq_handler; /* Record the MSI/MSI-X as long as we use a 1:1 mapping. */ fmqa->fmqa_msi = msi; FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_SET_BASE + (msiq << 3), FO_PCI_EQ_CTRL_SET_EN); msi <<= 3; FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi, (FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) & ~FO_PCI_MSI_MAP_EQNUM_MASK) | ((msiq << FO_PCI_MSI_MAP_EQNUM_SHFT) & FO_PCI_MSI_MAP_EQNUM_MASK)); FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_CLR_BASE + msi, FO_PCI_MSI_CLR_EQWR_N); FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi, FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) | FO_PCI_MSI_MAP_V); return (error); } /* * Make sure the vector is fully specified and we registered * our interrupt controller for it.
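 * (For INTx the resource start already is the fully specified vector,
 * set up via INTMAP_VEC() in fire_alloc_resource(), so only the IGN
 * and the registered controller need to be checked here.)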
*/ vec = rman_get_start(ires); if (INTIGN(vec) != sc->sc_ign) { device_printf(dev, "invalid interrupt vector 0x%lx\n", vec); return (EINVAL); } if (intr_vectors[vec].iv_ic != &fire_ic) { device_printf(dev, "invalid interrupt controller for vector 0x%lx\n", vec); return (EINVAL); } return (bus_generic_setup_intr(dev, child, ires, flags, filt, intr, arg, cookiep)); } static int fire_teardown_intr(device_t dev, device_t child, struct resource *ires, void *cookie) { struct fire_softc *sc; u_long vec; int error; u_int msi, msiq; sc = device_get_softc(dev); if (rman_get_rid(ires) != 0) { msi = rman_get_start(ires); msiq = sc->sc_msi_msiq_table[msi - sc->sc_msi_first]; vec = INTMAP_VEC(sc->sc_ign, msiq + sc->sc_msiq_ino_first); msiq += sc->sc_msiq_first; msi <<= 3; FIRE_PCI_WRITE_8(sc, FO_PCI_MSI_MAP_BASE + msi, FIRE_PCI_READ_8(sc, FO_PCI_MSI_MAP_BASE + msi) & ~FO_PCI_MSI_MAP_V); msiq <<= 3; FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_CTRL_CLR_BASE + msiq, FO_PCI_EQ_CTRL_CLR_COVERR | FO_PCI_EQ_CTRL_CLR_E2I | FO_PCI_EQ_CTRL_CLR_DIS); FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_TL_BASE + msiq, (0 << FO_PCI_EQ_TL_SHFT) & FO_PCI_EQ_TL_MASK); FIRE_PCI_WRITE_8(sc, FO_PCI_EQ_HD_BASE + msiq, (0 << FO_PCI_EQ_HD_SHFT) & FO_PCI_EQ_HD_MASK); intr_vectors[vec].iv_ic = &fire_ic; /* * The MD interrupt code needs the vector rather than the MSI. */ rman_set_start(ires, vec); rman_set_end(ires, vec); error = bus_generic_teardown_intr(dev, child, ires, cookie); msi >>= 3; rman_set_start(ires, msi); rman_set_end(ires, msi); return (error); } return (bus_generic_teardown_intr(dev, child, ires, cookie)); } static struct resource * fire_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct fire_softc *sc; if (type == SYS_RES_IRQ && *rid == 0) { sc = device_get_softc(bus); start = end = INTMAP_VEC(sc->sc_ign, end); } return (ofw_pci_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static u_int fire_get_timecount(struct timecounter *tc) { struct fire_softc *sc; sc = tc->tc_priv; return (FIRE_CTRL_READ_8(sc, FO_XBC_PRF_CNT0) & TC_COUNTER_MAX_MASK); } Index: head/sys/sparc64/sparc64/iommu.c =================================================================== --- head/sys/sparc64/sparc64/iommu.c (revision 295879) +++ head/sys/sparc64/sparc64/iommu.c (revision 295880) @@ -1,1215 +1,1214 @@ /*- * Copyright (c) 1999, 2000 Matthew R. Green * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: NetBSD: iommu.c,v 1.82 2008/05/30 02:29:37 mrg Exp */ /*- * Copyright (c) 1999-2002 Eduardo Horvath * Copyright (c) 2001-2003 Thomas Moestl * Copyright (c) 2007, 2009 Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: NetBSD: sbus.c,v 1.50 2002/06/20 18:26:24 eeh Exp */ #include __FBSDID("$FreeBSD$"); /* * UltraSPARC IOMMU support; used by both the PCI and SBus code. * * TODO: * - Support sub-page boundaries. * - Fix alignment handling for small allocations (the possible page offset * of malloc()ed memory is not handled at all). Revise interaction of * alignment with the load_mbuf and load_uio functions. * - Handle lowaddr and highaddr in some way, and try to work out a way * for filter callbacks to work. Currently, only lowaddr is honored * in that no addresses above it are considered at all. * - Implement BUS_DMA_ALLOCNOW in bus_dma_tag_create as far as possible. * - Check the possible return values and callback error arguments; * the callback currently gets called in error conditions where it should * not be. * - When running out of DVMA space, return EINPROGRESS in the non- * BUS_DMA_NOWAIT case and delay the callback until sufficient space * becomes available. 
*/ #include "opt_iommu.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include /* * Tuning constants */ #define IOMMU_MAX_PRE (32 * 1024) #define IOMMU_MAX_PRE_SEG 3 /* Threshold for using the streaming buffer */ #define IOMMU_STREAM_THRESH 128 static MALLOC_DEFINE(M_IOMMU, "dvmamem", "IOMMU DVMA Buffers"); static int iommu_strbuf_flush_sync(struct iommu_state *); #ifdef IOMMU_DIAG static void iommu_diag(struct iommu_state *, vm_offset_t va); #endif /* * Helpers */ #define IOMMU_READ8(is, reg, off) \ bus_space_read_8((is)->is_bustag, (is)->is_bushandle, \ (is)->reg + (off)) #define IOMMU_WRITE8(is, reg, off, v) \ bus_space_write_8((is)->is_bustag, (is)->is_bushandle, \ (is)->reg + (off), (v)) #define IOMMU_HAS_SB(is) \ ((is)->is_sb[0] != 0 || (is)->is_sb[1] != 0) /* * Always overallocate one page; this is needed to handle alignment of the * buffer, so it makes sense using a lazy allocation scheme. */ #define IOMMU_SIZE_ROUNDUP(sz) \ (round_io_page(sz) + IO_PAGE_SIZE) #define IOMMU_SET_TTE(is, va, tte) \ ((is)->is_tsb[IOTSBSLOT(va)] = (tte)) #define IOMMU_GET_TTE(is, va) \ (is)->is_tsb[IOTSBSLOT(va)] /* Resource helpers */ #define IOMMU_RES_START(res) \ ((bus_addr_t)rman_get_start(res) << IO_PAGE_SHIFT) #define IOMMU_RES_END(res) \ ((bus_addr_t)(rman_get_end(res) + 1) << IO_PAGE_SHIFT) #define IOMMU_RES_SIZE(res) \ ((bus_size_t)rman_get_size(res) << IO_PAGE_SHIFT) /* Helpers for struct bus_dmamap_res */ #define BDR_START(r) IOMMU_RES_START((r)->dr_res) #define BDR_END(r) IOMMU_RES_END((r)->dr_res) #define BDR_SIZE(r) IOMMU_RES_SIZE((r)->dr_res) /* Locking macros */ #define IS_LOCK(is) mtx_lock(&is->is_mtx) #define IS_LOCK_ASSERT(is) mtx_assert(&is->is_mtx, MA_OWNED) #define IS_UNLOCK(is) mtx_unlock(&is->is_mtx) /* Flush a page from the TLB. No locking required, since this is atomic. */ static __inline void iommu_tlb_flush(struct iommu_state *is, bus_addr_t va) { if ((is->is_flags & IOMMU_FIRE) != 0) /* * Direct page flushing is not supported and also not * necessary due to cache snooping. */ return; IOMMU_WRITE8(is, is_iommu, IMR_FLUSH, va); } /* * Flush a page from the streaming buffer. No locking required, since this * is atomic. */ static __inline void iommu_strbuf_flushpg(struct iommu_state *is, bus_addr_t va) { int i; for (i = 0; i < 2; i++) if (is->is_sb[i] != 0) IOMMU_WRITE8(is, is_sb[i], ISR_PGFLUSH, va); } /* * Flush an address from the streaming buffer(s); this is an asynchronous * operation. To make sure that it has completed, iommu_strbuf_sync() needs * to be called. No locking required. */ static __inline void iommu_strbuf_flush(struct iommu_state *is, bus_addr_t va) { iommu_strbuf_flushpg(is, va); } /* Synchronize all outstanding flush operations. */ static __inline void iommu_strbuf_sync(struct iommu_state *is) { IS_LOCK_ASSERT(is); iommu_strbuf_flush_sync(is); } /* LRU queue handling for lazy resource allocation. 
*/ static __inline void iommu_map_insq(struct iommu_state *is, bus_dmamap_t map) { IS_LOCK_ASSERT(is); if (!SLIST_EMPTY(&map->dm_reslist)) { if (map->dm_onq) TAILQ_REMOVE(&is->is_maplruq, map, dm_maplruq); TAILQ_INSERT_TAIL(&is->is_maplruq, map, dm_maplruq); map->dm_onq = 1; } } static __inline void iommu_map_remq(struct iommu_state *is, bus_dmamap_t map) { IS_LOCK_ASSERT(is); if (map->dm_onq) TAILQ_REMOVE(&is->is_maplruq, map, dm_maplruq); map->dm_onq = 0; } /* * initialise the UltraSPARC IOMMU (PCI or SBus): * - allocate and set up the iotsb. * - enable the IOMMU * - initialise the streaming buffers (if they exist) * - create a private DVMA map. */ void iommu_init(const char *name, struct iommu_state *is, u_int tsbsize, uint32_t iovabase, u_int resvpg) { vm_size_t size; vm_offset_t offs; uint64_t end, obpmap, obpptsb, tte; u_int maxtsbsize, obptsbentries, obptsbsize, slot, tsbentries; int i; /* * Set up the IOMMU. * * The sun4u IOMMU is part of the PCI or SBus controller so we * will deal with it here. * * The IOMMU address space always ends at 0xffffe000, but the starting * address depends on the size of the map. The map size is 1024 * 2 ^ * is->is_tsbsize entries, where each entry is 8 bytes. The start of * the map can be calculated by (0xffffe000 << (8 + is->is_tsbsize)). */ if ((is->is_flags & IOMMU_FIRE) != 0) { maxtsbsize = IOMMU_TSB512K; /* * We enable bypass in order to be able to use a physical * address for the event queue base. */ is->is_cr = IOMMUCR_SE | IOMMUCR_CM_C_TLB_TBW | IOMMUCR_BE; } else { maxtsbsize = IOMMU_TSB128K; is->is_cr = (tsbsize << IOMMUCR_TSBSZ_SHIFT) | IOMMUCR_DE; } if (tsbsize > maxtsbsize) panic("%s: unsupported TSB size ", __func__); tsbentries = IOMMU_TSBENTRIES(tsbsize); is->is_cr |= IOMMUCR_EN; is->is_tsbsize = tsbsize; is->is_dvmabase = iovabase; if (iovabase == -1) is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize); size = IOTSB_BASESZ << is->is_tsbsize; printf("%s: DVMA map: %#lx to %#lx %d entries%s\n", name, is->is_dvmabase, is->is_dvmabase + (size << (IO_PAGE_SHIFT - IOTTE_SHIFT)) - 1, tsbentries, IOMMU_HAS_SB(is) ? ", streaming buffer" : ""); /* * Set up resource management. */ mtx_init(&is->is_mtx, "iommu", NULL, MTX_DEF); end = is->is_dvmabase + (size << (IO_PAGE_SHIFT - IOTTE_SHIFT)); is->is_dvma_rman.rm_type = RMAN_ARRAY; is->is_dvma_rman.rm_descr = "DVMA Memory"; if (rman_init(&is->is_dvma_rman) != 0 || rman_manage_region(&is->is_dvma_rman, (is->is_dvmabase >> IO_PAGE_SHIFT) + resvpg, (end >> IO_PAGE_SHIFT) - 1) != 0) panic("%s: could not initialize DVMA rman", __func__); TAILQ_INIT(&is->is_maplruq); /* * Allocate memory for I/O page tables. They need to be * physically contiguous. */ is->is_tsb = contigmalloc(size, M_DEVBUF, M_NOWAIT, 0, ~0UL, PAGE_SIZE, 0); if (is->is_tsb == NULL) panic("%s: contigmalloc failed", __func__); is->is_ptsb = pmap_kextract((vm_offset_t)is->is_tsb); bzero(is->is_tsb, size); /* * Add the PROM mappings to the kernel IOTSB if desired. * Note that the firmware of certain Darwin boards doesn't set * the TSB size correctly.
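 * (Roughly: when preservation is requested, the PROM's valid TTEs are
 * copied into the top slots of the larger kernel IOTSB and the
 * corresponding DVMA pages are reserved so the allocator never hands
 * them out again.)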
*/ if ((is->is_flags & IOMMU_FIRE) != 0) obptsbsize = (IOMMU_READ8(is, is_iommu, IMR_TSB) & IOMMUTB_TSBSZ_MASK) >> IOMMUTB_TSBSZ_SHIFT; else obptsbsize = (IOMMU_READ8(is, is_iommu, IMR_CTL) & IOMMUCR_TSBSZ_MASK) >> IOMMUCR_TSBSZ_SHIFT; obptsbentries = IOMMU_TSBENTRIES(obptsbsize); if (bootverbose) printf("%s: PROM IOTSB size: %d (%d entries)\n", name, obptsbsize, obptsbentries); if ((is->is_flags & IOMMU_PRESERVE_PROM) != 0 && !(PCPU_GET(impl) == CPU_IMPL_ULTRASPARCIIi && obptsbsize == 7)) { if (obptsbentries > tsbentries) panic("%s: PROM IOTSB entries exceed kernel", __func__); obpptsb = IOMMU_READ8(is, is_iommu, IMR_TSB) & IOMMUTB_TB_MASK; for (i = 0; i < obptsbentries; i++) { tte = ldxa(obpptsb + i * 8, ASI_PHYS_USE_EC); if ((tte & IOTTE_V) == 0) continue; slot = tsbentries - obptsbentries + i; if (bootverbose) printf("%s: adding PROM IOTSB slot %d " "(kernel slot %d) TTE: %#lx\n", name, i, slot, tte); obpmap = (is->is_dvmabase + slot * IO_PAGE_SIZE) >> IO_PAGE_SHIFT; if (rman_reserve_resource(&is->is_dvma_rman, obpmap, obpmap, IO_PAGE_SIZE >> IO_PAGE_SHIFT, RF_ACTIVE, NULL) == NULL) panic("%s: could not reserve PROM IOTSB slot " "%d (kernel slot %d)", __func__, i, slot); is->is_tsb[slot] = tte; } } /* * Initialize streaming buffer, if it is there. */ if (IOMMU_HAS_SB(is)) { /* * Find two 64-byte blocks in is_flush that are aligned on * a 64-byte boundary for flushing. */ offs = roundup2((vm_offset_t)is->is_flush, STRBUF_FLUSHSYNC_NBYTES); for (i = 0; i < 2; i++, offs += STRBUF_FLUSHSYNC_NBYTES) { is->is_flushva[i] = (uint64_t *)offs; is->is_flushpa[i] = pmap_kextract(offs); } } /* * Now actually start up the IOMMU. */ iommu_reset(is); } /* * Streaming buffers don't exist on the UltraSPARC IIi; we should have * detected that already and disabled them. If not, we will notice that * they aren't there when the STRBUF_EN bit does not remain. */ void iommu_reset(struct iommu_state *is) { uint64_t tsb; int i; tsb = is->is_ptsb; if ((is->is_flags & IOMMU_FIRE) != 0) { tsb |= is->is_tsbsize; IOMMU_WRITE8(is, is_iommu, IMR_CACHE_INVAL, ~0ULL); } IOMMU_WRITE8(is, is_iommu, IMR_TSB, tsb); IOMMU_WRITE8(is, is_iommu, IMR_CTL, is->is_cr); for (i = 0; i < 2; i++) { if (is->is_sb[i] != 0) { IOMMU_WRITE8(is, is_sb[i], ISR_CTL, STRBUF_EN | ((is->is_flags & IOMMU_RERUN_DISABLE) != 0 ? STRBUF_RR_DIS : 0)); /* No streaming buffers? Disable them. */ if ((IOMMU_READ8(is, is_sb[i], ISR_CTL) & STRBUF_EN) == 0) is->is_sb[i] = 0; } } (void)IOMMU_READ8(is, is_iommu, IMR_CTL); } /* * Enter a mapping into the TSB. No locking required, since each TSB slot is * uniquely assigned to a single map. */ static void iommu_enter(struct iommu_state *is, vm_offset_t va, vm_paddr_t pa, int stream, int flags) { uint64_t tte; KASSERT(va >= is->is_dvmabase, ("%s: va %#lx not in DVMA space", __func__, va)); KASSERT(pa <= is->is_pmaxaddr, ("%s: XXX: physical address too large (%#lx)", __func__, pa)); tte = MAKEIOTTE(pa, !(flags & BUS_DMA_NOWRITE), !(flags & BUS_DMA_NOCACHE), stream); IOMMU_SET_TTE(is, va, tte); iommu_tlb_flush(is, va); #ifdef IOMMU_DIAG IS_LOCK(is); iommu_diag(is, va); IS_UNLOCK(is); #endif } /* * Remove mappings created by iommu_enter(). Flush the streaming buffer, * but do not synchronize it. Returns whether a streaming buffer flush * was performed. 
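 * (Callers use the return value to decide whether a subsequent
 * iommu_strbuf_sync() is required; see iommu_dvmamap_vunload() below.)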
*/ static int iommu_remove(struct iommu_state *is, vm_offset_t va, vm_size_t len) { int slot, streamed = 0; #ifdef IOMMU_DIAG iommu_diag(is, va); #endif KASSERT(va >= is->is_dvmabase, ("%s: va 0x%lx not in DVMA space", __func__, (u_long)va)); KASSERT(va + len >= va, ("%s: va 0x%lx + len 0x%lx wraps", __func__, (long)va, (long)len)); va = trunc_io_page(va); while (len > 0) { if ((IOMMU_GET_TTE(is, va) & IOTTE_STREAM) != 0) { streamed = 1; iommu_strbuf_flush(is, va); } len -= ulmin(len, IO_PAGE_SIZE); IOMMU_SET_TTE(is, va, 0); iommu_tlb_flush(is, va); if ((is->is_flags & IOMMU_FLUSH_CACHE) != 0) { slot = IOTSBSLOT(va); if (len <= IO_PAGE_SIZE || slot % 8 == 7) IOMMU_WRITE8(is, is_iommu, IMR_CACHE_FLUSH, is->is_ptsb + slot * 8); } va += IO_PAGE_SIZE; } return (streamed); } /* Decode an IOMMU fault for host bridge error handlers. */ void iommu_decode_fault(struct iommu_state *is, vm_offset_t phys) { bus_addr_t va; long idx; idx = phys - is->is_ptsb; if (phys < is->is_ptsb || idx > (PAGE_SIZE << is->is_tsbsize)) return; va = is->is_dvmabase + (((bus_addr_t)idx >> IOTTE_SHIFT) << IO_PAGE_SHIFT); printf("IOMMU fault virtual address %#lx\n", (u_long)va); } /* * A barrier operation which makes sure that all previous streaming buffer * flushes complete before it returns. */ static int iommu_strbuf_flush_sync(struct iommu_state *is) { struct timeval cur, end; int i; IS_LOCK_ASSERT(is); if (!IOMMU_HAS_SB(is)) return (0); /* * Streaming buffer flushes: * * 1 Tell strbuf to flush by storing va to strbuf_pgflush. If * we're not on a cache line boundary (64-bits): * 2 Store 0 in flag * 3 Store pointer to flag in flushsync * 4 wait till flushsync becomes 0x1 * * If it takes more than .5 sec, something went wrong. */ *is->is_flushva[0] = 1; *is->is_flushva[1] = 1; membar(StoreStore); for (i = 0; i < 2; i++) { if (is->is_sb[i] != 0) { *is->is_flushva[i] = 0; IOMMU_WRITE8(is, is_sb[i], ISR_FLUSHSYNC, is->is_flushpa[i]); } } microuptime(&cur); end.tv_sec = 0; /* * 0.5s is the recommended timeout from the U2S manual. The actual * time required should be smaller by at least a factor of 1000. * We have no choice but to busy-wait. */ end.tv_usec = 500000; timevaladd(&end, &cur); while ((!*is->is_flushva[0] || !*is->is_flushva[1]) && timevalcmp(&cur, &end, <=)) microuptime(&cur); if (!*is->is_flushva[0] || !*is->is_flushva[1]) { panic("%s: flush timeout %ld, %ld at %#lx", __func__, *is->is_flushva[0], *is->is_flushva[1], is->is_flushpa[0]); } return (1); } /* Determine whether we may enable streaming on a mapping. */ static __inline int iommu_use_streaming(struct iommu_state *is, bus_dmamap_t map, bus_size_t size) { return (size >= IOMMU_STREAM_THRESH && IOMMU_HAS_SB(is) && (map->dm_flags & DMF_COHERENT) == 0); } /* * Allocate DVMA virtual memory for a map. The map may not be on a queue, * so that it can be freely modified. */ static int iommu_dvma_valloc(bus_dma_tag_t t, struct iommu_state *is, bus_dmamap_t map, bus_size_t size) { struct resource *res; struct bus_dmamap_res *bdr; bus_size_t align, sgsize; KASSERT(!map->dm_onq, ("%s: map on queue!", __func__)); if ((bdr = malloc(sizeof(*bdr), M_IOMMU, M_NOWAIT)) == NULL) return (EAGAIN); /* * If a boundary is specified, a map cannot be larger than it; however * we do not clip currently, as that does not play well with the lazy * allocation code. * Alignment to a page boundary is always enforced. 
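 * (As an illustration: a dt_alignment of two IO pages yields align == 2
 * below, so the reservation is both sized and aligned in whole IO
 * pages.)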
*/ align = (t->dt_alignment + IO_PAGE_MASK) >> IO_PAGE_SHIFT; sgsize = round_io_page(size) >> IO_PAGE_SHIFT; if (t->dt_boundary > 0 && t->dt_boundary < IO_PAGE_SIZE) panic("%s: illegal boundary specified", __func__); res = rman_reserve_resource_bound(&is->is_dvma_rman, 0L, t->dt_lowaddr >> IO_PAGE_SHIFT, sgsize, t->dt_boundary >> IO_PAGE_SHIFT, RF_ACTIVE | rman_make_alignment_flags(align), NULL); if (res == NULL) { free(bdr, M_IOMMU); return (ENOMEM); } bdr->dr_res = res; bdr->dr_used = 0; SLIST_INSERT_HEAD(&map->dm_reslist, bdr, dr_link); return (0); } /* Unload the map and mark all resources as unused, but do not free them. */ static void iommu_dvmamap_vunload(struct iommu_state *is, bus_dmamap_t map) { struct bus_dmamap_res *r; int streamed = 0; IS_LOCK_ASSERT(is); /* for iommu_strbuf_sync() below */ SLIST_FOREACH(r, &map->dm_reslist, dr_link) { streamed |= iommu_remove(is, BDR_START(r), r->dr_used); r->dr_used = 0; } if (streamed) iommu_strbuf_sync(is); } /* Free a DVMA virtual memory resource. */ static __inline void iommu_dvma_vfree_res(bus_dmamap_t map, struct bus_dmamap_res *r) { KASSERT(r->dr_used == 0, ("%s: resource busy!", __func__)); if (r->dr_res != NULL && rman_release_resource(r->dr_res) != 0) printf("warning: DVMA space lost\n"); SLIST_REMOVE(&map->dm_reslist, r, bus_dmamap_res, dr_link); free(r, M_IOMMU); } /* Free all DVMA virtual memory for a map. */ static void iommu_dvma_vfree(struct iommu_state *is, bus_dmamap_t map) { IS_LOCK(is); iommu_map_remq(is, map); iommu_dvmamap_vunload(is, map); IS_UNLOCK(is); while (!SLIST_EMPTY(&map->dm_reslist)) iommu_dvma_vfree_res(map, SLIST_FIRST(&map->dm_reslist)); } /* Prune a map, freeing all unused DVMA resources. */ static bus_size_t iommu_dvma_vprune(struct iommu_state *is, bus_dmamap_t map) { struct bus_dmamap_res *r, *n; bus_size_t freed = 0; IS_LOCK_ASSERT(is); for (r = SLIST_FIRST(&map->dm_reslist); r != NULL; r = n) { n = SLIST_NEXT(r, dr_link); if (r->dr_used == 0) { freed += BDR_SIZE(r); iommu_dvma_vfree_res(map, r); } } if (SLIST_EMPTY(&map->dm_reslist)) iommu_map_remq(is, map); return (freed); } /* * Try to find a suitably-sized (and if requested, -aligned) slab of DVMA * memory with IO page offset voffs. */ static bus_addr_t iommu_dvma_vfindseg(bus_dmamap_t map, vm_offset_t voffs, bus_size_t size, bus_addr_t amask) { struct bus_dmamap_res *r; bus_addr_t dvmaddr, dvmend; KASSERT(!map->dm_onq, ("%s: map on queue!", __func__)); SLIST_FOREACH(r, &map->dm_reslist, dr_link) { dvmaddr = round_io_page(BDR_START(r) + r->dr_used); /* Alignment can only work with voffs == 0. */ dvmaddr = (dvmaddr + amask) & ~amask; dvmaddr += voffs; dvmend = dvmaddr + size; if (dvmend <= BDR_END(r)) { r->dr_used = dvmend - BDR_START(r); return (dvmaddr); } } return (0); } /* * Try to find or allocate a slab of DVMA space; see above. */ static int iommu_dvma_vallocseg(bus_dma_tag_t dt, struct iommu_state *is, bus_dmamap_t map, vm_offset_t voffs, bus_size_t size, bus_addr_t amask, bus_addr_t *addr) { bus_dmamap_t tm, last; bus_addr_t dvmaddr, freed; int error, complete = 0; dvmaddr = iommu_dvma_vfindseg(map, voffs, size, amask); /* Need to allocate. */ if (dvmaddr == 0) { while ((error = iommu_dvma_valloc(dt, is, map, voffs + size)) == ENOMEM && !complete) { /* * Free the allocated DVMA of a few maps until * the required size is reached. This is an * approximation to not have to call the allocation * function too often; most likely one free run * will not suffice if not one map was large enough * itself due to fragmentation. 
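 * (A sketch of the loop below: walk is_maplruq from its head, i.e. the
 * least recently used maps, prune each map's unused reservations,
 * rotate the map to the tail and stop once enough space has been freed
 * or the former tail has been visited.)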
*/ IS_LOCK(is); freed = 0; last = TAILQ_LAST(&is->is_maplruq, iommu_maplruq_head); do { tm = TAILQ_FIRST(&is->is_maplruq); complete = tm == last; if (tm == NULL) break; freed += iommu_dvma_vprune(is, tm); /* Move to the end. */ iommu_map_insq(is, tm); } while (freed < size && !complete); IS_UNLOCK(is); } if (error != 0) return (error); dvmaddr = iommu_dvma_vfindseg(map, voffs, size, amask); KASSERT(dvmaddr != 0, ("%s: allocation failed unexpectedly!", __func__)); } *addr = dvmaddr; return (0); } static int iommu_dvmamem_alloc(bus_dma_tag_t dt, void **vaddr, int flags, bus_dmamap_t *mapp) { struct iommu_state *is = dt->dt_cookie; int error, mflags; /* * XXX: This will break for 32 bit transfers on machines with more * than is->is_pmaxaddr memory. */ if ((error = sparc64_dma_alloc_map(dt, mapp)) != 0) return (error); if ((flags & BUS_DMA_NOWAIT) != 0) mflags = M_NOWAIT; else mflags = M_WAITOK; if ((flags & BUS_DMA_ZERO) != 0) mflags |= M_ZERO; if ((*vaddr = malloc(dt->dt_maxsize, M_IOMMU, mflags)) == NULL) { error = ENOMEM; sparc64_dma_free_map(dt, *mapp); return (error); } if ((flags & BUS_DMA_COHERENT) != 0) (*mapp)->dm_flags |= DMF_COHERENT; /* * Try to preallocate DVMA space. If this fails, it is retried at * load time. */ iommu_dvma_valloc(dt, is, *mapp, IOMMU_SIZE_ROUNDUP(dt->dt_maxsize)); IS_LOCK(is); iommu_map_insq(is, *mapp); IS_UNLOCK(is); return (0); } static void iommu_dvmamem_free(bus_dma_tag_t dt, void *vaddr, bus_dmamap_t map) { struct iommu_state *is = dt->dt_cookie; iommu_dvma_vfree(is, map); sparc64_dma_free_map(dt, map); free(vaddr, M_IOMMU); } static int iommu_dvmamap_create(bus_dma_tag_t dt, int flags, bus_dmamap_t *mapp) { struct iommu_state *is = dt->dt_cookie; bus_size_t totsz, presz, currsz; int error, i, maxpre; if ((error = sparc64_dma_alloc_map(dt, mapp)) != 0) return (error); if ((flags & BUS_DMA_COHERENT) != 0) (*mapp)->dm_flags |= DMF_COHERENT; /* * Preallocate DVMA space; if this fails now, it is retried at load * time. Through bus_dmamap_load_mbuf() and bus_dmamap_load_uio(), * it is possible to have multiple discontiguous segments in a single * map, which is handled by allocating additional resources, instead * of increasing the size, to avoid fragmentation. * Clamp preallocation to IOMMU_MAX_PRE. In some situations we can * handle more; that case is handled by reallocating at map load time. */ totsz = ulmin(IOMMU_SIZE_ROUNDUP(dt->dt_maxsize), IOMMU_MAX_PRE); error = iommu_dvma_valloc(dt, is, *mapp, totsz); if (error != 0) return (0); /* * Try to be smart about preallocating some additional segments if * needed. */ maxpre = imin(dt->dt_nsegments, IOMMU_MAX_PRE_SEG); presz = dt->dt_maxsize / maxpre; for (i = 1; i < maxpre && totsz < IOMMU_MAX_PRE; i++) { currsz = round_io_page(ulmin(presz, IOMMU_MAX_PRE - totsz)); error = iommu_dvma_valloc(dt, is, *mapp, currsz); if (error != 0) break; totsz += currsz; } IS_LOCK(is); iommu_map_insq(is, *mapp); IS_UNLOCK(is); return (0); } static int iommu_dvmamap_destroy(bus_dma_tag_t dt, bus_dmamap_t map) { struct iommu_state *is = dt->dt_cookie; iommu_dvma_vfree(is, map); sparc64_dma_free_map(dt, map); return (0); } /* * Utility function to load a physical buffer. segp contains * the starting segment on entrance, and the ending segment on exit.
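 * A *segp value of -1 indicates the first call for a given load; in
 * that case any stale mapping is unloaded first and the map is taken
 * off the LRU queue before fresh DVMA space is set up.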
*/ static int iommu_dvmamap_load_phys(bus_dma_tag_t dt, bus_dmamap_t map, vm_paddr_t buf, bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) { bus_addr_t amask, dvmaddr, dvmoffs; bus_size_t sgsize, esize; struct iommu_state *is; vm_offset_t voffs; vm_paddr_t curaddr; int error, firstpg, sgcnt; u_int slot; is = dt->dt_cookie; if (*segp == -1) { if ((map->dm_flags & DMF_LOADED) != 0) { #ifdef DIAGNOSTIC printf("%s: map still in use\n", __func__); #endif bus_dmamap_unload(dt, map); } /* * Make sure that the map is not on a queue so that the * resource list may be safely accessed and modified without * needing the lock to cover the whole operation. */ IS_LOCK(is); iommu_map_remq(is, map); IS_UNLOCK(is); amask = dt->dt_alignment - 1; } else amask = 0; KASSERT(buflen != 0, ("%s: buflen == 0!", __func__)); if (buflen > dt->dt_maxsize) return (EINVAL); if (segs == NULL) segs = dt->dt_segments; voffs = buf & IO_PAGE_MASK; /* Try to find a slab that is large enough. */ error = iommu_dvma_vallocseg(dt, is, map, voffs, buflen, amask, &dvmaddr); if (error != 0) return (error); sgcnt = *segp; firstpg = 1; map->dm_flags &= ~DMF_STREAMED; map->dm_flags |= iommu_use_streaming(is, map, buflen) != 0 ? DMF_STREAMED : 0; for (; buflen > 0; ) { curaddr = buf; /* * Compute the segment size, and adjust counts. */ sgsize = IO_PAGE_SIZE - ((u_long)buf & IO_PAGE_MASK); if (buflen < sgsize) sgsize = buflen; buflen -= sgsize; buf += sgsize; dvmoffs = trunc_io_page(dvmaddr); iommu_enter(is, dvmoffs, trunc_io_page(curaddr), (map->dm_flags & DMF_STREAMED) != 0, flags); if ((is->is_flags & IOMMU_FLUSH_CACHE) != 0) { slot = IOTSBSLOT(dvmoffs); if (buflen <= 0 || slot % 8 == 7) IOMMU_WRITE8(is, is_iommu, IMR_CACHE_FLUSH, is->is_ptsb + slot * 8); } /* * Chop the chunk up into segments of at most maxsegsz, but try * to fill each segment as well as possible. */ if (!firstpg) { esize = ulmin(sgsize, dt->dt_maxsegsz - segs[sgcnt].ds_len); segs[sgcnt].ds_len += esize; sgsize -= esize; dvmaddr += esize; } while (sgsize > 0) { sgcnt++; if (sgcnt >= dt->dt_nsegments) return (EFBIG); /* * No extra alignment here - the common practice in * the busdma code seems to be that only the first * segment needs to satisfy the alignment constraints * (and that only for bus_dmamem_alloc()ed maps). * It is assumed that such tags have maxsegsize >= * maxsize. */ esize = ulmin(sgsize, dt->dt_maxsegsz); segs[sgcnt].ds_addr = dvmaddr; segs[sgcnt].ds_len = esize; sgsize -= esize; dvmaddr += esize; } firstpg = 0; } *segp = sgcnt; return (0); } /* * IOMMU DVMA operations, common to PCI and SBus */ static int iommu_dvmamap_load_buffer(bus_dma_tag_t dt, bus_dmamap_t map, void *buf, bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs, int *segp) { bus_addr_t amask, dvmaddr, dvmoffs; bus_size_t sgsize, esize; struct iommu_state *is; vm_offset_t vaddr, voffs; vm_paddr_t curaddr; int error, firstpg, sgcnt; u_int slot; is = dt->dt_cookie; if (*segp == -1) { if ((map->dm_flags & DMF_LOADED) != 0) { #ifdef DIAGNOSTIC printf("%s: map still in use\n", __func__); #endif bus_dmamap_unload(dt, map); } /* * Make sure that the map is not on a queue so that the * resource list may be safely accessed and modified without * needing the lock to cover the whole operation. 
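 * (Only maps on the LRU queue are visited by iommu_dvma_vprune() on
 * behalf of other allocations, so dequeuing the map is enough to keep
 * its resource list stable for the rest of this function.)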
*/ IS_LOCK(is); iommu_map_remq(is, map); IS_UNLOCK(is); amask = dt->dt_alignment - 1; } else amask = 0; KASSERT(buflen != 0, ("%s: buflen == 0!", __func__)); if (buflen > dt->dt_maxsize) return (EINVAL); if (segs == NULL) segs = dt->dt_segments; vaddr = (vm_offset_t)buf; voffs = vaddr & IO_PAGE_MASK; /* Try to find a slab that is large enough. */ error = iommu_dvma_vallocseg(dt, is, map, voffs, buflen, amask, &dvmaddr); if (error != 0) return (error); sgcnt = *segp; firstpg = 1; map->dm_flags &= ~DMF_STREAMED; map->dm_flags |= iommu_use_streaming(is, map, buflen) != 0 ? DMF_STREAMED : 0; for (; buflen > 0; ) { /* * Get the physical address for this page. */ if (pmap == kernel_pmap) curaddr = pmap_kextract(vaddr); else curaddr = pmap_extract(pmap, vaddr); /* * Compute the segment size, and adjust counts. */ sgsize = IO_PAGE_SIZE - ((u_long)vaddr & IO_PAGE_MASK); if (buflen < sgsize) sgsize = buflen; buflen -= sgsize; vaddr += sgsize; dvmoffs = trunc_io_page(dvmaddr); iommu_enter(is, dvmoffs, trunc_io_page(curaddr), (map->dm_flags & DMF_STREAMED) != 0, flags); if ((is->is_flags & IOMMU_FLUSH_CACHE) != 0) { slot = IOTSBSLOT(dvmoffs); if (buflen <= 0 || slot % 8 == 7) IOMMU_WRITE8(is, is_iommu, IMR_CACHE_FLUSH, is->is_ptsb + slot * 8); } /* * Chop the chunk up into segments of at most maxsegsz, but try * to fill each segment as well as possible. */ if (!firstpg) { esize = ulmin(sgsize, dt->dt_maxsegsz - segs[sgcnt].ds_len); segs[sgcnt].ds_len += esize; sgsize -= esize; dvmaddr += esize; } while (sgsize > 0) { sgcnt++; if (sgcnt >= dt->dt_nsegments) return (EFBIG); /* * No extra alignment here - the common practice in * the busdma code seems to be that only the first * segment needs to satisfy the alignment constraints * (and that only for bus_dmamem_alloc()ed maps). * It is assumed that such tags have maxsegsize >= * maxsize. */ esize = ulmin(sgsize, dt->dt_maxsegsz); segs[sgcnt].ds_addr = dvmaddr; segs[sgcnt].ds_len = esize; sgsize -= esize; dvmaddr += esize; } firstpg = 0; } *segp = sgcnt; return (0); } static void iommu_dvmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg) { } static bus_dma_segment_t * iommu_dvmamap_complete(bus_dma_tag_t dt, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, int error) { struct iommu_state *is = dt->dt_cookie; IS_LOCK(is); iommu_map_insq(is, map); if (error != 0) { iommu_dvmamap_vunload(is, map); IS_UNLOCK(is); } else { IS_UNLOCK(is); map->dm_flags |= DMF_LOADED; } if (segs == NULL) segs = dt->dt_segments; return (segs); } static void iommu_dvmamap_unload(bus_dma_tag_t dt, bus_dmamap_t map) { struct iommu_state *is = dt->dt_cookie; if ((map->dm_flags & DMF_LOADED) == 0) return; IS_LOCK(is); iommu_dvmamap_vunload(is, map); iommu_map_insq(is, map); IS_UNLOCK(is); map->dm_flags &= ~DMF_LOADED; } static void iommu_dvmamap_sync(bus_dma_tag_t dt, bus_dmamap_t map, bus_dmasync_op_t op) { struct iommu_state *is = dt->dt_cookie; struct bus_dmamap_res *r; vm_offset_t va; vm_size_t len; int streamed = 0; if ((map->dm_flags & DMF_LOADED) == 0) return; if ((map->dm_flags & DMF_STREAMED) != 0 && ((op & BUS_DMASYNC_POSTREAD) != 0 || (op & BUS_DMASYNC_PREWRITE) != 0)) { IS_LOCK(is); SLIST_FOREACH(r, &map->dm_reslist, dr_link) { va = (vm_offset_t)BDR_START(r); len = r->dr_used; /* * If we have a streaming buffer, flush it here * first. 
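 * Every I/O page whose TTE has IOTTE_STREAM set is flushed
 * individually; a single iommu_strbuf_sync() barrier is then issued
 * once all resource ranges of the map have been walked.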
*/ while (len > 0) { if ((IOMMU_GET_TTE(is, va) & IOTTE_STREAM) != 0) { streamed = 1; iommu_strbuf_flush(is, va); } len -= ulmin(len, IO_PAGE_SIZE); va += IO_PAGE_SIZE; } } if (streamed) iommu_strbuf_sync(is); IS_UNLOCK(is); } if ((op & BUS_DMASYNC_PREWRITE) != 0) membar(Sync); } #ifdef IOMMU_DIAG /* * Perform an IOMMU diagnostic access and print the tag belonging to va. */ static void iommu_diag(struct iommu_state *is, vm_offset_t va) { int i; uint64_t data, tag; if ((is->is_flags & IOMMU_FIRE) != 0) return; IS_LOCK_ASSERT(is); IOMMU_WRITE8(is, is_dva, 0, trunc_io_page(va)); membar(StoreStore | StoreLoad); printf("%s: tte entry %#lx", __func__, IOMMU_GET_TTE(is, va)); if (is->is_dtcmp != 0) { printf(", tag compare register is %#lx\n", IOMMU_READ8(is, is_dtcmp, 0)); } else printf("\n"); for (i = 0; i < 16; i++) { tag = IOMMU_READ8(is, is_dtag, i * 8); data = IOMMU_READ8(is, is_ddram, i * 8); printf("%s: tag %d: %#lx, vpn %#lx, err %lx; " "data %#lx, pa %#lx, v %d, c %d\n", __func__, i, tag, (tag & IOMMU_DTAG_VPNMASK) << IOMMU_DTAG_VPNSHIFT, (tag & IOMMU_DTAG_ERRMASK) >> IOMMU_DTAG_ERRSHIFT, data, (data & IOMMU_DDATA_PGMASK) << IOMMU_DDATA_PGSHIFT, (data & IOMMU_DDATA_V) != 0, (data & IOMMU_DDATA_C) != 0); } } #endif /* IOMMU_DIAG */ struct bus_dma_methods iommu_dma_methods = { iommu_dvmamap_create, iommu_dvmamap_destroy, iommu_dvmamap_load_phys, iommu_dvmamap_load_buffer, iommu_dvmamap_waitok, iommu_dvmamap_complete, iommu_dvmamap_unload, iommu_dvmamap_sync, iommu_dvmamem_alloc, iommu_dvmamem_free, }; Index: head/sys/sparc64/sparc64/tlb.c =================================================================== --- head/sys/sparc64/sparc64/tlb.c (revision 295879) +++ head/sys/sparc64/sparc64/tlb.c (revision 295880) @@ -1,147 +1,146 @@ /*- * Copyright (c) 2001 Jake Burkholder. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_pmap.h" #include #include #include #include #include #include #include #include #include #include -#include #include #include #include PMAP_STATS_VAR(tlb_ncontext_demap); PMAP_STATS_VAR(tlb_npage_demap); PMAP_STATS_VAR(tlb_nrange_demap); tlb_flush_nonlocked_t *tlb_flush_nonlocked; tlb_flush_user_t *tlb_flush_user; /* * Some tlb operations must be atomic, so no interrupt or trap can be allowed * while they are in progress. 
Traps should not happen, but interrupts need to * be explicitly disabled. critical_enter() cannot be used here, since it only * disables soft interrupts. */ void tlb_context_demap(struct pmap *pm) { void *cookie; register_t s; /* * It is important that we are not interrupted or preempted while * doing the IPIs. The interrupted CPU may hold locks, and since * it will wait for the CPU that sent the IPI, this can lead * to a deadlock when an interrupt comes in on that CPU and its * handler tries to grab one of those locks. This will only happen for * spin locks, but these IPI types are delivered even if normal * interrupts are disabled, so the lock critical section will not * protect the target processor from entering the IPI handler with * the lock held. */ PMAP_STATS_INC(tlb_ncontext_demap); cookie = ipi_tlb_context_demap(pm); s = intr_disable(); if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) { KASSERT(pm->pm_context[curcpu] != -1, ("tlb_context_demap: inactive pmap?")); stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0); stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0); flush(KERNBASE); } intr_restore(s); ipi_wait(cookie); } void tlb_page_demap(struct pmap *pm, vm_offset_t va) { u_long flags; void *cookie; register_t s; PMAP_STATS_INC(tlb_npage_demap); cookie = ipi_tlb_page_demap(pm, va); s = intr_disable(); if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) { KASSERT(pm->pm_context[curcpu] != -1, ("tlb_page_demap: inactive pmap?")); if (pm == kernel_pmap) flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE; else flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE; stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0); stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0); flush(KERNBASE); } intr_restore(s); ipi_wait(cookie); } void tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end) { vm_offset_t va; void *cookie; u_long flags; register_t s; PMAP_STATS_INC(tlb_nrange_demap); cookie = ipi_tlb_range_demap(pm, start, end); s = intr_disable(); if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) { KASSERT(pm->pm_context[curcpu] != -1, ("tlb_range_demap: inactive pmap?")); if (pm == kernel_pmap) flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE; else flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE; for (va = start; va < end; va += PAGE_SIZE) { stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0); stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0); flush(KERNBASE); } } intr_restore(s); ipi_wait(cookie); } Index: head/sys/x86/acpica/acpi_wakeup.c =================================================================== --- head/sys/x86/acpica/acpi_wakeup.c (revision 295879) +++ head/sys/x86/acpica/acpi_wakeup.c (revision 295880) @@ -1,419 +1,418 @@ /*- * Copyright (c) 2001 Takanori Watanabe * Copyright (c) 2001-2012 Mitsuru IWASAKI * Copyright (c) 2003 Peter Wemm * Copyright (c) 2008-2012 Jung-uk Kim * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution.
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #if defined(__amd64__) #define DEV_APIC #else #include "opt_apic.h" #endif #ifdef __i386__ #include "opt_npx.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #ifdef DEV_APIC #include #include #endif #ifdef SMP #include #include #endif #include #include #include "acpi_wakecode.h" #include "acpi_wakedata.h" /* Make sure the code is less than a page and leave room for the stack. */ CTASSERT(sizeof(wakecode) < PAGE_SIZE - 1024); extern int acpi_resume_beep; extern int acpi_reset_video; #ifdef SMP extern struct susppcb **susppcbs; static cpuset_t suspcpus; #else static struct susppcb **susppcbs; #endif static void *acpi_alloc_wakeup_handler(void); static void acpi_stop_beep(void *); #ifdef SMP static int acpi_wakeup_ap(struct acpi_softc *, int); static void acpi_wakeup_cpus(struct acpi_softc *); #endif #ifdef __amd64__ #define ACPI_PAGETABLES 3 #else #define ACPI_PAGETABLES 0 #endif #define WAKECODE_VADDR(sc) \ ((sc)->acpi_wakeaddr + (ACPI_PAGETABLES * PAGE_SIZE)) #define WAKECODE_PADDR(sc) \ ((sc)->acpi_wakephys + (ACPI_PAGETABLES * PAGE_SIZE)) #define WAKECODE_FIXUP(offset, type, val) do { \ type *addr; \ addr = (type *)(WAKECODE_VADDR(sc) + offset); \ *addr = val; \ } while (0) static void acpi_stop_beep(void *arg) { if (acpi_resume_beep != 0) timer_spkr_release(); } #ifdef SMP static int acpi_wakeup_ap(struct acpi_softc *sc, int cpu) { struct pcb *pcb; int vector = (WAKECODE_PADDR(sc) >> 12) & 0xff; int apic_id = cpu_apic_ids[cpu]; int ms; pcb = &susppcbs[cpu]->sp_pcb; WAKECODE_FIXUP(wakeup_pcb, struct pcb *, pcb); WAKECODE_FIXUP(wakeup_gdt, uint16_t, pcb->pcb_gdt.rd_limit); WAKECODE_FIXUP(wakeup_gdt + 2, uint64_t, pcb->pcb_gdt.rd_base); ipi_startup(apic_id, vector); /* Wait up to 5 seconds for it to resume. */ for (ms = 0; ms < 5000; ms++) { if (!CPU_ISSET(cpu, &suspended_cpus)) return (1); /* return SUCCESS */ DELAY(1000); } return (0); /* return FAILURE */ } #define WARMBOOT_TARGET 0 #define WARMBOOT_OFF (KERNBASE + 0x0467) #define WARMBOOT_SEG (KERNBASE + 0x0469) #define CMOS_REG (0x70) #define CMOS_DATA (0x71) #define BIOS_RESET (0x0f) #define BIOS_WARM (0x0a) static void acpi_wakeup_cpus(struct acpi_softc *sc) { uint32_t mpbioswarmvec; int cpu; u_char mpbiosreason; /* save the current value of the warm-start vector */ mpbioswarmvec = *((uint32_t *)WARMBOOT_OFF); outb(CMOS_REG, BIOS_RESET); mpbiosreason = inb(CMOS_DATA); /* setup a vector to our boot code */ *((volatile u_short *)WARMBOOT_OFF) = WARMBOOT_TARGET; *((volatile u_short *)WARMBOOT_SEG) = WAKECODE_PADDR(sc) >> 4; outb(CMOS_REG, BIOS_RESET); outb(CMOS_DATA, BIOS_WARM); /* 'warm-start' */ /* Wake up each AP. 
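 * acpi_wakeup_ap() above derives the startup IPI vector from the
 * physical address of the wake code (which therefore must reside below
 * 1MB on a page boundary) and polls suspended_cpus for up to five
 * seconds per AP.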
*/ for (cpu = 1; cpu < mp_ncpus; cpu++) { if (!CPU_ISSET(cpu, &suspcpus)) continue; if (acpi_wakeup_ap(sc, cpu) == 0) { /* restore the warmstart vector */ *(uint32_t *)WARMBOOT_OFF = mpbioswarmvec; panic("acpi_wakeup: failed to resume AP #%d (PHY #%d)", cpu, cpu_apic_ids[cpu]); } } /* restore the warmstart vector */ *(uint32_t *)WARMBOOT_OFF = mpbioswarmvec; outb(CMOS_REG, BIOS_RESET); outb(CMOS_DATA, mpbiosreason); } #endif int acpi_sleep_machdep(struct acpi_softc *sc, int state) { ACPI_STATUS status; struct pcb *pcb; if (sc->acpi_wakeaddr == 0ul) return (-1); /* couldn't alloc wake memory */ #ifdef SMP suspcpus = all_cpus; CPU_CLR(PCPU_GET(cpuid), &suspcpus); #endif if (acpi_resume_beep != 0) timer_spkr_acquire(); AcpiSetFirmwareWakingVector(WAKECODE_PADDR(sc), 0); intr_suspend(); pcb = &susppcbs[0]->sp_pcb; if (savectx(pcb)) { #ifdef __amd64__ fpususpend(susppcbs[0]->sp_fpususpend); #elif defined(DEV_NPX) npxsuspend(susppcbs[0]->sp_fpususpend); #endif #ifdef SMP if (!CPU_EMPTY(&suspcpus) && suspend_cpus(suspcpus) == 0) { device_printf(sc->acpi_dev, "Failed to suspend APs\n"); return (0); /* couldn't sleep */ } #endif WAKECODE_FIXUP(resume_beep, uint8_t, (acpi_resume_beep != 0)); WAKECODE_FIXUP(reset_video, uint8_t, (acpi_reset_video != 0)); #ifndef __amd64__ WAKECODE_FIXUP(wakeup_cr4, register_t, pcb->pcb_cr4); #endif WAKECODE_FIXUP(wakeup_pcb, struct pcb *, pcb); WAKECODE_FIXUP(wakeup_gdt, uint16_t, pcb->pcb_gdt.rd_limit); WAKECODE_FIXUP(wakeup_gdt + 2, uint64_t, pcb->pcb_gdt.rd_base); /* Call ACPICA to enter the desired sleep state */ if (state == ACPI_STATE_S4 && sc->acpi_s4bios) status = AcpiEnterSleepStateS4bios(); else status = AcpiEnterSleepState(state); if (ACPI_FAILURE(status)) { device_printf(sc->acpi_dev, "AcpiEnterSleepState failed - %s\n", AcpiFormatException(status)); return (0); /* couldn't sleep */ } for (;;) ia32_pause(); } else { #ifdef __amd64__ fpuresume(susppcbs[0]->sp_fpususpend); #elif defined(DEV_NPX) npxresume(susppcbs[0]->sp_fpususpend); #endif } return (1); /* wakeup successfully */ } int acpi_wakeup_machdep(struct acpi_softc *sc, int state, int sleep_result, int intr_enabled) { if (sleep_result == -1) return (sleep_result); if (!intr_enabled) { /* Wakeup MD procedures in interrupt disabled context */ if (sleep_result == 1) { pmap_init_pat(); initializecpu(); PCPU_SET(switchtime, 0); PCPU_SET(switchticks, ticks); #ifdef DEV_APIC lapic_xapic_mode(); #endif #ifdef SMP if (!CPU_EMPTY(&suspcpus)) acpi_wakeup_cpus(sc); #endif } #ifdef SMP if (!CPU_EMPTY(&suspcpus)) restart_cpus(suspcpus); #endif mca_resume(); #ifdef __amd64__ if (vmm_resume_p != NULL) vmm_resume_p(); #endif intr_resume(/*suspend_cancelled*/false); AcpiSetFirmwareWakingVector(0, 0); } else { /* Wakeup MD procedures in interrupt enabled context */ if (sleep_result == 1 && mem_range_softc.mr_op != NULL && mem_range_softc.mr_op->reinit != NULL) mem_range_softc.mr_op->reinit(&mem_range_softc); } return (sleep_result); } static void * acpi_alloc_wakeup_handler(void) { void *wakeaddr; int i; /* * Specify the region for our wakeup code. We want it in the low 1 MB * region, excluding real mode IVT (0-0x3ff), BDA (0x400-0x4ff), EBDA * (less than 128KB, below 0xa0000, must be excluded by SMAP and DSDT), * and ROM area (0xa0000 and above). The temporary page tables must be * page-aligned. 
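 * With the definitions above this means four contiguous pages on amd64
 * (three temporary page-table pages followed by the wake-code page)
 * and a single page on i386, allocated between 0x500 and 0xa0000.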
*/ wakeaddr = contigmalloc((ACPI_PAGETABLES + 1) * PAGE_SIZE, M_DEVBUF, M_WAITOK, 0x500, 0xa0000, PAGE_SIZE, 0ul); if (wakeaddr == NULL) { printf("%s: can't alloc wake memory\n", __func__); return (NULL); } if (EVENTHANDLER_REGISTER(power_resume, acpi_stop_beep, NULL, EVENTHANDLER_PRI_LAST) == NULL) { printf("%s: can't register event handler\n", __func__); contigfree(wakeaddr, (ACPI_PAGETABLES + 1) * PAGE_SIZE, M_DEVBUF); return (NULL); } susppcbs = malloc(mp_ncpus * sizeof(*susppcbs), M_DEVBUF, M_WAITOK); for (i = 0; i < mp_ncpus; i++) { susppcbs[i] = malloc(sizeof(**susppcbs), M_DEVBUF, M_WAITOK); susppcbs[i]->sp_fpususpend = alloc_fpusave(M_WAITOK); } return (wakeaddr); } void acpi_install_wakeup_handler(struct acpi_softc *sc) { static void *wakeaddr = NULL; #ifdef __amd64__ uint64_t *pt4, *pt3, *pt2; int i; #endif if (wakeaddr != NULL) return; wakeaddr = acpi_alloc_wakeup_handler(); if (wakeaddr == NULL) return; sc->acpi_wakeaddr = (vm_offset_t)wakeaddr; sc->acpi_wakephys = vtophys(wakeaddr); bcopy(wakecode, (void *)WAKECODE_VADDR(sc), sizeof(wakecode)); /* Patch GDT base address, ljmp targets. */ WAKECODE_FIXUP((bootgdtdesc + 2), uint32_t, WAKECODE_PADDR(sc) + bootgdt); WAKECODE_FIXUP((wakeup_sw32 + 2), uint32_t, WAKECODE_PADDR(sc) + wakeup_32); #ifdef __amd64__ WAKECODE_FIXUP((wakeup_sw64 + 1), uint32_t, WAKECODE_PADDR(sc) + wakeup_64); WAKECODE_FIXUP(wakeup_pagetables, uint32_t, sc->acpi_wakephys); #endif /* Save pointers to some global data. */ WAKECODE_FIXUP(wakeup_ret, void *, resumectx); #ifndef __amd64__ #if defined(PAE) || defined(PAE_TABLES) WAKECODE_FIXUP(wakeup_cr3, register_t, vtophys(kernel_pmap->pm_pdpt)); #else WAKECODE_FIXUP(wakeup_cr3, register_t, vtophys(kernel_pmap->pm_pdir)); #endif #else /* Build temporary page tables below realmode code. */ pt4 = wakeaddr; pt3 = pt4 + (PAGE_SIZE) / sizeof(uint64_t); pt2 = pt3 + (PAGE_SIZE) / sizeof(uint64_t); /* Create the initial 1GB replicated page tables */ for (i = 0; i < 512; i++) { /* * Each slot of the level 4 pages points * to the same level 3 page */ pt4[i] = (uint64_t)(sc->acpi_wakephys + PAGE_SIZE); pt4[i] |= PG_V | PG_RW | PG_U; /* * Each slot of the level 3 pages points * to the same level 2 page */ pt3[i] = (uint64_t)(sc->acpi_wakephys + (2 * PAGE_SIZE)); pt3[i] |= PG_V | PG_RW | PG_U; /* The level 2 page slots are mapped with 2MB pages for 1GB. */ pt2[i] = i * (2 * 1024 * 1024); pt2[i] |= PG_V | PG_RW | PG_PS | PG_U; } #endif if (bootverbose) device_printf(sc->acpi_dev, "wakeup code va %#jx pa %#jx\n", (uintmax_t)sc->acpi_wakeaddr, (uintmax_t)sc->acpi_wakephys); } Index: head/sys/x86/x86/nexus.c =================================================================== --- head/sys/x86/x86/nexus.c (revision 295879) +++ head/sys/x86/x86/nexus.c (revision 295880) @@ -1,830 +1,829 @@ /*- * Copyright 1998 Massachusetts Institute of Technology * * Permission to use, copy, modify, and distribute this software and * its documentation for any purpose and without fee is hereby * granted, provided that both the above copyright notice and this * permission notice appear in all copies, that both the above * copyright notice and this permission notice appear in all * supporting documentation, and that the name of M.I.T. not be used * in advertising or publicity pertaining to distribution of the * software without specific, written prior permission. M.I.T. makes * no representations about the suitability of this software for any * purpose. It is provided "as is" without express or implied * warranty. 
* * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * This code implements a `root nexus' for Intel Architecture * machines. The function of the root nexus is to serve as an * attachment point for both processors and buses, and to manage * resources which are common to all of them. In particular, * this code implements the core resource managers for interrupt * requests, DMA requests (which rightfully should be a part of the * ISA code but it's easier to do it here for now), I/O port addresses, * and I/O memory address space. */ #ifdef __amd64__ #define DEV_APIC #else #include "opt_apic.h" #endif #include "opt_isa.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#include #include #include #include #include #ifdef DEV_APIC #include "pcib_if.h" #endif #ifdef DEV_ISA #include #ifdef PC98 #include #else #include #endif #endif #include #define ELF_KERN_STR ("elf"__XSTRING(__ELF_WORD_SIZE)" kernel") static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device"); #define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev)) struct rman irq_rman, drq_rman, port_rman, mem_rman; static int nexus_probe(device_t); static int nexus_attach(device_t); static int nexus_print_all_resources(device_t dev); static int nexus_print_child(device_t, device_t); static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit); static struct resource *nexus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int nexus_adjust_resource(device_t, device_t, int, struct resource *, rman_res_t, rman_res_t); #ifdef SMP static int nexus_bind_intr(device_t, device_t, struct resource *, int); #endif static int nexus_config_intr(device_t, int, enum intr_trigger, enum intr_polarity); static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr); static int nexus_activate_resource(device_t, device_t, int, int, struct resource *); static int nexus_deactivate_resource(device_t, device_t, int, int, struct resource *); static int nexus_release_resource(device_t, device_t, int, int, struct resource *); static int nexus_setup_intr(device_t, device_t, struct resource *, int flags, driver_filter_t filter, void (*)(void *), void *, void **); static int nexus_teardown_intr(device_t, device_t, struct resource *, void *); static struct resource_list *nexus_get_reslist(device_t dev, device_t child); static int nexus_set_resource(device_t, device_t, int, int, rman_res_t, rman_res_t); static int nexus_get_resource(device_t, device_t, int, int, rman_res_t *, rman_res_t *); static void nexus_delete_resource(device_t, device_t, int, int); #ifdef DEV_APIC static int nexus_alloc_msi(device_t pcib, device_t dev, int count, 
int maxcount, int *irqs); static int nexus_release_msi(device_t pcib, device_t dev, int count, int *irqs); static int nexus_alloc_msix(device_t pcib, device_t dev, int *irq); static int nexus_release_msix(device_t pcib, device_t dev, int irq); static int nexus_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data); #endif static device_method_t nexus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nexus_probe), DEVMETHOD(device_attach, nexus_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), /* Bus interface */ DEVMETHOD(bus_print_child, nexus_print_child), DEVMETHOD(bus_add_child, nexus_add_child), DEVMETHOD(bus_alloc_resource, nexus_alloc_resource), DEVMETHOD(bus_adjust_resource, nexus_adjust_resource), DEVMETHOD(bus_release_resource, nexus_release_resource), DEVMETHOD(bus_activate_resource, nexus_activate_resource), DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif DEVMETHOD(bus_config_intr, nexus_config_intr), DEVMETHOD(bus_describe_intr, nexus_describe_intr), DEVMETHOD(bus_get_resource_list, nexus_get_reslist), DEVMETHOD(bus_set_resource, nexus_set_resource), DEVMETHOD(bus_get_resource, nexus_get_resource), DEVMETHOD(bus_delete_resource, nexus_delete_resource), /* pcib interface */ #ifdef DEV_APIC DEVMETHOD(pcib_alloc_msi, nexus_alloc_msi), DEVMETHOD(pcib_release_msi, nexus_release_msi), DEVMETHOD(pcib_alloc_msix, nexus_alloc_msix), DEVMETHOD(pcib_release_msix, nexus_release_msix), DEVMETHOD(pcib_map_msi, nexus_map_msi), #endif { 0, 0 } }; DEFINE_CLASS_0(nexus, nexus_driver, nexus_methods, 1); static devclass_t nexus_devclass; DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0); static int nexus_probe(device_t dev) { device_quiet(dev); /* suppress attach message for neatness */ return (BUS_PROBE_GENERIC); } void nexus_init_resources(void) { int irq; /* * XXX working notes: * * - IRQ resource creation should be moved to the PIC/APIC driver. * - DRQ resource creation should be moved to the DMAC driver. * - The above should be sorted to probe earlier than any child busses. * * - Leave I/O and memory creation here, as child probes may need them. * (especially eg. ACPI) */ /* * IRQ's are on the mainboard on old systems, but on the ISA part * of PCI->ISA bridges. There would be multiple sets of IRQs on * multi-ISA-bus systems. PCI interrupts are routed to the ISA * component, so in a way, PCI can be a partial child of an ISA bus(!). * APIC interrupts are global though. */ irq_rman.rm_start = 0; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "Interrupt request lines"; irq_rman.rm_end = NUM_IO_INTS - 1; if (rman_init(&irq_rman)) panic("nexus_init_resources irq_rman"); /* * We search for regions of existing IRQs and add those to the IRQ * resource manager. */ for (irq = 0; irq < NUM_IO_INTS; irq++) if (intr_lookup_source(irq) != NULL) if (rman_manage_region(&irq_rman, irq, irq) != 0) panic("nexus_init_resources irq_rman add"); /* * ISA DMA on PCI systems is implemented in the ISA part of each * PCI->ISA bridge and the channels can be duplicated if there are * multiple bridges. 
(eg: laptops with docking stations) */ drq_rman.rm_start = 0; #ifdef PC98 drq_rman.rm_end = 3; #else drq_rman.rm_end = 7; #endif drq_rman.rm_type = RMAN_ARRAY; drq_rman.rm_descr = "DMA request lines"; /* XXX drq 0 not available on some machines */ if (rman_init(&drq_rman) || rman_manage_region(&drq_rman, drq_rman.rm_start, drq_rman.rm_end)) panic("nexus_init_resources drq_rman"); /* * However, IO ports and Memory truly are global at this level, * as are APIC interrupts (however many IO APICS there turn out * to be on large systems..) */ port_rman.rm_start = 0; port_rman.rm_end = 0xffff; port_rman.rm_type = RMAN_ARRAY; port_rman.rm_descr = "I/O ports"; if (rman_init(&port_rman) || rman_manage_region(&port_rman, 0, 0xffff)) panic("nexus_init_resources port_rman"); mem_rman.rm_start = 0; mem_rman.rm_end = ~0ul; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory addresses"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, ~0)) panic("nexus_init_resources mem_rman"); } static int nexus_attach(device_t dev) { nexus_init_resources(); bus_generic_probe(dev); /* * Explicitly add the legacy0 device here. Other platform * types (such as ACPI) use their own nexus(4) subclass * driver to override this routine and add their own root bus. */ if (BUS_ADD_CHILD(dev, 10, "legacy", 0) == NULL) panic("legacy: could not attach"); bus_generic_attach(dev); return 0; } static int nexus_print_all_resources(device_t dev) { struct nexus_device *ndev = DEVTONX(dev); struct resource_list *rl = &ndev->nx_resources; int retval = 0; if (STAILQ_FIRST(rl)) retval += printf(" at"); retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx"); retval += resource_list_print_type(rl, "iomem", SYS_RES_MEMORY, "%#lx"); retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); return retval; } static int nexus_print_child(device_t bus, device_t child) { int retval = 0; retval += bus_print_child_header(bus, child); retval += nexus_print_all_resources(child); if (device_get_flags(child)) retval += printf(" flags %#x", device_get_flags(child)); retval += printf(" on motherboard\n"); /* XXX "motherboard", ick */ return (retval); } static device_t nexus_add_child(device_t bus, u_int order, const char *name, int unit) { device_t child; struct nexus_device *ndev; ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO); if (!ndev) return(0); resource_list_init(&ndev->nx_resources); child = device_add_child_ordered(bus, order, name, unit); /* should we free this in nexus_child_detached? */ device_set_ivars(child, ndev); return(child); } static struct rman * nexus_rman(int type) { switch (type) { case SYS_RES_IRQ: return (&irq_rman); case SYS_RES_DRQ: return (&drq_rman); case SYS_RES_IOPORT: return (&port_rman); case SYS_RES_MEMORY: return (&mem_rman); default: return (NULL); } } /* * Allocate a resource on behalf of child. NB: child is usually going to be a * child of one of our descendants, not a direct child of nexus0. * (Exceptions include npx.) */ static struct resource * nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct nexus_device *ndev = DEVTONX(child); struct resource *rv; struct resource_list_entry *rle; struct rman *rm; int needactivate = flags & RF_ACTIVE; /* * If this is an allocation of the "default" range for a given * RID, and we know what the resources for this device are * (ie. they aren't maintained by a child bus), then work out * the start/end values.
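 * ("Default" means the caller passed the wildcard range, i.e.
 * RMAN_IS_DEFAULT_RANGE(start, end) holds and count is 1; the real
 * window is then taken from the resource list that was populated
 * earlier through nexus_set_resource().)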
*/ if (RMAN_IS_DEFAULT_RANGE(start, end) && (count == 1)) { if (device_get_parent(child) != bus || ndev == NULL) return(NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); if (rle == NULL) return(NULL); start = rle->start; end = rle->end; count = rle->count; } flags &= ~RF_ACTIVE; rm = nexus_rman(type); if (rm == NULL) return (NULL); rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == 0) return 0; rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { rman_release_resource(rv); return 0; } } return rv; } static int nexus_adjust_resource(device_t bus, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { struct rman *rm; rm = nexus_rman(type); if (rm == NULL) return (ENXIO); if (!rman_is_region_manager(r, rm)) return (EINVAL); return (rman_adjust_resource(r, start, end)); } static int nexus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { #ifdef PC98 bus_space_handle_t bh; int error; #endif void *vaddr; /* * If this is a memory resource, map it into the kernel. */ switch (type) { case SYS_RES_IOPORT: #ifdef PC98 error = i386_bus_space_handle_alloc(X86_BUS_SPACE_IO, rman_get_start(r), rman_get_size(r), &bh); if (error) return (error); rman_set_bushandle(r, bh); #else rman_set_bushandle(r, rman_get_start(r)); #endif rman_set_bustag(r, X86_BUS_SPACE_IO); break; case SYS_RES_MEMORY: #ifdef PC98 error = i386_bus_space_handle_alloc(X86_BUS_SPACE_MEM, rman_get_start(r), rman_get_size(r), &bh); if (error) return (error); #endif vaddr = pmap_mapdev(rman_get_start(r), rman_get_size(r)); rman_set_virtual(r, vaddr); rman_set_bustag(r, X86_BUS_SPACE_MEM); #ifdef PC98 /* PC-98: the type of bus_space_handle_t is the structure. */ bh->bsh_base = (bus_addr_t) vaddr; rman_set_bushandle(r, bh); #else /* IBM-PC: the type of bus_space_handle_t is u_int */ rman_set_bushandle(r, (bus_space_handle_t) vaddr); #endif } return (rman_activate_resource(r)); } static int nexus_deactivate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { /* * If this is a memory resource, unmap it. */ if (type == SYS_RES_MEMORY) { pmap_unmapdev((vm_offset_t)rman_get_virtual(r), rman_get_size(r)); } #ifdef PC98 if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { bus_space_handle_t bh; bh = rman_get_bushandle(r); i386_bus_space_handle_free(rman_get_bustag(r), bh, bh->bsh_sz); } #endif return (rman_deactivate_resource(r)); } static int nexus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { if (rman_get_flags(r) & RF_ACTIVE) { int error = bus_deactivate_resource(child, type, rid, r); if (error) return error; } return (rman_release_resource(r)); } /* * Currently this uses the really grody interface from kern/kern_intr.c * (which really doesn't belong in kern/anything.c). Eventually, all of * the code in kern_intr.c and machdep_intr.c should get moved here, since * this is going to be the official interface. */ static int nexus_setup_intr(device_t bus, device_t child, struct resource *irq, int flags, driver_filter_t filter, void (*ihand)(void *), void *arg, void **cookiep) { int error; /* somebody tried to setup an irq that failed to allocate! */ if (irq == NULL) panic("nexus_setup_intr: NULL irq resource!"); *cookiep = 0; if ((rman_get_flags(irq) & RF_SHAREABLE) == 0) flags |= INTR_EXCL; /* * We depend here on rman_activate_resource() being idempotent. 
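 * (The IRQ resource may already have been activated by the child, for
 * instance when it was allocated with RF_ACTIVE, so activating it a
 * second time here has to be harmless.)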
*/ error = rman_activate_resource(irq); if (error) return (error); error = intr_add_handler(device_get_nameunit(child), rman_get_start(irq), filter, ihand, arg, flags, cookiep); return (error); } static int nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih) { return (intr_remove_handler(ih)); } #ifdef SMP static int nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) { return (intr_bind(rman_get_start(irq), cpu)); } #endif static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) { return (intr_config_intr(irq, trig, pol)); } static int nexus_describe_intr(device_t dev, device_t child, struct resource *irq, void *cookie, const char *descr) { return (intr_describe(rman_get_start(irq), cookie, descr)); } static struct resource_list * nexus_get_reslist(device_t dev, device_t child) { struct nexus_device *ndev = DEVTONX(child); return (&ndev->nx_resources); } static int nexus_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; /* XXX this should return a success/failure indicator */ resource_list_add(rl, type, rid, start, start + count - 1, count); return(0); } static int nexus_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; struct resource_list_entry *rle; rle = resource_list_find(rl, type, rid); if (!rle) return(ENOENT); if (startp) *startp = rle->start; if (countp) *countp = rle->count; return(0); } static void nexus_delete_resource(device_t dev, device_t child, int type, int rid) { struct nexus_device *ndev = DEVTONX(child); struct resource_list *rl = &ndev->nx_resources; resource_list_delete(rl, type, rid); } /* Called from the MSI code to add new IRQs to the IRQ rman. */ void nexus_add_irq(u_long irq) { if (rman_manage_region(&irq_rman, irq, irq) != 0) panic("%s: failed", __func__); } #ifdef DEV_APIC static int nexus_alloc_msix(device_t pcib, device_t dev, int *irq) { return (msix_alloc(dev, irq)); } static int nexus_release_msix(device_t pcib, device_t dev, int irq) { return (msix_release(irq)); } static int nexus_alloc_msi(device_t pcib, device_t dev, int count, int maxcount, int *irqs) { return (msi_alloc(dev, count, maxcount, irqs)); } static int nexus_release_msi(device_t pcib, device_t dev, int count, int *irqs) { return (msi_release(irqs, count)); } static int nexus_map_msi(device_t pcib, device_t dev, int irq, uint64_t *addr, uint32_t *data) { return (msi_map(irq, addr, data)); } #endif /* Placeholder for system RAM. */ static void ram_identify(driver_t *driver, device_t parent) { if (resource_disabled("ram", 0)) return; if (BUS_ADD_CHILD(parent, 0, "ram", 0) == NULL) panic("ram_identify"); } static int ram_probe(device_t dev) { device_quiet(dev); device_set_desc(dev, "System RAM"); return (0); } static int ram_attach(device_t dev) { struct bios_smap *smapbase, *smap, *smapend; struct resource *res; vm_paddr_t *p; caddr_t kmdp; uint32_t smapsize; int error, rid; /* Retrieve the system memory map from the loader. 
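 * The loader attaches the BIOS SMAP as module metadata; the 32-bit
 * word immediately preceding the returned pointer holds the table size
 * in bytes, which is why smapsize is read from (u_int32_t *)smapbase - 1.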
*/ kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type(ELF_KERN_STR); smapbase = (struct bios_smap *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_SMAP); if (smapbase != NULL) { smapsize = *((u_int32_t *)smapbase - 1); smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize); rid = 0; for (smap = smapbase; smap < smapend; smap++) { if (smap->type != SMAP_TYPE_MEMORY || smap->length == 0) continue; #ifdef __i386__ /* * Resources use long's to track resources, so * we can't include memory regions above 4GB. */ if (smap->base > ~0ul) continue; #endif error = bus_set_resource(dev, SYS_RES_MEMORY, rid, smap->base, smap->length); if (error) panic( "ram_attach: resource %d failed set with %d", rid, error); res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0); if (res == NULL) panic("ram_attach: resource %d failed to attach", rid); rid++; } return (0); } /* * If the system map is not available, fall back to using * dump_avail[]. We use the dump_avail[] array rather than * phys_avail[] for the memory map as phys_avail[] contains * holes for kernel memory, page 0, the message buffer, and * the dcons buffer. We test the end address in the loop * instead of the start since the start address for the first * segment is 0. */ for (rid = 0, p = dump_avail; p[1] != 0; rid++, p += 2) { #ifdef PAE /* * Resources use long's to track resources, so we can't * include memory regions above 4GB. */ if (p[0] > ~0ul) break; #endif error = bus_set_resource(dev, SYS_RES_MEMORY, rid, p[0], p[1] - p[0]); if (error) panic("ram_attach: resource %d failed set with %d", rid, error); res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 0); if (res == NULL) panic("ram_attach: resource %d failed to attach", rid); } return (0); } static device_method_t ram_methods[] = { /* Device interface */ DEVMETHOD(device_identify, ram_identify), DEVMETHOD(device_probe, ram_probe), DEVMETHOD(device_attach, ram_attach), { 0, 0 } }; static driver_t ram_driver = { "ram", ram_methods, 1, /* no softc */ }; static devclass_t ram_devclass; DRIVER_MODULE(ram, nexus, ram_driver, ram_devclass, 0, 0); #ifdef DEV_ISA /* * Placeholder which claims PnP 'devices' which describe system * resources. */ static struct isa_pnp_id sysresource_ids[] = { { 0x010cd041 /* PNP0c01 */, "System Memory" }, { 0x020cd041 /* PNP0c02 */, "System Resource" }, { 0 } }; static int sysresource_probe(device_t dev) { int result; if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, sysresource_ids)) <= 0) { device_quiet(dev); } return(result); } static int sysresource_attach(device_t dev) { return(0); } static device_method_t sysresource_methods[] = { /* Device interface */ DEVMETHOD(device_probe, sysresource_probe), DEVMETHOD(device_attach, sysresource_attach), DEVMETHOD(device_detach, bus_generic_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, bus_generic_suspend), DEVMETHOD(device_resume, bus_generic_resume), { 0, 0 } }; static driver_t sysresource_driver = { "sysresource", sysresource_methods, 1, /* no softc */ }; static devclass_t sysresource_devclass; DRIVER_MODULE(sysresource, isa, sysresource_driver, sysresource_devclass, 0, 0); #endif /* DEV_ISA */
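As an aside, the packed 32-bit PnP IDs in sysresource_ids[] are EISA IDs in wire format: the first two bytes pack the three vendor letters as 5-bit codes and the remaining two bytes hold the product number. The following small userland sketch (illustrative only, not part of this commit; the helper name pnp_id_decode is made up) shows how 0x010cd041 and 0x020cd041 map back to the strings "PNP0c01" and "PNP0c02" noted in the comments above.

#include <stdio.h>
#include <stdint.h>

static void
pnp_id_decode(uint32_t id, char buf[8])
{
	uint8_t b0 = id & 0xff;
	uint8_t b1 = (id >> 8) & 0xff;

	/* Three vendor letters, 5 bits each ('A' == 1). */
	buf[0] = '@' + ((b0 >> 2) & 0x1f);
	buf[1] = '@' + (((b0 & 0x3) << 3) | ((b1 >> 5) & 0x7));
	buf[2] = '@' + (b1 & 0x1f);
	/* Product number, two hex-encoded bytes. */
	snprintf(buf + 3, 5, "%02x%02x", (id >> 16) & 0xff, (id >> 24) & 0xff);
}

int
main(void)
{
	char buf[8];

	pnp_id_decode(0x010cd041, buf);		/* prints PNP0c01 */
	printf("%s\n", buf);
	pnp_id_decode(0x020cd041, buf);		/* prints PNP0c02 */
	printf("%s\n", buf);
	return (0);
}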