Index: head/sys/arm/arm/machdep.c =================================================================== --- head/sys/arm/arm/machdep.c (revision 296188) +++ head/sys/arm/arm/machdep.c (revision 296189) @@ -1,1910 +1,1911 @@ /* $NetBSD: arm32_machdep.c,v 1.44 2004/03/24 15:34:47 atatat Exp $ */ /*- * Copyright (c) 2004 Olivier Houchard * Copyright (c) 1994-1998 Mark Brinicombe. * Copyright (c) 1994 Brini. * All rights reserved. * * This code is derived from software written for Brini by Mark Brinicombe * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Mark Brinicombe * for the NetBSD Project. * 4. The name of the company nor the name of the author may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Machine dependant functions for kernel setup * * Created : 17/09/94 * Updated : 18/04/01 updated for new wscons */ #include "opt_compat.h" #include "opt_ddb.h" #include "opt_kstack_pages.h" #include "opt_platform.h" #include "opt_sched.h" #include "opt_timer.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif #ifdef DDB #include #if __ARM_ARCH >= 6 DB_SHOW_COMMAND(cp15, db_show_cp15) { u_int reg; reg = cp15_midr_get(); db_printf("Cpu ID: 0x%08x\n", reg); reg = cp15_ctr_get(); db_printf("Current Cache Lvl ID: 0x%08x\n",reg); reg = cp15_sctlr_get(); db_printf("Ctrl: 0x%08x\n",reg); reg = cp15_actlr_get(); db_printf("Aux Ctrl: 0x%08x\n",reg); reg = cp15_id_pfr0_get(); db_printf("Processor Feat 0: 0x%08x\n", reg); reg = cp15_id_pfr1_get(); db_printf("Processor Feat 1: 0x%08x\n", reg); reg = cp15_id_dfr0_get(); db_printf("Debug Feat 0: 0x%08x\n", reg); reg = cp15_id_afr0_get(); db_printf("Auxiliary Feat 0: 0x%08x\n", reg); reg = cp15_id_mmfr0_get(); db_printf("Memory Model Feat 0: 0x%08x\n", reg); reg = cp15_id_mmfr1_get(); db_printf("Memory Model Feat 1: 0x%08x\n", reg); reg = cp15_id_mmfr2_get(); db_printf("Memory Model Feat 2: 0x%08x\n", reg); reg = cp15_id_mmfr3_get(); db_printf("Memory Model Feat 3: 0x%08x\n", reg); reg = cp15_ttbr_get(); db_printf("TTB0: 0x%08x\n", reg); } DB_SHOW_COMMAND(vtop, db_show_vtop) { u_int reg; if (have_addr) { cp15_ats1cpr_set(addr); reg = cp15_par_get(); db_printf("Physical address reg: 0x%08x\n",reg); } else db_printf("show vtop \n"); } #endif /* __ARM_ARCH >= 6 */ #endif /* DDB */ #ifdef DEBUG #define debugf(fmt, args...) printf(fmt, ##args) #else #define debugf(fmt, args...) #endif struct pcpu __pcpu[MAXCPU]; struct pcpu *pcpup = &__pcpu[0]; static struct trapframe proc0_tf; uint32_t cpu_reset_address = 0; int cold = 1; vm_offset_t vector_page; int (*_arm_memcpy)(void *, void *, int, int) = NULL; int (*_arm_bzero)(void *, int, int) = NULL; int _min_memcpy_size = 0; int _min_bzero_size = 0; extern int *end; #ifdef FDT static char *loader_envp; vm_paddr_t pmap_pa; #if __ARM_ARCH >= 6 vm_offset_t systempage; vm_offset_t irqstack; vm_offset_t undstack; vm_offset_t abtstack; #else /* * This is the number of L2 page tables required for covering max * (hypothetical) memsize of 4GB and all kernel mappings (vectors, msgbuf, * stacks etc.), uprounded to be divisible by 4. 
*/ #define KERNEL_PT_MAX 78 static struct pv_addr kernel_pt_table[KERNEL_PT_MAX]; struct pv_addr systempage; static struct pv_addr msgbufpv; struct pv_addr irqstack; struct pv_addr undstack; struct pv_addr abtstack; static struct pv_addr kernelstack; #endif #endif #if defined(LINUX_BOOT_ABI) #define LBABI_MAX_BANKS 10 uint32_t board_id; struct arm_lbabi_tag *atag_list; char linux_command_line[LBABI_MAX_COMMAND_LINE + 1]; char atags[LBABI_MAX_COMMAND_LINE * 2]; uint32_t memstart[LBABI_MAX_BANKS]; uint32_t memsize[LBABI_MAX_BANKS]; uint32_t membanks; #endif static uint32_t board_revision; /* hex representation of uint64_t */ static char board_serial[32]; SYSCTL_NODE(_hw, OID_AUTO, board, CTLFLAG_RD, 0, "Board attributes"); SYSCTL_UINT(_hw_board, OID_AUTO, revision, CTLFLAG_RD, &board_revision, 0, "Board revision"); SYSCTL_STRING(_hw_board, OID_AUTO, serial, CTLFLAG_RD, board_serial, 0, "Board serial"); int vfp_exists; SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD, &vfp_exists, 0, "Floating point support enabled"); void board_set_serial(uint64_t serial) { snprintf(board_serial, sizeof(board_serial)-1, "%016jx", serial); } void board_set_revision(uint32_t revision) { board_revision = revision; } void sendsig(catcher, ksi, mask) sig_t catcher; ksiginfo_t *ksi; sigset_t *mask; { struct thread *td; struct proc *p; struct trapframe *tf; struct sigframe *fp, frame; struct sigacts *psp; struct sysentvec *sysent; int onstack; int sig; int code; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; code = ksi->ksi_code; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_usr_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !(onstack) && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else fp = (struct sigframe *)td->td_frame->tf_usr_sp; /* make room on the stack */ fp--; /* make the stack aligned */ fp = (struct sigframe *)STACKALIGN(fp); /* Populate the siginfo frame. */ get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK ) ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE; frame.sf_uc.uc_stack = td->td_sigstk; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. */ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } /* * Build context to run handler in. We invoke the handler * directly, only returning via the trampoline. Note the * trampoline version numbers are coordinated with machine- * dependent code in libc. 
*/ tf->tf_r0 = sig; tf->tf_r1 = (register_t)&fp->sf_si; tf->tf_r2 = (register_t)&fp->sf_uc; /* the trampoline uses r5 as the uc address */ tf->tf_r5 = (register_t)&fp->sf_uc; tf->tf_pc = (register_t)catcher; tf->tf_usr_sp = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_usr_lr = (register_t)sysent->sv_sigcode_base; else tf->tf_usr_lr = (register_t)(sysent->sv_psstrings - *(sysent->sv_szsigcode)); /* Set the mode to enter in the signal handler */ #if __ARM_ARCH >= 7 if ((register_t)catcher & 1) tf->tf_spsr |= PSR_T; else tf->tf_spsr &= ~PSR_T; #endif CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr, tf->tf_usr_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } struct kva_md_info kmi; /* * arm32_vector_init: * * Initialize the vector page, and select whether or not to * relocate the vectors. * * NOTE: We expect the vector page to be mapped at its expected * destination. */ extern unsigned int page0[], page0_data[]; void arm_vector_init(vm_offset_t va, int which) { unsigned int *vectors = (int *) va; unsigned int *vectors_data = vectors + (page0_data - page0); int vec; /* * Loop through the vectors we're taking over, and copy the * vector's insn and data word. */ for (vec = 0; vec < ARM_NVEC; vec++) { if ((which & (1 << vec)) == 0) { /* Don't want to take over this vector. */ continue; } vectors[vec] = page0[vec]; vectors_data[vec] = page0_data[vec]; } /* Now sync the vectors. */ icache_sync(va, (ARM_NVEC * 2) * sizeof(u_int)); vector_page = va; if (va == ARM_VECTORS_HIGH) { /* * Assume the MD caller knows what it's doing here, and * really does want the vector page relocated. * * Note: This has to be done here (and not just in * cpu_setup()) because the vector page needs to be * accessible *before* cpu_startup() is called. * Think ddb(9) ... * * NOTE: If the CPU control register is not readable, * this will totally fail! We'll just assume that * any system that has high vector support has a * readable CPU control register, for now. If we * ever encounter one that does not, we'll have to * rethink this. */ cpu_control(CPU_CONTROL_VECRELOC, CPU_CONTROL_VECRELOC); } } static void cpu_startup(void *dummy) { struct pcb *pcb = thread0.td_pcb; const unsigned int mbyte = 1024 * 1024; #if __ARM_ARCH < 6 && !defined(ARM_CACHE_LOCK_ENABLE) vm_page_t m; #endif identify_arm_cpu(); vm_ksubmap_init(&kmi); /* * Display the RAM layout. */ printf("real memory = %ju (%ju MB)\n", (uintmax_t)arm32_ptob(realmem), (uintmax_t)arm32_ptob(realmem) / mbyte); printf("avail memory = %ju (%ju MB)\n", (uintmax_t)arm32_ptob(vm_cnt.v_free_count), (uintmax_t)arm32_ptob(vm_cnt.v_free_count) / mbyte); if (bootverbose) { arm_physmem_print_tables(); arm_devmap_print_table(); } bufinit(); vm_pager_bufferinit(); pcb->pcb_regs.sf_sp = (u_int)thread0.td_kstack + USPACE_SVC_STACK_TOP; pmap_set_pcb_pagedir(kernel_pmap, pcb); #if __ARM_ARCH < 6 vector_page_setprot(VM_PROT_READ); pmap_postinit(); #ifdef ARM_CACHE_LOCK_ENABLE pmap_kenter_user(ARM_TP_ADDRESS, ARM_TP_ADDRESS); arm_lock_cache_line(ARM_TP_ADDRESS); #else m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_ZERO); pmap_kenter_user(ARM_TP_ADDRESS, VM_PAGE_TO_PHYS(m)); #endif *(uint32_t *)ARM_RAS_START = 0; *(uint32_t *)ARM_RAS_END = 0xffffffff; #endif } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. 
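The three registers loaded above are exactly the arguments a SA_SIGINFO handler receives: r0 carries the signal number, r1 the siginfo pointer and r2 the ucontext pointer, with r5 duplicating the ucontext address for the libc trampoline. A minimal userland counterpart (standard POSIX code, not part of this diff; error handling omitted):

#include <signal.h>
#include <string.h>
#include <unistd.h>

/* r0, r1 and r2 as loaded by sendsig() become these three arguments. */
static void
handler(int sig, siginfo_t *si, void *ucp)
{

	(void)sig; (void)si; (void)ucp;
	write(STDOUT_FILENO, "caught\n", 7);
}

int
main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;	/* request the three-argument form */
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return (0);
}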
*/ void cpu_flush_dcache(void *ptr, size_t len) { dcache_wb_poc((vm_offset_t)ptr, (vm_paddr_t)vtophys(ptr), len); } /* Get current clock frequency for the given cpu id. */ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { return (ENXIO); } void cpu_idle(int busy) { CTR2(KTR_SPARE2, "cpu_idle(%d) at %d", busy, curcpu); spinlock_enter(); #ifndef NO_EVENTTIMERS if (!busy) cpu_idleclock(); #endif if (!sched_runnable()) cpu_sleep(0); #ifndef NO_EVENTTIMERS if (!busy) cpu_activeclock(); #endif spinlock_exit(); CTR2(KTR_SPARE2, "cpu_idle(%d) at %d done", busy, curcpu); } int cpu_idle_wakeup(int cpu) { return (0); } /* * Most ARM platforms don't need to do anything special to init their clocks * (they get initialized during normal device attachment), and by not defining a * cpu_initclocks() function they get this generic one. Any platform that needs * to do something special can just provide its own implementation, which will * override this one due to the weak linkage. */ void arm_generic_initclocks(void) { #ifndef NO_EVENTTIMERS #ifdef SMP if (PCPU_GET(cpuid) == 0) cpu_initclocks_bsp(); else cpu_initclocks_ap(); #else cpu_initclocks_bsp(); #endif #endif } __weak_reference(arm_generic_initclocks, cpu_initclocks); int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *tf = td->td_frame; bcopy(&tf->tf_r0, regs->r, sizeof(regs->r)); regs->r_sp = tf->tf_usr_sp; regs->r_lr = tf->tf_usr_lr; regs->r_pc = tf->tf_pc; regs->r_cpsr = tf->tf_spsr; return (0); } int fill_fpregs(struct thread *td, struct fpreg *regs) { bzero(regs, sizeof(*regs)); return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *tf = td->td_frame; bcopy(regs->r, &tf->tf_r0, sizeof(regs->r)); tf->tf_usr_sp = regs->r_sp; tf->tf_usr_lr = regs->r_lr; tf->tf_pc = regs->r_pc; tf->tf_spsr &= ~PSR_FLAGS; tf->tf_spsr |= regs->r_cpsr & PSR_FLAGS; return (0); } int set_fpregs(struct thread *td, struct fpreg *regs) { return (0); } int fill_dbregs(struct thread *td, struct dbreg *regs) { return (0); } int set_dbregs(struct thread *td, struct dbreg *regs) { return (0); } static int ptrace_read_int(struct thread *td, vm_offset_t addr, uint32_t *v) { if (proc_readmem(td, td->td_proc, addr, v, sizeof(*v)) != sizeof(*v)) return (ENOMEM); return (0); } static int ptrace_write_int(struct thread *td, vm_offset_t addr, uint32_t v) { if (proc_writemem(td, td->td_proc, addr, &v, sizeof(v)) != sizeof(v)) return (ENOMEM); return (0); } static u_int ptrace_get_usr_reg(void *cookie, int reg) { int ret; struct thread *td = cookie; KASSERT(((reg >= 0) && (reg <= ARM_REG_NUM_PC)), ("reg is outside range")); switch(reg) { case ARM_REG_NUM_PC: ret = td->td_frame->tf_pc; break; case ARM_REG_NUM_LR: ret = td->td_frame->tf_usr_lr; break; case ARM_REG_NUM_SP: ret = td->td_frame->tf_usr_sp; break; default: ret = *((register_t*)&td->td_frame->tf_r0 + reg); break; } return (ret); } static u_int ptrace_get_usr_int(void* cookie, vm_offset_t offset, u_int* val) { struct thread *td = cookie; u_int error; error = ptrace_read_int(td, offset, val); return (error); } /** * This function parses the current instruction opcode and decodes * any possible jump (change in PC) which might occur after * the instruction is executed. * * @param td Thread structure of the analysed task * @param cur_instr Currently executed instruction * @param alt_next_address Pointer to the variable where * the destination address of the * jump instruction shall be stored.
* * @return <0> when jump is possible, <EINVAL> otherwise */ static int ptrace_get_alternative_next(struct thread *td, uint32_t cur_instr, uint32_t *alt_next_address) { int error; if (inst_branch(cur_instr) || inst_call(cur_instr) || inst_return(cur_instr)) { error = arm_predict_branch(td, cur_instr, td->td_frame->tf_pc, alt_next_address, ptrace_get_usr_reg, ptrace_get_usr_int); return (error); } return (EINVAL); } int ptrace_single_step(struct thread *td) { struct proc *p; int error, error_alt; uint32_t cur_instr, alt_next = 0; /* TODO: This needs to be updated for Thumb-2 */ if ((td->td_frame->tf_spsr & PSR_T) != 0) return (EINVAL); KASSERT(td->td_md.md_ptrace_instr == 0, ("Didn't clear single step")); KASSERT(td->td_md.md_ptrace_instr_alt == 0, ("Didn't clear alternative single step")); p = td->td_proc; PROC_UNLOCK(p); error = ptrace_read_int(td, td->td_frame->tf_pc, &cur_instr); if (error) goto out; error = ptrace_read_int(td, td->td_frame->tf_pc + INSN_SIZE, &td->td_md.md_ptrace_instr); if (error == 0) { error = ptrace_write_int(td, td->td_frame->tf_pc + INSN_SIZE, PTRACE_BREAKPOINT); if (error) { td->td_md.md_ptrace_instr = 0; } else { td->td_md.md_ptrace_addr = td->td_frame->tf_pc + INSN_SIZE; } } error_alt = ptrace_get_alternative_next(td, cur_instr, &alt_next); if (error_alt == 0) { error_alt = ptrace_read_int(td, alt_next, &td->td_md.md_ptrace_instr_alt); if (error_alt) { td->td_md.md_ptrace_instr_alt = 0; } else { error_alt = ptrace_write_int(td, alt_next, PTRACE_BREAKPOINT); if (error_alt) td->td_md.md_ptrace_instr_alt = 0; else td->td_md.md_ptrace_addr_alt = alt_next; } } out: PROC_LOCK(p); return ((error != 0) && (error_alt != 0)); } int ptrace_clear_single_step(struct thread *td) { struct proc *p; /* TODO: This needs to be updated for Thumb-2 */ if ((td->td_frame->tf_spsr & PSR_T) != 0) return (EINVAL); if (td->td_md.md_ptrace_instr != 0) { p = td->td_proc; PROC_UNLOCK(p); ptrace_write_int(td, td->td_md.md_ptrace_addr, td->td_md.md_ptrace_instr); PROC_LOCK(p); td->td_md.md_ptrace_instr = 0; } if (td->td_md.md_ptrace_instr_alt != 0) { p = td->td_proc; PROC_UNLOCK(p); ptrace_write_int(td, td->td_md.md_ptrace_addr_alt, td->td_md.md_ptrace_instr_alt); PROC_LOCK(p); td->td_md.md_ptrace_instr_alt = 0; } return (0); } int ptrace_set_pc(struct thread *td, unsigned long addr) { td->td_frame->tf_pc = addr; return (0); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { } void spinlock_enter(void) { struct thread *td; register_t cspr; td = curthread; if (td->td_md.md_spinlock_count == 0) { cspr = disable_interrupts(PSR_I | PSR_F); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_cspr = cspr; } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t cspr; td = curthread; critical_exit(); cspr = td->td_md.md_saved_cspr; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) restore_interrupts(cspr); } /* * Clear registers on exec */ void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf = td->td_frame; memset(tf, 0, sizeof(*tf)); tf->tf_usr_sp = stack; tf->tf_usr_lr = imgp->entry_addr; tf->tf_svc_lr = 0x77777777; tf->tf_pc = imgp->entry_addr; tf->tf_spsr = PSR_USR32_MODE; } /* * Get machine context.
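ptrace_single_step() and ptrace_clear_single_step() above implement software single-stepping by planting PTRACE_BREAKPOINT at the next sequential instruction and, when the current instruction may branch, at the predicted alternative target. A minimal userland sketch of the facility they back, using the standard FreeBSD ptrace(2) requests (error handling omitted):

#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	pid_t pid;
	int n, status;

	if ((pid = fork()) == 0) {
		ptrace(PT_TRACE_ME, 0, NULL, 0);
		execl("/bin/echo", "echo", "hi", (char *)NULL);
		_exit(1);
	}
	waitpid(pid, &status, 0);		/* child stops on exec */
	for (n = 0; n < 100 && WIFSTOPPED(status); n++) {
		/* each step plants the breakpoints discussed above */
		ptrace(PT_STEP, pid, (caddr_t)1, 0);
		waitpid(pid, &status, 0);
	}
	printf("stepped %d times\n", n);
	if (WIFSTOPPED(status))
		ptrace(PT_KILL, pid, NULL, 0);
	return (0);
}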
*/ int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; __greg_t *gr = mcp->__gregs; if (clear_ret & GET_MC_CLEAR_RET) { gr[_REG_R0] = 0; gr[_REG_CPSR] = tf->tf_spsr & ~PSR_C; } else { gr[_REG_R0] = tf->tf_r0; gr[_REG_CPSR] = tf->tf_spsr; } gr[_REG_R1] = tf->tf_r1; gr[_REG_R2] = tf->tf_r2; gr[_REG_R3] = tf->tf_r3; gr[_REG_R4] = tf->tf_r4; gr[_REG_R5] = tf->tf_r5; gr[_REG_R6] = tf->tf_r6; gr[_REG_R7] = tf->tf_r7; gr[_REG_R8] = tf->tf_r8; gr[_REG_R9] = tf->tf_r9; gr[_REG_R10] = tf->tf_r10; gr[_REG_R11] = tf->tf_r11; gr[_REG_R12] = tf->tf_r12; gr[_REG_SP] = tf->tf_usr_sp; gr[_REG_LR] = tf->tf_usr_lr; gr[_REG_PC] = tf->tf_pc; return (0); } /* * Set machine context. * * However, we don't set any but the user modifiable flags, and we won't * touch the cs selector. */ int set_mcontext(struct thread *td, mcontext_t *mcp) { struct trapframe *tf = td->td_frame; const __greg_t *gr = mcp->__gregs; tf->tf_r0 = gr[_REG_R0]; tf->tf_r1 = gr[_REG_R1]; tf->tf_r2 = gr[_REG_R2]; tf->tf_r3 = gr[_REG_R3]; tf->tf_r4 = gr[_REG_R4]; tf->tf_r5 = gr[_REG_R5]; tf->tf_r6 = gr[_REG_R6]; tf->tf_r7 = gr[_REG_R7]; tf->tf_r8 = gr[_REG_R8]; tf->tf_r9 = gr[_REG_R9]; tf->tf_r10 = gr[_REG_R10]; tf->tf_r11 = gr[_REG_R11]; tf->tf_r12 = gr[_REG_R12]; tf->tf_usr_sp = gr[_REG_SP]; tf->tf_usr_lr = gr[_REG_LR]; tf->tf_pc = gr[_REG_PC]; tf->tf_spsr = gr[_REG_CPSR]; return (0); } /* * MPSAFE */ int sys_sigreturn(td, uap) struct thread *td; struct sigreturn_args /* { const struct __ucontext *sigcntxp; } */ *uap; { ucontext_t uc; int spsr; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); /* * Make sure the processor mode has not been tampered with and * interrupts have not been disabled. */ spsr = uc.uc_mcontext.__gregs[_REG_CPSR]; if ((spsr & PSR_MODE) != PSR_USR32_MODE || (spsr & (PSR_I | PSR_F)) != 0) return (EINVAL); /* Restore register context. */ set_mcontext(td, &uc.uc_mcontext); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. 
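sys_sigreturn() above refuses any context whose CPSR would leave the thread outside user mode or with IRQ/FIQ delivery masked; otherwise a process could hand itself kernel privileges through a forged signal frame. The check isolated as a kernel-side sketch, using the same constants as the function above:

/*
 * Sketch of the sigreturn sanity check: the CPSR about to be restored
 * must select user mode and leave both interrupt sources unmasked.
 */
static int
spsr_valid_for_user(uint32_t spsr)
{

	if ((spsr & PSR_MODE) != PSR_USR32_MODE)
		return (0);	/* privileged mode smuggled in */
	if ((spsr & (PSR_I | PSR_F)) != 0)
		return (0);	/* IRQ or FIQ would be masked */
	return (1);
}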
*/ void makectx(struct trapframe *tf, struct pcb *pcb) { pcb->pcb_regs.sf_r4 = tf->tf_r4; pcb->pcb_regs.sf_r5 = tf->tf_r5; pcb->pcb_regs.sf_r6 = tf->tf_r6; pcb->pcb_regs.sf_r7 = tf->tf_r7; pcb->pcb_regs.sf_r8 = tf->tf_r8; pcb->pcb_regs.sf_r9 = tf->tf_r9; pcb->pcb_regs.sf_r10 = tf->tf_r10; pcb->pcb_regs.sf_r11 = tf->tf_r11; pcb->pcb_regs.sf_r12 = tf->tf_r12; pcb->pcb_regs.sf_pc = tf->tf_pc; pcb->pcb_regs.sf_lr = tf->tf_usr_lr; pcb->pcb_regs.sf_sp = tf->tf_usr_sp; } /* * Fake up a boot descriptor table */ vm_offset_t fake_preload_metadata(struct arm_boot_params *abp __unused) { #ifdef DDB vm_offset_t zstart = 0, zend = 0; #endif vm_offset_t lastaddr; int i = 0; static uint32_t fake_preload[35]; fake_preload[i++] = MODINFO_NAME; fake_preload[i++] = strlen("kernel") + 1; strcpy((char*)&fake_preload[i++], "kernel"); i += 1; fake_preload[i++] = MODINFO_TYPE; fake_preload[i++] = strlen("elf kernel") + 1; strcpy((char*)&fake_preload[i++], "elf kernel"); i += 2; fake_preload[i++] = MODINFO_ADDR; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = KERNVIRTADDR; fake_preload[i++] = MODINFO_SIZE; fake_preload[i++] = sizeof(uint32_t); fake_preload[i++] = (uint32_t)&end - KERNVIRTADDR; #ifdef DDB if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) { fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4); fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8); lastaddr = *(uint32_t *)(KERNVIRTADDR + 8); zend = lastaddr; zstart = *(uint32_t *)(KERNVIRTADDR + 4); db_fetch_ksymtab(zstart, zend); } else #endif lastaddr = (vm_offset_t)&end; fake_preload[i++] = 0; fake_preload[i] = 0; preload_metadata = (void *)fake_preload; init_static_kenv(NULL, 0); return (lastaddr); } void pcpu0_init(void) { #if __ARM_ARCH >= 6 set_curthread(&thread0); #endif pcpu_init(pcpup, 0, sizeof(struct pcpu)); PCPU_SET(curthread, &thread0); } #if defined(LINUX_BOOT_ABI) vm_offset_t linux_parse_boot_param(struct arm_boot_params *abp) { struct arm_lbabi_tag *walker; uint32_t revision; uint64_t serial; /* * Linux boot ABI: r0 = 0, r1 is the board type (!= 0) and r2 * is atags or dtb pointer. If all of these aren't satisfied, * then punt. */ if (!(abp->abp_r0 == 0 && abp->abp_r1 != 0 && abp->abp_r2 != 0)) return 0; board_id = abp->abp_r1; walker = (struct arm_lbabi_tag *) (abp->abp_r2 + KERNVIRTADDR - abp->abp_physaddr); /* xxx - Need to also look for binary device tree */ if (ATAG_TAG(walker) != ATAG_CORE) return 0; atag_list = walker; while (ATAG_TAG(walker) != ATAG_NONE) { switch (ATAG_TAG(walker)) { case ATAG_CORE: break; case ATAG_MEM: arm_physmem_hardware_region(walker->u.tag_mem.start, walker->u.tag_mem.size); break; case ATAG_INITRD2: break; case ATAG_SERIAL: serial = walker->u.tag_sn.low | ((uint64_t)walker->u.tag_sn.high << 32); board_set_serial(serial); break; case ATAG_REVISION: revision = walker->u.tag_rev.rev; board_set_revision(revision); break; case ATAG_CMDLINE: /* XXX open question: Parse this for boothowto? 
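fake_preload_metadata() above hand-builds the stream a boot loader normally provides: a sequence of (32-bit type, 32-bit byte count, payload padded to a 4-byte multiple) records ended by two zero words. A sketch of a walker for that layout, assuming the roundup2() macro from sys/param.h; the kernel's real lookup is preload_search_by_type():

/*
 * Find a record of the given type in a preload metadata stream laid
 * out like fake_preload[] above; returns a pointer to its payload.
 */
static uint32_t *
preload_find(uint32_t *md, uint32_t type)
{

	while (md[0] != 0 || md[1] != 0) {
		if (md[0] == type)
			return (&md[2]);
		md += 2 + roundup2(md[1], 4) / 4;	/* skip padded payload */
	}
	return (NULL);
}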
*/ bcopy(walker->u.tag_cmd.command, linux_command_line, ATAG_SIZE(walker)); break; default: break; } walker = ATAG_NEXT(walker); } /* Save a copy for later */ bcopy(atag_list, atags, (char *)walker - (char *)atag_list + ATAG_SIZE(walker)); init_static_kenv(NULL, 0); return fake_preload_metadata(abp); } #endif #if defined(FREEBSD_BOOT_LOADER) vm_offset_t freebsd_parse_boot_param(struct arm_boot_params *abp) { vm_offset_t lastaddr = 0; void *mdp; void *kmdp; #ifdef DDB vm_offset_t ksym_start; vm_offset_t ksym_end; #endif /* * Mask metadata pointer: it is supposed to be on a page boundary. If * the first argument (mdp) doesn't point to a valid address, the * bootloader must have passed us something other than the metadata * pointer, so we give up. Also give up if we cannot find the metadata * section the loader creates, from which we get all this data. */ if ((mdp = (void *)(abp->abp_r0 & ~PAGE_MASK)) == NULL) return 0; preload_metadata = mdp; kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) return 0; boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); loader_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *); init_static_kenv(loader_envp, 0); lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t); #ifdef DDB ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); db_fetch_ksymtab(ksym_start, ksym_end); #endif return lastaddr; } #endif vm_offset_t default_parse_boot_param(struct arm_boot_params *abp) { vm_offset_t lastaddr; #if defined(LINUX_BOOT_ABI) if ((lastaddr = linux_parse_boot_param(abp)) != 0) return lastaddr; #endif #if defined(FREEBSD_BOOT_LOADER) if ((lastaddr = freebsd_parse_boot_param(abp)) != 0) return lastaddr; #endif /* Fall back to hardcoded metadata. */ lastaddr = fake_preload_metadata(abp); return lastaddr; } /* * Stub version of the boot parameter parsing routine. We are * called early in initarm, before even VM has been initialized. * This routine needs to preserve any data that the boot loader * has passed in before the kernel starts to grow past the end * of the BSS, traditionally the place boot-loaders put this data. * * Since this is called so early, things that depend on the VM system * being set up (including access to some SoCs' serial ports) are not * available, so about all that can be done in this routine is to copy * the arguments. * * This is the default boot parameter parsing routine. Individual * kernels/boards can override this weak function with one of their * own. We just fake metadata...
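The weak-linkage arrangement described above works because __weak_reference() (from sys/cdefs.h) makes parse_boot_param a weak alias for default_parse_boot_param, so any strong definition wins at link time. A hypothetical board file could therefore do the following (linux_parse_boot_param() exists only under option LINUX_BOOT_ABI):

/*
 * Hypothetical board override: a strong parse_boot_param() definition
 * takes precedence over the weak alias declared below.
 */
vm_offset_t
parse_boot_param(struct arm_boot_params *abp)
{

	/* This board trusts only firmware-provided atags. */
	return (linux_parse_boot_param(abp));
}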
*/ __weak_reference(default_parse_boot_param, parse_boot_param); /* * Initialize proc0 */ void init_proc0(vm_offset_t kstack) { proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_pcb = (struct pcb *) (thread0.td_kstack + kstack_pages * PAGE_SIZE) - 1; thread0.td_pcb->pcb_flags = 0; thread0.td_pcb->pcb_vfpcpu = -1; thread0.td_pcb->pcb_vfpstate.fpscr = VFPSCR_DN; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; } int arm_predict_branch(void *cookie, u_int insn, register_t pc, register_t *new_pc, u_int (*fetch_reg)(void*, int), u_int (*read_int)(void*, vm_offset_t, u_int*)) { u_int addr, nregs, offset = 0; int error = 0; switch ((insn >> 24) & 0xf) { case 0x2: /* add pc, reg1, #value */ case 0x0: /* add pc, reg1, reg2, lsl #offset */ addr = fetch_reg(cookie, (insn >> 16) & 0xf); if (((insn >> 16) & 0xf) == 15) addr += 8; if (insn & 0x0200000) { offset = (insn >> 7) & 0x1e; offset = (insn & 0xff) << (32 - offset) | (insn & 0xff) >> offset; } else { offset = fetch_reg(cookie, insn & 0x0f); if ((insn & 0x0000ff0) != 0x00000000) { if (insn & 0x10) nregs = fetch_reg(cookie, (insn >> 8) & 0xf); else nregs = (insn >> 7) & 0x1f; switch ((insn >> 5) & 3) { case 0: /* lsl */ offset = offset << nregs; break; case 1: /* lsr */ offset = offset >> nregs; break; default: break; /* XXX */ } } *new_pc = addr + offset; return (0); } case 0xa: /* b ... */ case 0xb: /* bl ... */ addr = ((insn << 2) & 0x03ffffff); if (addr & 0x02000000) addr |= 0xfc000000; *new_pc = (pc + 8 + addr); return (0); case 0x7: /* ldr pc, [pc, reg, lsl #2] */ addr = fetch_reg(cookie, insn & 0xf); addr = pc + 8 + (addr << 2); error = read_int(cookie, addr, &addr); *new_pc = addr; return (error); case 0x1: /* mov pc, reg */ *new_pc = fetch_reg(cookie, insn & 0xf); return (0); case 0x4: case 0x5: /* ldr pc, [reg] */ addr = fetch_reg(cookie, (insn >> 16) & 0xf); /* ldr pc, [reg, #offset] */ if (insn & (1 << 24)) offset = insn & 0xfff; if (insn & 0x00800000) addr += offset; else addr -= offset; error = read_int(cookie, addr, &addr); *new_pc = addr; return (error); case 0x8: /* ldmxx reg, {..., pc} */ case 0x9: addr = fetch_reg(cookie, (insn >> 16) & 0xf); nregs = (insn & 0x5555) + ((insn >> 1) & 0x5555); nregs = (nregs & 0x3333) + ((nregs >> 2) & 0x3333); nregs = (nregs + (nregs >> 4)) & 0x0f0f; nregs = (nregs + (nregs >> 8)) & 0x001f; switch ((insn >> 23) & 0x3) { case 0x0: /* ldmda */ addr = addr - 0; break; case 0x1: /* ldmia */ addr = addr + 0 + ((nregs - 1) << 2); break; case 0x2: /* ldmdb */ addr = addr - 4; break; case 0x3: /* ldmib */ addr = addr + 4 + ((nregs - 1) << 2); break; } error = read_int(cookie, addr, &addr); *new_pc = addr; return (error); default: return (EINVAL); } } #if __ARM_ARCH >= 6 void set_stackptrs(int cpu) { set_stackptr(PSR_IRQ32_MODE, irqstack + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_ABT32_MODE, abtstack + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_UND32_MODE, undstack + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); } #else void set_stackptrs(int cpu) { set_stackptr(PSR_IRQ32_MODE, irqstack.pv_va + ((IRQ_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_ABT32_MODE, abtstack.pv_va + ((ABT_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); set_stackptr(PSR_UND32_MODE, undstack.pv_va + ((UND_STACK_SIZE * PAGE_SIZE) * (cpu + 1))); } #endif #ifdef EFI #define efi_next_descriptor(ptr, size) \ ((struct efi_md *)(((uint8_t *) ptr) + size)) static void add_efi_map_entries(struct efi_map_header *efihdr, struct mem_region *mr, int *mrcnt, uint32_t 
*memsize) { struct efi_md *map, *p; const char *type; size_t efisz, memory_size; int ndesc, i, j; static const char *types[] = { "Reserved", "LoaderCode", "LoaderData", "BootServicesCode", "BootServicesData", "RuntimeServicesCode", "RuntimeServicesData", "ConventionalMemory", "UnusableMemory", "ACPIReclaimMemory", "ACPIMemoryNVS", "MemoryMappedIO", "MemoryMappedIOPortSpace", "PalCode" }; *mrcnt = 0; *memsize = 0; /* * Memory map data provided by UEFI via the GetMemoryMap * Boot Services API. */ efisz = roundup2(sizeof(struct efi_map_header), 0x10); map = (struct efi_md *)((uint8_t *)efihdr + efisz); if (efihdr->descriptor_size == 0) return; ndesc = efihdr->memory_size / efihdr->descriptor_size; if (boothowto & RB_VERBOSE) printf("%23s %12s %12s %8s %4s\n", "Type", "Physical", "Virtual", "#Pages", "Attr"); memory_size = 0; for (i = 0, j = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p, efihdr->descriptor_size)) { if (boothowto & RB_VERBOSE) { if (p->md_type <= EFI_MD_TYPE_PALCODE) type = types[p->md_type]; else type = ""; printf("%23s %012llx %12p %08llx ", type, p->md_phys, p->md_virt, p->md_pages); if (p->md_attr & EFI_MD_ATTR_UC) printf("UC "); if (p->md_attr & EFI_MD_ATTR_WC) printf("WC "); if (p->md_attr & EFI_MD_ATTR_WT) printf("WT "); if (p->md_attr & EFI_MD_ATTR_WB) printf("WB "); if (p->md_attr & EFI_MD_ATTR_UCE) printf("UCE "); if (p->md_attr & EFI_MD_ATTR_WP) printf("WP "); if (p->md_attr & EFI_MD_ATTR_RP) printf("RP "); if (p->md_attr & EFI_MD_ATTR_XP) printf("XP "); if (p->md_attr & EFI_MD_ATTR_RT) printf("RUNTIME"); printf("\n"); } switch (p->md_type) { case EFI_MD_TYPE_CODE: case EFI_MD_TYPE_DATA: case EFI_MD_TYPE_BS_CODE: case EFI_MD_TYPE_BS_DATA: case EFI_MD_TYPE_FREE: /* * We're allowed to use any entry with these types. */ break; default: continue; } j++; if (j >= FDT_MEM_REGIONS) break; mr[j].mr_start = p->md_phys; mr[j].mr_size = p->md_pages * PAGE_SIZE; memory_size += mr[j].mr_size; } *mrcnt = j; *memsize = memory_size; } #endif /* EFI */ #ifdef FDT static char * kenv_next(char *cp) { if (cp != NULL) { while (*cp != 0) cp++; cp++; if (*cp == 0) cp = NULL; } return (cp); } static void print_kenv(void) { char *cp; debugf("loader passed (static) kenv:\n"); if (loader_envp == NULL) { debugf(" no env, null ptr\n"); return; } debugf(" loader_envp = 0x%08x\n", (uint32_t)loader_envp); for (cp = loader_envp; cp != NULL; cp = kenv_next(cp)) debugf(" %x %s\n", (uint32_t)cp, cp); } #if __ARM_ARCH < 6 void * initarm(struct arm_boot_params *abp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; struct pv_addr kernel_l1pt; struct pv_addr dpcpu; vm_offset_t dtbp, freemempos, l2_start, lastaddr; - uint32_t memsize, l2size; + u_long memsize; + uint32_t l2size; char *env; void *kmdp; u_int l1pagetable; int i, j, err_devmap, mem_regions_sz; lastaddr = parse_boot_param(abp); arm_physmem_kernaddr = abp->abp_physaddr; memsize = 0; cpuinfo_init(); set_cpufuncs(); /* * Find the dtb passed in by the boot loader. */ kmdp = preload_search_by_type("elf kernel"); if (kmdp != NULL) dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); else dtbp = (vm_offset_t)NULL; #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not retrieved (from metadata) try * to use the statically embedded one. */ if (dtbp == (vm_offset_t)NULL) dtbp = (vm_offset_t)&fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); /* Grab physical memory regions information from device tree. 
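The ldm case of arm_predict_branch() above counts the registers in the 16-bit load list with a parallel bit count (pair sums, nibble sums, byte sums, final fold) to locate where pc will be loaded from. The same trick in isolation, with a worked value:

/*
 * Parallel popcount of a 16-bit ldm register list, as used in
 * arm_predict_branch(). E.g. 0x8011 (r0, r4, pc) -> 3.
 */
static unsigned int
popcount16(unsigned int v)
{

	v = (v & 0x5555) + ((v >> 1) & 0x5555);	/* 2-bit partial sums */
	v = (v & 0x3333) + ((v >> 2) & 0x3333);	/* 4-bit partial sums */
	v = (v + (v >> 4)) & 0x0f0f;		/* 8-bit partial sums */
	v = (v + (v >> 8)) & 0x001f;		/* fold to final count */
	return (v);
}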
*/ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0) panic("Cannot get physical memory regions"); arm_physmem_hardware_regions(mem_regions, mem_regions_sz); /* Grab reserved memory regions information from device tree. */ if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0) arm_physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); /* Platform-specific initialisation */ platform_probe_and_attach(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); /* Calculate number of L2 tables needed for mapping vm_page_array */ l2size = (memsize / PAGE_SIZE) * sizeof(struct vm_page); l2size = (l2size >> L1_S_SHIFT) + 1; /* * Add one table for end of kernel map, one for stacks, msgbuf and * L1 and L2 tables map and one for vectors map. */ l2size += 3; /* Make it divisible by 4 */ l2size = (l2size + 3) & ~3; freemempos = (lastaddr + PAGE_MASK) & ~PAGE_MASK; /* Define a macro to simplify memory allocation */ #define valloc_pages(var, np) \ alloc_pages((var).pv_va, (np)); \ (var).pv_pa = (var).pv_va + (abp->abp_physaddr - KERNVIRTADDR); #define alloc_pages(var, np) \ (var) = freemempos; \ freemempos += (np * PAGE_SIZE); \ memset((char *)(var), 0, ((np) * PAGE_SIZE)); while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0) freemempos += PAGE_SIZE; valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE); for (i = 0, j = 0; i < l2size; ++i) { if (!(i % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) { valloc_pages(kernel_pt_table[i], L2_TABLE_SIZE / PAGE_SIZE); j = i; } else { kernel_pt_table[i].pv_va = kernel_pt_table[j].pv_va + L2_TABLE_SIZE_REAL * (i - j); kernel_pt_table[i].pv_pa = kernel_pt_table[i].pv_va - KERNVIRTADDR + abp->abp_physaddr; } } /* * Allocate a page for the system page mapped to 0x00000000 * or 0xffff0000. This page will just contain the system vectors * and can be shared by all processes. */ valloc_pages(systempage, 1); /* Allocate dynamic per-cpu area. */ valloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu.pv_va, 0); /* Allocate stacks for all modes */ valloc_pages(irqstack, IRQ_STACK_SIZE * MAXCPU); valloc_pages(abtstack, ABT_STACK_SIZE * MAXCPU); valloc_pages(undstack, UND_STACK_SIZE * MAXCPU); valloc_pages(kernelstack, kstack_pages * MAXCPU); valloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE); /* * Now we start construction of the L1 page table * We start by mapping the L2 page tables into the L1. * This means that we can replace L1 mappings later on if necessary */ l1pagetable = kernel_l1pt.pv_va; /* * Try to map as much as possible of kernel text and data using * 1MB section mapping and for the rest of initial kernel address * space use L2 coarse tables. 
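A worked instance of the l2size arithmetic above, under two illustrative assumptions (4 GB of RAM and sizeof(struct vm_page) == 72; neither number comes from this diff): 4 GB is 0x100000 pages, whose vm_page structures need about 72 MB, i.e. 72 one-MB L1 sections, so the computation gives (72 + 1) + 3 = 76 tables, already a multiple of 4 and safely below KERNEL_PT_MAX (78).

/* The same math as in initarm(), with the illustrative numbers inlined. */
static uint32_t
example_l2size(void)
{
	uint32_t pages = 0x100000;	/* assumed: 4 GB / 4 KB PAGE_SIZE */
	uint32_t bytes = pages * 72;	/* assumed sizeof(struct vm_page) */
	uint32_t n = (bytes >> 20) + 1;	/* one L2 table per MB: 73 */

	n += 3;			/* end of kernel map, stacks/msgbuf, vectors */
	return ((n + 3) & ~3u);	/* round up to a multiple of 4: 76 */
}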
* * Link L2 tables for mapping remainder of kernel (modulo 1MB) * and kernel structures */ l2_start = lastaddr & ~(L1_S_OFFSET); for (i = 0 ; i < l2size - 1; i++) pmap_link_l2pt(l1pagetable, l2_start + i * L1_S_SIZE, &kernel_pt_table[i]); pmap_curmaxkvaddr = l2_start + (l2size - 1) * L1_S_SIZE; /* Map kernel code and data */ pmap_map_chunk(l1pagetable, KERNVIRTADDR, abp->abp_physaddr, (((uint32_t)(lastaddr) - KERNVIRTADDR) + PAGE_MASK) & ~PAGE_MASK, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Map L1 directory and allocated L2 page tables */ pmap_map_chunk(l1pagetable, kernel_l1pt.pv_va, kernel_l1pt.pv_pa, L1_TABLE_SIZE, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); pmap_map_chunk(l1pagetable, kernel_pt_table[0].pv_va, kernel_pt_table[0].pv_pa, L2_TABLE_SIZE_REAL * l2size, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE); /* Map allocated DPCPU, stacks and msgbuf */ pmap_map_chunk(l1pagetable, dpcpu.pv_va, dpcpu.pv_pa, freemempos - dpcpu.pv_va, VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE); /* Link and map the vector page */ pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH, &kernel_pt_table[l2size - 1]); pmap_map_entry(l1pagetable, ARM_VECTORS_HIGH, systempage.pv_pa, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE); /* Establish static device mappings. */ err_devmap = platform_devmap_init(); arm_devmap_bootstrap(l1pagetable, NULL); vm_max_kernel_address = platform_lastaddr(); cpu_domains((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | DOMAIN_CLIENT); pmap_pa = kernel_l1pt.pv_pa; cpu_setttb(kernel_l1pt.pv_pa); cpu_tlb_flushID(); cpu_domains(DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)); /* * Now that proper page tables are installed, call cpu_setup() to enable * instruction and data caches and other chip-specific features. */ cpu_setup(); /* * Only after the SOC registers block is mapped we can perform device * tree fixups, as they may attempt to read parameters from hardware. */ OF_interpret("perform-fixup", 0); platform_gpio_init(); cninit(); debugf("initarm: console initialized\n"); debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); debugf(" boothowto = 0x%08x\n", boothowto); debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); print_kenv(); env = kern_getenv("kernelname"); if (env != NULL) { strlcpy(kernelname, env, sizeof(kernelname)); freeenv(env); } if (err_devmap != 0) printf("WARNING: could not fully configure devmap, error=%d\n", err_devmap); platform_late_init(); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ cpu_control(CPU_CONTROL_MMU_ENABLE, CPU_CONTROL_MMU_ENABLE); set_stackptrs(0); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in cpu_setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). 
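The cpu_domains() call above programs the ARM Domain Access Control Register, which holds two bits for each of 16 protection domains: 01 (client) means page-table permission bits are enforced, 11 (manager) means they are bypassed. A sketch of the encoding, consistent with DOMAIN_CLIENT == 1 as used above:

/* DACR value granting client access to one domain (architectural layout). */
static uint32_t
dacr_client(int domain)
{

	return ((uint32_t)1 << (domain * 2));	/* 01 in that domain's bit pair */
}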
*/ cpu_idcache_wbinv_all(); undefined_init(); init_proc0(kernelstack.pv_va); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); pmap_bootstrap(freemempos, &kernel_l1pt); msgbufp = (void *)msgbufpv.pv_va; msgbufinit(msgbufp, msgbufsize); mutex_init(); /* * Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_exclude_region(abp->abp_physaddr, (virtual_avail - KERNVIRTADDR), EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); dbg_monitor_init(); kdb_init(); return ((void *)(kernelstack.pv_va + USPACE_SVC_STACK_TOP - sizeof(struct pcb))); } #else /* __ARM_ARCH < 6 */ void * initarm(struct arm_boot_params *abp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; vm_paddr_t lastaddr; vm_offset_t dtbp, kernelstack, dpcpu; - uint32_t memsize; + u_long memsize; char *env; void *kmdp; int err_devmap, mem_regions_sz; #ifdef EFI struct efi_map_header *efihdr; #endif /* get last allocated physical address */ arm_physmem_kernaddr = abp->abp_physaddr; lastaddr = parse_boot_param(abp) - KERNVIRTADDR + arm_physmem_kernaddr; memsize = 0; set_cpufuncs(); cpuinfo_init(); /* * Find the dtb passed in by the boot loader. */ kmdp = preload_search_by_type("elf kernel"); dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not retrieved (from metadata) try * to use the statically embedded one. */ if (dtbp == (vm_offset_t)NULL) dtbp = (vm_offset_t)&fdt_static_dtb; #endif if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); #ifdef EFI efihdr = (struct efi_map_header *)preload_search_info(kmdp, MODINFO_METADATA | MODINFOMD_EFI_MAP); if (efihdr != NULL) { add_efi_map_entries(efihdr, mem_regions, &mem_regions_sz, &memsize); } else #endif { /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0) panic("Cannot get physical memory regions"); } arm_physmem_hardware_regions(mem_regions, mem_regions_sz); /* Grab reserved memory regions information from device tree. */ if (fdt_get_reserved_regions(mem_regions, &mem_regions_sz) == 0) arm_physmem_exclude_regions(mem_regions, mem_regions_sz, EXFLAG_NODUMP | EXFLAG_NOALLOC); /* * Set TEX remapping registers. * Setup kernel page tables and switch to kernel L1 page table. */ pmap_set_tex(); pmap_bootstrap_prepare(lastaddr); /* * Now that proper page tables are installed, call cpu_setup() to enable * instruction and data caches and other chip-specific features. */ cpu_setup(); /* Platform-specific initialisation */ platform_probe_and_attach(); pcpu0_init(); /* Do basic tuning, hz etc */ init_param1(); /* * Allocate a page for the system page mapped to 0xffff0000 * This page will just contain the system vectors and can be * shared by all processes. */ systempage = pmap_preboot_get_pages(1); /* Map the vector page. */ pmap_preboot_map_pages(systempage, ARM_VECTORS_HIGH, 1); if (virtual_end >= ARM_VECTORS_HIGH) virtual_end = ARM_VECTORS_HIGH - 1; /* Allocate dynamic per-cpu area. 
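The "(cpu + 1)" in set_stackptrs() earlier follows from the ARM full-descending stack convention: STMFD decrements before storing, so each mode's banked r13 must start at the top end of that CPU's slice of the stack block. Isolated as a sketch (kernel types assumed):

/* Top of the per-mode stack slice for a given CPU; stacks grow downward. */
static vm_offset_t
mode_stack_top(vm_offset_t base, int pages, int cpu)
{

	return (base + pages * PAGE_SIZE * (cpu + 1));
}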
*/ dpcpu = pmap_preboot_get_vpages(DPCPU_SIZE / PAGE_SIZE); dpcpu_init((void *)dpcpu, 0); /* Allocate stacks for all modes */ irqstack = pmap_preboot_get_vpages(IRQ_STACK_SIZE * MAXCPU); abtstack = pmap_preboot_get_vpages(ABT_STACK_SIZE * MAXCPU); undstack = pmap_preboot_get_vpages(UND_STACK_SIZE * MAXCPU ); kernelstack = pmap_preboot_get_vpages(kstack_pages * MAXCPU); /* Allocate message buffer. */ msgbufp = (void *)pmap_preboot_get_vpages( round_page(msgbufsize) / PAGE_SIZE); /* * Pages were allocated during the secondary bootstrap for the * stacks for different CPU modes. * We must now set the r13 registers in the different CPU modes to * point to these stacks. * Since the ARM stacks use STMFD etc. we must set r13 to the top end * of the stack memory. */ set_stackptrs(0); mutex_init(); /* Establish static device mappings. */ err_devmap = platform_devmap_init(); arm_devmap_bootstrap(0, NULL); vm_max_kernel_address = platform_lastaddr(); /* * Only after the SOC registers block is mapped we can perform device * tree fixups, as they may attempt to read parameters from hardware. */ OF_interpret("perform-fixup", 0); platform_gpio_init(); cninit(); debugf("initarm: console initialized\n"); debugf(" arg1 kmdp = 0x%08x\n", (uint32_t)kmdp); debugf(" boothowto = 0x%08x\n", boothowto); debugf(" dtbp = 0x%08x\n", (uint32_t)dtbp); debugf(" lastaddr1: 0x%08x\n", lastaddr); print_kenv(); env = kern_getenv("kernelname"); if (env != NULL) strlcpy(kernelname, env, sizeof(kernelname)); if (err_devmap != 0) printf("WARNING: could not fully configure devmap, error=%d\n", err_devmap); platform_late_init(); /* * We must now clean the cache again.... * Cleaning may be done by reading new data to displace any * dirty data in the cache. This will have happened in cpu_setttb() * but since we are boot strapping the addresses used for the read * may have just been remapped and thus the cache could be out * of sync. A re-clean after the switch will cure this. * After booting there are no gross relocations of the kernel thus * this problem will not occur after initarm(). */ /* Set stack for exception handlers */ undefined_init(); init_proc0(kernelstack); arm_vector_init(ARM_VECTORS_HIGH, ARM_VEC_ALL); enable_interrupts(PSR_A); pmap_bootstrap(0); /* Exclude the kernel (and all the things we allocated which immediately * follow the kernel) from the VM allocation pool but not from crash * dumps. virtual_avail is a global variable which tracks the kva we've * "allocated" while setting up pmaps. * * Prepare the list of physical memory available to the vm subsystem. */ arm_physmem_exclude_region(abp->abp_physaddr, pmap_preboot_get_pages(0) - abp->abp_physaddr, EXFLAG_NOALLOC); arm_physmem_init_kernel_globals(); init_param2(physmem); /* Init message buffer. */ msgbufinit(msgbufp, msgbufsize); dbg_monitor_init(); kdb_init(); return ((void *)STACKALIGN(thread0.td_pcb)); } #endif /* __ARM_ARCH < 6 */ #endif /* FDT */ uint32_t (*arm_cpu_fill_vdso_timehands)(struct vdso_timehands *, struct timecounter *); uint32_t cpu_fill_vdso_timehands(struct vdso_timehands *vdso_th, struct timecounter *tc) { return (arm_cpu_fill_vdso_timehands != NULL ? arm_cpu_fill_vdso_timehands(vdso_th, tc) : 0); } Index: head/sys/arm/mv/mv_common.c =================================================================== --- head/sys/arm/mv/mv_common.c (revision 296188) +++ head/sys/arm/mv/mv_common.c (revision 296189) @@ -1,2233 +1,2234 @@ /*- * Copyright (C) 2008-2011 MARVELL INTERNATIONAL LTD. * All rights reserved. * * Developed by Semihalf. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of MARVELL nor the names of contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE(M_IDMA, "idma", "idma dma test memory"); #define IDMA_DEBUG #undef IDMA_DEBUG #define MAX_CPU_WIN 5 #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) 
#endif #ifdef DEBUG #define MV_DUMP_WIN 1 #else #define MV_DUMP_WIN 0 #endif static int win_eth_can_remap(int i); #ifndef SOC_MV_FREY static int decode_win_cpu_valid(void); #endif static int decode_win_usb_valid(void); static int decode_win_eth_valid(void); static int decode_win_pcie_valid(void); static int decode_win_sata_valid(void); static int decode_win_idma_valid(void); static int decode_win_xor_valid(void); #ifndef SOC_MV_FREY static void decode_win_cpu_setup(void); #endif #ifdef SOC_MV_ARMADAXP static int decode_win_sdram_fixup(void); #endif static void decode_win_usb_setup(u_long); static void decode_win_eth_setup(u_long); static void decode_win_sata_setup(u_long); static void decode_win_idma_setup(u_long); static void decode_win_xor_setup(u_long); static void decode_win_usb_dump(u_long); static void decode_win_eth_dump(u_long base); static void decode_win_idma_dump(u_long base); static void decode_win_xor_dump(u_long base); static int fdt_get_ranges(const char *, void *, int, int *, int *); #ifdef SOC_MV_ARMADA38X int gic_decode_fdt(phandle_t iparent, pcell_t *intr, int *interrupt, int *trig, int *pol); #endif static int win_cpu_from_dt(void); static int fdt_win_setup(void); static uint32_t dev_mask = 0; static int cpu_wins_no = 0; static int eth_port = 0; static int usb_port = 0; static struct decode_win cpu_win_tbl[MAX_CPU_WIN]; const struct decode_win *cpu_wins = cpu_win_tbl; typedef void (*decode_win_setup_t)(u_long); typedef void (*dump_win_t)(u_long); struct soc_node_spec { const char *compat; decode_win_setup_t decode_handler; dump_win_t dump_handler; }; static struct soc_node_spec soc_nodes[] = { { "mrvl,ge", &decode_win_eth_setup, &decode_win_eth_dump }, { "mrvl,usb-ehci", &decode_win_usb_setup, &decode_win_usb_dump }, { "mrvl,sata", &decode_win_sata_setup, NULL }, { "mrvl,xor", &decode_win_xor_setup, &decode_win_xor_dump }, { "mrvl,idma", &decode_win_idma_setup, &decode_win_idma_dump }, { "mrvl,pcie", &decode_win_pcie_setup, NULL }, { NULL, NULL, NULL }, }; struct fdt_pm_mask_entry fdt_pm_mask_table[] = { { "mrvl,ge", CPU_PM_CTRL_GE(0) }, { "mrvl,ge", CPU_PM_CTRL_GE(1) }, { "mrvl,usb-ehci", CPU_PM_CTRL_USB(0) }, { "mrvl,usb-ehci", CPU_PM_CTRL_USB(1) }, { "mrvl,usb-ehci", CPU_PM_CTRL_USB(2) }, { "mrvl,xor", CPU_PM_CTRL_XOR }, { "mrvl,sata", CPU_PM_CTRL_SATA }, { NULL, 0 } }; static __inline int pm_is_disabled(uint32_t mask) { #if defined(SOC_MV_KIRKWOOD) return (soc_power_ctrl_get(mask) == mask); #else return (soc_power_ctrl_get(mask) == mask ? 0 : 1); #endif } /* * Disable device using power management register. * 1 - Device Power On * 0 - Device Power Off * Mask can be set in loader. * EXAMPLE: * loader> set hw.pm-disable-mask=0x2 * * Common mask: * |-------------------------------| * | Device | Kirkwood | Discovery | * |-------------------------------| * | USB0 | 0x00008 | 0x020000 | * |-------------------------------| * | USB1 | - | 0x040000 | * |-------------------------------| * | USB2 | - | 0x080000 | * |-------------------------------| * | GE0 | 0x00001 | 0x000002 | * |-------------------------------| * | GE1 | - | 0x000004 | * |-------------------------------| * | IDMA | - | 0x100000 | * |-------------------------------| * | XOR | 0x10000 | 0x200000 | * |-------------------------------| * | CESA | 0x20000 | 0x400000 | * |-------------------------------| * | SATA | 0x04000 | 0x004000 | * --------------------------------| * This feature can be used only on Kirkwood and Discovery * machines. 
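As a worked example of combining entries from the mask table above: powering off both GE0 (0x00001) and USB0 (0x00008) on a Kirkwood board means OR-ing the two bits, 0x00001 | 0x00008 = 0x00009, and setting the tunable accordingly: loader> set hw.pm-disable-mask=0x00009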
*/ static __inline void pm_disable_device(int mask) { #ifdef DIAGNOSTIC uint32_t reg; reg = soc_power_ctrl_get(CPU_PM_CTRL_ALL); printf("Power Management Register: 0%x\n", reg); reg &= ~mask; soc_power_ctrl_set(reg); printf("Device %x is disabled\n", mask); reg = soc_power_ctrl_get(CPU_PM_CTRL_ALL); printf("Power Management Register: 0%x\n", reg); #endif } int fdt_pm(phandle_t node) { uint32_t cpu_pm_ctrl; int i, ena, compat; ena = 1; cpu_pm_ctrl = read_cpu_ctrl(CPU_PM_CTRL); for (i = 0; fdt_pm_mask_table[i].compat != NULL; i++) { if (dev_mask & (1 << i)) continue; compat = fdt_is_compatible(node, fdt_pm_mask_table[i].compat); #if defined(SOC_MV_KIRKWOOD) if (compat && (cpu_pm_ctrl & fdt_pm_mask_table[i].mask)) { dev_mask |= (1 << i); ena = 0; break; } else if (compat) { dev_mask |= (1 << i); break; } #else if (compat && (~cpu_pm_ctrl & fdt_pm_mask_table[i].mask)) { dev_mask |= (1 << i); ena = 0; break; } else if (compat) { dev_mask |= (1 << i); break; } #endif } return (ena); } uint32_t read_cpu_ctrl(uint32_t reg) { return (bus_space_read_4(fdtbus_bs_tag, MV_CPU_CONTROL_BASE, reg)); } void write_cpu_ctrl(uint32_t reg, uint32_t val) { bus_space_write_4(fdtbus_bs_tag, MV_CPU_CONTROL_BASE, reg, val); } #if defined(SOC_MV_ARMADAXP) || defined(SOC_MV_ARMADA38X) uint32_t read_cpu_mp_clocks(uint32_t reg) { return (bus_space_read_4(fdtbus_bs_tag, MV_MP_CLOCKS_BASE, reg)); } void write_cpu_mp_clocks(uint32_t reg, uint32_t val) { bus_space_write_4(fdtbus_bs_tag, MV_MP_CLOCKS_BASE, reg, val); } uint32_t read_cpu_misc(uint32_t reg) { return (bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE, reg)); } void write_cpu_misc(uint32_t reg, uint32_t val) { bus_space_write_4(fdtbus_bs_tag, MV_MISC_BASE, reg, val); } #endif void cpu_reset(void) { #if defined(SOC_MV_ARMADAXP) || defined (SOC_MV_ARMADA38X) write_cpu_misc(RSTOUTn_MASK, SOFT_RST_OUT_EN); write_cpu_misc(SYSTEM_SOFT_RESET, SYS_SOFT_RST); #else write_cpu_ctrl(RSTOUTn_MASK, SOFT_RST_OUT_EN); write_cpu_ctrl(SYSTEM_SOFT_RESET, SYS_SOFT_RST); #endif while (1); } uint32_t cpu_extra_feat(void) { uint32_t dev, rev; uint32_t ef = 0; soc_id(&dev, &rev); switch (dev) { case MV_DEV_88F6281: case MV_DEV_88F6282: case MV_DEV_88RC8180: case MV_DEV_MV78100_Z0: case MV_DEV_MV78100: __asm __volatile("mrc p15, 1, %0, c15, c1, 0" : "=r" (ef)); break; case MV_DEV_88F5182: case MV_DEV_88F5281: __asm __volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (ef)); break; default: if (bootverbose) printf("This ARM Core does not support any extra features\n"); } return (ef); } /* * Get the power status of device. This feature is only supported on * Kirkwood and Discovery SoCs. */ uint32_t soc_power_ctrl_get(uint32_t mask) { #if !defined(SOC_MV_ORION) && !defined(SOC_MV_LOKIPLUS) && !defined(SOC_MV_FREY) if (mask != CPU_PM_CTRL_NONE) mask &= read_cpu_ctrl(CPU_PM_CTRL); return (mask); #else return (mask); #endif } /* * Set the power status of device. This feature is only supported on * Kirkwood and Discovery SoCs. */ void soc_power_ctrl_set(uint32_t mask) { #if !defined(SOC_MV_ORION) && !defined(SOC_MV_LOKIPLUS) if (mask != CPU_PM_CTRL_NONE) write_cpu_ctrl(CPU_PM_CTRL, mask); #endif } void soc_id(uint32_t *dev, uint32_t *rev) { /* * Notice: system identifiers are available in the registers range of * PCIE controller, so using this function is only allowed (and * possible) after the internal registers range has been mapped in via * arm_devmap_bootstrap(). 
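cpu_extra_feat() above reads an implementation-defined CP15 register with a single mrc instruction. The same access pattern against the architecturally defined Main ID Register (CP15 c0, c0, 0), for comparison:

/* Read the ARM Main ID Register; same mrc idiom as cpu_extra_feat(). */
static uint32_t
read_midr(void)
{
	uint32_t midr;

	__asm __volatile("mrc p15, 0, %0, c0, c0, 0" : "=r" (midr));
	return (midr);
}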
*/ *dev = bus_space_read_4(fdtbus_bs_tag, MV_PCIE_BASE, 0) >> 16; *rev = bus_space_read_4(fdtbus_bs_tag, MV_PCIE_BASE, 8) & 0xff; } static void soc_identify(void) { uint32_t d, r, size, mode; const char *dev; const char *rev; soc_id(&d, &r); printf("SOC: "); if (bootverbose) printf("(0x%4x:0x%02x) ", d, r); rev = ""; switch (d) { case MV_DEV_88F5181: dev = "Marvell 88F5181"; if (r == 3) rev = "B1"; break; case MV_DEV_88F5182: dev = "Marvell 88F5182"; if (r == 2) rev = "A2"; break; case MV_DEV_88F5281: dev = "Marvell 88F5281"; if (r == 4) rev = "D0"; else if (r == 5) rev = "D1"; else if (r == 6) rev = "D2"; break; case MV_DEV_88F6281: dev = "Marvell 88F6281"; if (r == 0) rev = "Z0"; else if (r == 2) rev = "A0"; else if (r == 3) rev = "A1"; break; case MV_DEV_88RC8180: dev = "Marvell 88RC8180"; break; case MV_DEV_88RC9480: dev = "Marvell 88RC9480"; break; case MV_DEV_88RC9580: dev = "Marvell 88RC9580"; break; case MV_DEV_88F6781: dev = "Marvell 88F6781"; if (r == 2) rev = "Y0"; break; case MV_DEV_88F6282: dev = "Marvell 88F6282"; if (r == 0) rev = "A0"; else if (r == 1) rev = "A1"; break; case MV_DEV_88F6828: dev = "Marvell 88F6828"; break; case MV_DEV_88F6820: dev = "Marvell 88F6820"; break; case MV_DEV_88F6810: dev = "Marvell 88F6810"; break; case MV_DEV_MV78100_Z0: dev = "Marvell MV78100 Z0"; break; case MV_DEV_MV78100: dev = "Marvell MV78100"; break; case MV_DEV_MV78160: dev = "Marvell MV78160"; break; case MV_DEV_MV78260: dev = "Marvell MV78260"; break; case MV_DEV_MV78460: dev = "Marvell MV78460"; break; default: dev = "UNKNOWN"; break; } printf("%s", dev); if (*rev != '\0') printf(" rev %s", rev); printf(", TClock %dMHz\n", get_tclk() / 1000 / 1000); mode = read_cpu_ctrl(CPU_CONFIG); printf(" Instruction cache prefetch %s, data cache prefetch %s\n", (mode & CPU_CONFIG_IC_PREF) ? "enabled" : "disabled", (mode & CPU_CONFIG_DC_PREF) ? "enabled" : "disabled"); switch (d) { case MV_DEV_88F6281: case MV_DEV_88F6282: mode = read_cpu_ctrl(CPU_L2_CONFIG) & CPU_L2_CONFIG_MODE; printf(" 256KB 4-way set-associative %s unified L2 cache\n", mode ? "write-through" : "write-back"); break; case MV_DEV_MV78100: mode = read_cpu_ctrl(CPU_CONTROL); size = mode & CPU_CONTROL_L2_SIZE; mode = mode & CPU_CONTROL_L2_MODE; printf(" %s set-associative %s unified L2 cache\n", size ? "256KB 4-way" : "512KB 8-way", mode ? "write-through" : "write-back"); break; default: break; } } static void platform_identify(void *dummy) { soc_identify(); /* * XXX Board identification e.g. read out from FPGA or similar should * go here */ } SYSINIT(platform_identify, SI_SUB_CPU, SI_ORDER_SECOND, platform_identify, NULL); #ifdef KDB static void mv_enter_debugger(void *dummy) { if (boothowto & RB_KDB) kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); } SYSINIT(mv_enter_debugger, SI_SUB_CPU, SI_ORDER_ANY, mv_enter_debugger, NULL); #endif int soc_decode_win(void) { uint32_t dev, rev; int mask, err; mask = 0; TUNABLE_INT_FETCH("hw.pm-disable-mask", &mask); if (mask != 0) pm_disable_device(mask); /* Retrieve data about physical addresses from device tree. 
*/ if ((err = win_cpu_from_dt()) != 0) return (err); /* Retrieve our ID: some windows facilities vary between SoC models */ soc_id(&dev, &rev); #ifdef SOC_MV_ARMADAXP if ((err = decode_win_sdram_fixup()) != 0) return(err); #endif #ifndef SOC_MV_FREY if (!decode_win_cpu_valid() || !decode_win_usb_valid() || !decode_win_eth_valid() || !decode_win_idma_valid() || !decode_win_pcie_valid() || !decode_win_sata_valid() || !decode_win_xor_valid()) return (EINVAL); decode_win_cpu_setup(); #else if (!decode_win_usb_valid() || !decode_win_eth_valid() || !decode_win_idma_valid() || !decode_win_pcie_valid() || !decode_win_sata_valid() || !decode_win_xor_valid()) return (EINVAL); #endif if (MV_DUMP_WIN) soc_dump_decode_win(); eth_port = 0; usb_port = 0; if ((err = fdt_win_setup()) != 0) return (err); return (0); } /************************************************************************** * Decode windows registers accessors **************************************************************************/ #if !defined(SOC_MV_FREY) WIN_REG_IDX_RD(win_cpu, cr, MV_WIN_CPU_CTRL, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu, br, MV_WIN_CPU_BASE, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu, remap_l, MV_WIN_CPU_REMAP_LO, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_RD(win_cpu, remap_h, MV_WIN_CPU_REMAP_HI, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu, cr, MV_WIN_CPU_CTRL, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu, br, MV_WIN_CPU_BASE, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu, remap_l, MV_WIN_CPU_REMAP_LO, MV_MBUS_BRIDGE_BASE) WIN_REG_IDX_WR(win_cpu, remap_h, MV_WIN_CPU_REMAP_HI, MV_MBUS_BRIDGE_BASE) #endif WIN_REG_BASE_IDX_RD(win_usb, cr, MV_WIN_USB_CTRL) WIN_REG_BASE_IDX_RD(win_usb, br, MV_WIN_USB_BASE) WIN_REG_BASE_IDX_WR(win_usb, cr, MV_WIN_USB_CTRL) WIN_REG_BASE_IDX_WR(win_usb, br, MV_WIN_USB_BASE) WIN_REG_BASE_IDX_RD(win_eth, br, MV_WIN_ETH_BASE) WIN_REG_BASE_IDX_RD(win_eth, sz, MV_WIN_ETH_SIZE) WIN_REG_BASE_IDX_RD(win_eth, har, MV_WIN_ETH_REMAP) WIN_REG_BASE_IDX_WR(win_eth, br, MV_WIN_ETH_BASE) WIN_REG_BASE_IDX_WR(win_eth, sz, MV_WIN_ETH_SIZE) WIN_REG_BASE_IDX_WR(win_eth, har, MV_WIN_ETH_REMAP) WIN_REG_BASE_IDX_RD2(win_xor, br, MV_WIN_XOR_BASE) WIN_REG_BASE_IDX_RD2(win_xor, sz, MV_WIN_XOR_SIZE) WIN_REG_BASE_IDX_RD2(win_xor, har, MV_WIN_XOR_REMAP) WIN_REG_BASE_IDX_RD2(win_xor, ctrl, MV_WIN_XOR_CTRL) WIN_REG_BASE_IDX_WR2(win_xor, br, MV_WIN_XOR_BASE) WIN_REG_BASE_IDX_WR2(win_xor, sz, MV_WIN_XOR_SIZE) WIN_REG_BASE_IDX_WR2(win_xor, har, MV_WIN_XOR_REMAP) WIN_REG_BASE_IDX_WR2(win_xor, ctrl, MV_WIN_XOR_CTRL) WIN_REG_BASE_RD(win_eth, bare, 0x290) WIN_REG_BASE_RD(win_eth, epap, 0x294) WIN_REG_BASE_WR(win_eth, bare, 0x290) WIN_REG_BASE_WR(win_eth, epap, 0x294) WIN_REG_BASE_IDX_RD(win_pcie, cr, MV_WIN_PCIE_CTRL); WIN_REG_BASE_IDX_RD(win_pcie, br, MV_WIN_PCIE_BASE); WIN_REG_BASE_IDX_RD(win_pcie, remap, MV_WIN_PCIE_REMAP); WIN_REG_BASE_IDX_WR(win_pcie, cr, MV_WIN_PCIE_CTRL); WIN_REG_BASE_IDX_WR(win_pcie, br, MV_WIN_PCIE_BASE); WIN_REG_BASE_IDX_WR(win_pcie, remap, MV_WIN_PCIE_REMAP); WIN_REG_BASE_IDX_RD(pcie_bar, br, MV_PCIE_BAR_BASE); WIN_REG_BASE_IDX_WR(pcie_bar, br, MV_PCIE_BAR_BASE); WIN_REG_BASE_IDX_WR(pcie_bar, brh, MV_PCIE_BAR_BASE_H); WIN_REG_BASE_IDX_WR(pcie_bar, cr, MV_PCIE_BAR_CTRL); WIN_REG_BASE_IDX_RD(win_idma, br, MV_WIN_IDMA_BASE) WIN_REG_BASE_IDX_RD(win_idma, sz, MV_WIN_IDMA_SIZE) WIN_REG_BASE_IDX_RD(win_idma, har, MV_WIN_IDMA_REMAP) WIN_REG_BASE_IDX_RD(win_idma, cap, MV_WIN_IDMA_CAP) WIN_REG_BASE_IDX_WR(win_idma, br, MV_WIN_IDMA_BASE) WIN_REG_BASE_IDX_WR(win_idma, sz, MV_WIN_IDMA_SIZE) WIN_REG_BASE_IDX_WR(win_idma, 
har, MV_WIN_IDMA_REMAP) WIN_REG_BASE_IDX_WR(win_idma, cap, MV_WIN_IDMA_CAP) WIN_REG_BASE_RD(win_idma, bare, 0xa80) WIN_REG_BASE_WR(win_idma, bare, 0xa80) WIN_REG_BASE_IDX_RD(win_sata, cr, MV_WIN_SATA_CTRL); WIN_REG_BASE_IDX_RD(win_sata, br, MV_WIN_SATA_BASE); WIN_REG_BASE_IDX_WR(win_sata, cr, MV_WIN_SATA_CTRL); WIN_REG_BASE_IDX_WR(win_sata, br, MV_WIN_SATA_BASE); #ifndef SOC_MV_DOVE WIN_REG_IDX_RD(ddr, br, MV_WIN_DDR_BASE, MV_DDR_CADR_BASE) WIN_REG_IDX_RD(ddr, sz, MV_WIN_DDR_SIZE, MV_DDR_CADR_BASE) WIN_REG_IDX_WR(ddr, br, MV_WIN_DDR_BASE, MV_DDR_CADR_BASE) WIN_REG_IDX_WR(ddr, sz, MV_WIN_DDR_SIZE, MV_DDR_CADR_BASE) #else /* * On 88F6781 (Dove) SoC DDR Controller is accessed through * single MBUS <-> AXI bridge. In this case we provide emulated * ddr_br_read() and ddr_sz_read() functions to keep compatibility * with common decoding windows setup code. */ static inline uint32_t ddr_br_read(int i) { uint32_t mmap; /* Read Memory Address Map Register for CS i */ mmap = bus_space_read_4(fdtbus_bs_tag, MV_DDR_CADR_BASE + (i * 0x10), 0); /* Return CS i base address */ return (mmap & 0xFF000000); } static inline uint32_t ddr_sz_read(int i) { uint32_t mmap, size; /* Read Memory Address Map Register for CS i */ mmap = bus_space_read_4(fdtbus_bs_tag, MV_DDR_CADR_BASE + (i * 0x10), 0); /* Extract size of CS space in 64kB units */ size = (1 << ((mmap >> 16) & 0x0F)); /* Return CS size and enable/disable status */ return (((size - 1) << 16) | (mmap & 0x01)); } #endif #if !defined(SOC_MV_FREY) /************************************************************************** * Decode windows helper routines **************************************************************************/ void soc_dump_decode_win(void) { uint32_t dev, rev; int i; soc_id(&dev, &rev); for (i = 0; i < MV_WIN_CPU_MAX; i++) { printf("CPU window#%d: c 0x%08x, b 0x%08x", i, win_cpu_cr_read(i), win_cpu_br_read(i)); if (win_cpu_can_remap(i)) printf(", rl 0x%08x, rh 0x%08x", win_cpu_remap_l_read(i), win_cpu_remap_h_read(i)); printf("\n"); } printf("Internal regs base: 0x%08x\n", bus_space_read_4(fdtbus_bs_tag, MV_INTREGS_BASE, 0)); for (i = 0; i < MV_WIN_DDR_MAX; i++) printf("DDR CS#%d: b 0x%08x, s 0x%08x\n", i, ddr_br_read(i), ddr_sz_read(i)); } /************************************************************************** * CPU windows routines **************************************************************************/ int win_cpu_can_remap(int i) { uint32_t dev, rev; soc_id(&dev, &rev); /* Depending on the SoC certain windows have remap capability */ if ((dev == MV_DEV_88F5182 && i < 2) || (dev == MV_DEV_88F5281 && i < 4) || (dev == MV_DEV_88F6281 && i < 4) || (dev == MV_DEV_88F6282 && i < 4) || (dev == MV_DEV_88F6828 && i < 20) || (dev == MV_DEV_88F6820 && i < 20) || (dev == MV_DEV_88F6810 && i < 20) || (dev == MV_DEV_88RC8180 && i < 2) || (dev == MV_DEV_88F6781 && i < 4) || (dev == MV_DEV_MV78100_Z0 && i < 8) || ((dev & MV_DEV_FAMILY_MASK) == MV_DEV_DISCOVERY && i < 8)) return (1); return (0); } /* XXX This should check for overlapping remap fields too.. 
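 *
 * Each window is treated as the closed interval [base, base + size - 1]
 * and the first other window whose interval intersects it is reported;
 * e.g. (hypothetical values) window A with base 0x00000000/size
 * 0x10000000 spans [0x00000000, 0x0fffffff] and window B with base
 * 0x08000000/size 0x10000000 spans [0x08000000, 0x17ffffff], so
 * decode_win_overlap() called for A returns B's index.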
*/ int decode_win_overlap(int win, int win_no, const struct decode_win *wintab) { const struct decode_win *tab; int i; tab = wintab; for (i = 0; i < win_no; i++, tab++) { if (i == win) /* Skip self */ continue; if ((tab->base + tab->size - 1) < (wintab + win)->base) continue; else if (((wintab + win)->base + (wintab + win)->size - 1) < tab->base) continue; else return (i); } return (-1); } static int decode_win_cpu_valid(void) { int i, j, rv; uint32_t b, e, s; if (cpu_wins_no > MV_WIN_CPU_MAX) { printf("CPU windows: too many entries: %d\n", cpu_wins_no); return (0); } rv = 1; for (i = 0; i < cpu_wins_no; i++) { if (cpu_wins[i].target == 0) { printf("CPU window#%d: DDR target window is not " "supposed to be reprogrammed!\n", i); rv = 0; } if (cpu_wins[i].remap != ~0 && win_cpu_can_remap(i) != 1) { printf("CPU window#%d: not capable of remapping, but " "val 0x%08x defined\n", i, cpu_wins[i].remap); rv = 0; } s = cpu_wins[i].size; b = cpu_wins[i].base; e = b + s - 1; if (s > (0xFFFFFFFF - b + 1)) { /* * XXX this boundary check should account for 64bit * and remapping.. */ printf("CPU window#%d: no space for size 0x%08x at " "0x%08x\n", i, s, b); rv = 0; continue; } if (b != (b & ~(s - 1))) { printf("CPU window#%d: address 0x%08x is not aligned " "to 0x%08x\n", i, b, s); rv = 0; continue; } j = decode_win_overlap(i, cpu_wins_no, &cpu_wins[0]); if (j >= 0) { printf("CPU window#%d: (0x%08x - 0x%08x) overlaps " "with #%d (0x%08x - 0x%08x)\n", i, b, e, j, cpu_wins[j].base, cpu_wins[j].base + cpu_wins[j].size - 1); rv = 0; } } return (rv); } int decode_win_cpu_set(int target, int attr, vm_paddr_t base, uint32_t size, vm_paddr_t remap) { uint32_t br, cr; int win, i; if (remap == ~0) { win = MV_WIN_CPU_MAX - 1; i = -1; } else { win = 0; i = 1; } while ((win >= 0) && (win < MV_WIN_CPU_MAX)) { cr = win_cpu_cr_read(win); if ((cr & MV_WIN_CPU_ENABLE_BIT) == 0) break; if ((cr & ((0xff << MV_WIN_CPU_ATTR_SHIFT) | (0x1f << MV_WIN_CPU_TARGET_SHIFT))) == ((attr << MV_WIN_CPU_ATTR_SHIFT) | (target << MV_WIN_CPU_TARGET_SHIFT))) break; win += i; } if ((win < 0) || (win >= MV_WIN_CPU_MAX) || ((remap != ~0) && (win_cpu_can_remap(win) == 0))) return (-1); br = base & 0xffff0000; win_cpu_br_write(win, br); if (win_cpu_can_remap(win)) { if (remap != ~0) { win_cpu_remap_l_write(win, remap & 0xffff0000); win_cpu_remap_h_write(win, 0); } else { /* * Remap function is not used for a given window * (capable of remapping) - set remap field with the * same value as base. */ win_cpu_remap_l_write(win, base & 0xffff0000); win_cpu_remap_h_write(win, 0); } } cr = ((size - 1) & 0xffff0000) | (attr << MV_WIN_CPU_ATTR_SHIFT) | (target << MV_WIN_CPU_TARGET_SHIFT) | MV_WIN_CPU_ENABLE_BIT; win_cpu_cr_write(win, cr); return (0); } static void decode_win_cpu_setup(void) { int i; /* Disable all CPU windows */ for (i = 0; i < MV_WIN_CPU_MAX; i++) { win_cpu_cr_write(i, 0); win_cpu_br_write(i, 0); if (win_cpu_can_remap(i)) { win_cpu_remap_l_write(i, 0); win_cpu_remap_h_write(i, 0); } } for (i = 0; i < cpu_wins_no; i++) if (cpu_wins[i].target > 0) decode_win_cpu_set(cpu_wins[i].target, cpu_wins[i].attr, cpu_wins[i].base, cpu_wins[i].size, cpu_wins[i].remap); } #endif #ifdef SOC_MV_ARMADAXP static int decode_win_sdram_fixup(void) { struct mem_region mr[FDT_MEM_REGIONS]; uint8_t window_valid[MV_WIN_DDR_MAX]; - int mr_cnt, memsize, err, i, j; + int mr_cnt, err, i, j; + u_long memsize; uint32_t valid_win_num = 0; /* Grab physical memory regions information from device tree. 
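 * For example, a (hypothetical) single 1 GB bank at address 0 in the
 * /memory node would yield mr_cnt = 1, mr[0].mr_start = 0,
 * mr[0].mr_size = 0x40000000 and memsize = 0x40000000.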
*/ err = fdt_get_mem_regions(mr, &mr_cnt, &memsize); if (err != 0) return (err); for (i = 0; i < MV_WIN_DDR_MAX; i++) window_valid[i] = 0; /* Try to match entries from device tree with settings from u-boot */ for (i = 0; i < mr_cnt; i++) { for (j = 0; j < MV_WIN_DDR_MAX; j++) { if (ddr_is_active(j) && (ddr_base(j) == mr[i].mr_start) && (ddr_size(j) == mr[i].mr_size)) { window_valid[j] = 1; valid_win_num++; } } } if (mr_cnt != valid_win_num) return (EINVAL); /* Destroy windows without corresponding device tree entry */ for (j = 0; j < MV_WIN_DDR_MAX; j++) { if (ddr_is_active(j) && (window_valid[j] != 1)) { printf("Disabling SDRAM decoding window: %d\n", j); ddr_disable(j); } } return (0); } #endif /* * Check if we're able to cover all active DDR banks. */ static int decode_win_can_cover_ddr(int max) { int i, c; c = 0; for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) c++; if (c > max) { printf("Unable to cover all active DDR banks: " "%d, available windows: %d\n", c, max); return (0); } return (1); } /************************************************************************** * DDR windows routines **************************************************************************/ int ddr_is_active(int i) { if (ddr_sz_read(i) & 0x1) return (1); return (0); } void ddr_disable(int i) { ddr_sz_write(i, 0); ddr_br_write(i, 0); } uint32_t ddr_base(int i) { return (ddr_br_read(i) & 0xff000000); } uint32_t ddr_size(int i) { return ((ddr_sz_read(i) | 0x00ffffff) + 1); } uint32_t ddr_attr(int i) { uint32_t dev, rev; soc_id(&dev, &rev); if (dev == MV_DEV_88RC8180) return ((ddr_sz_read(i) & 0xf0) >> 4); if (dev == MV_DEV_88F6781) return (0); return (i == 0 ? 0xe : (i == 1 ? 0xd : (i == 2 ? 0xb : (i == 3 ? 0x7 : 0xff)))); } uint32_t ddr_target(int i) { uint32_t dev, rev; soc_id(&dev, &rev); if (dev == MV_DEV_88RC8180) { i = (ddr_sz_read(i) & 0xf0) >> 4; return (i == 0xe ? 0xc : (i == 0xd ? 0xd : (i == 0xb ? 0xe : (i == 0x7 ? 0xf : 0xc)))); } /* * On SOCs other than 88RC8180 Mbus unit ID for * DDR SDRAM controller is always 0x0. */ return (0); } /************************************************************************** * USB windows routines **************************************************************************/ static int decode_win_usb_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_USB_MAX)); } static void decode_win_usb_dump(u_long base) { int i; if (pm_is_disabled(CPU_PM_CTRL_USB(usb_port - 1))) return; for (i = 0; i < MV_WIN_USB_MAX; i++) printf("USB window#%d: c 0x%08x, b 0x%08x\n", i, win_usb_cr_read(base, i), win_usb_br_read(base, i)); } /* * Set USB decode windows. 
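 *
 * Each window control register packs size, attribute and target plus
 * the enable bit; e.g. for a (hypothetical) 512 MB DDR bank with
 * attr 0xe and target 0:
 *
 *	cr = ((0x20000000 - 1) & 0xffff0000) | (0xe << 8) | (0x0 << 4) | 1
 *	   = 0x1fff0e01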
*/ static void decode_win_usb_setup(u_long base) { uint32_t br, cr; int i, j; if (pm_is_disabled(CPU_PM_CTRL_USB(usb_port))) return; usb_port++; for (i = 0; i < MV_WIN_USB_MAX; i++) { win_usb_cr_write(base, i, 0); win_usb_br_write(base, i, 0); } /* Only access to active DRAM banks is required */ for (i = 0; i < MV_WIN_DDR_MAX; i++) { if (ddr_is_active(i)) { br = ddr_base(i); /* * XXX for 6281 we should handle Mbus write * burst limit field in the ctrl reg */ cr = (((ddr_size(i) - 1) & 0xffff0000) | (ddr_attr(i) << 8) | (ddr_target(i) << 4) | 1); /* Set the first free USB window */ for (j = 0; j < MV_WIN_USB_MAX; j++) { if (win_usb_cr_read(base, j) & 0x1) continue; win_usb_br_write(base, j, br); win_usb_cr_write(base, j, cr); break; } } } } /************************************************************************** * ETH windows routines **************************************************************************/ static int win_eth_can_remap(int i) { /* ETH encode windows 0-3 have remap capability */ if (i < 4) return (1); return (0); } static int eth_bare_read(uint32_t base, int i) { uint32_t v; v = win_eth_bare_read(base); v &= (1 << i); return (v >> i); } static void eth_bare_write(uint32_t base, int i, int val) { uint32_t v; v = win_eth_bare_read(base); v &= ~(1 << i); v |= (val << i); win_eth_bare_write(base, v); } static void eth_epap_write(uint32_t base, int i, int val) { uint32_t v; v = win_eth_epap_read(base); v &= ~(0x3 << (i * 2)); v |= (val << (i * 2)); win_eth_epap_write(base, v); } static void decode_win_eth_dump(u_long base) { int i; if (pm_is_disabled(CPU_PM_CTRL_GE(eth_port - 1))) return; for (i = 0; i < MV_WIN_ETH_MAX; i++) { printf("ETH window#%d: b 0x%08x, s 0x%08x", i, win_eth_br_read(base, i), win_eth_sz_read(base, i)); if (win_eth_can_remap(i)) printf(", ha 0x%08x", win_eth_har_read(base, i)); printf("\n"); } printf("ETH windows: bare 0x%08x, epap 0x%08x\n", win_eth_bare_read(base), win_eth_epap_read(base)); } #if defined(SOC_MV_LOKIPLUS) #define MV_WIN_ETH_DDR_TRGT(n) 0 #else #define MV_WIN_ETH_DDR_TRGT(n) ddr_target(n) #endif static void decode_win_eth_setup(u_long base) { uint32_t br, sz; int i, j; if (pm_is_disabled(CPU_PM_CTRL_GE(eth_port))) return; eth_port++; /* Disable, clear and revoke protection for all ETH windows */ for (i = 0; i < MV_WIN_ETH_MAX; i++) { eth_bare_write(base, i, 1); eth_epap_write(base, i, 0); win_eth_br_write(base, i, 0); win_eth_sz_write(base, i, 0); if (win_eth_can_remap(i)) win_eth_har_write(base, i, 0); } /* Only access to active DRAM banks is required */ for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) { br = ddr_base(i) | (ddr_attr(i) << 8) | MV_WIN_ETH_DDR_TRGT(i); sz = ((ddr_size(i) - 1) & 0xffff0000); /* Set the first free ETH window */ for (j = 0; j < MV_WIN_ETH_MAX; j++) { if (eth_bare_read(base, j) == 0) continue; win_eth_br_write(base, j, br); win_eth_sz_write(base, j, sz); /* XXX remapping ETH windows not supported */ /* Set protection RW */ eth_epap_write(base, j, 0x3); /* Enable window */ eth_bare_write(base, j, 0); break; } } } static int decode_win_eth_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_ETH_MAX)); } /************************************************************************** * PCIE windows routines **************************************************************************/ void decode_win_pcie_setup(u_long base) { uint32_t size = 0, ddrbase = ~0; uint32_t cr, br; int i, j; for (i = 0; i < MV_PCIE_BAR_MAX; i++) { pcie_bar_br_write(base, i, MV_PCIE_BAR_64BIT | MV_PCIE_BAR_PREFETCH_EN); if (i < 3) 
pcie_bar_brh_write(base, i, 0); if (i > 0) pcie_bar_cr_write(base, i, 0); } for (i = 0; i < MV_WIN_PCIE_MAX; i++) { win_pcie_cr_write(base, i, 0); win_pcie_br_write(base, i, 0); win_pcie_remap_write(base, i, 0); } /* On an End-Point, only set BAR size to 1 MB regardless of DDR size */ if ((bus_space_read_4(fdtbus_bs_tag, base, MV_PCIE_CONTROL) & MV_PCIE_ROOT_CMPLX) == 0) { pcie_bar_cr_write(base, 1, 0xf0000 | 1); return; } for (i = 0; i < MV_WIN_DDR_MAX; i++) { if (ddr_is_active(i)) { /* Map DDR to BAR 1 */ cr = (ddr_size(i) - 1) & 0xffff0000; size += ddr_size(i) & 0xffff0000; cr |= (ddr_attr(i) << 8) | (ddr_target(i) << 4) | 1; br = ddr_base(i); if (br < ddrbase) ddrbase = br; /* Use the first available PCIE window */ for (j = 0; j < MV_WIN_PCIE_MAX; j++) { if (win_pcie_cr_read(base, j) != 0) continue; win_pcie_br_write(base, j, br); win_pcie_cr_write(base, j, cr); break; } } } /* * The upper 16 bits in the BAR register are interpreted as the BAR size * (in 64 kB units) plus 64 kB, so subtract 0x10000 * from the value passed to the register to get the correct value. */ size -= 0x10000; pcie_bar_cr_write(base, 1, size | 1); pcie_bar_br_write(base, 1, ddrbase | MV_PCIE_BAR_64BIT | MV_PCIE_BAR_PREFETCH_EN); pcie_bar_br_write(base, 0, fdt_immr_pa | MV_PCIE_BAR_64BIT | MV_PCIE_BAR_PREFETCH_EN); } static int decode_win_pcie_valid(void) { return (decode_win_can_cover_ddr(MV_WIN_PCIE_MAX)); } /************************************************************************** * IDMA windows routines **************************************************************************/ #if defined(SOC_MV_ORION) || defined(SOC_MV_DISCOVERY) static int idma_bare_read(u_long base, int i) { uint32_t v; v = win_idma_bare_read(base); v &= (1 << i); return (v >> i); } static void idma_bare_write(u_long base, int i, int val) { uint32_t v; v = win_idma_bare_read(base); v &= ~(1 << i); v |= (val << i); win_idma_bare_write(base, v); } /* * Sets channel protection 'val' for window 'w' on channel 'c' */ static void idma_cap_write(u_long base, int c, int w, int val) { uint32_t v; v = win_idma_cap_read(base, c); v &= ~(0x3 << (w * 2)); v |= (val << (w * 2)); win_idma_cap_write(base, c, v); } /* * Set protection 'val' on all channels for window 'w' */ static void idma_set_prot(u_long base, int w, int val) { int c; for (c = 0; c < MV_IDMA_CHAN_MAX; c++) idma_cap_write(base, c, w, val); } static int win_idma_can_remap(int i) { /* IDMA decode windows 0-3 have remap capability */ if (i < 4) return (1); return (0); } void decode_win_idma_setup(u_long base) { uint32_t br, sz; int i, j; if (pm_is_disabled(CPU_PM_CTRL_IDMA)) return; /* * Disable and clear all IDMA windows, revoke protection for all channels */ for (i = 0; i < MV_WIN_IDMA_MAX; i++) { idma_bare_write(base, i, 1); win_idma_br_write(base, i, 0); win_idma_sz_write(base, i, 0); if (win_idma_can_remap(i) == 1) win_idma_har_write(base, i, 0); } for (i = 0; i < MV_IDMA_CHAN_MAX; i++) win_idma_cap_write(base, i, 0); /* * Set up access to all active DRAM banks */ for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) { br = ddr_base(i) | (ddr_attr(i) << 8) | ddr_target(i); sz = ((ddr_size(i) - 1) & 0xffff0000); /* Place DDR entries in non-remapped windows */ for (j = 0; j < MV_WIN_IDMA_MAX; j++) if (win_idma_can_remap(j) != 1 && idma_bare_read(base, j) == 1) { /* Configure window */ win_idma_br_write(base, j, br); win_idma_sz_write(base, j, sz); /* Set protection RW on all channels */ idma_set_prot(base, j, 0x3); /* Enable window */ idma_bare_write(base, j, 0); break; } } /* * Remaining targets -- from
statically defined table */ for (i = 0; i < idma_wins_no; i++) if (idma_wins[i].target > 0) { br = (idma_wins[i].base & 0xffff0000) | (idma_wins[i].attr << 8) | idma_wins[i].target; sz = ((idma_wins[i].size - 1) & 0xffff0000); /* Set the first free IDMA window */ for (j = 0; j < MV_WIN_IDMA_MAX; j++) { if (idma_bare_read(base, j) == 0) continue; /* Configure window */ win_idma_br_write(base, j, br); win_idma_sz_write(base, j, sz); if (win_idma_can_remap(j) && idma_wins[j].remap >= 0) win_idma_har_write(base, j, idma_wins[j].remap); /* Set protection RW on all channels */ idma_set_prot(base, j, 0x3); /* Enable window */ idma_bare_write(base, j, 0); break; } } } int decode_win_idma_valid(void) { const struct decode_win *wintab; int c, i, j, rv; uint32_t b, e, s; if (idma_wins_no > MV_WIN_IDMA_MAX) { printf("IDMA windows: too many entries: %d\n", idma_wins_no); return (0); } for (i = 0, c = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) c++; if (idma_wins_no > (MV_WIN_IDMA_MAX - c)) { printf("IDMA windows: too many entries: %d, available: %d\n", idma_wins_no, MV_WIN_IDMA_MAX - c); return (0); } wintab = idma_wins; rv = 1; for (i = 0; i < idma_wins_no; i++, wintab++) { if (wintab->target == 0) { printf("IDMA window#%d: DDR target window is not " "supposed to be reprogrammed!\n", i); rv = 0; } if (wintab->remap >= 0 && win_cpu_can_remap(i) != 1) { printf("IDMA window#%d: not capable of remapping, but " "val 0x%08x defined\n", i, wintab->remap); rv = 0; } s = wintab->size; b = wintab->base; e = b + s - 1; if (s > (0xFFFFFFFF - b + 1)) { /* XXX this boundary check should account for 64bit and * remapping.. */ printf("IDMA window#%d: no space for size 0x%08x at " "0x%08x\n", i, s, b); rv = 0; continue; } j = decode_win_overlap(i, idma_wins_no, &idma_wins[0]); if (j >= 0) { printf("IDMA window#%d: (0x%08x - 0x%08x) overlaps " "with #%d (0x%08x - 0x%08x)\n", i, b, e, j, idma_wins[j].base, idma_wins[j].base + idma_wins[j].size - 1); rv = 0; } } return (rv); } void decode_win_idma_dump(u_long base) { int i; if (pm_is_disabled(CPU_PM_CTRL_IDMA)) return; for (i = 0; i < MV_WIN_IDMA_MAX; i++) { printf("IDMA window#%d: b 0x%08x, s 0x%08x", i, win_idma_br_read(base, i), win_idma_sz_read(base, i)); if (win_idma_can_remap(i)) printf(", ha 0x%08x", win_idma_har_read(base, i)); printf("\n"); } for (i = 0; i < MV_IDMA_CHAN_MAX; i++) printf("IDMA channel#%d: ap 0x%08x\n", i, win_idma_cap_read(base, i)); printf("IDMA windows: bare 0x%08x\n", win_idma_bare_read(base)); } #else /* Provide dummy functions to satisfy the build for SoCs not equipped with IDMA */ int decode_win_idma_valid(void) { return (1); } void decode_win_idma_setup(u_long base) { } void decode_win_idma_dump(u_long base) { } #endif /************************************************************************** * XOR windows routines **************************************************************************/ #if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) static int xor_ctrl_read(u_long base, int i, int c, int e) { uint32_t v; v = win_xor_ctrl_read(base, c, e); v &= (1 << i); return (v >> i); } static void xor_ctrl_write(u_long base, int i, int c, int e, int val) { uint32_t v; v = win_xor_ctrl_read(base, c, e); v &= ~(1 << i); v |= (val << i); win_xor_ctrl_write(base, c, e, v); } /* * Set channel protection 'val' for window 'w' on channel 'c' */ static void xor_chan_write(u_long base, int c, int e, int w, int val) { uint32_t v; v = win_xor_ctrl_read(base, c, e); v &= ~(0x3 << (w * 2 + 16)); v |= (val << (w * 2 + 16)); 
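	/*
	 * Each window owns a 2-bit protection field starting at bit 16:
	 * window 'w' occupies bits (16 + 2w) and (17 + 2w), so e.g.
	 * w = 2 with val = 0x3 (RW) sets bits 20-21.
	 */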
win_xor_ctrl_write(base, c, e, v); } /* * Set protection 'val' on all channels for window 'w' on engine 'e' */ static void xor_set_prot(u_long base, int w, int e, int val) { int c; for (c = 0; c < MV_XOR_CHAN_MAX; c++) xor_chan_write(base, c, e, w, val); } static int win_xor_can_remap(int i) { /* XOR decode windows 0-3 have remap capability */ if (i < 4) return (1); return (0); } static int xor_max_eng(void) { uint32_t dev, rev; soc_id(&dev, &rev); switch (dev) { case MV_DEV_88F6281: case MV_DEV_88F6282: case MV_DEV_MV78130: case MV_DEV_MV78160: case MV_DEV_MV78230: case MV_DEV_MV78260: case MV_DEV_MV78460: return (2); case MV_DEV_MV78100: case MV_DEV_MV78100_Z0: return (1); default: return (0); } } static void xor_active_dram(u_long base, int c, int e, int *window) { uint32_t br, sz; int i, m, w; /* * Set up access to all active DRAM banks */ m = xor_max_eng(); for (i = 0; i < m; i++) if (ddr_is_active(i)) { br = ddr_base(i) | (ddr_attr(i) << 8) | ddr_target(i); sz = ((ddr_size(i) - 1) & 0xffff0000); /* Place DDR entries in non-remapped windows */ for (w = 0; w < MV_WIN_XOR_MAX; w++) if (win_xor_can_remap(w) != 1 && (xor_ctrl_read(base, w, c, e) == 0) && w > *window) { /* Configure window */ win_xor_br_write(base, w, e, br); win_xor_sz_write(base, w, e, sz); /* Set protection RW on all channels */ xor_set_prot(base, w, e, 0x3); /* Enable window */ xor_ctrl_write(base, w, c, e, 1); (*window)++; break; } } } void decode_win_xor_setup(u_long base) { uint32_t br, sz; int i, j, z, e = 1, m, window; if (pm_is_disabled(CPU_PM_CTRL_XOR)) return; /* * Disable and clear all XOR windows, revoke protection for all * channels */ m = xor_max_eng(); for (j = 0; j < m; j++, e--) { /* Number of non-remapped windows */ window = MV_XOR_NON_REMAP - 1; for (i = 0; i < MV_WIN_XOR_MAX; i++) { win_xor_br_write(base, i, e, 0); win_xor_sz_write(base, i, e, 0); } if (win_xor_can_remap(i) == 1) win_xor_har_write(base, i, e, 0); for (i = 0; i < MV_XOR_CHAN_MAX; i++) { win_xor_ctrl_write(base, i, e, 0); xor_active_dram(base, i, e, &window); } /* * Remaining targets -- from a statically defined table */ for (i = 0; i < xor_wins_no; i++) if (xor_wins[i].target > 0) { br = (xor_wins[i].base & 0xffff0000) | (xor_wins[i].attr << 8) | xor_wins[i].target; sz = ((xor_wins[i].size - 1) & 0xffff0000); /* Set the first free XOR window */ for (z = 0; z < MV_WIN_XOR_MAX; z++) { if (xor_ctrl_read(base, z, 0, e) && xor_ctrl_read(base, z, 1, e)) continue; /* Configure window */ win_xor_br_write(base, z, e, br); win_xor_sz_write(base, z, e, sz); if (win_xor_can_remap(z) && xor_wins[z].remap >= 0) win_xor_har_write(base, z, e, xor_wins[z].remap); /* Set protection RW on all channels */ xor_set_prot(base, z, e, 0x3); /* Enable window */ xor_ctrl_write(base, z, 0, e, 1); xor_ctrl_write(base, z, 1, e, 1); break; } } } } int decode_win_xor_valid(void) { const struct decode_win *wintab; int c, i, j, rv; uint32_t b, e, s; if (xor_wins_no > MV_WIN_XOR_MAX) { printf("XOR windows: too many entries: %d\n", xor_wins_no); return (0); } for (i = 0, c = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) c++; if (xor_wins_no > (MV_WIN_XOR_MAX - c)) { printf("XOR windows: too many entries: %d, available: %d\n", xor_wins_no, MV_WIN_XOR_MAX - c); return (0); } wintab = xor_wins; rv = 1; for (i = 0; i < xor_wins_no; i++, wintab++) { if (wintab->target == 0) { printf("XOR window#%d: DDR target window is not " "supposed to be reprogrammed!\n", i); rv = 0; } if (wintab->remap >= 0 && win_cpu_can_remap(i) != 1) { printf("XOR window#%d: not capable of
remapping, but " "val 0x%08x defined\n", i, wintab->remap); rv = 0; } s = wintab->size; b = wintab->base; e = b + s - 1; if (s > (0xFFFFFFFF - b + 1)) { /* * XXX this boundary check should account for 64bit * and remapping.. */ printf("XOR window#%d: no space for size 0x%08x at " "0x%08x\n", i, s, b); rv = 0; continue; } j = decode_win_overlap(i, xor_wins_no, &xor_wins[0]); if (j >= 0) { printf("XOR window#%d: (0x%08x - 0x%08x) overlaps " "with #%d (0x%08x - 0x%08x)\n", i, b, e, j, xor_wins[j].base, xor_wins[j].base + xor_wins[j].size - 1); rv = 0; } } return (rv); } void decode_win_xor_dump(u_long base) { int i, j; int e = 1; if (pm_is_disabled(CPU_PM_CTRL_XOR)) return; for (j = 0; j < xor_max_eng(); j++, e--) { for (i = 0; i < MV_WIN_XOR_MAX; i++) { printf("XOR window#%d: b 0x%08x, s 0x%08x", i, win_xor_br_read(base, i, e), win_xor_sz_read(base, i, e)); if (win_xor_can_remap(i)) printf(", ha 0x%08x", win_xor_har_read(base, i, e)); printf("\n"); } for (i = 0; i < MV_XOR_CHAN_MAX; i++) printf("XOR control#%d: 0x%08x\n", i, win_xor_ctrl_read(base, i, e)); } } #else /* Provide dummy functions to satisfy the build for SoCs not equipped with XOR */ static int decode_win_xor_valid(void) { return (1); } static void decode_win_xor_setup(u_long base) { } static void decode_win_xor_dump(u_long base) { } #endif /************************************************************************** * SATA windows routines **************************************************************************/ static void decode_win_sata_setup(u_long base) { uint32_t cr, br; int i, j; if (pm_is_disabled(CPU_PM_CTRL_SATA)) return; for (i = 0; i < MV_WIN_SATA_MAX; i++) { win_sata_cr_write(base, i, 0); win_sata_br_write(base, i, 0); } for (i = 0; i < MV_WIN_DDR_MAX; i++) if (ddr_is_active(i)) { cr = ((ddr_size(i) - 1) & 0xffff0000) | (ddr_attr(i) << 8) | (ddr_target(i) << 4) | 1; br = ddr_base(i); /* Use the first available SATA window */ for (j = 0; j < MV_WIN_SATA_MAX; j++) { if ((win_sata_cr_read(base, j) & 1) != 0) continue; win_sata_br_write(base, j, br); win_sata_cr_write(base, j, cr); break; } } } static int decode_win_sata_valid(void) { uint32_t dev, rev; soc_id(&dev, &rev); if (dev == MV_DEV_88F5281) return (1); return (decode_win_can_cover_ddr(MV_WIN_SATA_MAX)); } /************************************************************************** * FDT parsing routines. **************************************************************************/ static int fdt_get_ranges(const char *nodename, void *buf, int size, int *tuples, int *tuplesize) { phandle_t node; pcell_t addr_cells, par_addr_cells, size_cells; int len, tuple_size, tuples_count; node = OF_finddevice(nodename); if (node == -1) return (EINVAL); if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0) return (ENXIO); par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 2) return (ERANGE); tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells + size_cells); /* Note the OF_getprop_alloc() cannot be used at this early stage. */ len = OF_getprop(node, "ranges", buf, size); /* * XXX this does not handle the empty 'ranges;' case, which is * legitimate and should be allowed. 
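 *
 * For reference: with (typical) #address-cells = 1, parent
 * #address-cells = 1 and #size-cells = 1, tuple_size is
 * 3 * sizeof(pcell_t) = 12 bytes, so a 24-byte 'ranges' property
 * gives tuples_count = 2.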
*/ tuples_count = len / tuple_size; if (tuples_count <= 0) return (ERANGE); if (par_addr_cells > 2 || addr_cells > 2 || size_cells > 2) return (ERANGE); *tuples = tuples_count; *tuplesize = tuple_size; return (0); } static int win_cpu_from_dt(void) { pcell_t ranges[48]; phandle_t node; int i, entry_size, err, t, tuple_size, tuples; u_long sram_base, sram_size; t = 0; /* Retrieve 'ranges' property of '/localbus' node. */ if ((err = fdt_get_ranges("/localbus", ranges, sizeof(ranges), &tuples, &tuple_size)) == 0) { /* * Fill CPU decode windows table. */ bzero((void *)&cpu_win_tbl, sizeof(cpu_win_tbl)); entry_size = tuple_size / sizeof(pcell_t); cpu_wins_no = tuples; for (i = 0, t = 0; t < tuples; i += entry_size, t++) { cpu_win_tbl[t].target = 1; cpu_win_tbl[t].attr = fdt32_to_cpu(ranges[i + 1]); cpu_win_tbl[t].base = fdt32_to_cpu(ranges[i + 2]); cpu_win_tbl[t].size = fdt32_to_cpu(ranges[i + 3]); cpu_win_tbl[t].remap = ~0; debugf("target = 0x%0x attr = 0x%0x base = 0x%0x " "size = 0x%0x remap = 0x%0x\n", cpu_win_tbl[t].target, cpu_win_tbl[t].attr, cpu_win_tbl[t].base, cpu_win_tbl[t].size, cpu_win_tbl[t].remap); } } /* * Retrieve CESA SRAM data. */ if ((node = OF_finddevice("sram")) != -1) if (fdt_is_compatible(node, "mrvl,cesa-sram")) goto moveon; if ((node = OF_finddevice("/")) == 0) return (ENXIO); if ((node = fdt_find_compatible(node, "mrvl,cesa-sram", 0)) == 0) /* SRAM block is not always present. */ return (0); moveon: sram_base = sram_size = 0; if (fdt_regsize(node, &sram_base, &sram_size) != 0) return (EINVAL); cpu_win_tbl[t].target = MV_WIN_CESA_TARGET; cpu_win_tbl[t].attr = MV_WIN_CESA_ATTR(1); cpu_win_tbl[t].base = sram_base; cpu_win_tbl[t].size = sram_size; cpu_win_tbl[t].remap = ~0; cpu_wins_no++; debugf("sram: base = 0x%0lx size = 0x%0lx\n", sram_base, sram_size); return (0); } static int fdt_win_setup(void) { phandle_t node, child; struct soc_node_spec *soc_node; u_long size, base; int err, i; node = OF_finddevice("/"); if (node == -1) panic("fdt_win_setup: no root node"); /* * Traverse through all children of root and simple-bus nodes. * For each found device retrieve decode windows data (if applicable). */ child = OF_child(node); while (child != 0) { for (i = 0; soc_nodes[i].compat != NULL; i++) { soc_node = &soc_nodes[i]; if (!fdt_is_compatible(child, soc_node->compat)) continue; err = fdt_regsize(child, &base, &size); if (err != 0) return (err); base = (base & 0x000fffff) | fdt_immr_va; if (soc_node->decode_handler != NULL) soc_node->decode_handler(base); else return (ENXIO); if (MV_DUMP_WIN && (soc_node->dump_handler != NULL)) soc_node->dump_handler(base); } /* * Once done with root-level children let's move down to * simple-bus and its children. */ child = OF_peer(child); if ((child == 0) && (node == OF_finddevice("/"))) { node = fdt_find_compatible(node, "simple-bus", 0); if (node == 0) return (ENXIO); child = OF_child(node); } } return (0); } static void fdt_fixup_busfreq(phandle_t root) { phandle_t sb; pcell_t freq; freq = cpu_to_fdt32(get_tclk()); /* * Fix bus speed in cpu node */ if ((sb = OF_finddevice("cpu")) != 0) if (fdt_is_compatible_strict(sb, "ARM,88VS584")) OF_setprop(sb, "bus-frequency", (void *)&freq, sizeof(freq)); /* * This fixup sets the simple-bus bus-frequency property. 
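 * It uses the same TCLK-derived value as the cpu node above; with a
 * (hypothetical) 200 MHz TCLK the property cell would hold 0x0BEBC200,
 * stored big-endian via cpu_to_fdt32().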
*/ if ((sb = fdt_find_compatible(root, "simple-bus", 1)) != 0) OF_setprop(sb, "bus-frequency", (void *)&freq, sizeof(freq)); } static void fdt_fixup_ranges(phandle_t root) { phandle_t node; pcell_t par_addr_cells, addr_cells, size_cells; pcell_t ranges[3], reg[2], *rangesptr; int len, tuple_size, tuples_count; uint32_t base; /* Fix-up SoC ranges according to real fdt_immr_pa */ if ((node = fdt_find_compatible(root, "simple-bus", 1)) != 0) { if (fdt_addrsize_cells(node, &addr_cells, &size_cells) == 0 && (par_addr_cells = fdt_parent_addr_cells(node) <= 2)) { tuple_size = sizeof(pcell_t) * (par_addr_cells + addr_cells + size_cells); len = OF_getprop(node, "ranges", ranges, sizeof(ranges)); tuples_count = len / tuple_size; /* Unexpected settings are not supported */ if (tuples_count != 1) goto fixup_failed; rangesptr = &ranges[0]; rangesptr += par_addr_cells; base = fdt_data_get((void *)rangesptr, addr_cells); *rangesptr = cpu_to_fdt32(fdt_immr_pa); if (OF_setprop(node, "ranges", (void *)&ranges[0], sizeof(ranges)) < 0) goto fixup_failed; } } /* Fix-up PCIe reg according to real PCIe registers' PA */ if ((node = fdt_find_compatible(root, "mrvl,pcie", 1)) != 0) { if (fdt_addrsize_cells(OF_parent(node), &par_addr_cells, &size_cells) == 0) { tuple_size = sizeof(pcell_t) * (par_addr_cells + size_cells); len = OF_getprop(node, "reg", reg, sizeof(reg)); tuples_count = len / tuple_size; /* Unexpected settings are not supported */ if (tuples_count != 1) goto fixup_failed; base = fdt_data_get((void *)®[0], par_addr_cells); base &= ~0xFF000000; base |= fdt_immr_pa; reg[0] = cpu_to_fdt32(base); if (OF_setprop(node, "reg", (void *)®[0], sizeof(reg)) < 0) goto fixup_failed; } } /* Fix-up succeeded. May return and continue */ return; fixup_failed: while (1) { /* * In case of any error while fixing ranges just hang. * 1. No message can be displayed yet since console * is not initialized. * 2. Going further will cause failure on bus_space_map() * relying on the wrong ranges or data abort when * accessing PCIe registers. */ } } struct fdt_fixup_entry fdt_fixup_table[] = { { "mrvl,DB-88F6281", &fdt_fixup_busfreq }, { "mrvl,DB-78460", &fdt_fixup_busfreq }, { "mrvl,DB-78460", &fdt_fixup_ranges }, { NULL, NULL } }; #ifndef ARM_INTRNG static int fdt_pic_decode_ic(phandle_t node, pcell_t *intr, int *interrupt, int *trig, int *pol) { if (!fdt_is_compatible(node, "mrvl,pic") && !fdt_is_compatible(node, "mrvl,mpic")) return (ENXIO); *interrupt = fdt32_to_cpu(intr[0]); *trig = INTR_TRIGGER_CONFORM; *pol = INTR_POLARITY_CONFORM; return (0); } fdt_pic_decode_t fdt_pic_table[] = { #ifdef SOC_MV_ARMADA38X &gic_decode_fdt, #endif &fdt_pic_decode_ic, NULL }; #endif uint64_t get_sar_value(void) { uint32_t sar_low, sar_high; #if defined(SOC_MV_ARMADAXP) sar_high = bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE, SAMPLE_AT_RESET_HI); sar_low = bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE, SAMPLE_AT_RESET_LO); #elif defined(SOC_MV_ARMADA38X) sar_high = 0; sar_low = bus_space_read_4(fdtbus_bs_tag, MV_MISC_BASE, SAMPLE_AT_RESET); #else /* * TODO: Add getting proper values for other SoC configurations */ sar_high = 0; sar_low = 0; #endif return (((uint64_t)sar_high << 32) | sar_low); } Index: head/sys/dev/cesa/cesa.c =================================================================== --- head/sys/dev/cesa/cesa.c (revision 296188) +++ head/sys/dev/cesa/cesa.c (revision 296189) @@ -1,1681 +1,1682 @@ /*- * Copyright (C) 2009-2011 Semihalf. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * CESA SRAM Memory Map: * * +------------------------+ <= sc->sc_sram_base + CESA_SRAM_SIZE * | | * | DATA | * | | * +------------------------+ <= sc->sc_sram_base + CESA_DATA(0) * | struct cesa_sa_data | * +------------------------+ * | struct cesa_sa_hdesc | * +------------------------+ <= sc->sc_sram_base */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include #include #include #include "cesa.h" static int cesa_probe(device_t); static int cesa_attach(device_t); static int cesa_detach(device_t); static void cesa_intr(void *); static int cesa_newsession(device_t, u_int32_t *, struct cryptoini *); static int cesa_freesession(device_t, u_int64_t); static int cesa_process(device_t, struct cryptop *, int); static int decode_win_cesa_setup(struct cesa_softc *sc); static struct resource_spec cesa_res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; static device_method_t cesa_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cesa_probe), DEVMETHOD(device_attach, cesa_attach), DEVMETHOD(device_detach, cesa_detach), /* Crypto device methods */ DEVMETHOD(cryptodev_newsession, cesa_newsession), DEVMETHOD(cryptodev_freesession,cesa_freesession), DEVMETHOD(cryptodev_process, cesa_process), DEVMETHOD_END }; static driver_t cesa_driver = { "cesa", cesa_methods, sizeof (struct cesa_softc) }; static devclass_t cesa_devclass; DRIVER_MODULE(cesa, simplebus, cesa_driver, cesa_devclass, 0, 0); MODULE_DEPEND(cesa, crypto, 1, 1, 1); static void cesa_dump_cshd(struct cesa_softc *sc, struct cesa_sa_hdesc *cshd) { #ifdef DEBUG device_t dev; dev = sc->sc_dev; device_printf(dev, "CESA SA Hardware Descriptor:\n"); device_printf(dev, "\t\tconfig: 0x%08X\n", cshd->cshd_config); device_printf(dev, "\t\te_src: 0x%08X\n", cshd->cshd_enc_src); device_printf(dev, "\t\te_dst: 0x%08X\n", cshd->cshd_enc_dst); device_printf(dev, "\t\te_dlen: 0x%08X\n", cshd->cshd_enc_dlen); device_printf(dev, "\t\te_key: 0x%08X\n", cshd->cshd_enc_key); device_printf(dev, "\t\te_iv_1: 0x%08X\n", cshd->cshd_enc_iv); device_printf(dev, "\t\te_iv_2: 
0x%08X\n", cshd->cshd_enc_iv_buf); device_printf(dev, "\t\tm_src: 0x%08X\n", cshd->cshd_mac_src); device_printf(dev, "\t\tm_dst: 0x%08X\n", cshd->cshd_mac_dst); device_printf(dev, "\t\tm_dlen: 0x%08X\n", cshd->cshd_mac_dlen); device_printf(dev, "\t\tm_tlen: 0x%08X\n", cshd->cshd_mac_total_dlen); device_printf(dev, "\t\tm_iv_i: 0x%08X\n", cshd->cshd_mac_iv_in); device_printf(dev, "\t\tm_iv_o: 0x%08X\n", cshd->cshd_mac_iv_out); #endif } static void cesa_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct cesa_dma_mem *cdm; if (error) return; KASSERT(nseg == 1, ("Got wrong number of DMA segments, should be 1.")); cdm = arg; cdm->cdm_paddr = segs->ds_addr; } static int cesa_alloc_dma_mem(struct cesa_softc *sc, struct cesa_dma_mem *cdm, bus_size_t size) { int error; KASSERT(cdm->cdm_vaddr == NULL, ("%s(): DMA memory descriptor in use.", __func__)); error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ PAGE_SIZE, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ size, 1, /* maxsize, nsegments */ size, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &cdm->cdm_tag); /* dmat */ if (error) { device_printf(sc->sc_dev, "failed to allocate busdma tag, error" " %i!\n", error); goto err1; } error = bus_dmamem_alloc(cdm->cdm_tag, &cdm->cdm_vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &cdm->cdm_map); if (error) { device_printf(sc->sc_dev, "failed to allocate DMA safe" " memory, error %i!\n", error); goto err2; } error = bus_dmamap_load(cdm->cdm_tag, cdm->cdm_map, cdm->cdm_vaddr, size, cesa_alloc_dma_mem_cb, cdm, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "cannot get address of the DMA" " memory, error %i\n", error); goto err3; } return (0); err3: bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map); err2: bus_dma_tag_destroy(cdm->cdm_tag); err1: cdm->cdm_vaddr = NULL; return (error); } static void cesa_free_dma_mem(struct cesa_dma_mem *cdm) { bus_dmamap_unload(cdm->cdm_tag, cdm->cdm_map); bus_dmamem_free(cdm->cdm_tag, cdm->cdm_vaddr, cdm->cdm_map); bus_dma_tag_destroy(cdm->cdm_tag); cdm->cdm_vaddr = NULL; } static void cesa_sync_dma_mem(struct cesa_dma_mem *cdm, bus_dmasync_op_t op) { /* Sync only if dma memory is valid */ if (cdm->cdm_vaddr != NULL) bus_dmamap_sync(cdm->cdm_tag, cdm->cdm_map, op); } static void cesa_sync_desc(struct cesa_softc *sc, bus_dmasync_op_t op) { cesa_sync_dma_mem(&sc->sc_tdesc_cdm, op); cesa_sync_dma_mem(&sc->sc_sdesc_cdm, op); cesa_sync_dma_mem(&sc->sc_requests_cdm, op); } static struct cesa_session * cesa_alloc_session(struct cesa_softc *sc) { struct cesa_session *cs; CESA_GENERIC_ALLOC_LOCKED(sc, cs, sessions); return (cs); } static struct cesa_session * cesa_get_session(struct cesa_softc *sc, uint32_t sid) { if (sid >= CESA_SESSIONS) return (NULL); return (&sc->sc_sessions[sid]); } static void cesa_free_session(struct cesa_softc *sc, struct cesa_session *cs) { CESA_GENERIC_FREE_LOCKED(sc, cs, sessions); } static struct cesa_request * cesa_alloc_request(struct cesa_softc *sc) { struct cesa_request *cr; CESA_GENERIC_ALLOC_LOCKED(sc, cr, requests); if (!cr) return (NULL); STAILQ_INIT(&cr->cr_tdesc); STAILQ_INIT(&cr->cr_sdesc); return (cr); } static void cesa_free_request(struct cesa_softc *sc, struct cesa_request *cr) { /* Free TDMA descriptors assigned to this request */ CESA_LOCK(sc, tdesc); STAILQ_CONCAT(&sc->sc_free_tdesc, &cr->cr_tdesc); CESA_UNLOCK(sc, tdesc); /* Free SA descriptors assigned to this request 
*/ CESA_LOCK(sc, sdesc); STAILQ_CONCAT(&sc->sc_free_sdesc, &cr->cr_sdesc); CESA_UNLOCK(sc, sdesc); /* Unload DMA memory associated with the request */ if (cr->cr_dmap_loaded) { bus_dmamap_unload(sc->sc_data_dtag, cr->cr_dmap); cr->cr_dmap_loaded = 0; } CESA_GENERIC_FREE_LOCKED(sc, cr, requests); } static void cesa_enqueue_request(struct cesa_softc *sc, struct cesa_request *cr) { CESA_LOCK(sc, requests); STAILQ_INSERT_TAIL(&sc->sc_ready_requests, cr, cr_stq); CESA_UNLOCK(sc, requests); } static struct cesa_tdma_desc * cesa_alloc_tdesc(struct cesa_softc *sc) { struct cesa_tdma_desc *ctd; CESA_GENERIC_ALLOC_LOCKED(sc, ctd, tdesc); if (!ctd) device_printf(sc->sc_dev, "TDMA descriptors pool exhausted. " "Consider increasing CESA_TDMA_DESCRIPTORS.\n"); return (ctd); } static struct cesa_sa_desc * cesa_alloc_sdesc(struct cesa_softc *sc, struct cesa_request *cr) { struct cesa_sa_desc *csd; CESA_GENERIC_ALLOC_LOCKED(sc, csd, sdesc); if (!csd) { device_printf(sc->sc_dev, "SA descriptors pool exhausted. " "Consider increasing CESA_SA_DESCRIPTORS.\n"); return (NULL); } STAILQ_INSERT_TAIL(&cr->cr_sdesc, csd, csd_stq); /* Fill in the SA descriptor with default values */ csd->csd_cshd->cshd_enc_key = CESA_SA_DATA(csd_key); csd->csd_cshd->cshd_enc_iv = CESA_SA_DATA(csd_iv); csd->csd_cshd->cshd_enc_iv_buf = CESA_SA_DATA(csd_iv); csd->csd_cshd->cshd_enc_src = 0; csd->csd_cshd->cshd_enc_dst = 0; csd->csd_cshd->cshd_enc_dlen = 0; csd->csd_cshd->cshd_mac_dst = CESA_SA_DATA(csd_hash); csd->csd_cshd->cshd_mac_iv_in = CESA_SA_DATA(csd_hiv_in); csd->csd_cshd->cshd_mac_iv_out = CESA_SA_DATA(csd_hiv_out); csd->csd_cshd->cshd_mac_src = 0; csd->csd_cshd->cshd_mac_dlen = 0; return (csd); } static struct cesa_tdma_desc * cesa_tdma_copy(struct cesa_softc *sc, bus_addr_t dst, bus_addr_t src, bus_size_t size) { struct cesa_tdma_desc *ctd; ctd = cesa_alloc_tdesc(sc); if (!ctd) return (NULL); ctd->ctd_cthd->cthd_dst = dst; ctd->ctd_cthd->cthd_src = src; ctd->ctd_cthd->cthd_byte_count = size; /* Handle special control packet */ if (size != 0) ctd->ctd_cthd->cthd_flags = CESA_CTHD_OWNED; else ctd->ctd_cthd->cthd_flags = 0; return (ctd); } static struct cesa_tdma_desc * cesa_tdma_copyin_sa_data(struct cesa_softc *sc, struct cesa_request *cr) { return (cesa_tdma_copy(sc, sc->sc_sram_base + sizeof(struct cesa_sa_hdesc), cr->cr_csd_paddr, sizeof(struct cesa_sa_data))); } static struct cesa_tdma_desc * cesa_tdma_copyout_sa_data(struct cesa_softc *sc, struct cesa_request *cr) { return (cesa_tdma_copy(sc, cr->cr_csd_paddr, sc->sc_sram_base + sizeof(struct cesa_sa_hdesc), sizeof(struct cesa_sa_data))); } static struct cesa_tdma_desc * cesa_tdma_copy_sdesc(struct cesa_softc *sc, struct cesa_sa_desc *csd) { return (cesa_tdma_copy(sc, sc->sc_sram_base, csd->csd_cshd_paddr, sizeof(struct cesa_sa_hdesc))); } static void cesa_append_tdesc(struct cesa_request *cr, struct cesa_tdma_desc *ctd) { struct cesa_tdma_desc *ctd_prev; if (!STAILQ_EMPTY(&cr->cr_tdesc)) { ctd_prev = STAILQ_LAST(&cr->cr_tdesc, cesa_tdma_desc, ctd_stq); ctd_prev->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr; } ctd->ctd_cthd->cthd_next = 0; STAILQ_INSERT_TAIL(&cr->cr_tdesc, ctd, ctd_stq); } static int cesa_append_packet(struct cesa_softc *sc, struct cesa_request *cr, struct cesa_packet *cp, struct cesa_sa_desc *csd) { struct cesa_tdma_desc *ctd, *tmp; /* Copy SA descriptor for this packet */ ctd = cesa_tdma_copy_sdesc(sc, csd); if (!ctd) return (ENOMEM); cesa_append_tdesc(cr, ctd); /* Copy data to be processed */ STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyin, ctd_stq, tmp)
cesa_append_tdesc(cr, ctd); STAILQ_INIT(&cp->cp_copyin); /* Insert control descriptor */ ctd = cesa_tdma_copy(sc, 0, 0, 0); if (!ctd) return (ENOMEM); cesa_append_tdesc(cr, ctd); /* Copy back results */ STAILQ_FOREACH_SAFE(ctd, &cp->cp_copyout, ctd_stq, tmp) cesa_append_tdesc(cr, ctd); STAILQ_INIT(&cp->cp_copyout); return (0); } static int cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen) { uint8_t ipad[CESA_MAX_HMAC_BLOCK_LEN]; uint8_t opad[CESA_MAX_HMAC_BLOCK_LEN]; SHA1_CTX sha1ctx; MD5_CTX md5ctx; uint32_t *hout; uint32_t *hin; int i; memset(ipad, HMAC_IPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN); memset(opad, HMAC_OPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN); for (i = 0; i < mklen; i++) { ipad[i] ^= mkey[i]; opad[i] ^= mkey[i]; } hin = (uint32_t *)cs->cs_hiv_in; hout = (uint32_t *)cs->cs_hiv_out; switch (alg) { case CRYPTO_MD5_HMAC: MD5Init(&md5ctx); MD5Update(&md5ctx, ipad, MD5_HMAC_BLOCK_LEN); memcpy(hin, md5ctx.state, sizeof(md5ctx.state)); MD5Init(&md5ctx); MD5Update(&md5ctx, opad, MD5_HMAC_BLOCK_LEN); memcpy(hout, md5ctx.state, sizeof(md5ctx.state)); break; case CRYPTO_SHA1_HMAC: SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, ipad, SHA1_HMAC_BLOCK_LEN); memcpy(hin, sha1ctx.h.b32, sizeof(sha1ctx.h.b32)); SHA1Init(&sha1ctx); SHA1Update(&sha1ctx, opad, SHA1_HMAC_BLOCK_LEN); memcpy(hout, sha1ctx.h.b32, sizeof(sha1ctx.h.b32)); break; default: return (EINVAL); } for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) { hin[i] = htobe32(hin[i]); hout[i] = htobe32(hout[i]); } return (0); } static int cesa_prep_aes_key(struct cesa_session *cs) { uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)]; uint32_t *dkey; int i; rijndaelKeySetupEnc(ek, cs->cs_key, cs->cs_klen * 8); cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK; dkey = (uint32_t *)cs->cs_aes_dkey; switch (cs->cs_klen) { case 16: cs->cs_config |= CESA_CSH_AES_KLEN_128; for (i = 0; i < 4; i++) *dkey++ = htobe32(ek[4 * 10 + i]); break; case 24: cs->cs_config |= CESA_CSH_AES_KLEN_192; for (i = 0; i < 4; i++) *dkey++ = htobe32(ek[4 * 12 + i]); for (i = 0; i < 2; i++) *dkey++ = htobe32(ek[4 * 11 + 2 + i]); break; case 32: cs->cs_config |= CESA_CSH_AES_KLEN_256; for (i = 0; i < 4; i++) *dkey++ = htobe32(ek[4 * 14 + i]); for (i = 0; i < 4; i++) *dkey++ = htobe32(ek[4 * 13 + i]); break; default: return (EINVAL); } return (0); } static int cesa_is_hash(int alg) { switch (alg) { case CRYPTO_MD5: case CRYPTO_MD5_HMAC: case CRYPTO_SHA1: case CRYPTO_SHA1_HMAC: return (1); default: return (0); } } static void cesa_start_packet(struct cesa_packet *cp, unsigned int size) { cp->cp_size = size; cp->cp_offset = 0; STAILQ_INIT(&cp->cp_copyin); STAILQ_INIT(&cp->cp_copyout); } static int cesa_fill_packet(struct cesa_softc *sc, struct cesa_packet *cp, bus_dma_segment_t *seg) { struct cesa_tdma_desc *ctd; unsigned int bsize; /* Calculate size of block copy */ bsize = MIN(seg->ds_len, cp->cp_size - cp->cp_offset); if (bsize > 0) { ctd = cesa_tdma_copy(sc, sc->sc_sram_base + CESA_DATA(cp->cp_offset), seg->ds_addr, bsize); if (!ctd) return (-ENOMEM); STAILQ_INSERT_TAIL(&cp->cp_copyin, ctd, ctd_stq); ctd = cesa_tdma_copy(sc, seg->ds_addr, sc->sc_sram_base + CESA_DATA(cp->cp_offset), bsize); if (!ctd) return (-ENOMEM); STAILQ_INSERT_TAIL(&cp->cp_copyout, ctd, ctd_stq); seg->ds_len -= bsize; seg->ds_addr += bsize; cp->cp_offset += bsize; } return (bsize); } static void cesa_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { unsigned int mpsize, fragmented; unsigned int mlen, mskip, tmlen; struct cesa_chain_info *cci; unsigned int elen, eskip; unsigned 
int skip, len; struct cesa_sa_desc *csd; struct cesa_request *cr; struct cesa_softc *sc; struct cesa_packet cp; bus_dma_segment_t seg; uint32_t config; int size; cci = arg; sc = cci->cci_sc; cr = cci->cci_cr; if (error) { cci->cci_error = error; return; } elen = cci->cci_enc ? cci->cci_enc->crd_len : 0; eskip = cci->cci_enc ? cci->cci_enc->crd_skip : 0; mlen = cci->cci_mac ? cci->cci_mac->crd_len : 0; mskip = cci->cci_mac ? cci->cci_mac->crd_skip : 0; if (elen && mlen && ((eskip > mskip && ((eskip - mskip) & (cr->cr_cs->cs_ivlen - 1))) || (mskip > eskip && ((mskip - eskip) & (cr->cr_cs->cs_mblen - 1))) || (eskip > (mskip + mlen)) || (mskip > (eskip + elen)))) { /* * Data alignment in the request does not meet CESA requirements * for combined encryption/decryption and hashing. We have to * split the request into separate operations and process them * one by one. */ config = cci->cci_config; if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) { config &= ~CESA_CSHD_OP_MASK; cci->cci_config = config | CESA_CSHD_MAC; cci->cci_enc = NULL; cci->cci_mac = cr->cr_mac; cesa_create_chain_cb(cci, segs, nseg, cci->cci_error); cci->cci_config = config | CESA_CSHD_ENC; cci->cci_enc = cr->cr_enc; cci->cci_mac = NULL; cesa_create_chain_cb(cci, segs, nseg, cci->cci_error); } else { config &= ~CESA_CSHD_OP_MASK; cci->cci_config = config | CESA_CSHD_ENC; cci->cci_enc = cr->cr_enc; cci->cci_mac = NULL; cesa_create_chain_cb(cci, segs, nseg, cci->cci_error); cci->cci_config = config | CESA_CSHD_MAC; cci->cci_enc = NULL; cci->cci_mac = cr->cr_mac; cesa_create_chain_cb(cci, segs, nseg, cci->cci_error); } return; } tmlen = mlen; fragmented = 0; mpsize = CESA_MAX_PACKET_SIZE; mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1)); if (elen && mlen) { skip = MIN(eskip, mskip); len = MAX(elen + eskip, mlen + mskip) - skip; } else if (elen) { skip = eskip; len = elen; } else { skip = mskip; len = mlen; } /* Start first packet in chain */ cesa_start_packet(&cp, MIN(mpsize, len)); while (nseg-- && len > 0) { seg = *(segs++); /* * Skip data in buffer on which neither ENC nor MAC operation * is requested. */ if (skip > 0) { size = MIN(skip, seg.ds_len); skip -= size; seg.ds_addr += size; seg.ds_len -= size; if (eskip > 0) eskip -= size; if (mskip > 0) mskip -= size; if (seg.ds_len == 0) continue; } while (1) { /* * Fill in current packet with data. Break if there is * no more data in current DMA segment or an error * occurred. */ size = cesa_fill_packet(sc, &cp, &seg); if (size <= 0) { error = -size; break; } len -= size; /* If packet is full, append it to the chain */ if (cp.cp_size == cp.cp_offset) { csd = cesa_alloc_sdesc(sc, cr); if (!csd) { error = ENOMEM; break; } /* Create SA descriptor for this packet */ csd->csd_cshd->cshd_config = cci->cci_config; csd->csd_cshd->cshd_mac_total_dlen = tmlen; /* * Enable fragmentation if the request will not fit * into one packet.
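 *
 * E.g. a (hypothetical) 3500-byte request with a 1600-byte packet
 * limit (mpsize) is emitted as three packets flagged FRAG_FIRST
 * (1600 bytes), FRAG_MIDDLE (1600 bytes) and FRAG_LAST (300 bytes),
 * each carrying the full MAC length in cshd_mac_total_dlen.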
*/ if (len > 0) { if (!fragmented) { fragmented = 1; csd->csd_cshd->cshd_config |= CESA_CSHD_FRAG_FIRST; } else csd->csd_cshd->cshd_config |= CESA_CSHD_FRAG_MIDDLE; } else if (fragmented) csd->csd_cshd->cshd_config |= CESA_CSHD_FRAG_LAST; if (eskip < cp.cp_size && elen > 0) { csd->csd_cshd->cshd_enc_src = CESA_DATA(eskip); csd->csd_cshd->cshd_enc_dst = CESA_DATA(eskip); csd->csd_cshd->cshd_enc_dlen = MIN(elen, cp.cp_size - eskip); } if (mskip < cp.cp_size && mlen > 0) { csd->csd_cshd->cshd_mac_src = CESA_DATA(mskip); csd->csd_cshd->cshd_mac_dlen = MIN(mlen, cp.cp_size - mskip); } elen -= csd->csd_cshd->cshd_enc_dlen; eskip -= MIN(eskip, cp.cp_size); mlen -= csd->csd_cshd->cshd_mac_dlen; mskip -= MIN(mskip, cp.cp_size); cesa_dump_cshd(sc, csd->csd_cshd); /* Append packet to the request */ error = cesa_append_packet(sc, cr, &cp, csd); if (error) break; /* Start a new packet, as current is full */ cesa_start_packet(&cp, MIN(mpsize, len)); } } if (error) break; } if (error) { /* * Move all allocated resources to the request. They will be * freed later. */ STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyin); STAILQ_CONCAT(&cr->cr_tdesc, &cp.cp_copyout); cci->cci_error = error; } } static void cesa_create_chain_cb2(void *arg, bus_dma_segment_t *segs, int nseg, bus_size_t size, int error) { cesa_create_chain_cb(arg, segs, nseg, error); } static int cesa_create_chain(struct cesa_softc *sc, struct cesa_request *cr) { struct cesa_chain_info cci; struct cesa_tdma_desc *ctd; uint32_t config; int error; error = 0; CESA_LOCK_ASSERT(sc, sessions); /* Create request metadata */ if (cr->cr_enc) { if (cr->cr_enc->crd_alg == CRYPTO_AES_CBC && (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0) memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey, cr->cr_cs->cs_klen); else memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key, cr->cr_cs->cs_klen); } if (cr->cr_mac) { memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in, CESA_MAX_HASH_LEN); memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out, CESA_MAX_HASH_LEN); } ctd = cesa_tdma_copyin_sa_data(sc, cr); if (!ctd) return (ENOMEM); cesa_append_tdesc(cr, ctd); /* Prepare SA configuration */ config = cr->cr_cs->cs_config; if (cr->cr_enc && (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0) config |= CESA_CSHD_DECRYPT; if (cr->cr_enc && !cr->cr_mac) config |= CESA_CSHD_ENC; if (!cr->cr_enc && cr->cr_mac) config |= CESA_CSHD_MAC; if (cr->cr_enc && cr->cr_mac) config |= (config & CESA_CSHD_DECRYPT) ? 
CESA_CSHD_MAC_AND_ENC : CESA_CSHD_ENC_AND_MAC; /* Create data packets */ cci.cci_sc = sc; cci.cci_cr = cr; cci.cci_enc = cr->cr_enc; cci.cci_mac = cr->cr_mac; cci.cci_config = config; cci.cci_error = 0; if (cr->cr_crp->crp_flags & CRYPTO_F_IOV) error = bus_dmamap_load_uio(sc->sc_data_dtag, cr->cr_dmap, (struct uio *)cr->cr_crp->crp_buf, cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT); else if (cr->cr_crp->crp_flags & CRYPTO_F_IMBUF) error = bus_dmamap_load_mbuf(sc->sc_data_dtag, cr->cr_dmap, (struct mbuf *)cr->cr_crp->crp_buf, cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT); else error = bus_dmamap_load(sc->sc_data_dtag, cr->cr_dmap, cr->cr_crp->crp_buf, cr->cr_crp->crp_ilen, cesa_create_chain_cb, &cci, BUS_DMA_NOWAIT); if (!error) cr->cr_dmap_loaded = 1; if (cci.cci_error) error = cci.cci_error; if (error) return (error); /* Read back request metadata */ ctd = cesa_tdma_copyout_sa_data(sc, cr); if (!ctd) return (ENOMEM); cesa_append_tdesc(cr, ctd); return (0); } static void cesa_execute(struct cesa_softc *sc) { struct cesa_tdma_desc *prev_ctd, *ctd; struct cesa_request *prev_cr, *cr; CESA_LOCK(sc, requests); /* * If ready list is empty, there is nothing to execute. If queued list * is not empty, the hardware is busy and we cannot start another * execution. */ if (STAILQ_EMPTY(&sc->sc_ready_requests) || !STAILQ_EMPTY(&sc->sc_queued_requests)) { CESA_UNLOCK(sc, requests); return; } /* Move all ready requests to queued list */ STAILQ_CONCAT(&sc->sc_queued_requests, &sc->sc_ready_requests); STAILQ_INIT(&sc->sc_ready_requests); /* Create one execution chain from all requests on the list */ if (STAILQ_FIRST(&sc->sc_queued_requests) != STAILQ_LAST(&sc->sc_queued_requests, cesa_request, cr_stq)) { prev_cr = NULL; cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); STAILQ_FOREACH(cr, &sc->sc_queued_requests, cr_stq) { if (prev_cr) { ctd = STAILQ_FIRST(&cr->cr_tdesc); prev_ctd = STAILQ_LAST(&prev_cr->cr_tdesc, cesa_tdma_desc, ctd_stq); prev_ctd->ctd_cthd->cthd_next = ctd->ctd_cthd_paddr; } prev_cr = cr; } cesa_sync_dma_mem(&sc->sc_tdesc_cdm, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } /* Start chain execution in hardware */ cr = STAILQ_FIRST(&sc->sc_queued_requests); ctd = STAILQ_FIRST(&cr->cr_tdesc); CESA_WRITE(sc, CESA_TDMA_ND, ctd->ctd_cthd_paddr); CESA_WRITE(sc, CESA_SA_CMD, CESA_SA_CMD_ACTVATE); CESA_UNLOCK(sc, requests); } static int cesa_setup_sram(struct cesa_softc *sc) { phandle_t sram_node; ihandle_t sram_ihandle; pcell_t sram_handle, sram_reg; if (OF_getprop(ofw_bus_get_node(sc->sc_dev), "sram-handle", (void *)&sram_handle, sizeof(sram_handle)) <= 0) return (ENXIO); sram_ihandle = (ihandle_t)sram_handle; sram_ihandle = fdt32_to_cpu(sram_ihandle); sram_node = OF_instance_to_package(sram_ihandle); if (OF_getprop(sram_node, "reg", (void *)&sram_reg, sizeof(sram_reg)) <= 0) return (ENXIO); sc->sc_sram_base = fdt32_to_cpu(sram_reg); return (0); } static int cesa_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "mrvl,cesa")) return (ENXIO); device_set_desc(dev, "Marvell Cryptographic Engine and Security " "Accelerator"); return (BUS_PROBE_DEFAULT); } static int cesa_attach(device_t dev) { struct cesa_softc *sc; uint32_t d, r; int error; int i; sc = device_get_softc(dev); sc->sc_blocked = 0; sc->sc_error = 0; sc->sc_dev = dev; /* Check if CESA peripheral device has power turned on */ #if defined(SOC_MV_KIRKWOOD) if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) == CPU_PM_CTRL_CRYPTO) { device_printf(dev, "not powered 
on\n"); return (ENXIO); } #else if (soc_power_ctrl_get(CPU_PM_CTRL_CRYPTO) != CPU_PM_CTRL_CRYPTO) { device_printf(dev, "not powered on\n"); return (ENXIO); } #endif soc_id(&d, &r); switch (d) { case MV_DEV_88F6281: case MV_DEV_88F6282: sc->sc_tperr = 0; break; case MV_DEV_MV78100: case MV_DEV_MV78100_Z0: sc->sc_tperr = CESA_ICR_TPERR; break; default: return (ENXIO); } /* Initialize mutexes */ mtx_init(&sc->sc_sc_lock, device_get_nameunit(dev), "CESA Shared Data", MTX_DEF); mtx_init(&sc->sc_tdesc_lock, device_get_nameunit(dev), "CESA TDMA Descriptors Pool", MTX_DEF); mtx_init(&sc->sc_sdesc_lock, device_get_nameunit(dev), "CESA SA Descriptors Pool", MTX_DEF); mtx_init(&sc->sc_requests_lock, device_get_nameunit(dev), "CESA Requests Pool", MTX_DEF); mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev), "CESA Sessions Pool", MTX_DEF); /* Allocate I/O and IRQ resources */ error = bus_alloc_resources(dev, cesa_res_spec, sc->sc_res); if (error) { device_printf(dev, "could not allocate resources\n"); goto err0; } sc->sc_bsh = rman_get_bushandle(*(sc->sc_res)); sc->sc_bst = rman_get_bustag(*(sc->sc_res)); /* Setup CESA decoding windows */ error = decode_win_cesa_setup(sc); if (error) { device_printf(dev, "could not setup decoding windows\n"); goto err1; } /* Acquire SRAM base address */ error = cesa_setup_sram(sc); if (error) { device_printf(dev, "could not setup SRAM\n"); goto err1; } /* Setup interrupt handler */ error = bus_setup_intr(dev, sc->sc_res[1], INTR_TYPE_NET | INTR_MPSAFE, NULL, cesa_intr, sc, &(sc->sc_icookie)); if (error) { device_printf(dev, "could not setup engine completion irq\n"); goto err1; } /* Create DMA tag for processed data */ error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ CESA_MAX_REQUEST_SIZE, /* maxsize */ CESA_MAX_FRAGMENTS, /* nsegments */ CESA_MAX_REQUEST_SIZE, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->sc_data_dtag); /* dmat */ if (error) goto err2; /* Initialize data structures: TDMA Descriptors Pool */ error = cesa_alloc_dma_mem(sc, &sc->sc_tdesc_cdm, CESA_TDMA_DESCRIPTORS * sizeof(struct cesa_tdma_hdesc)); if (error) goto err3; STAILQ_INIT(&sc->sc_free_tdesc); for (i = 0; i < CESA_TDMA_DESCRIPTORS; i++) { sc->sc_tdesc[i].ctd_cthd = (struct cesa_tdma_hdesc *)(sc->sc_tdesc_cdm.cdm_vaddr) + i; sc->sc_tdesc[i].ctd_cthd_paddr = sc->sc_tdesc_cdm.cdm_paddr + (i * sizeof(struct cesa_tdma_hdesc)); STAILQ_INSERT_TAIL(&sc->sc_free_tdesc, &sc->sc_tdesc[i], ctd_stq); } /* Initialize data structures: SA Descriptors Pool */ error = cesa_alloc_dma_mem(sc, &sc->sc_sdesc_cdm, CESA_SA_DESCRIPTORS * sizeof(struct cesa_sa_hdesc)); if (error) goto err4; STAILQ_INIT(&sc->sc_free_sdesc); for (i = 0; i < CESA_SA_DESCRIPTORS; i++) { sc->sc_sdesc[i].csd_cshd = (struct cesa_sa_hdesc *)(sc->sc_sdesc_cdm.cdm_vaddr) + i; sc->sc_sdesc[i].csd_cshd_paddr = sc->sc_sdesc_cdm.cdm_paddr + (i * sizeof(struct cesa_sa_hdesc)); STAILQ_INSERT_TAIL(&sc->sc_free_sdesc, &sc->sc_sdesc[i], csd_stq); } /* Initialize data structures: Requests Pool */ error = cesa_alloc_dma_mem(sc, &sc->sc_requests_cdm, CESA_REQUESTS * sizeof(struct cesa_sa_data)); if (error) goto err5; STAILQ_INIT(&sc->sc_free_requests); STAILQ_INIT(&sc->sc_ready_requests); STAILQ_INIT(&sc->sc_queued_requests); for (i = 0; i < CESA_REQUESTS; i++) { sc->sc_requests[i].cr_csd = (struct cesa_sa_data *)(sc->sc_requests_cdm.cdm_vaddr) + i; 
sc->sc_requests[i].cr_csd_paddr = sc->sc_requests_cdm.cdm_paddr + (i * sizeof(struct cesa_sa_data)); /* Preallocate DMA maps */ error = bus_dmamap_create(sc->sc_data_dtag, 0, &sc->sc_requests[i].cr_dmap); if (error && i > 0) { i--; do { bus_dmamap_destroy(sc->sc_data_dtag, sc->sc_requests[i].cr_dmap); } while (i--); goto err6; } STAILQ_INSERT_TAIL(&sc->sc_free_requests, &sc->sc_requests[i], cr_stq); } /* Initialize data structures: Sessions Pool */ STAILQ_INIT(&sc->sc_free_sessions); for (i = 0; i < CESA_SESSIONS; i++) { sc->sc_sessions[i].cs_sid = i; STAILQ_INSERT_TAIL(&sc->sc_free_sessions, &sc->sc_sessions[i], cs_stq); } /* * Initialize TDMA: * - Burst limit: 128 bytes, * - Outstanding reads enabled, * - No byte-swap. */ CESA_WRITE(sc, CESA_TDMA_CR, CESA_TDMA_CR_DBL128 | CESA_TDMA_CR_SBL128 | CESA_TDMA_CR_ORDEN | CESA_TDMA_CR_NBS | CESA_TDMA_CR_ENABLE); /* * Initialize SA: * - SA descriptor is present at beginning of CESA SRAM, * - Multi-packet chain mode, * - Cooperation with TDMA enabled. */ CESA_WRITE(sc, CESA_SA_DPR, 0); CESA_WRITE(sc, CESA_SA_CR, CESA_SA_CR_ACTIVATE_TDMA | CESA_SA_CR_WAIT_FOR_TDMA | CESA_SA_CR_MULTI_MODE); /* Unmask interrupts */ CESA_WRITE(sc, CESA_ICR, 0); CESA_WRITE(sc, CESA_ICM, CESA_ICM_ACCTDMA | sc->sc_tperr); CESA_WRITE(sc, CESA_TDMA_ECR, 0); CESA_WRITE(sc, CESA_TDMA_EMR, CESA_TDMA_EMR_MISS | CESA_TDMA_EMR_DOUBLE_HIT | CESA_TDMA_EMR_BOTH_HIT | CESA_TDMA_EMR_DATA_ERROR); /* Register in OCF */ sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); if (sc->sc_cid) { device_printf(dev, "could not get crypto driver id\n"); goto err7; } crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0); crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0); crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0); return (0); err7: for (i = 0; i < CESA_REQUESTS; i++) bus_dmamap_destroy(sc->sc_data_dtag, sc->sc_requests[i].cr_dmap); err6: cesa_free_dma_mem(&sc->sc_requests_cdm); err5: cesa_free_dma_mem(&sc->sc_sdesc_cdm); err4: cesa_free_dma_mem(&sc->sc_tdesc_cdm); err3: bus_dma_tag_destroy(sc->sc_data_dtag); err2: bus_teardown_intr(dev, sc->sc_res[1], sc->sc_icookie); err1: bus_release_resources(dev, cesa_res_spec, sc->sc_res); err0: mtx_destroy(&sc->sc_sessions_lock); mtx_destroy(&sc->sc_requests_lock); mtx_destroy(&sc->sc_sdesc_lock); mtx_destroy(&sc->sc_tdesc_lock); mtx_destroy(&sc->sc_sc_lock); return (ENXIO); } static int cesa_detach(device_t dev) { struct cesa_softc *sc; int i; sc = device_get_softc(dev); /* TODO: Wait for queued requests completion before shutdown. 
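One possible approach: block until sc_ready_requests and sc_queued_requests drain before masking interrupts below.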
*/ /* Mask interrupts */ CESA_WRITE(sc, CESA_ICM, 0); CESA_WRITE(sc, CESA_TDMA_EMR, 0); /* Unregister from OCF */ crypto_unregister_all(sc->sc_cid); /* Free DMA Maps */ for (i = 0; i < CESA_REQUESTS; i++) bus_dmamap_destroy(sc->sc_data_dtag, sc->sc_requests[i].cr_dmap); /* Free DMA Memory */ cesa_free_dma_mem(&sc->sc_requests_cdm); cesa_free_dma_mem(&sc->sc_sdesc_cdm); cesa_free_dma_mem(&sc->sc_tdesc_cdm); /* Free DMA Tag */ bus_dma_tag_destroy(sc->sc_data_dtag); /* Stop interrupt */ bus_teardown_intr(dev, sc->sc_res[1], sc->sc_icookie); /* Release I/O and IRQ resources */ bus_release_resources(dev, cesa_res_spec, sc->sc_res); /* Destroy mutexes */ mtx_destroy(&sc->sc_sessions_lock); mtx_destroy(&sc->sc_requests_lock); mtx_destroy(&sc->sc_sdesc_lock); mtx_destroy(&sc->sc_tdesc_lock); mtx_destroy(&sc->sc_sc_lock); return (0); } static void cesa_intr(void *arg) { STAILQ_HEAD(, cesa_request) requests; struct cesa_request *cr, *tmp; struct cesa_softc *sc; uint32_t ecr, icr; int blocked; sc = arg; /* Ack interrupt */ ecr = CESA_READ(sc, CESA_TDMA_ECR); CESA_WRITE(sc, CESA_TDMA_ECR, 0); icr = CESA_READ(sc, CESA_ICR); CESA_WRITE(sc, CESA_ICR, 0); /* Check for TDMA errors */ if (ecr & CESA_TDMA_ECR_MISS) { device_printf(sc->sc_dev, "TDMA Miss error detected!\n"); sc->sc_error = EIO; } if (ecr & CESA_TDMA_ECR_DOUBLE_HIT) { device_printf(sc->sc_dev, "TDMA Double Hit error detected!\n"); sc->sc_error = EIO; } if (ecr & CESA_TDMA_ECR_BOTH_HIT) { device_printf(sc->sc_dev, "TDMA Both Hit error detected!\n"); sc->sc_error = EIO; } if (ecr & CESA_TDMA_ECR_DATA_ERROR) { device_printf(sc->sc_dev, "TDMA Data error detected!\n"); sc->sc_error = EIO; } /* Check for CESA errors */ if (icr & sc->sc_tperr) { device_printf(sc->sc_dev, "CESA SRAM Parity error detected!\n"); sc->sc_error = EIO; } /* If there is nothing more to do, return */ if ((icr & CESA_ICR_ACCTDMA) == 0) return; /* Get all finished requests */ CESA_LOCK(sc, requests); STAILQ_INIT(&requests); STAILQ_CONCAT(&requests, &sc->sc_queued_requests); STAILQ_INIT(&sc->sc_queued_requests); CESA_UNLOCK(sc, requests); /* Execute all ready requests */ cesa_execute(sc); /* Process completed requests */ cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); STAILQ_FOREACH_SAFE(cr, &requests, cr_stq, tmp) { bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); cr->cr_crp->crp_etype = sc->sc_error; if (cr->cr_mac) crypto_copyback(cr->cr_crp->crp_flags, cr->cr_crp->crp_buf, cr->cr_mac->crd_inject, cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash); crypto_done(cr->cr_crp); cesa_free_request(sc, cr); } cesa_sync_dma_mem(&sc->sc_requests_cdm, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); sc->sc_error = 0; /* Unblock driver if it ran out of resources */ CESA_LOCK(sc, sc); blocked = sc->sc_blocked; sc->sc_blocked = 0; CESA_UNLOCK(sc, sc); if (blocked) crypto_unblock(sc->sc_cid, blocked); } static int cesa_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri) { struct cesa_session *cs; struct cesa_softc *sc; struct cryptoini *enc; struct cryptoini *mac; int error; sc = device_get_softc(dev); enc = NULL; mac = NULL; error = 0; /* Check and parse input */ if (cesa_is_hash(cri->cri_alg)) mac = cri; else enc = cri; cri = cri->cri_next; if (cri) { if (!enc && !cesa_is_hash(cri->cri_alg)) enc = cri; if (!mac && cesa_is_hash(cri->cri_alg)) mac = cri; if (cri->cri_next || !(enc && mac)) return (EINVAL); } if ((enc && (enc->cri_klen / 8) > CESA_MAX_KEY_LEN) || (mac && (mac->cri_klen / 8) >
CESA_MAX_MKEY_LEN)) return (E2BIG); /* Allocate session */ cs = cesa_alloc_session(sc); if (!cs) return (ENOMEM); /* Prepare CESA configuration */ cs->cs_config = 0; cs->cs_ivlen = 1; cs->cs_mblen = 1; if (enc) { switch (enc->cri_alg) { case CRYPTO_AES_CBC: cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC; cs->cs_ivlen = AES_BLOCK_LEN; break; case CRYPTO_DES_CBC: cs->cs_config |= CESA_CSHD_DES | CESA_CSHD_CBC; cs->cs_ivlen = DES_BLOCK_LEN; break; case CRYPTO_3DES_CBC: cs->cs_config |= CESA_CSHD_3DES | CESA_CSHD_3DES_EDE | CESA_CSHD_CBC; cs->cs_ivlen = DES3_BLOCK_LEN; break; default: error = EINVAL; break; } } if (!error && mac) { switch (mac->cri_alg) { case CRYPTO_MD5: cs->cs_config |= CESA_CSHD_MD5; cs->cs_mblen = 1; cs->cs_hlen = MD5_HASH_LEN; break; case CRYPTO_MD5_HMAC: cs->cs_config |= CESA_CSHD_MD5_HMAC; cs->cs_mblen = MD5_HMAC_BLOCK_LEN; cs->cs_hlen = CESA_HMAC_HASH_LENGTH; break; case CRYPTO_SHA1: cs->cs_config |= CESA_CSHD_SHA1; cs->cs_mblen = 1; cs->cs_hlen = SHA1_HASH_LEN; break; case CRYPTO_SHA1_HMAC: cs->cs_config |= CESA_CSHD_SHA1_HMAC; cs->cs_mblen = SHA1_HMAC_BLOCK_LEN; cs->cs_hlen = CESA_HMAC_HASH_LENGTH; break; default: error = EINVAL; break; } } /* Save cipher key */ if (!error && enc && enc->cri_key) { cs->cs_klen = enc->cri_klen / 8; memcpy(cs->cs_key, enc->cri_key, cs->cs_klen); if (enc->cri_alg == CRYPTO_AES_CBC) error = cesa_prep_aes_key(cs); } /* Save digest key */ if (!error && mac && mac->cri_key) error = cesa_set_mkey(cs, mac->cri_alg, mac->cri_key, mac->cri_klen / 8); if (error) { cesa_free_session(sc, cs); return (EINVAL); } *sidp = cs->cs_sid; return (0); } static int cesa_freesession(device_t dev, uint64_t tid) { struct cesa_session *cs; struct cesa_softc *sc; sc = device_get_softc(dev); cs = cesa_get_session(sc, CRYPTO_SESID2LID(tid)); if (!cs) return (EINVAL); /* Free session */ cesa_free_session(sc, cs); return (0); } static int cesa_process(device_t dev, struct cryptop *crp, int hint) { struct cesa_request *cr; struct cesa_session *cs; struct cryptodesc *crd; struct cryptodesc *enc; struct cryptodesc *mac; struct cesa_softc *sc; int error; sc = device_get_softc(dev); crd = crp->crp_desc; enc = NULL; mac = NULL; error = 0; /* Check session ID */ cs = cesa_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid)); if (!cs) { crp->crp_etype = EINVAL; crypto_done(crp); return (0); } /* Check and parse input */ if (crp->crp_ilen > CESA_MAX_REQUEST_SIZE) { crp->crp_etype = E2BIG; crypto_done(crp); return (0); } if (cesa_is_hash(crd->crd_alg)) mac = crd; else enc = crd; crd = crd->crd_next; if (crd) { if (!enc && !cesa_is_hash(crd->crd_alg)) enc = crd; if (!mac && cesa_is_hash(crd->crd_alg)) mac = crd; if (crd->crd_next || !(enc && mac)) { crp->crp_etype = EINVAL; crypto_done(crp); return (0); } } /* * Get request descriptor. Block driver if there are no free * descriptors in the pool.
*/ cr = cesa_alloc_request(sc); if (!cr) { CESA_LOCK(sc, sc); sc->sc_blocked = CRYPTO_SYMQ; CESA_UNLOCK(sc, sc); return (ERESTART); } /* Prepare request */ cr->cr_crp = crp; cr->cr_enc = enc; cr->cr_mac = mac; cr->cr_cs = cs; CESA_LOCK(sc, sessions); cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); if (enc && enc->crd_flags & CRD_F_ENCRYPT) { if (enc->crd_flags & CRD_F_IV_EXPLICIT) memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen); else arc4rand(cr->cr_csd->csd_iv, cs->cs_ivlen, 0); if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv); } else if (enc) { if (enc->crd_flags & CRD_F_IV_EXPLICIT) memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen); else crypto_copydata(crp->crp_flags, crp->crp_buf, enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv); } if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) { if ((enc->crd_klen / 8) <= CESA_MAX_KEY_LEN) { cs->cs_klen = enc->crd_klen / 8; memcpy(cs->cs_key, enc->crd_key, cs->cs_klen); if (enc->crd_alg == CRYPTO_AES_CBC) error = cesa_prep_aes_key(cs); } else error = E2BIG; } if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) { if ((mac->crd_klen / 8) <= CESA_MAX_MKEY_LEN) error = cesa_set_mkey(cs, mac->crd_alg, mac->crd_key, mac->crd_klen / 8); else error = E2BIG; } /* Convert request to chain of TDMA and SA descriptors */ if (!error) error = cesa_create_chain(sc, cr); cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CESA_UNLOCK(sc, sessions); if (error) { cesa_free_request(sc, cr); crp->crp_etype = error; crypto_done(crp); return (0); } bus_dmamap_sync(sc->sc_data_dtag, cr->cr_dmap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Enqueue request to execution */ cesa_enqueue_request(sc, cr); /* Start execution if we have no more requests in queue */ if ((hint & CRYPTO_HINT_MORE) == 0) cesa_execute(sc); return (0); } /* * Set CESA TDMA decode windows. */ static int decode_win_cesa_setup(struct cesa_softc *sc) { struct mem_region availmem_regions[FDT_MEM_REGIONS]; int availmem_regions_sz; - uint32_t memsize, br, cr, i; + uint32_t br, cr, i; + u_long memsize; /* Grab physical memory regions information from DTS */ if (fdt_get_mem_regions(availmem_regions, &availmem_regions_sz, &memsize) != 0) return (ENXIO); if (availmem_regions_sz > MV_WIN_CESA_MAX) { device_printf(sc->sc_dev, "Too many memory regions, cannot " "set CESA windows to cover whole DRAM\n"); return (ENXIO); } /* Disable and clear all CESA windows */ for (i = 0; i < MV_WIN_CESA_MAX; i++) { CESA_WRITE(sc, MV_WIN_CESA_BASE(i), 0); CESA_WRITE(sc, MV_WIN_CESA_CTRL(i), 0); } /* Fill CESA TDMA decoding windows with information acquired from DTS */ for (i = 0; i < availmem_regions_sz; i++) { br = availmem_regions[i].mr_start; cr = availmem_regions[i].mr_size; /* Don't add entries with size smaller than 64KB */ if (cr & 0xffff0000) { cr = (((cr - 1) & 0xffff0000) | (MV_WIN_DDR_ATTR(i) << MV_WIN_CPU_ATTR_SHIFT) | (MV_WIN_DDR_TARGET << MV_WIN_CPU_TARGET_SHIFT) | MV_WIN_CPU_ENABLE_BIT); CESA_WRITE(sc, MV_WIN_CESA_BASE(i), br); CESA_WRITE(sc, MV_WIN_CESA_CTRL(i), cr); } } return (0); } Index: head/sys/dev/fdt/fdt_common.c =================================================================== --- head/sys/dev/fdt/fdt_common.c (revision 296188) +++ head/sys/dev/fdt/fdt_common.c (revision 296189) @@ -1,723 +1,723 @@ /*- * Copyright (c) 2009-2014 The FreeBSD Foundation * All rights reserved.
* * This software was developed by Andrew Turner under sponsorship from * the FreeBSD Foundation. * This software was developed by Semihalf under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "ofw_bus_if.h" #ifdef DEBUG #define debugf(fmt, args...) do { printf("%s(): ", __func__); \ printf(fmt,##args); } while (0) #else #define debugf(fmt, args...) #endif #define FDT_COMPAT_LEN 255 #define FDT_TYPE_LEN 64 #define FDT_REG_CELLS 4 vm_paddr_t fdt_immr_pa; vm_offset_t fdt_immr_va; vm_offset_t fdt_immr_size; struct fdt_ic_list fdt_ic_list_head = SLIST_HEAD_INITIALIZER(fdt_ic_list_head); static int fdt_get_range_by_busaddr(phandle_t node, u_long addr, u_long *base, u_long *size) { pcell_t ranges[32], *rangesptr; pcell_t addr_cells, size_cells, par_addr_cells; u_long bus_addr, par_bus_addr, pbase, psize; int err, i, len, tuple_size, tuples; if (node == 0) { *base = 0; *size = ULONG_MAX; return (0); } if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0) return (ENXIO); /* * Process 'ranges' property. 
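* Each tuple is (child bus address, parent bus address, size); the child address width comes from this node's #address-cells, the parent address width from the parent's #address-cells, and the size width from this node's #size-cells.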
*/ par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 2) { return (ERANGE); } len = OF_getproplen(node, "ranges"); if (len < 0) return (-1); if (len > sizeof(ranges)) return (ENOMEM); if (len == 0) { return (fdt_get_range_by_busaddr(OF_parent(node), addr, base, size)); } if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0) return (EINVAL); tuple_size = addr_cells + par_addr_cells + size_cells; tuples = len / (tuple_size * sizeof(cell_t)); if (par_addr_cells > 2 || addr_cells > 2 || size_cells > 2) return (ERANGE); *base = 0; *size = 0; for (i = 0; i < tuples; i++) { rangesptr = &ranges[i * tuple_size]; bus_addr = fdt_data_get((void *)rangesptr, addr_cells); if (bus_addr != addr) continue; rangesptr += addr_cells; par_bus_addr = fdt_data_get((void *)rangesptr, par_addr_cells); rangesptr += par_addr_cells; err = fdt_get_range_by_busaddr(OF_parent(node), par_bus_addr, &pbase, &psize); if (err > 0) return (err); if (err == 0) *base = pbase; else *base = par_bus_addr; *size = fdt_data_get((void *)rangesptr, size_cells); return (0); } return (EINVAL); } int fdt_get_range(phandle_t node, int range_id, u_long *base, u_long *size) { pcell_t ranges[6], *rangesptr; pcell_t addr_cells, size_cells, par_addr_cells; u_long par_bus_addr, pbase, psize; int err, len, tuple_size, tuples; if ((fdt_addrsize_cells(node, &addr_cells, &size_cells)) != 0) return (ENXIO); /* * Process 'ranges' property. */ par_addr_cells = fdt_parent_addr_cells(node); if (par_addr_cells > 2) return (ERANGE); len = OF_getproplen(node, "ranges"); if (len > sizeof(ranges)) return (ENOMEM); if (len == 0) { *base = 0; *size = ULONG_MAX; return (0); } if (!(range_id < len)) return (ERANGE); if (OF_getprop(node, "ranges", ranges, sizeof(ranges)) <= 0) return (EINVAL); tuple_size = sizeof(pcell_t) * (addr_cells + par_addr_cells + size_cells); tuples = len / tuple_size; if (par_addr_cells > 2 || addr_cells > 2 || size_cells > 2) return (ERANGE); *base = 0; *size = 0; rangesptr = &ranges[range_id]; *base = fdt_data_get((void *)rangesptr, addr_cells); rangesptr += addr_cells; par_bus_addr = fdt_data_get((void *)rangesptr, par_addr_cells); rangesptr += par_addr_cells; err = fdt_get_range_by_busaddr(OF_parent(node), par_bus_addr, &pbase, &psize); if (err == 0) *base += pbase; else *base += par_bus_addr; *size = fdt_data_get((void *)rangesptr, size_cells); return (0); } int fdt_immr_addr(vm_offset_t immr_va) { phandle_t node; u_long base, size; int r; /* * Try to access the SOC node directly i.e. through /aliases/. */ if ((node = OF_finddevice("soc")) != 0) if (fdt_is_compatible(node, "simple-bus")) goto moveon; /* * Find the node the long way. */ if ((node = OF_finddevice("/")) == 0) return (ENXIO); if ((node = fdt_find_compatible(node, "simple-bus", 0)) == 0) return (ENXIO); moveon: if ((r = fdt_get_range(node, 0, &base, &size)) == 0) { fdt_immr_pa = base; fdt_immr_va = immr_va; fdt_immr_size = size; } return (r); } /* * This routine is an early-usage version of the ofw_bus_is_compatible() when * the ofw_bus I/F is not available (like early console routines and similar). * Note the buffer has to be on the stack since malloc() is usually not * available in such cases either. 
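* A 'compatible' property is a list of NUL-terminated strings, so the loop below compares compatstr against each sub-string in turn.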
*/ int fdt_is_compatible(phandle_t node, const char *compatstr) { char buf[FDT_COMPAT_LEN]; char *compat; int len, onelen, l, rv; if ((len = OF_getproplen(node, "compatible")) <= 0) return (0); compat = (char *)&buf; bzero(compat, FDT_COMPAT_LEN); if (OF_getprop(node, "compatible", compat, FDT_COMPAT_LEN) < 0) return (0); onelen = strlen(compatstr); rv = 0; while (len > 0) { if (strncasecmp(compat, compatstr, onelen) == 0) { /* Found it. */ rv = 1; break; } /* Slide to the next sub-string. */ l = strlen(compat) + 1; compat += l; len -= l; } return (rv); } int fdt_is_compatible_strict(phandle_t node, const char *compatible) { char compat[FDT_COMPAT_LEN]; if (OF_getproplen(node, "compatible") <= 0) return (0); if (OF_getprop(node, "compatible", compat, FDT_COMPAT_LEN) < 0) return (0); if (strncasecmp(compat, compatible, FDT_COMPAT_LEN) == 0) /* This fits. */ return (1); return (0); } phandle_t fdt_find_compatible(phandle_t start, const char *compat, int strict) { phandle_t child; /* * Traverse all children of 'start' node, and find first with * matching 'compatible' property. */ for (child = OF_child(start); child != 0; child = OF_peer(child)) if (fdt_is_compatible(child, compat)) { if (strict) if (!fdt_is_compatible_strict(child, compat)) continue; return (child); } return (0); } phandle_t fdt_depth_search_compatible(phandle_t start, const char *compat, int strict) { phandle_t child, node; /* * Depth-search all descendants of 'start' node, and find first with * matching 'compatible' property. */ for (node = OF_child(start); node != 0; node = OF_peer(node)) { if (fdt_is_compatible(node, compat) && (strict == 0 || fdt_is_compatible_strict(node, compat))) { return (node); } child = fdt_depth_search_compatible(node, compat, strict); if (child != 0) return (child); } return (0); } int fdt_is_enabled(phandle_t node) { char *stat; int ena, len; len = OF_getprop_alloc(node, "status", sizeof(char), (void **)&stat); if (len <= 0) /* It is OK if no 'status' property. */ return (1); /* Anything other than 'okay' means disabled. */ ena = 0; if (strncmp((char *)stat, "okay", len) == 0) ena = 1; free(stat, M_OFWPROP); return (ena); } int fdt_is_type(phandle_t node, const char *typestr) { char type[FDT_TYPE_LEN]; if (OF_getproplen(node, "device_type") <= 0) return (0); if (OF_getprop(node, "device_type", type, FDT_TYPE_LEN) < 0) return (0); if (strncasecmp(type, typestr, FDT_TYPE_LEN) == 0) /* This fits. */ return (1); return (0); } int fdt_parent_addr_cells(phandle_t node) { pcell_t addr_cells; /* Find out #address-cells of the superior bus. */ if (OF_searchprop(OF_parent(node), "#address-cells", &addr_cells, sizeof(addr_cells)) <= 0) return (2); return ((int)fdt32_to_cpu(addr_cells)); } int fdt_pm_is_enabled(phandle_t node) { int ret; ret = 1; #if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) ret = fdt_pm(node); #endif return (ret); } u_long fdt_data_get(void *data, int cells) { if (cells == 1) return (fdt32_to_cpu(*((uint32_t *)data))); return (fdt64_to_cpu(*((uint64_t *)data))); } int fdt_addrsize_cells(phandle_t node, int *addr_cells, int *size_cells) { pcell_t cell; int cell_size; /* * Retrieve #{address,size}-cells. 
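* Fall back to the conventional defaults (two address cells, one size cell) when a property is missing.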
*/ cell_size = sizeof(cell); if (OF_getprop(node, "#address-cells", &cell, cell_size) < cell_size) cell = 2; *addr_cells = fdt32_to_cpu((int)cell); if (OF_getprop(node, "#size-cells", &cell, cell_size) < cell_size) cell = 1; *size_cells = fdt32_to_cpu((int)cell); if (*addr_cells > 3 || *size_cells > 2) return (ERANGE); return (0); } int fdt_data_to_res(pcell_t *data, int addr_cells, int size_cells, u_long *start, u_long *count) { /* Address portion. */ if (addr_cells > 2) return (ERANGE); *start = fdt_data_get((void *)data, addr_cells); data += addr_cells; /* Size portion. */ if (size_cells > 2) return (ERANGE); *count = fdt_data_get((void *)data, size_cells); return (0); } int fdt_regsize(phandle_t node, u_long *base, u_long *size) { pcell_t reg[4]; int addr_cells, len, size_cells; if (fdt_addrsize_cells(OF_parent(node), &addr_cells, &size_cells)) return (ENXIO); if ((sizeof(pcell_t) * (addr_cells + size_cells)) > sizeof(reg)) return (ENOMEM); len = OF_getprop(node, "reg", &reg, sizeof(reg)); if (len <= 0) return (EINVAL); *base = fdt_data_get(&reg[0], addr_cells); *size = fdt_data_get(&reg[addr_cells], size_cells); return (0); } int fdt_reg_to_rl(phandle_t node, struct resource_list *rl) { u_long end, count, start; pcell_t *reg, *regptr; pcell_t addr_cells, size_cells; int tuple_size, tuples; int i, rv; long busaddr, bussize; if (fdt_addrsize_cells(OF_parent(node), &addr_cells, &size_cells) != 0) return (ENXIO); if (fdt_get_range(OF_parent(node), 0, &busaddr, &bussize)) { busaddr = 0; bussize = 0; } tuple_size = sizeof(pcell_t) * (addr_cells + size_cells); tuples = OF_getprop_alloc(node, "reg", tuple_size, (void **)&reg); debugf("addr_cells = %d, size_cells = %d\n", addr_cells, size_cells); debugf("tuples = %d, tuple size = %d\n", tuples, tuple_size); if (tuples <= 0) /* No 'reg' property in this node. */ return (0); regptr = reg; for (i = 0; i < tuples; i++) { rv = fdt_data_to_res(reg, addr_cells, size_cells, &start, &count); if (rv != 0) { resource_list_free(rl); goto out; } reg += addr_cells + size_cells; /* Calculate address range relative to base. */ start += busaddr; end = start + count - 1; debugf("reg addr start = %lx, end = %lx, count = %lx\n", start, end, count); resource_list_add(rl, SYS_RES_MEMORY, i, start, end, count); } rv = 0; out: free(regptr, M_OFWPROP); return (rv); } int fdt_get_phyaddr(phandle_t node, device_t dev, int *phy_addr, void **phy_sc) { phandle_t phy_node; pcell_t phy_handle, phy_reg; uint32_t i; device_t parent, child; if (OF_getencprop(node, "phy-handle", (void *)&phy_handle, sizeof(phy_handle)) <= 0) return (ENXIO); phy_node = OF_node_from_xref(phy_handle); if (OF_getprop(phy_node, "reg", (void *)&phy_reg, sizeof(phy_reg)) <= 0) return (ENXIO); *phy_addr = fdt32_to_cpu(phy_reg); /* * Search for softc used to communicate with phy. */ /* * Step 1: Search for ancestor of the phy-node with a "phy-handle" * property set. */ phy_node = OF_parent(phy_node); while (phy_node != 0) { if (OF_getprop(phy_node, "phy-handle", (void *)&phy_handle, sizeof(phy_handle)) > 0) break; phy_node = OF_parent(phy_node); } if (phy_node == 0) return (ENXIO); /* * Step 2: For each device with the same parent and name as ours * compare its node with the one found in step 1, ancestor of phy * node (stored in phy_node).
*/ parent = device_get_parent(dev); i = 0; child = device_find_child(parent, device_get_name(dev), i); while (child != NULL) { if (ofw_bus_get_node(child) == phy_node) break; i++; child = device_find_child(parent, device_get_name(dev), i); } if (child == NULL) return (ENXIO); /* * Use softc of the device found. */ *phy_sc = (void *)device_get_softc(child); return (0); } int fdt_get_reserved_regions(struct mem_region *mr, int *mrcnt) { pcell_t reserve[FDT_REG_CELLS * FDT_MEM_REGIONS]; pcell_t *reservep; phandle_t memory, root; uint32_t memory_size; int addr_cells, size_cells; int i, max_size, res_len, rv, tuple_size, tuples; max_size = sizeof(reserve); root = OF_finddevice("/"); memory = OF_finddevice("/memory"); if (memory == -1) { rv = ENXIO; goto out; } if ((rv = fdt_addrsize_cells(OF_parent(memory), &addr_cells, &size_cells)) != 0) goto out; if (addr_cells > 2) { rv = ERANGE; goto out; } tuple_size = sizeof(pcell_t) * (addr_cells + size_cells); res_len = OF_getproplen(root, "memreserve"); if (res_len <= 0 || res_len > sizeof(reserve)) { rv = ERANGE; goto out; } if (OF_getprop(root, "memreserve", reserve, res_len) <= 0) { rv = ENXIO; goto out; } memory_size = 0; tuples = res_len / tuple_size; reservep = (pcell_t *)&reserve; for (i = 0; i < tuples; i++) { rv = fdt_data_to_res(reservep, addr_cells, size_cells, (u_long *)&mr[i].mr_start, (u_long *)&mr[i].mr_size); if (rv != 0) goto out; reservep += addr_cells + size_cells; } *mrcnt = i; rv = 0; out: return (rv); } int -fdt_get_mem_regions(struct mem_region *mr, int *mrcnt, uint32_t *memsize) +fdt_get_mem_regions(struct mem_region *mr, int *mrcnt, u_long *memsize) { pcell_t reg[FDT_REG_CELLS * FDT_MEM_REGIONS]; pcell_t *regp; phandle_t memory; - uint32_t memory_size; + u_long memory_size; int addr_cells, size_cells; int i, max_size, reg_len, rv, tuple_size, tuples; max_size = sizeof(reg); memory = OF_finddevice("/memory"); if (memory == -1) { rv = ENXIO; goto out; } if ((rv = fdt_addrsize_cells(OF_parent(memory), &addr_cells, &size_cells)) != 0) goto out; if (addr_cells > 2) { rv = ERANGE; goto out; } tuple_size = sizeof(pcell_t) * (addr_cells + size_cells); reg_len = OF_getproplen(memory, "reg"); if (reg_len <= 0 || reg_len > sizeof(reg)) { rv = ERANGE; goto out; } if (OF_getprop(memory, "reg", reg, reg_len) <= 0) { rv = ENXIO; goto out; } memory_size = 0; tuples = reg_len / tuple_size; regp = (pcell_t *)&reg; for (i = 0; i < tuples; i++) { rv = fdt_data_to_res(regp, addr_cells, size_cells, (u_long *)&mr[i].mr_start, (u_long *)&mr[i].mr_size); if (rv != 0) goto out; regp += addr_cells + size_cells; memory_size += mr[i].mr_size; } if (memory_size == 0) { rv = ERANGE; goto out; } *mrcnt = i; *memsize = memory_size; rv = 0; out: return (rv); } int fdt_get_unit(device_t dev) { const char * name; name = ofw_bus_get_name(dev); name = strchr(name, '@') + 1; return (strtol(name,NULL,0)); } Index: head/sys/dev/fdt/fdt_common.h =================================================================== --- head/sys/dev/fdt/fdt_common.h (revision 296188) +++ head/sys/dev/fdt/fdt_common.h (revision 296189) @@ -1,104 +1,104 @@ /*- * Copyright (c) 2009-2010 The FreeBSD Foundation * All rights reserved. * * This software was developed by Semihalf under sponsorship from * the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1.
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _FDT_COMMON_H_ #define _FDT_COMMON_H_ #include #include #include #define FDT_MEM_REGIONS 8 #define DI_MAX_INTR_NUM 32 struct fdt_sense_level { enum intr_trigger trig; enum intr_polarity pol; }; #if defined(__arm__) && !defined(ARM_INTRNG) typedef int (*fdt_pic_decode_t)(phandle_t, pcell_t *, int *, int *, int *); extern fdt_pic_decode_t fdt_pic_table[]; #endif #if defined(__arm__) || defined(__powerpc__) typedef void (*fdt_fixup_t)(phandle_t); struct fdt_fixup_entry { char *model; fdt_fixup_t handler; }; extern struct fdt_fixup_entry fdt_fixup_table[]; #endif extern SLIST_HEAD(fdt_ic_list, fdt_ic) fdt_ic_list_head; struct fdt_ic { SLIST_ENTRY(fdt_ic) fdt_ics; ihandle_t iph; device_t dev; }; extern vm_paddr_t fdt_immr_pa; extern vm_offset_t fdt_immr_va; extern vm_offset_t fdt_immr_size; struct fdt_pm_mask_entry { char *compat; uint32_t mask; }; extern struct fdt_pm_mask_entry fdt_pm_mask_table[]; #if defined(FDT_DTB_STATIC) extern u_char fdt_static_dtb; #endif int fdt_addrsize_cells(phandle_t, int *, int *); u_long fdt_data_get(void *, int); int fdt_data_to_res(pcell_t *, int, int, u_long *, u_long *); phandle_t fdt_find_compatible(phandle_t, const char *, int); phandle_t fdt_depth_search_compatible(phandle_t, const char *, int); -int fdt_get_mem_regions(struct mem_region *, int *, uint32_t *); +int fdt_get_mem_regions(struct mem_region *, int *, u_long *); int fdt_get_reserved_regions(struct mem_region *, int *); int fdt_get_phyaddr(phandle_t, device_t, int *, void **); int fdt_get_range(phandle_t, int, u_long *, u_long *); int fdt_immr_addr(vm_offset_t); int fdt_regsize(phandle_t, u_long *, u_long *); int fdt_is_compatible(phandle_t, const char *); int fdt_is_compatible_strict(phandle_t, const char *); int fdt_is_enabled(phandle_t); int fdt_pm_is_enabled(phandle_t); int fdt_is_type(phandle_t, const char *); int fdt_parent_addr_cells(phandle_t); int fdt_reg_to_rl(phandle_t, struct resource_list *); int fdt_pm(phandle_t); int fdt_get_unit(device_t); #endif /* _FDT_COMMON_H_ */ Index: head/sys/mips/beri/beri_machdep.c =================================================================== --- head/sys/mips/beri/beri_machdep.c (revision 296188) +++ head/sys/mips/beri/beri_machdep.c (revision 296189) @@ -1,317 +1,318 @@ /*- * Copyright (c) 2006 Wojciech A. Koszek * Copyright (c) 2012-2014 Robert N. M. Watson * All rights reserved. 
* * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_ddb.h" #include "opt_platform.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef FDT #include #include #endif #include #include #include #include #include #include #include #include #include #include #include #include extern int *edata; extern int *end; void platform_cpu_init() { /* Nothing special */ } static void mips_init(void) { int i; #ifdef FDT struct mem_region mr[FDT_MEM_REGIONS]; - int mr_cnt, val; + int mr_cnt; + u_long val; int j; #endif for (i = 0; i < 10; i++) { phys_avail[i] = 0; } /* phys_avail regions are in bytes */ phys_avail[0] = MIPS_KSEG0_TO_PHYS(kernel_kseg0_end); phys_avail[1] = ctob(realmem); dump_avail[0] = phys_avail[0]; dump_avail[1] = phys_avail[1]; physmem = realmem; #ifdef FDT if (fdt_get_mem_regions(mr, &mr_cnt, &val) == 0) { physmem = btoc(val); KASSERT((phys_avail[0] >= mr[0].mr_start) && \ (phys_avail[0] < (mr[0].mr_start + mr[0].mr_size)), ("First region is not within FDT memory range")); /* Limit size of the first region */ phys_avail[1] = (mr[0].mr_start + MIN(mr[0].mr_size, ctob(realmem))); dump_avail[1] = phys_avail[1]; /* Add the rest of regions */ for (i = 1, j = 2; i < mr_cnt; i++, j+=2) { phys_avail[j] = mr[i].mr_start; phys_avail[j+1] = (mr[i].mr_start + mr[i].mr_size); dump_avail[j] = phys_avail[j]; dump_avail[j+1] = phys_avail[j+1]; } } #endif init_param1(); init_param2(physmem); mips_cpu_init(); pmap_bootstrap(); mips_proc0_init(); mutex_init(); kdb_init(); #ifdef KDB if (boothowto & RB_KDB) kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger"); #endif } /* * Perform a board-level soft-reset. */ void platform_reset(void) { /* XXX SMP will likely require us to do more. */ __asm__ __volatile__( "mfc0 $k0, $12\n\t" "li $k1, 0x00100000\n\t" "or $k0, $k0, $k1\n\t" "mtc0 $k0, $12\n"); for( ; ; ) __asm__ __volatile("wait"); } #ifdef FDT /* Parse cmd line args as env - copied from xlp_machdep. 
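Words starting with '-' are decoded into boothowto flags; name=value words are installed into the kernel environment, and a bare name defaults to "1".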
*/ /* XXX-BZ this should really be centrally provided for all (boot) code. */ static void _parse_bootargs(char *cmdline) { char *n, *v; while ((v = strsep(&cmdline, " \n")) != NULL) { if (*v == '\0') continue; if (*v == '-') { while (*v != '\0') { v++; switch (*v) { case 'a': boothowto |= RB_ASKNAME; break; /* Someone should simulate that ;-) */ case 'C': boothowto |= RB_CDROM; break; case 'd': boothowto |= RB_KDB; break; case 'D': boothowto |= RB_MULTIPLE; break; case 'm': boothowto |= RB_MUTE; break; case 'g': boothowto |= RB_GDB; break; case 'h': boothowto |= RB_SERIAL; break; case 'p': boothowto |= RB_PAUSE; break; case 'r': boothowto |= RB_DFLTROOT; break; case 's': boothowto |= RB_SINGLE; break; case 'v': boothowto |= RB_VERBOSE; break; } } } else { n = strsep(&v, "="); if (v == NULL) kern_setenv(n, "1"); else kern_setenv(n, v); } } } #endif void platform_start(__register_t a0, __register_t a1, __register_t a2, __register_t a3) { struct bootinfo *bootinfop; vm_offset_t kernend; uint64_t platform_counter_freq; int argc = a0; char **argv = (char **)a1; char **envp = (char **)a2; long memsize; #ifdef FDT char buf[2048]; /* early stack supposedly big enough */ vm_offset_t dtbp; phandle_t chosen; void *kmdp; #endif int i; /* clear the BSS and SBSS segments */ kernend = (vm_offset_t)&end; memset(&edata, 0, kernend - (vm_offset_t)(&edata)); mips_postboot_fixup(); mips_pcpu0_init(); /* * Over time, we've changed our boot-time binary interface for the * kernel. Miniboot simply provides a 'memsize' in a3, whereas the * FreeBSD boot loader provides a 'bootinfo *' in a3. While slightly * grody, we support both here by detecting 'pointer-like' values in * a3 and assuming physical memory can never be that big. * * XXXRW: Pull more values than memsize out of bootinfop -- e.g., * module information. */ if (a3 >= 0x9800000000000000ULL) { bootinfop = (void *)a3; memsize = bootinfop->bi_memsize; preload_metadata = (caddr_t)bootinfop->bi_modulep; } else { bootinfop = NULL; memsize = a3; } #ifdef FDT /* * Find the dtb passed in by the boot loader (currently fictional). */ kmdp = preload_search_by_type("elf kernel"); dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t); #if defined(FDT_DTB_STATIC) /* * In case the device tree blob was not retrieved (from metadata) try * to use the statically embedded one. */ if (dtbp == (vm_offset_t)NULL) dtbp = (vm_offset_t)&fdt_static_dtb; #else #error "Non-static FDT not yet supported on BERI" #endif if (OF_install(OFW_FDT, 0) == FALSE) while (1); if (OF_init((void *)dtbp) != 0) while (1); /* * Configure more boot-time parameters passed in by loader. */ boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); init_static_kenv(MD_FETCH(kmdp, MODINFOMD_ENVP, char *), 0); /* * Get bootargs from FDT if specified. */ chosen = OF_finddevice("/chosen"); if (OF_getprop(chosen, "bootargs", buf, sizeof(buf)) != -1) _parse_bootargs(buf); #endif /* * XXXRW: We have no way to compare wallclock time to cycle rate on * BERI, so for now assume we run at the MALTA default (100MHz). */
*/ platform_counter_freq = MIPS_DEFAULT_HZ; mips_timer_early_init(platform_counter_freq); cninit(); printf("entry: platform_start()\n"); bootverbose = 1; if (bootverbose) { printf("cmd line: "); for (i = 0; i < argc; i++) printf("%s ", argv[i]); printf("\n"); printf("envp:\n"); for (i = 0; envp[i]; i += 2) printf("\t%s = %s\n", envp[i], envp[i+1]); if (bootinfop != NULL) printf("bootinfo found at %p\n", bootinfop); printf("memsize = %p\n", (void *)memsize); } realmem = btoc(memsize); mips_init(); mips_timer_init_params(platform_counter_freq, 0); } Index: head/sys/riscv/riscv/machdep.c =================================================================== --- head/sys/riscv/riscv/machdep.c (revision 296188) +++ head/sys/riscv/riscv/machdep.c (revision 296189) @@ -1,805 +1,805 @@ /*- * Copyright (c) 2014 Andrew Turner * Copyright (c) 2015-2016 Ruslan Bukin * All rights reserved. * * Portions of this software were developed by SRI International and the * University of Cambridge Computer Laboratory under DARPA/AFRL contract * FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by the University of Cambridge * Computer Laboratory as part of the CTSRD Project, with support from the * UK Higher Education Innovation Fund (HEIF). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include "opt_platform.h" #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef VFP #include #endif #ifdef FDT #include #include #endif struct pcpu __pcpu[MAXCPU]; extern uint64_t pagetable_l0; static struct trapframe proc0_tf; vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2]; vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2]; int early_boot = 1; int cold = 1; long realmem = 0; long Maxmem = 0; #define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) vm_paddr_t physmap[PHYSMAP_SIZE]; u_int physmap_idx; struct kva_md_info kmi; int64_t dcache_line_size; /* The minimum D cache line size */ int64_t icache_line_size; /* The minimum I cache line size */ int64_t idcache_line_size; /* The minimum cache line size */ extern int *end; extern int *initstack_end; struct pcpu *pcpup; uintptr_t mcall_trap(uintptr_t mcause, uintptr_t* regs); uintptr_t mcall_trap(uintptr_t mcause, uintptr_t* regs) { return (0); } static void cpu_startup(void *dummy) { identify_cpu(); vm_ksubmap_init(&kmi); bufinit(); vm_pager_bufferinit(); } SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); int cpu_idle_wakeup(int cpu) { return (0); } void bzero(void *buf, size_t len) { uint8_t *p; p = buf; while(len-- > 0) *p++ = 0; } int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; regs->sepc = frame->tf_sepc; regs->sstatus = frame->tf_sstatus; regs->ra = frame->tf_ra; regs->sp = frame->tf_sp; regs->gp = frame->tf_gp; regs->tp = frame->tf_tp; memcpy(regs->t, frame->tf_t, sizeof(regs->t)); memcpy(regs->s, frame->tf_s, sizeof(regs->s)); memcpy(regs->a, frame->tf_a, sizeof(regs->a)); return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *frame; frame = td->td_frame; frame->tf_sepc = regs->sepc; frame->tf_sstatus = regs->sstatus; frame->tf_ra = regs->ra; frame->tf_sp = regs->sp; frame->tf_gp = regs->gp; frame->tf_tp = regs->tp; memcpy(frame->tf_t, regs->t, sizeof(frame->tf_t)); memcpy(frame->tf_s, regs->s, sizeof(frame->tf_s)); memcpy(frame->tf_a, regs->a, sizeof(frame->tf_a)); return (0); } int fill_fpregs(struct thread *td, struct fpreg *regs) { /* TODO */ bzero(regs, sizeof(*regs)); return (0); } int set_fpregs(struct thread *td, struct fpreg *regs) { /* TODO */ return (0); } int fill_dbregs(struct thread *td, struct dbreg *regs) { panic("fill_dbregs"); } int set_dbregs(struct thread *td, struct dbreg *regs) { panic("set_dbregs"); } int ptrace_set_pc(struct thread *td, u_long addr) { panic("ptrace_set_pc"); return (0); } int ptrace_single_step(struct thread *td) { /* TODO; */ return (0); } int ptrace_clear_single_step(struct thread *td) { /* TODO; */ return (0); } void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf; tf = td->td_frame; memset(tf, 0, sizeof(struct trapframe)); /* * We need to set a0 for init as it doesn't call * cpu_set_syscall_retval to copy the value. We also * need to set td_retval for the cases where we do. 
*/
*/ tf->tf_a[0] = td->td_retval[0] = stack; tf->tf_sp = STACKALIGN(stack); tf->tf_ra = imgp->entry_addr; tf->tf_sepc = imgp->entry_addr; } /* Sanity check these are the same size, they will be memcpy'd to and fro */ CTASSERT(sizeof(((struct trapframe *)0)->tf_a) == sizeof((struct gpregs *)0)->gp_a); CTASSERT(sizeof(((struct trapframe *)0)->tf_s) == sizeof((struct gpregs *)0)->gp_s); CTASSERT(sizeof(((struct trapframe *)0)->tf_t) == sizeof((struct gpregs *)0)->gp_t); CTASSERT(sizeof(((struct trapframe *)0)->tf_a) == sizeof((struct reg *)0)->a); CTASSERT(sizeof(((struct trapframe *)0)->tf_s) == sizeof((struct reg *)0)->s); CTASSERT(sizeof(((struct trapframe *)0)->tf_t) == sizeof((struct reg *)0)->t); int get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret) { struct trapframe *tf = td->td_frame; memcpy(mcp->mc_gpregs.gp_t, tf->tf_t, sizeof(mcp->mc_gpregs.gp_t)); memcpy(mcp->mc_gpregs.gp_s, tf->tf_s, sizeof(mcp->mc_gpregs.gp_s)); memcpy(mcp->mc_gpregs.gp_a, tf->tf_a, sizeof(mcp->mc_gpregs.gp_a)); if (clear_ret & GET_MC_CLEAR_RET) { mcp->mc_gpregs.gp_a[0] = 0; mcp->mc_gpregs.gp_t[0] = 0; /* clear syscall error */ } mcp->mc_gpregs.gp_ra = tf->tf_ra; mcp->mc_gpregs.gp_sp = tf->tf_sp; mcp->mc_gpregs.gp_gp = tf->tf_gp; mcp->mc_gpregs.gp_tp = tf->tf_tp; mcp->mc_gpregs.gp_sepc = tf->tf_sepc; mcp->mc_gpregs.gp_sstatus = tf->tf_sstatus; return (0); } int set_mcontext(struct thread *td, mcontext_t *mcp) { struct trapframe *tf; tf = td->td_frame; memcpy(tf->tf_t, mcp->mc_gpregs.gp_t, sizeof(tf->tf_t)); memcpy(tf->tf_s, mcp->mc_gpregs.gp_s, sizeof(tf->tf_s)); memcpy(tf->tf_a, mcp->mc_gpregs.gp_a, sizeof(tf->tf_a)); tf->tf_ra = mcp->mc_gpregs.gp_ra; tf->tf_sp = mcp->mc_gpregs.gp_sp; tf->tf_gp = mcp->mc_gpregs.gp_gp; tf->tf_tp = mcp->mc_gpregs.gp_tp; tf->tf_sepc = mcp->mc_gpregs.gp_sepc; tf->tf_sstatus = mcp->mc_gpregs.gp_sstatus; return (0); } static void get_fpcontext(struct thread *td, mcontext_t *mcp) { /* TODO */ } static void set_fpcontext(struct thread *td, mcontext_t *mcp) { /* TODO */ } void cpu_idle(int busy) { spinlock_enter(); if (!busy) cpu_idleclock(); if (!sched_runnable()) __asm __volatile( "fence \n" "wfi \n"); if (!busy) cpu_activeclock(); spinlock_exit(); } void cpu_halt(void) { panic("cpu_halt"); } /* * Flush the D-cache for non-DMA I/O so that the I-cache can * be made coherent later. */ void cpu_flush_dcache(void *ptr, size_t len) { /* TBD */ } /* Get current clock frequency for the given CPU ID. 
*/
*/ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { panic("cpu_est_clockrate"); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { uint64_t addr; addr = (uint64_t)&pagetable_l0; addr += (cpuid * PAGE_SIZE); pcpu->pc_sptbr = addr; } void spinlock_enter(void) { struct thread *td; td = curthread; if (td->td_md.md_spinlock_count == 0) { td->td_md.md_spinlock_count = 1; td->td_md.md_saved_sstatus_ie = intr_disable(); } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; register_t sstatus_ie; td = curthread; critical_exit(); sstatus_ie = td->td_md.md_saved_sstatus_ie; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) intr_restore(sstatus_ie); } #ifndef _SYS_SYSPROTO_H_ struct sigreturn_args { ucontext_t *ucp; }; #endif int sys_sigreturn(struct thread *td, struct sigreturn_args *uap) { uint64_t sstatus; ucontext_t uc; int error; if (uap == NULL) return (EFAULT); if (copyin(uap->sigcntxp, &uc, sizeof(uc))) return (EFAULT); /* * Make sure the processor mode has not been tampered with and * interrupts have not been disabled. */ sstatus = uc.uc_mcontext.mc_gpregs.gp_sstatus; if ((sstatus & SSTATUS_PS) != 0 || (sstatus & SSTATUS_PIE) == 0) return (EINVAL); error = set_mcontext(td, &uc.uc_mcontext); if (error != 0) return (error); set_fpcontext(td, &uc.uc_mcontext); /* Restore signal mask. */ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. */ void makectx(struct trapframe *tf, struct pcb *pcb) { memcpy(pcb->pcb_t, tf->tf_t, sizeof(tf->tf_t)); memcpy(pcb->pcb_s, tf->tf_s, sizeof(tf->tf_s)); memcpy(pcb->pcb_a, tf->tf_a, sizeof(tf->tf_a)); pcb->pcb_ra = tf->tf_ra; pcb->pcb_sp = tf->tf_sp; pcb->pcb_gp = tf->tf_gp; pcb->pcb_tp = tf->tf_tp; pcb->pcb_sepc = tf->tf_sepc; } void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct sigframe *fp, frame; struct sysentvec *sysent; struct trapframe *tf; struct sigacts *psp; struct thread *td; struct proc *p; int onstack; int code; int sig; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; code = ksi->ksi_code; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; onstack = sigonstack(tf->tf_sp); CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm, catcher, sig); /* Allocate and validate space for the signal handler context. */ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { fp = (struct sigframe *)((uintptr_t)td->td_sigstk.ss_sp + td->td_sigstk.ss_size); } else { fp = (struct sigframe *)td->td_frame->tf_sp; } /* Make room, keeping the stack aligned */ fp--; fp = (struct sigframe *)STACKALIGN(fp); /* Fill in the frame to copy out */ get_mcontext(td, &frame.sf_uc.uc_mcontext, 0); get_fpcontext(td, &frame.sf_uc.uc_mcontext); frame.sf_si = ksi->ksi_info; frame.sf_uc.uc_sigmask = *mask; frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE; frame.sf_uc.uc_stack = td->td_sigstk; mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(td->td_proc); /* Copy the sigframe out to the user's stack. 
*/ if (copyout(&frame, fp, sizeof(*fp)) != 0) { /* Process has trashed its stack. Kill it. */ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp); PROC_LOCK(p); sigexit(td, SIGILL); } tf->tf_a[0] = sig; tf->tf_a[1] = (register_t)&fp->sf_si; tf->tf_a[2] = (register_t)&fp->sf_uc; tf->tf_sepc = (register_t)catcher; tf->tf_sp = (register_t)fp; sysent = p->p_sysent; if (sysent->sv_sigcode_base != 0) tf->tf_ra = (register_t)sysent->sv_sigcode_base; else tf->tf_ra = (register_t)(sysent->sv_psstrings - *(sysent->sv_szsigcode)); CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_sepc, tf->tf_sp); PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } static void init_proc0(vm_offset_t kstack) { pcpup = &__pcpu[0]; proc_linkup0(&proc0, &thread0); thread0.td_kstack = kstack; thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1; thread0.td_frame = &proc0_tf; pcpup->pc_curpcb = thread0.td_pcb; } static int add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap, u_int *physmap_idxp) { u_int i, insert_idx, _physmap_idx; _physmap_idx = *physmap_idxp; if (length == 0) return (1); /* * Find insertion point while checking for overlap. Start off by * assuming the new entry will be added to the end. */ insert_idx = _physmap_idx; for (i = 0; i <= _physmap_idx; i += 2) { if (base < physmap[i + 1]) { if (base + length <= physmap[i]) { insert_idx = i; break; } if (boothowto & RB_VERBOSE) printf( "Overlapping memory regions, ignoring second region\n"); return (1); } } /* See if we can prepend to the next entry. */ if (insert_idx <= _physmap_idx && base + length == physmap[insert_idx]) { physmap[insert_idx] = base; return (1); } /* See if we can append to the previous entry. */ if (insert_idx > 0 && base == physmap[insert_idx - 1]) { physmap[insert_idx - 1] += length; return (1); } _physmap_idx += 2; *physmap_idxp = _physmap_idx; if (_physmap_idx == PHYSMAP_SIZE) { printf( "Too many segments in the physical address map, giving up\n"); return (0); } /* * Move the last 'N' entries down to make room for the new * entry if needed. */ for (i = _physmap_idx; i > insert_idx; i -= 2) { physmap[i] = physmap[i - 2]; physmap[i + 1] = physmap[i - 1]; } /* Insert the new entry. */ physmap[insert_idx] = base; physmap[insert_idx + 1] = base + length; printf("physmap[%d] = 0x%016lx\n", insert_idx, base); printf("physmap[%d] = 0x%016lx\n", insert_idx + 1, base + length); return (1); } #ifdef FDT static void try_load_dtb(caddr_t kmdp) { vm_offset_t dtbp; dtbp = (vm_offset_t)&fdt_static_dtb; if (dtbp == (vm_offset_t)NULL) { printf("ERROR loading DTB\n"); return; } if (OF_install(OFW_FDT, 0) == FALSE) panic("Cannot install FDT"); if (OF_init((void *)dtbp) != 0) panic("OF_init failed with the found device tree"); } #endif static void cache_setup(void) { /* TODO */ } /* * Fake up a boot descriptor table. * RISCVTODO: This needs to be done via loader (when it's available). 
*/
*/ vm_offset_t fake_preload_metadata(struct riscv_bootparams *rvbp __unused) { #ifdef DDB vm_offset_t zstart = 0, zend = 0; #endif vm_offset_t lastaddr; int i = 0; static uint32_t fake_preload[35]; fake_preload[i++] = MODINFO_NAME; fake_preload[i++] = strlen("kernel") + 1; strcpy((char*)&fake_preload[i++], "kernel"); i += 1; fake_preload[i++] = MODINFO_TYPE; fake_preload[i++] = strlen("elf64 kernel") + 1; strcpy((char*)&fake_preload[i++], "elf64 kernel"); i += 3; fake_preload[i++] = MODINFO_ADDR; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = (uint64_t)(KERNBASE + KERNENTRY); i += 1; fake_preload[i++] = MODINFO_SIZE; fake_preload[i++] = sizeof(uint64_t); printf("end is 0x%016lx\n", (uint64_t)&end); fake_preload[i++] = (uint64_t)&end - (uint64_t)(KERNBASE + KERNENTRY); i += 1; #ifdef DDB #if 0 /* RISCVTODO */ if (*(uint32_t *)KERNVIRTADDR == MAGIC_TRAMP_NUMBER) { fake_preload[i++] = MODINFO_METADATA|MODINFOMD_SSYM; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 4); fake_preload[i++] = MODINFO_METADATA|MODINFOMD_ESYM; fake_preload[i++] = sizeof(vm_offset_t); fake_preload[i++] = *(uint32_t *)(KERNVIRTADDR + 8); lastaddr = *(uint32_t *)(KERNVIRTADDR + 8); zend = lastaddr; zstart = *(uint32_t *)(KERNVIRTADDR + 4); db_fetch_ksymtab(zstart, zend); } else #endif #endif lastaddr = (vm_offset_t)&end; fake_preload[i++] = 0; fake_preload[i] = 0; preload_metadata = (void *)fake_preload; return (lastaddr); } void initriscv(struct riscv_bootparams *rvbp) { struct mem_region mem_regions[FDT_MEM_REGIONS]; vm_offset_t lastaddr; int mem_regions_sz; vm_size_t kernlen; - uint32_t memsize; + u_long memsize; caddr_t kmdp; int i; /* Set the module data location */ lastaddr = fake_preload_metadata(rvbp); /* Find the kernel address */ kmdp = preload_search_by_type("elf kernel"); if (kmdp == NULL) kmdp = preload_search_by_type("elf64 kernel"); boothowto = 0; kern_envp = NULL; #ifdef FDT try_load_dtb(kmdp); #endif /* Load the physical memory ranges */ physmap_idx = 0; /* Grab physical memory regions information from device tree. */ if (fdt_get_mem_regions(mem_regions, &mem_regions_sz, &memsize) != 0) panic("Cannot get physical memory regions"); for (i = 0; i < mem_regions_sz; i++) add_physmap_entry(mem_regions[i].mr_start, mem_regions[i].mr_size, physmap, &physmap_idx); /* Set the pcpu data, this is needed by pmap_bootstrap */ pcpup = &__pcpu[0]; pcpu_init(pcpup, 0, sizeof(struct pcpu)); /* Set the pcpu pointer */ __asm __volatile("mv gp, %0" :: "r"(pcpup)); PCPU_SET(curthread, &thread0); /* Do basic tuning, hz etc */ init_param1(); cache_setup(); /* Bootstrap enough of pmap to enter the kernel proper */ kernlen = (lastaddr - KERNBASE); pmap_bootstrap(rvbp->kern_l1pt, KERNENTRY, kernlen); cninit(); init_proc0(rvbp->kern_stack); /* set page table base register for thread0 */ thread0.td_pcb->pcb_l1addr = (rvbp->kern_l1pt - KERNBASE); msgbufinit(msgbufp, msgbufsize); mutex_init(); init_param2(physmem); kdb_init(); riscv_init_interrupts(); early_boot = 0; }
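The common thread in this revision is widening the FDT memsize accumulator from uint32_t to u_long. A minimal standalone sketch (illustrative only; the region names and 2GB sizes are hypothetical, not taken from the change) shows why: on an LP64 target, a 32-bit sum of memory regions totalling 4GB wraps to zero, which fdt_get_mem_regions() would then misreport as an ERANGE failure, while a u_long sum keeps the full value.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* Two hypothetical 2GB FDT memory regions. */
	uint64_t mr_size[2] = { 2048ULL << 20, 2048ULL << 20 };
	uint32_t narrow = 0;		/* old accumulator type */
	unsigned long wide = 0;		/* u_long; 64-bit on LP64 */
	int i;

	for (i = 0; i < 2; i++) {
		narrow += mr_size[i];	/* wraps modulo 2^32 */
		wide += mr_size[i];
	}

	/* Prints: narrow = 0, wide = 4294967296 */
	printf("narrow = %u, wide = %lu\n", narrow, wide);
	return (0);
}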