Index: head/sys/amd64/amd64/apic_vector.S =================================================================== --- head/sys/amd64/amd64/apic_vector.S (revision 70005) +++ head/sys/amd64/amd64/apic_vector.S (revision 70006) @@ -1,692 +1,692 @@ /* * from: vector.s, 386BSD 0.1 unknown origin * $FreeBSD$ */ #include #include #include "i386/isa/intr_machdep.h" /* convert an absolute IRQ# into a bitmask */ #define IRQ_BIT(irq_num) (1 << (irq_num)) /* make an index into the IO APIC from the IRQ# */ #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2)) /* * */ #define PUSH_FRAME \ pushl $0 ; /* dummy error code */ \ pushl $0 ; /* dummy trap type */ \ pushal ; \ pushl %ds ; /* save data and extra segments ... */ \ pushl %es ; \ pushl %fs #define POP_FRAME \ popl %fs ; \ popl %es ; \ popl %ds ; \ popal ; \ addl $4+4,%esp /* * Macros for interrupt entry, call to handler, and exit. */ #define FAST_INTR(irq_num, vec_name) \ .text ; \ SUPERALIGN_TEXT ; \ IDTVEC(vec_name) ; \ PUSH_FRAME ; \ movl $KDSEL,%eax ; \ mov %ax,%ds ; \ mov %ax,%es ; \ movl $KPSEL,%eax ; \ mov %ax,%fs ; \ FAKE_MCOUNT(13*4(%esp)) ; \ incb PCPU(INTR_NESTING_LEVEL) ; \ pushl _intr_unit + (irq_num) * 4 ; \ call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ addl $4, %esp ; \ - movl $0, lapic_eoi ; \ + movl $0, _lapic+LA_EOI ; \ lock ; \ incl _cnt+V_INTR ; /* book-keeping can wait */ \ movl _intr_countp + (irq_num) * 4, %eax ; \ lock ; \ incl (%eax) ; \ MEXITCOUNT ; \ jmp _doreti #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12 #define MASK_IRQ(irq_num) \ IMASK_LOCK ; /* into critical reg */ \ testl $IRQ_BIT(irq_num), _apic_imen ; \ jne 7f ; /* masked, don't mask */ \ orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \ movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \ movl REDIRIDX(irq_num), %eax ; /* get the index */ \ movl %eax, (%ecx) ; /* write the index */ \ movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \ orl $IOART_INTMASK, %eax ; /* set the mask */ \ movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \ 7: ; /* already masked */ \ IMASK_UNLOCK /* * Test to see whether we are handling an edge or level triggered INT. * Level-triggered INTs must still be masked as we don't clear the source, * and the EOI cycle would cause redundant INTs to occur. */ #define MASK_LEVEL_IRQ(irq_num) \ testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \ jz 9f ; /* edge, don't mask */ \ MASK_IRQ(irq_num) ; \ 9: #ifdef APIC_INTR_REORDER #define EOI_IRQ(irq_num) \ movl _apic_isrbit_location + 8 * (irq_num), %eax ; \ movl (%eax), %eax ; \ testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \ jz 9f ; /* not active */ \ - movl $0, lapic_eoi ; \ + movl $0, _lapic+LA_EOI ; \ APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \ 9: #else #define EOI_IRQ(irq_num) \ - testl $IRQ_BIT(irq_num), lapic_isr1; \ + testl $IRQ_BIT(irq_num), _lapic+LA_ISR1; \ jz 9f ; /* not active */ \ - movl $0, lapic_eoi; \ + movl $0, _lapic+LA_EOI; \ APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \ 9: #endif /* * Test to see if the source is currently masked, clear if so. 
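The point of the hunks above: the hard-wired absolute symbols lapic_eoi and lapic_isr1 are replaced by the _lapic base symbol plus byte offsets (LA_EOI, LA_ISR1, ...) generated from struct LAPIC by genassym, so the assembly no longer depends on a second, hand-maintained description of the register layout. A minimal C sketch of what the new "movl $0, _lapic+LA_EOI" store amounts to, assuming lapic is the kernel's mapping of the local APIC register page (only the eoi field is spelled out here; the real struct LAPIC in <machine/apic.h> pads every register out to its architectural slot):

#include <sys/types.h>

struct LAPIC {				/* layout sketch only */
	/* ... id, version, tpr, ... */
	volatile u_int32_t eoi;		/* any store acknowledges the INT */
	/* ... svr, isr0-isr7, icr_lo, icr_hi, ... */
};

extern volatile struct LAPIC lapic;	/* the mapped register page */

static __inline void
lapic_send_eoi(void)			/* hypothetical helper */
{
	lapic.eoi = 0;			/* movl $0, _lapic+LA_EOI */
}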
*/ #define UNMASK_IRQ(irq_num) \ IMASK_LOCK ; /* into critical reg */ \ testl $IRQ_BIT(irq_num), _apic_imen ; \ je 7f ; /* bit clear, not masked */ \ andl $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */ \ movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \ movl REDIRIDX(irq_num), %eax ; /* get the index */ \ movl %eax, (%ecx) ; /* write the index */ \ movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \ andl $~IOART_INTMASK, %eax ; /* clear the mask */ \ movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \ 7: ; /* already unmasked */ \ IMASK_UNLOCK #ifdef APIC_INTR_DIAGNOSTIC #ifdef APIC_INTR_DIAGNOSTIC_IRQ log_intr_event: pushf cli pushl $CNAME(apic_itrace_debuglock) call CNAME(s_lock_np) addl $4, %esp movl CNAME(apic_itrace_debugbuffer_idx), %ecx andl $32767, %ecx movl PCPU(CPUID), %eax shll $8, %eax orl 8(%esp), %eax movw %ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2) incl %ecx andl $32767, %ecx movl %ecx, CNAME(apic_itrace_debugbuffer_idx) pushl $CNAME(apic_itrace_debuglock) call CNAME(s_unlock_np) addl $4, %esp popf ret #define APIC_ITRACE(name, irq_num, id) \ lock ; /* MP-safe */ \ incl CNAME(name) + (irq_num) * 4 ; \ pushl %eax ; \ pushl %ecx ; \ pushl %edx ; \ movl $(irq_num), %eax ; \ cmpl $APIC_INTR_DIAGNOSTIC_IRQ, %eax ; \ jne 7f ; \ pushl $id ; \ call log_intr_event ; \ addl $4, %esp ; \ 7: ; \ popl %edx ; \ popl %ecx ; \ popl %eax #else #define APIC_ITRACE(name, irq_num, id) \ lock ; /* MP-safe */ \ incl CNAME(name) + (irq_num) * 4 #endif #define APIC_ITRACE_ENTER 1 #define APIC_ITRACE_EOI 2 #define APIC_ITRACE_TRYISRLOCK 3 #define APIC_ITRACE_GOTISRLOCK 4 #define APIC_ITRACE_ENTER2 5 #define APIC_ITRACE_LEAVE 6 #define APIC_ITRACE_UNMASK 7 #define APIC_ITRACE_ACTIVE 8 #define APIC_ITRACE_MASKED 9 #define APIC_ITRACE_NOISRLOCK 10 #define APIC_ITRACE_MASKED2 11 #define APIC_ITRACE_SPLZ 12 #define APIC_ITRACE_DORETI 13 #else #define APIC_ITRACE(name, irq_num, id) #endif /* * Slow, threaded interrupts. * * XXX Most of the parameters here are obsolete. Fix this when we're * done. * XXX we really shouldn't return via doreti if we just schedule the * interrupt handler and don't run anything. We could just do an * iret. FIXME. */ #define INTR(irq_num, vec_name, maybe_extra_ipending) \ .text ; \ SUPERALIGN_TEXT ; \ /* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */ \ IDTVEC(vec_name) ; \ PUSH_FRAME ; \ movl $KDSEL, %eax ; /* reload with kernel's data segment */ \ mov %ax, %ds ; \ mov %ax, %es ; \ movl $KPSEL, %eax ; \ mov %ax, %fs ; \ ; \ maybe_extra_ipending ; \ ; \ APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \ ; \ MASK_LEVEL_IRQ(irq_num) ; \ EOI_IRQ(irq_num) ; \ 0: ; \ incb PCPU(INTR_NESTING_LEVEL) ; \ ; \ /* entry point used by doreti_unpend for HWIs. */ \ __CONCAT(Xresume,irq_num): ; \ FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \ pushl $irq_num; /* pass the IRQ */ \ APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \ sti ; \ call _sched_ithd ; \ addl $4, %esp ; /* discard the parameter */ \ APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \ ; \ MEXITCOUNT ; \ jmp _doreti /* * Handle "spurious INTerrupts". * Notes: * This is different than the "spurious INTerrupt" generated by an * 8259 PIC for missing INTs. See the APIC documentation for details. * This routine should NOT do an 'EOI' cycle. */ .text SUPERALIGN_TEXT .globl _Xspuriousint _Xspuriousint: /* No EOI cycle used here */ iret /* * Handle TLB shootdowns. 
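MASK_IRQ and UNMASK_IRQ reach the I/O APIC through its two-register indirection: the index of a redirection-table register is written at the chip's base address, and that register's value is then read or written through the data window at IOAPIC_WINDOW. A hedged C sketch of the unmask path (in the real code the base address and index come from int_to_apicintpin[], and the constants live in the APIC headers):

#include <sys/types.h>

#define IOAPIC_WINDOW	0x10		/* data window, byte offset */
#define IOART_INTMASK	0x00010000	/* mask bit of a redirection entry */

static void
ioapic_unmask(volatile u_int32_t *ioapic, u_int32_t idx)
{
	u_int32_t val;

	ioapic[0] = idx;			/* write the index */
	val = ioapic[IOAPIC_WINDOW / 4];	/* current value */
	val &= ~IOART_INTMASK;			/* clear the mask */
	ioapic[IOAPIC_WINDOW / 4] = val;	/* new value */
}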
*/ .text SUPERALIGN_TEXT .globl _Xinvltlb _Xinvltlb: pushl %eax #ifdef COUNT_XINVLTLB_HITS pushl %fs movl $KPSEL, %eax mov %ax, %fs movl PCPU(CPUID), %eax popl %fs ss incl _xhits(,%eax,4) #endif /* COUNT_XINVLTLB_HITS */ movl %cr3, %eax /* invalidate the TLB */ movl %eax, %cr3 ss /* stack segment, avoid %ds load */ - movl $0, lapic_eoi /* End Of Interrupt to APIC */ + movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */ popl %eax iret #ifdef BETTER_CLOCK /* * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU, * * - Stores current cpu state in checkstate_cpustate[cpuid] * 0 == user, 1 == sys, 2 == intr * - Stores current process in checkstate_curproc[cpuid] * * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus. * * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags */ .text SUPERALIGN_TEXT .globl _Xcpucheckstate .globl _checkstate_cpustate .globl _checkstate_curproc .globl _checkstate_pc _Xcpucheckstate: pushl %eax pushl %ebx pushl %ds /* save current data segment */ pushl %fs movl $KDSEL, %eax mov %ax, %ds /* use KERNEL data segment */ movl $KPSEL, %eax mov %ax, %fs - movl $0, lapic_eoi /* End Of Interrupt to APIC */ + movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */ movl $0, %ebx movl 20(%esp), %eax andl $3, %eax cmpl $3, %eax je 1f testl $PSL_VM, 24(%esp) jne 1f incl %ebx /* system or interrupt */ 1: movl PCPU(CPUID), %eax movl %ebx, _checkstate_cpustate(,%eax,4) movl PCPU(CURPROC), %ebx movl %ebx, _checkstate_curproc(,%eax,4) movl 16(%esp), %ebx movl %ebx, _checkstate_pc(,%eax,4) lock /* checkstate_probed_cpus |= (1< #include #include #ifdef SMP #include #include #include /** GRAB_LOPRIO */ #include #endif /* SMP */ #include "assym.s" /*****************************************************************************/ /* Scheduling */ /*****************************************************************************/ .data .globl _panic #if defined(SWTCH_OPTIM_STATS) .globl _swtch_optim_stats, _tlb_flush_count _swtch_optim_stats: .long 0 /* number of _swtch_optims */ _tlb_flush_count: .long 0 #endif .text /* * cpu_throw() */ ENTRY(cpu_throw) jmp sw1 /* * cpu_switch() */ ENTRY(cpu_switch) /* switch to new process. first, save context as needed */ movl PCPU(CURPROC),%ecx /* if no process to save, don't bother */ testl %ecx,%ecx jz sw1 #ifdef SMP movb P_ONCPU(%ecx), %al /* save "last" cpu */ movb %al, P_LASTCPU(%ecx) movb $0xff, P_ONCPU(%ecx) /* "leave" the cpu */ #endif /* SMP */ movl P_VMSPACE(%ecx), %edx #ifdef SMP movl PCPU(CPUID), %eax #else xorl %eax, %eax #endif /* SMP */ btrl %eax, VM_PMAP+PM_ACTIVE(%edx) movl P_ADDR(%ecx),%edx movl (%esp),%eax /* Hardware registers */ movl %eax,PCB_EIP(%edx) movl %ebx,PCB_EBX(%edx) movl %esp,PCB_ESP(%edx) movl %ebp,PCB_EBP(%edx) movl %esi,PCB_ESI(%edx) movl %edi,PCB_EDI(%edx) movl %gs,PCB_GS(%edx) /* test if debug registers should be saved */ movb PCB_FLAGS(%edx),%al andb $PCB_DBREGS,%al jz 1f /* no, skip over */ movl %dr7,%eax /* yes, do the save */ movl %eax,PCB_DR7(%edx) andl $0x0000ff00, %eax /* disable all watchpoints */ movl %eax,%dr7 movl %dr6,%eax movl %eax,PCB_DR6(%edx) movl %dr3,%eax movl %eax,PCB_DR3(%edx) movl %dr2,%eax movl %eax,PCB_DR2(%edx) movl %dr1,%eax movl %eax,PCB_DR1(%edx) movl %dr0,%eax movl %eax,PCB_DR0(%edx) 1: /* save sched_lock recursion count */ movl _sched_lock+MTX_RECURSE,%eax movl %eax,PCB_SCHEDNEST(%edx) #ifdef SMP /* XXX FIXME: we should be saving the local APIC TPR */ #endif /* SMP */ #if NNPX > 0 /* have we used fp, and need a save? 
*/ cmpl %ecx,PCPU(NPXPROC) jne 1f addl $PCB_SAVEFPU,%edx /* h/w bugs make saving complicated */ pushl %edx call _npxsave /* do it in a big C function */ popl %eax 1: #endif /* NNPX > 0 */ /* save is done, now choose a new process */ sw1: #ifdef SMP /* Stop scheduling if smp_active goes zero and we are not BSP */ cmpl $0,_smp_active jne 1f cmpl $0,PCPU(CPUID) je 1f movl PCPU(IDLEPROC), %eax jmp sw1b 1: #endif /* * Choose a new process to schedule. chooseproc() returns idleproc * if it cannot find another process to run. */ sw1a: call _chooseproc /* trash ecx, edx, ret eax*/ #ifdef INVARIANTS testl %eax,%eax /* no process? */ jz badsw3 /* no, panic */ #endif sw1b: movl %eax,%ecx xorl %eax,%eax andl $~AST_RESCHED,PCPU(ASTPENDING) #ifdef INVARIANTS cmpb $SRUN,P_STAT(%ecx) jne badsw2 #endif movl P_ADDR(%ecx),%edx #if defined(SWTCH_OPTIM_STATS) incl _swtch_optim_stats #endif /* switch address space */ movl %cr3,%ebx cmpl PCB_CR3(%edx),%ebx je 4f #if defined(SWTCH_OPTIM_STATS) decl _swtch_optim_stats incl _tlb_flush_count #endif movl PCB_CR3(%edx),%ebx movl %ebx,%cr3 4: #ifdef SMP movl PCPU(CPUID), %esi #else xorl %esi, %esi #endif cmpl $0, PCB_EXT(%edx) /* has pcb extension? */ je 1f btsl %esi, _private_tss /* mark use of private tss */ movl PCB_EXT(%edx), %edi /* new tss descriptor */ jmp 2f 1: /* update common_tss.tss_esp0 pointer */ movl %edx, %ebx /* pcb */ addl $(UPAGES * PAGE_SIZE - 16), %ebx movl %ebx, PCPU(COMMON_TSS) + TSS_ESP0 btrl %esi, _private_tss jae 3f #ifdef SMP movl $GD_COMMON_TSSD, %edi addl %fs:0, %edi #else movl $PCPU(COMMON_TSSD), %edi #endif 2: /* move correct tss descriptor into GDT slot, then reload tr */ movl PCPU(TSS_GDT), %ebx /* entry in GDT */ movl 0(%edi), %eax movl %eax, 0(%ebx) movl 4(%edi), %eax movl %eax, 4(%ebx) movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */ ltr %si 3: movl P_VMSPACE(%ecx), %ebx #ifdef SMP movl PCPU(CPUID), %eax #else xorl %eax, %eax #endif btsl %eax, VM_PMAP+PM_ACTIVE(%ebx) /* restore context */ movl PCB_EBX(%edx),%ebx movl PCB_ESP(%edx),%esp movl PCB_EBP(%edx),%ebp movl PCB_ESI(%edx),%esi movl PCB_EDI(%edx),%edi movl PCB_EIP(%edx),%eax movl %eax,(%esp) #ifdef SMP #ifdef GRAB_LOPRIO /* hold LOPRIO for INTs */ #ifdef CHEAP_TPR - movl $0, lapic_tpr + movl $0, _lapic+LA_TPR #else - andl $~APIC_TPR_PRIO, lapic_tpr + andl $~APIC_TPR_PRIO, _lapic+LA_TPR #endif /** CHEAP_TPR */ #endif /** GRAB_LOPRIO */ movl PCPU(CPUID),%eax movb %al, P_ONCPU(%ecx) #endif /* SMP */ movl %edx, PCPU(CURPCB) movl %ecx, PCPU(CURPROC) /* into next process */ #ifdef SMP /* XXX FIXME: we should be restoring the local APIC TPR */ #endif /* SMP */ #ifdef USER_LDT cmpl $0, PCB_USERLDT(%edx) jnz 1f movl __default_ldt,%eax cmpl PCPU(CURRENTLDT),%eax je 2f lldt __default_ldt movl %eax,PCPU(CURRENTLDT) jmp 2f 1: pushl %edx call _set_user_ldt popl %edx 2: #endif /* This must be done after loading the user LDT. 
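The GRAB_LOPRIO hunk in the restore path above now reaches the task-priority register the same way, through _lapic plus LA_TPR. The two variants differ only in whether the whole register is cleared (CHEAP_TPR) or just the priority field; roughly, in C (the value of APIC_TPR_PRIO is illustrative, the real mask is in <machine/apic.h>):

#include <sys/types.h>

#define APIC_TPR_PRIO	0x000000ff	/* illustrative priority mask */

/* Drop to the lowest task priority so this CPU is willing to
 * field interrupts while it runs the new process. */
static __inline void
grab_loprio(volatile u_int32_t *tpr)	/* hypothetical helper */
{
#ifdef CHEAP_TPR
	*tpr = 0;			/* movl $0, _lapic+LA_TPR */
#else
	*tpr &= ~APIC_TPR_PRIO;		/* andl $~APIC_TPR_PRIO, _lapic+LA_TPR */
#endif
}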
*/ .globl cpu_switch_load_gs cpu_switch_load_gs: movl PCB_GS(%edx),%gs /* test if debug regisers should be restored */ movb PCB_FLAGS(%edx),%al andb $PCB_DBREGS,%al jz 1f /* no, skip over */ movl PCB_DR6(%edx),%eax /* yes, do the restore */ movl %eax,%dr6 movl PCB_DR3(%edx),%eax movl %eax,%dr3 movl PCB_DR2(%edx),%eax movl %eax,%dr2 movl PCB_DR1(%edx),%eax movl %eax,%dr1 movl PCB_DR0(%edx),%eax movl %eax,%dr0 movl PCB_DR7(%edx),%eax movl %eax,%dr7 1: /* * restore sched_lock recursion count and transfer ownership to * new process */ movl PCB_SCHEDNEST(%edx),%eax movl %eax,_sched_lock+MTX_RECURSE movl PCPU(CURPROC),%eax movl %eax,_sched_lock+MTX_LOCK ret CROSSJUMPTARGET(sw1a) #ifdef INVARIANTS badsw2: pushl $sw0_2 call _panic sw0_2: .asciz "cpu_switch: not SRUN" badsw3: pushl $sw0_3 call _panic sw0_3: .asciz "cpu_switch: chooseproc returned NULL" #endif /* * savectx(pcb) * Update pcb, saving current processor state. */ ENTRY(savectx) /* fetch PCB */ movl 4(%esp),%ecx /* caller's return address - child won't execute this routine */ movl (%esp),%eax movl %eax,PCB_EIP(%ecx) movl %cr3,%eax movl %eax,PCB_CR3(%ecx) movl %ebx,PCB_EBX(%ecx) movl %esp,PCB_ESP(%ecx) movl %ebp,PCB_EBP(%ecx) movl %esi,PCB_ESI(%ecx) movl %edi,PCB_EDI(%ecx) movl %gs,PCB_GS(%ecx) #if NNPX > 0 /* * If npxproc == NULL, then the npx h/w state is irrelevant and the * state had better already be in the pcb. This is true for forks * but not for dumps (the old book-keeping with FP flags in the pcb * always lost for dumps because the dump pcb has 0 flags). * * If npxproc != NULL, then we have to save the npx h/w state to * npxproc's pcb and copy it to the requested pcb, or save to the * requested pcb and reload. Copying is easier because we would * have to handle h/w bugs for reloading. We used to lose the * parent's npx state for forks by forgetting to reload. */ movl PCPU(NPXPROC),%eax testl %eax,%eax je 1f pushl %ecx movl P_ADDR(%eax),%eax leal PCB_SAVEFPU(%eax),%eax pushl %eax pushl %eax call _npxsave addl $4,%esp popl %eax popl %ecx pushl $PCB_SAVEFPU_SIZE leal PCB_SAVEFPU(%ecx),%ecx pushl %ecx pushl %eax call _bcopy addl $12,%esp #endif /* NNPX > 0 */ 1: ret Index: head/sys/amd64/amd64/genassym.c =================================================================== --- head/sys/amd64/amd64/genassym.c (revision 70005) +++ head/sys/amd64/amd64/genassym.c (revision 70006) @@ -1,237 +1,244 @@ /*- * Copyright (c) 1982, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 * $FreeBSD$ */ #include "opt_user_ldt.h" #include #include #include #include #include #include #include #include #include #include #include /* XXX */ #ifdef KTR_PERCPU #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif #include #include #include #include ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace)); ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap)); ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active)); ASSYM(P_ADDR, offsetof(struct proc, p_addr)); ASSYM(P_STAT, offsetof(struct proc, p_stat)); ASSYM(P_WCHAN, offsetof(struct proc, p_wchan)); #ifdef SMP ASSYM(P_ONCPU, offsetof(struct proc, p_oncpu)); ASSYM(P_LASTCPU, offsetof(struct proc, p_lastcpu)); #endif ASSYM(SSLEEP, SSLEEP); ASSYM(SRUN, SRUN); ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap)); ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall)); ASSYM(V_INTR, offsetof(struct vmmeter, v_intr)); ASSYM(UPAGES, UPAGES); ASSYM(PAGE_SIZE, PAGE_SIZE); ASSYM(NPTEPG, NPTEPG); ASSYM(NPDEPG, NPDEPG); ASSYM(PDESIZE, PDESIZE); ASSYM(PTESIZE, PTESIZE); ASSYM(PAGE_SHIFT, PAGE_SHIFT); ASSYM(PAGE_MASK, PAGE_MASK); ASSYM(PDRSHIFT, PDRSHIFT); ASSYM(USRSTACK, USRSTACK); ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS); ASSYM(KERNBASE, KERNBASE); ASSYM(MCLBYTES, MCLBYTES); ASSYM(PCB_CR3, offsetof(struct pcb, pcb_cr3)); ASSYM(PCB_EDI, offsetof(struct pcb, pcb_edi)); ASSYM(PCB_ESI, offsetof(struct pcb, pcb_esi)); ASSYM(PCB_EBP, offsetof(struct pcb, pcb_ebp)); ASSYM(PCB_ESP, offsetof(struct pcb, pcb_esp)); ASSYM(PCB_EBX, offsetof(struct pcb, pcb_ebx)); ASSYM(PCB_EIP, offsetof(struct pcb, pcb_eip)); ASSYM(TSS_ESP0, offsetof(struct i386tss, tss_esp0)); #ifdef USER_LDT ASSYM(PCB_USERLDT, offsetof(struct pcb, pcb_ldt)); #endif ASSYM(PCB_GS, offsetof(struct pcb, pcb_gs)); ASSYM(PCB_DR0, offsetof(struct pcb, pcb_dr0)); ASSYM(PCB_DR1, offsetof(struct pcb, pcb_dr1)); ASSYM(PCB_DR2, offsetof(struct pcb, pcb_dr2)); ASSYM(PCB_DR3, offsetof(struct pcb, pcb_dr3)); ASSYM(PCB_DR6, offsetof(struct pcb, pcb_dr6)); ASSYM(PCB_DR7, offsetof(struct pcb, pcb_dr7)); ASSYM(PCB_DBREGS, PCB_DBREGS); ASSYM(PCB_EXT, offsetof(struct pcb, pcb_ext)); ASSYM(PCB_SCHEDNEST, offsetof(struct pcb, pcb_schednest)); ASSYM(PCB_SPARE, offsetof(struct pcb, __pcb_spare)); ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags)); ASSYM(PCB_SAVEFPU, offsetof(struct pcb, pcb_savefpu)); ASSYM(PCB_SAVEFPU_SIZE, sizeof(struct save87)); ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); #ifdef SMP ASSYM(PCB_SIZE, sizeof(struct pcb)); #endif ASSYM(TF_TRAPNO, offsetof(struct trapframe, tf_trapno)); ASSYM(TF_ERR, offsetof(struct trapframe, tf_err)); ASSYM(TF_CS, offsetof(struct trapframe, tf_cs)); 
ASSYM(TF_EFLAGS, offsetof(struct trapframe, tf_eflags)); ASSYM(SIGF_HANDLER, offsetof(struct sigframe, sf_ahu.sf_handler)); ASSYM(SIGF_SC, offsetof(struct osigframe, sf_siginfo.si_sc)); ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc)); ASSYM(SC_PS, offsetof(struct osigcontext, sc_ps)); ASSYM(SC_FS, offsetof(struct osigcontext, sc_fs)); ASSYM(SC_GS, offsetof(struct osigcontext, sc_gs)); ASSYM(SC_TRAPNO, offsetof(struct osigcontext, sc_trapno)); ASSYM(UC_EFLAGS, offsetof(ucontext_t, uc_mcontext.mc_eflags)); ASSYM(UC_GS, offsetof(ucontext_t, uc_mcontext.mc_gs)); ASSYM(ENOENT, ENOENT); ASSYM(EFAULT, EFAULT); ASSYM(ENAMETOOLONG, ENAMETOOLONG); ASSYM(MAXPATHLEN, MAXPATHLEN); ASSYM(BOOTINFO_SIZE, sizeof(struct bootinfo)); ASSYM(BI_VERSION, offsetof(struct bootinfo, bi_version)); ASSYM(BI_KERNELNAME, offsetof(struct bootinfo, bi_kernelname)); ASSYM(BI_NFS_DISKLESS, offsetof(struct bootinfo, bi_nfs_diskless)); ASSYM(BI_ENDCOMMON, offsetof(struct bootinfo, bi_endcommon)); ASSYM(NFSDISKLESS_SIZE, sizeof(struct nfs_diskless)); ASSYM(BI_SIZE, offsetof(struct bootinfo, bi_size)); ASSYM(BI_SYMTAB, offsetof(struct bootinfo, bi_symtab)); ASSYM(BI_ESYMTAB, offsetof(struct bootinfo, bi_esymtab)); ASSYM(BI_KERNEND, offsetof(struct bootinfo, bi_kernend)); ASSYM(GD_SIZEOF, sizeof(struct globaldata)); ASSYM(GD_CURPROC, offsetof(struct globaldata, gd_curproc)); ASSYM(GD_NPXPROC, offsetof(struct globaldata, gd_npxproc)); ASSYM(GD_IDLEPROC, offsetof(struct globaldata, gd_idleproc)); ASSYM(GD_CURPCB, offsetof(struct globaldata, gd_curpcb)); ASSYM(GD_COMMON_TSS, offsetof(struct globaldata, gd_common_tss)); ASSYM(GD_SWITCHTIME, offsetof(struct globaldata, gd_switchtime)); ASSYM(GD_SWITCHTICKS, offsetof(struct globaldata, gd_switchticks)); ASSYM(GD_COMMON_TSSD, offsetof(struct globaldata, gd_common_tssd)); ASSYM(GD_TSS_GDT, offsetof(struct globaldata, gd_tss_gdt)); ASSYM(GD_ASTPENDING, offsetof(struct globaldata, gd_astpending)); ASSYM(AST_PENDING, AST_PENDING); ASSYM(AST_RESCHED, AST_RESCHED); ASSYM(GD_INTR_NESTING_LEVEL, offsetof(struct globaldata, gd_intr_nesting_level)); #ifdef USER_LDT ASSYM(GD_CURRENTLDT, offsetof(struct globaldata, gd_currentldt)); #endif ASSYM(GD_WITNESS_SPIN_CHECK, offsetof(struct globaldata, gd_witness_spin_check)); /* XXX */ #ifdef KTR_PERCPU ASSYM(GD_KTR_IDX, offsetof(struct globaldata, gd_ktr_idx)); ASSYM(GD_KTR_BUF, offsetof(struct globaldata, gd_ktr_buf)); ASSYM(GD_KTR_BUF_DATA, offsetof(struct globaldata, gd_ktr_buf_data)); #endif #ifdef SMP ASSYM(GD_CPUID, offsetof(struct globaldata, gd_cpuid)); ASSYM(GD_CPU_LOCKID, offsetof(struct globaldata, gd_cpu_lockid)); ASSYM(GD_OTHER_CPUS, offsetof(struct globaldata, gd_other_cpus)); ASSYM(GD_SS_EFLAGS, offsetof(struct globaldata, gd_ss_eflags)); ASSYM(GD_INSIDE_INTR, offsetof(struct globaldata, gd_inside_intr)); ASSYM(GD_PRV_CMAP1, offsetof(struct globaldata, gd_prv_CMAP1)); ASSYM(GD_PRV_CMAP2, offsetof(struct globaldata, gd_prv_CMAP2)); ASSYM(GD_PRV_CMAP3, offsetof(struct globaldata, gd_prv_CMAP3)); ASSYM(GD_PRV_PMAP1, offsetof(struct globaldata, gd_prv_PMAP1)); ASSYM(GD_PRV_CADDR1, offsetof(struct globaldata, gd_prv_CADDR1)); ASSYM(GD_PRV_CADDR2, offsetof(struct globaldata, gd_prv_CADDR2)); ASSYM(GD_PRV_CADDR3, offsetof(struct globaldata, gd_prv_CADDR3)); ASSYM(GD_PRV_PADDR1, offsetof(struct globaldata, gd_prv_PADDR1)); ASSYM(PS_IDLESTACK, offsetof(struct privatespace, idlestack)); ASSYM(PS_IDLESTACK_TOP, sizeof(struct privatespace)); + +ASSYM(LA_VER, offsetof(struct LAPIC, version)); +ASSYM(LA_TPR, offsetof(struct LAPIC, tpr)); 
+ASSYM(LA_EOI, offsetof(struct LAPIC, eoi)); +ASSYM(LA_SVR, offsetof(struct LAPIC, svr)); +ASSYM(LA_ICR_LO, offsetof(struct LAPIC, icr_lo)); +ASSYM(LA_ICR_HI, offsetof(struct LAPIC, icr_hi)); #endif ASSYM(KCSEL, GSEL(GCODE_SEL, SEL_KPL)); ASSYM(KDSEL, GSEL(GDATA_SEL, SEL_KPL)); #ifdef SMP ASSYM(KPSEL, GSEL(GPRIV_SEL, SEL_KPL)); #endif ASSYM(BC32SEL, GSEL(GBIOSCODE32_SEL, SEL_KPL)); ASSYM(GPROC0_SEL, GPROC0_SEL); ASSYM(VM86_FRAMESIZE, sizeof(struct vm86frame)); ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock)); ASSYM(MTX_RECURSE, offsetof(struct mtx, mtx_recurse)); ASSYM(MTX_SAVEINTR, offsetof(struct mtx, mtx_saveintr)); #ifdef WITNESS ASSYM(MTX_DEBUG, offsetof(struct mtx, mtx_debug)); ASSYM(MTXD_WITNESS, offsetof(struct mtx_debug, mtxd_witness)); #endif ASSYM(MTX_UNOWNED, MTX_UNOWNED); ASSYM(MTX_SPIN, MTX_SPIN); Index: head/sys/amd64/amd64/mpboot.S =================================================================== --- head/sys/amd64/amd64/mpboot.S (revision 70005) +++ head/sys/amd64/amd64/mpboot.S (revision 70006) @@ -1,272 +1,272 @@ /* * Copyright (c) 1995, Jack F. Vogel * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Jack F. Vogel * 4. The name of the developer may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * mpboot.s: FreeBSD machine support for the Intel MP Spec * multiprocessor systems. * * $FreeBSD$ */ #include /* miscellaneous asm macros */ #include #include #include "assym.s" /* * this code MUST be enabled here and in mp_machdep.c * it follows the very early stages of AP boot by placing values in CMOS ram. * it NORMALLY will never be needed and thus the primitive method for enabling. 
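The six ASSYM(LA_*) lines above are the other half of the change: genassym.c states each LAPIC register offset once, in C, and the build turns those statements into assembler constants in assym.s, which the switch and AP-bootstrap code pull in. A simplified model of the mechanism (the real build extracts the values from a compiled object rather than running a program, and this struct stub ignores the hardware's register padding):

#include <stdio.h>
#include <stddef.h>

struct LAPIC {				/* layout stub, not the real map */
	unsigned int id, version, tpr, eoi;
};

int
main(void)
{
	printf("#define LA_VER %lu\n",
	    (unsigned long)offsetof(struct LAPIC, version));
	printf("#define LA_TPR %lu\n",
	    (unsigned long)offsetof(struct LAPIC, tpr));
	printf("#define LA_EOI %lu\n",
	    (unsigned long)offsetof(struct LAPIC, eoi));
	return (0);
}

With assym.s generated this way, a change to struct LAPIC in <machine/apic.h> propagates to every assembly user on the next build instead of requiring the absolute symbols to be re-derived by hand.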
* #define CHECK_POINTS */ #if defined(CHECK_POINTS) && !defined(PC98) #define CMOS_REG (0x70) #define CMOS_DATA (0x71) #define CHECKPOINT(A,D) \ movb $(A),%al ; \ outb %al,$CMOS_REG ; \ movb $(D),%al ; \ outb %al,$CMOS_DATA #else #define CHECKPOINT(A,D) #endif /* CHECK_POINTS */ /* * the APs enter here from their trampoline code (bootMP, below) */ .p2align 4 NON_GPROF_ENTRY(MPentry) CHECKPOINT(0x36, 3) /* Now enable paging mode */ movl _IdlePTD-KERNBASE, %eax movl %eax,%cr3 movl %cr0,%eax orl $CR0_PE|CR0_PG,%eax /* enable paging */ movl %eax,%cr0 /* let the games begin! */ movl _bootSTK,%esp /* boot stack end loc. */ pushl $mp_begin /* jump to high mem */ ret /* * Wait for the booting CPU to signal startup */ mp_begin: /* now running relocated at KERNBASE */ CHECKPOINT(0x37, 4) call _init_secondary /* load i386 tables */ CHECKPOINT(0x38, 5) /* * If the [BSP] CPU has support for VME, turn it on. */ testl $CPUID_VME, _cpu_feature /* XXX WRONG! BSP! */ jz 1f movl %cr4, %eax orl $CR4_VME, %eax movl %eax, %cr4 1: /* disable the APIC, just to be SURE */ - movl lapic_svr, %eax /* get spurious vector reg. */ + movl _lapic+LA_SVR, %eax /* get spurious vector reg. */ andl $~APIC_SVR_SWEN, %eax /* clear software enable bit */ - movl %eax, lapic_svr + movl %eax, _lapic+LA_SVR /* signal our startup to the BSP */ - movl lapic_ver, %eax /* our version reg contents */ + movl _lapic+LA_VER, %eax /* our version reg contents */ movl %eax, _cpu_apic_versions /* into [ 0 ] */ incl _mp_ncpus /* signal BSP */ CHECKPOINT(0x39, 6) /* Now, let's prepare for some REAL WORK :-) This doesn't return. */ call _ap_init /* * This is the embedded trampoline or bootstrap that is * copied into 'real-mode' low memory, it is where the * secondary processor "wakes up". When it is executed * the processor will eventually jump into the routine * MPentry, which resides in normal kernel text above * 1Meg. -jackv */ .data ALIGN_DATA /* just to be sure */ BOOTMP1: NON_GPROF_ENTRY(bootMP) .code16 cli CHECKPOINT(0x34, 1) /* First guarantee a 'clean slate' */ xorl %eax, %eax movl %eax, %ebx movl %eax, %ecx movl %eax, %edx movl %eax, %esi movl %eax, %edi /* set up data segments */ mov %cs, %ax mov %ax, %ds mov %ax, %es mov %ax, %fs mov %ax, %gs mov %ax, %ss mov $(boot_stk-_bootMP), %esp /* Now load the global descriptor table */ lgdt MP_GDTptr-_bootMP /* Enable protected mode */ movl %cr0, %eax orl $CR0_PE, %eax movl %eax, %cr0 /* * make intrasegment jump to flush the processor pipeline and * reload CS register */ pushl $0x18 pushl $(protmode-_bootMP) lretl .code32 protmode: CHECKPOINT(0x35, 2) /* * we are NOW running for the first time with %eip * having the full physical address, BUT we still * are using a segment descriptor with the origin * not matching the booting kernel. * * SO NOW... for the BIG Jump into kernel's segment * and physical text above 1 Meg. 
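mp_begin above exercises two more of the new offsets: each AP clears the software-enable bit in its spurious-vector register to park the local APIC, then reports its version register to the BSP and bumps _mp_ncpus. Approximately, in C (the pointers stand in for the mapped LAPIC fields; APIC_SVR_SWEN is the architectural software-enable bit, bit 8 of the SVR):

#include <sys/types.h>

#define APIC_SVR_SWEN	0x00000100	/* SVR software enable, bit 8 */

static void
ap_signal_startup(volatile u_int32_t *svr, volatile u_int32_t *version,
    u_int32_t *cpu_apic_versions, volatile int *mp_ncpus)
{
	*svr &= ~APIC_SVR_SWEN;		/* disable the APIC, just to be SURE */
	cpu_apic_versions[0] = *version;/* our version reg, into [ 0 ] */
	(*mp_ncpus)++;			/* signal BSP */
}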
*/ mov $0x10, %ebx movw %bx, %ds movw %bx, %es movw %bx, %fs movw %bx, %gs movw %bx, %ss .globl _bigJump _bigJump: /* this will be modified by mpInstallTramp() */ ljmp $0x08, $0 /* far jmp to MPentry() */ dead: hlt /* We should never get here */ jmp dead /* * MP boot strap Global Descriptor Table */ .p2align 4 .globl _MP_GDT .globl _bootCodeSeg .globl _bootDataSeg _MP_GDT: nulldesc: /* offset = 0x0 */ .word 0x0 .word 0x0 .byte 0x0 .byte 0x0 .byte 0x0 .byte 0x0 kernelcode: /* offset = 0x08 */ .word 0xffff /* segment limit 0..15 */ .word 0x0000 /* segment base 0..15 */ .byte 0x0 /* segment base 16..23; set for 0K */ .byte 0x9f /* flags; Type */ .byte 0xcf /* flags; Limit */ .byte 0x0 /* segment base 24..32 */ kerneldata: /* offset = 0x10 */ .word 0xffff /* segment limit 0..15 */ .word 0x0000 /* segment base 0..15 */ .byte 0x0 /* segment base 16..23; set for 0k */ .byte 0x93 /* flags; Type */ .byte 0xcf /* flags; Limit */ .byte 0x0 /* segment base 24..32 */ bootcode: /* offset = 0x18 */ .word 0xffff /* segment limit 0..15 */ _bootCodeSeg: /* this will be modified by mpInstallTramp() */ .word 0x0000 /* segment base 0..15 */ .byte 0x00 /* segment base 16...23; set for 0x000xx000 */ .byte 0x9e /* flags; Type */ .byte 0xcf /* flags; Limit */ .byte 0x0 /*segment base 24..32 */ bootdata: /* offset = 0x20 */ .word 0xffff _bootDataSeg: /* this will be modified by mpInstallTramp() */ .word 0x0000 /* segment base 0..15 */ .byte 0x00 /* segment base 16...23; set for 0x000xx000 */ .byte 0x92 .byte 0xcf .byte 0x0 /* * GDT pointer for the lgdt call */ .globl _mp_gdtbase MP_GDTptr: _mp_gdtlimit: .word 0x0028 _mp_gdtbase: /* this will be modified by mpInstallTramp() */ .long 0 .space 0x100 /* space for boot_stk - 1st temporary stack */ boot_stk: BOOTMP2: .globl _bootMP_size _bootMP_size: .long BOOTMP2 - BOOTMP1 Index: head/sys/amd64/amd64/swtch.s =================================================================== --- head/sys/amd64/amd64/swtch.s (revision 70005) +++ head/sys/amd64/amd64/swtch.s (revision 70006) @@ -1,402 +1,402 @@ /*- * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "npx.h" #include "opt_user_ldt.h" #include #include #include #ifdef SMP #include #include #include /** GRAB_LOPRIO */ #include #endif /* SMP */ #include "assym.s" /*****************************************************************************/ /* Scheduling */ /*****************************************************************************/ .data .globl _panic #if defined(SWTCH_OPTIM_STATS) .globl _swtch_optim_stats, _tlb_flush_count _swtch_optim_stats: .long 0 /* number of _swtch_optims */ _tlb_flush_count: .long 0 #endif .text /* * cpu_throw() */ ENTRY(cpu_throw) jmp sw1 /* * cpu_switch() */ ENTRY(cpu_switch) /* switch to new process. first, save context as needed */ movl PCPU(CURPROC),%ecx /* if no process to save, don't bother */ testl %ecx,%ecx jz sw1 #ifdef SMP movb P_ONCPU(%ecx), %al /* save "last" cpu */ movb %al, P_LASTCPU(%ecx) movb $0xff, P_ONCPU(%ecx) /* "leave" the cpu */ #endif /* SMP */ movl P_VMSPACE(%ecx), %edx #ifdef SMP movl PCPU(CPUID), %eax #else xorl %eax, %eax #endif /* SMP */ btrl %eax, VM_PMAP+PM_ACTIVE(%edx) movl P_ADDR(%ecx),%edx movl (%esp),%eax /* Hardware registers */ movl %eax,PCB_EIP(%edx) movl %ebx,PCB_EBX(%edx) movl %esp,PCB_ESP(%edx) movl %ebp,PCB_EBP(%edx) movl %esi,PCB_ESI(%edx) movl %edi,PCB_EDI(%edx) movl %gs,PCB_GS(%edx) /* test if debug registers should be saved */ movb PCB_FLAGS(%edx),%al andb $PCB_DBREGS,%al jz 1f /* no, skip over */ movl %dr7,%eax /* yes, do the save */ movl %eax,PCB_DR7(%edx) andl $0x0000ff00, %eax /* disable all watchpoints */ movl %eax,%dr7 movl %dr6,%eax movl %eax,PCB_DR6(%edx) movl %dr3,%eax movl %eax,PCB_DR3(%edx) movl %dr2,%eax movl %eax,PCB_DR2(%edx) movl %dr1,%eax movl %eax,PCB_DR1(%edx) movl %dr0,%eax movl %eax,PCB_DR0(%edx) 1: /* save sched_lock recursion count */ movl _sched_lock+MTX_RECURSE,%eax movl %eax,PCB_SCHEDNEST(%edx) #ifdef SMP /* XXX FIXME: we should be saving the local APIC TPR */ #endif /* SMP */ #if NNPX > 0 /* have we used fp, and need a save? */ cmpl %ecx,PCPU(NPXPROC) jne 1f addl $PCB_SAVEFPU,%edx /* h/w bugs make saving complicated */ pushl %edx call _npxsave /* do it in a big C function */ popl %eax 1: #endif /* NNPX > 0 */ /* save is done, now choose a new process */ sw1: #ifdef SMP /* Stop scheduling if smp_active goes zero and we are not BSP */ cmpl $0,_smp_active jne 1f cmpl $0,PCPU(CPUID) je 1f movl PCPU(IDLEPROC), %eax jmp sw1b 1: #endif /* * Choose a new process to schedule. chooseproc() returns idleproc * if it cannot find another process to run. */ sw1a: call _chooseproc /* trash ecx, edx, ret eax*/ #ifdef INVARIANTS testl %eax,%eax /* no process? 
*/ jz badsw3 /* no, panic */ #endif sw1b: movl %eax,%ecx xorl %eax,%eax andl $~AST_RESCHED,PCPU(ASTPENDING) #ifdef INVARIANTS cmpb $SRUN,P_STAT(%ecx) jne badsw2 #endif movl P_ADDR(%ecx),%edx #if defined(SWTCH_OPTIM_STATS) incl _swtch_optim_stats #endif /* switch address space */ movl %cr3,%ebx cmpl PCB_CR3(%edx),%ebx je 4f #if defined(SWTCH_OPTIM_STATS) decl _swtch_optim_stats incl _tlb_flush_count #endif movl PCB_CR3(%edx),%ebx movl %ebx,%cr3 4: #ifdef SMP movl PCPU(CPUID), %esi #else xorl %esi, %esi #endif cmpl $0, PCB_EXT(%edx) /* has pcb extension? */ je 1f btsl %esi, _private_tss /* mark use of private tss */ movl PCB_EXT(%edx), %edi /* new tss descriptor */ jmp 2f 1: /* update common_tss.tss_esp0 pointer */ movl %edx, %ebx /* pcb */ addl $(UPAGES * PAGE_SIZE - 16), %ebx movl %ebx, PCPU(COMMON_TSS) + TSS_ESP0 btrl %esi, _private_tss jae 3f #ifdef SMP movl $GD_COMMON_TSSD, %edi addl %fs:0, %edi #else movl $PCPU(COMMON_TSSD), %edi #endif 2: /* move correct tss descriptor into GDT slot, then reload tr */ movl PCPU(TSS_GDT), %ebx /* entry in GDT */ movl 0(%edi), %eax movl %eax, 0(%ebx) movl 4(%edi), %eax movl %eax, 4(%ebx) movl $GPROC0_SEL*8, %esi /* GSEL(entry, SEL_KPL) */ ltr %si 3: movl P_VMSPACE(%ecx), %ebx #ifdef SMP movl PCPU(CPUID), %eax #else xorl %eax, %eax #endif btsl %eax, VM_PMAP+PM_ACTIVE(%ebx) /* restore context */ movl PCB_EBX(%edx),%ebx movl PCB_ESP(%edx),%esp movl PCB_EBP(%edx),%ebp movl PCB_ESI(%edx),%esi movl PCB_EDI(%edx),%edi movl PCB_EIP(%edx),%eax movl %eax,(%esp) #ifdef SMP #ifdef GRAB_LOPRIO /* hold LOPRIO for INTs */ #ifdef CHEAP_TPR - movl $0, lapic_tpr + movl $0, _lapic+LA_TPR #else - andl $~APIC_TPR_PRIO, lapic_tpr + andl $~APIC_TPR_PRIO, _lapic+LA_TPR #endif /** CHEAP_TPR */ #endif /** GRAB_LOPRIO */ movl PCPU(CPUID),%eax movb %al, P_ONCPU(%ecx) #endif /* SMP */ movl %edx, PCPU(CURPCB) movl %ecx, PCPU(CURPROC) /* into next process */ #ifdef SMP /* XXX FIXME: we should be restoring the local APIC TPR */ #endif /* SMP */ #ifdef USER_LDT cmpl $0, PCB_USERLDT(%edx) jnz 1f movl __default_ldt,%eax cmpl PCPU(CURRENTLDT),%eax je 2f lldt __default_ldt movl %eax,PCPU(CURRENTLDT) jmp 2f 1: pushl %edx call _set_user_ldt popl %edx 2: #endif /* This must be done after loading the user LDT. */ .globl cpu_switch_load_gs cpu_switch_load_gs: movl PCB_GS(%edx),%gs /* test if debug regisers should be restored */ movb PCB_FLAGS(%edx),%al andb $PCB_DBREGS,%al jz 1f /* no, skip over */ movl PCB_DR6(%edx),%eax /* yes, do the restore */ movl %eax,%dr6 movl PCB_DR3(%edx),%eax movl %eax,%dr3 movl PCB_DR2(%edx),%eax movl %eax,%dr2 movl PCB_DR1(%edx),%eax movl %eax,%dr1 movl PCB_DR0(%edx),%eax movl %eax,%dr0 movl PCB_DR7(%edx),%eax movl %eax,%dr7 1: /* * restore sched_lock recursion count and transfer ownership to * new process */ movl PCB_SCHEDNEST(%edx),%eax movl %eax,_sched_lock+MTX_RECURSE movl PCPU(CURPROC),%eax movl %eax,_sched_lock+MTX_LOCK ret CROSSJUMPTARGET(sw1a) #ifdef INVARIANTS badsw2: pushl $sw0_2 call _panic sw0_2: .asciz "cpu_switch: not SRUN" badsw3: pushl $sw0_3 call _panic sw0_3: .asciz "cpu_switch: chooseproc returned NULL" #endif /* * savectx(pcb) * Update pcb, saving current processor state. 
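One subtlety in cpu_switch above: sched_lock is deliberately held across the switch, so its recursion count is stashed in the outgoing PCB (PCB_SCHEDNEST) on the save side, and after the register restore the count is reinstated and mtx_lock is rewritten to the incoming curproc, handing the lock over without a release/acquire pair. As a sketch, with the structures reduced to the fields the assembly touches (the _sketch names are illustrative):

struct mtx_sketch {
	volatile void	*mtx_lock;	/* owner, MTX_LOCK */
	unsigned int	 mtx_recurse;	/* MTX_RECURSE */
};

struct pcb_sketch {
	unsigned int	 pcb_schednest;	/* PCB_SCHEDNEST */
};

static void
sched_lock_handoff(struct mtx_sketch *sched_lock,
    struct pcb_sketch *oldpcb, struct pcb_sketch *newpcb, void *newproc)
{
	oldpcb->pcb_schednest = sched_lock->mtx_recurse;  /* save side */
	/* ... stack and register state switched here ... */
	sched_lock->mtx_recurse = newpcb->pcb_schednest;  /* restore side */
	sched_lock->mtx_lock = newproc;			  /* transfer ownership */
}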
*/ ENTRY(savectx) /* fetch PCB */ movl 4(%esp),%ecx /* caller's return address - child won't execute this routine */ movl (%esp),%eax movl %eax,PCB_EIP(%ecx) movl %cr3,%eax movl %eax,PCB_CR3(%ecx) movl %ebx,PCB_EBX(%ecx) movl %esp,PCB_ESP(%ecx) movl %ebp,PCB_EBP(%ecx) movl %esi,PCB_ESI(%ecx) movl %edi,PCB_EDI(%ecx) movl %gs,PCB_GS(%ecx) #if NNPX > 0 /* * If npxproc == NULL, then the npx h/w state is irrelevant and the * state had better already be in the pcb. This is true for forks * but not for dumps (the old book-keeping with FP flags in the pcb * always lost for dumps because the dump pcb has 0 flags). * * If npxproc != NULL, then we have to save the npx h/w state to * npxproc's pcb and copy it to the requested pcb, or save to the * requested pcb and reload. Copying is easier because we would * have to handle h/w bugs for reloading. We used to lose the * parent's npx state for forks by forgetting to reload. */ movl PCPU(NPXPROC),%eax testl %eax,%eax je 1f pushl %ecx movl P_ADDR(%eax),%eax leal PCB_SAVEFPU(%eax),%eax pushl %eax pushl %eax call _npxsave addl $4,%esp popl %eax popl %ecx pushl $PCB_SAVEFPU_SIZE leal PCB_SAVEFPU(%ecx),%ecx pushl %ecx pushl %eax call _bcopy addl $12,%esp #endif /* NNPX > 0 */ 1: ret Index: head/sys/i386/i386/apic_vector.s =================================================================== --- head/sys/i386/i386/apic_vector.s (revision 70005) +++ head/sys/i386/i386/apic_vector.s (revision 70006) @@ -1,692 +1,692 @@ /* * from: vector.s, 386BSD 0.1 unknown origin * $FreeBSD$ */ #include #include #include "i386/isa/intr_machdep.h" /* convert an absolute IRQ# into a bitmask */ #define IRQ_BIT(irq_num) (1 << (irq_num)) /* make an index into the IO APIC from the IRQ# */ #define REDTBL_IDX(irq_num) (0x10 + ((irq_num) * 2)) /* * */ #define PUSH_FRAME \ pushl $0 ; /* dummy error code */ \ pushl $0 ; /* dummy trap type */ \ pushal ; \ pushl %ds ; /* save data and extra segments ... */ \ pushl %es ; \ pushl %fs #define POP_FRAME \ popl %fs ; \ popl %es ; \ popl %ds ; \ popal ; \ addl $4+4,%esp /* * Macros for interrupt entry, call to handler, and exit. */ #define FAST_INTR(irq_num, vec_name) \ .text ; \ SUPERALIGN_TEXT ; \ IDTVEC(vec_name) ; \ PUSH_FRAME ; \ movl $KDSEL,%eax ; \ mov %ax,%ds ; \ mov %ax,%es ; \ movl $KPSEL,%eax ; \ mov %ax,%fs ; \ FAKE_MCOUNT(13*4(%esp)) ; \ incb PCPU(INTR_NESTING_LEVEL) ; \ pushl _intr_unit + (irq_num) * 4 ; \ call *_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \ addl $4, %esp ; \ - movl $0, lapic_eoi ; \ + movl $0, _lapic+LA_EOI ; \ lock ; \ incl _cnt+V_INTR ; /* book-keeping can wait */ \ movl _intr_countp + (irq_num) * 4, %eax ; \ lock ; \ incl (%eax) ; \ MEXITCOUNT ; \ jmp _doreti #define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8 #define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12 #define MASK_IRQ(irq_num) \ IMASK_LOCK ; /* into critical reg */ \ testl $IRQ_BIT(irq_num), _apic_imen ; \ jne 7f ; /* masked, don't mask */ \ orl $IRQ_BIT(irq_num), _apic_imen ; /* set the mask bit */ \ movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \ movl REDIRIDX(irq_num), %eax ; /* get the index */ \ movl %eax, (%ecx) ; /* write the index */ \ movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \ orl $IOART_INTMASK, %eax ; /* set the mask */ \ movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \ 7: ; /* already masked */ \ IMASK_UNLOCK /* * Test to see whether we are handling an edge or level triggered INT. 
* Level-triggered INTs must still be masked as we don't clear the source, * and the EOI cycle would cause redundant INTs to occur. */ #define MASK_LEVEL_IRQ(irq_num) \ testl $IRQ_BIT(irq_num), _apic_pin_trigger ; \ jz 9f ; /* edge, don't mask */ \ MASK_IRQ(irq_num) ; \ 9: #ifdef APIC_INTR_REORDER #define EOI_IRQ(irq_num) \ movl _apic_isrbit_location + 8 * (irq_num), %eax ; \ movl (%eax), %eax ; \ testl _apic_isrbit_location + 4 + 8 * (irq_num), %eax ; \ jz 9f ; /* not active */ \ - movl $0, lapic_eoi ; \ + movl $0, _lapic+LA_EOI ; \ APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \ 9: #else #define EOI_IRQ(irq_num) \ - testl $IRQ_BIT(irq_num), lapic_isr1; \ + testl $IRQ_BIT(irq_num), _lapic+LA_ISR1; \ jz 9f ; /* not active */ \ - movl $0, lapic_eoi; \ + movl $0, _lapic+LA_EOI; \ APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ; \ 9: #endif /* * Test to see if the source is currently masked, clear if so. */ #define UNMASK_IRQ(irq_num) \ IMASK_LOCK ; /* into critical reg */ \ testl $IRQ_BIT(irq_num), _apic_imen ; \ je 7f ; /* bit clear, not masked */ \ andl $~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */ \ movl IOAPICADDR(irq_num), %ecx ; /* ioapic addr */ \ movl REDIRIDX(irq_num), %eax ; /* get the index */ \ movl %eax, (%ecx) ; /* write the index */ \ movl IOAPIC_WINDOW(%ecx), %eax ; /* current value */ \ andl $~IOART_INTMASK, %eax ; /* clear the mask */ \ movl %eax, IOAPIC_WINDOW(%ecx) ; /* new value */ \ 7: ; /* already unmasked */ \ IMASK_UNLOCK #ifdef APIC_INTR_DIAGNOSTIC #ifdef APIC_INTR_DIAGNOSTIC_IRQ log_intr_event: pushf cli pushl $CNAME(apic_itrace_debuglock) call CNAME(s_lock_np) addl $4, %esp movl CNAME(apic_itrace_debugbuffer_idx), %ecx andl $32767, %ecx movl PCPU(CPUID), %eax shll $8, %eax orl 8(%esp), %eax movw %ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2) incl %ecx andl $32767, %ecx movl %ecx, CNAME(apic_itrace_debugbuffer_idx) pushl $CNAME(apic_itrace_debuglock) call CNAME(s_unlock_np) addl $4, %esp popf ret #define APIC_ITRACE(name, irq_num, id) \ lock ; /* MP-safe */ \ incl CNAME(name) + (irq_num) * 4 ; \ pushl %eax ; \ pushl %ecx ; \ pushl %edx ; \ movl $(irq_num), %eax ; \ cmpl $APIC_INTR_DIAGNOSTIC_IRQ, %eax ; \ jne 7f ; \ pushl $id ; \ call log_intr_event ; \ addl $4, %esp ; \ 7: ; \ popl %edx ; \ popl %ecx ; \ popl %eax #else #define APIC_ITRACE(name, irq_num, id) \ lock ; /* MP-safe */ \ incl CNAME(name) + (irq_num) * 4 #endif #define APIC_ITRACE_ENTER 1 #define APIC_ITRACE_EOI 2 #define APIC_ITRACE_TRYISRLOCK 3 #define APIC_ITRACE_GOTISRLOCK 4 #define APIC_ITRACE_ENTER2 5 #define APIC_ITRACE_LEAVE 6 #define APIC_ITRACE_UNMASK 7 #define APIC_ITRACE_ACTIVE 8 #define APIC_ITRACE_MASKED 9 #define APIC_ITRACE_NOISRLOCK 10 #define APIC_ITRACE_MASKED2 11 #define APIC_ITRACE_SPLZ 12 #define APIC_ITRACE_DORETI 13 #else #define APIC_ITRACE(name, irq_num, id) #endif /* * Slow, threaded interrupts. * * XXX Most of the parameters here are obsolete. Fix this when we're * done. * XXX we really shouldn't return via doreti if we just schedule the * interrupt handler and don't run anything. We could just do an * iret. FIXME. */ #define INTR(irq_num, vec_name, maybe_extra_ipending) \ .text ; \ SUPERALIGN_TEXT ; \ /* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. 
*/ \ IDTVEC(vec_name) ; \ PUSH_FRAME ; \ movl $KDSEL, %eax ; /* reload with kernel's data segment */ \ mov %ax, %ds ; \ mov %ax, %es ; \ movl $KPSEL, %eax ; \ mov %ax, %fs ; \ ; \ maybe_extra_ipending ; \ ; \ APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ; \ ; \ MASK_LEVEL_IRQ(irq_num) ; \ EOI_IRQ(irq_num) ; \ 0: ; \ incb PCPU(INTR_NESTING_LEVEL) ; \ ; \ /* entry point used by doreti_unpend for HWIs. */ \ __CONCAT(Xresume,irq_num): ; \ FAKE_MCOUNT(13*4(%esp)) ; /* XXX avoid dbl cnt */ \ pushl $irq_num; /* pass the IRQ */ \ APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ; \ sti ; \ call _sched_ithd ; \ addl $4, %esp ; /* discard the parameter */ \ APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ; \ ; \ MEXITCOUNT ; \ jmp _doreti /* * Handle "spurious INTerrupts". * Notes: * This is different than the "spurious INTerrupt" generated by an * 8259 PIC for missing INTs. See the APIC documentation for details. * This routine should NOT do an 'EOI' cycle. */ .text SUPERALIGN_TEXT .globl _Xspuriousint _Xspuriousint: /* No EOI cycle used here */ iret /* * Handle TLB shootdowns. */ .text SUPERALIGN_TEXT .globl _Xinvltlb _Xinvltlb: pushl %eax #ifdef COUNT_XINVLTLB_HITS pushl %fs movl $KPSEL, %eax mov %ax, %fs movl PCPU(CPUID), %eax popl %fs ss incl _xhits(,%eax,4) #endif /* COUNT_XINVLTLB_HITS */ movl %cr3, %eax /* invalidate the TLB */ movl %eax, %cr3 ss /* stack segment, avoid %ds load */ - movl $0, lapic_eoi /* End Of Interrupt to APIC */ + movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */ popl %eax iret #ifdef BETTER_CLOCK /* * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU, * * - Stores current cpu state in checkstate_cpustate[cpuid] * 0 == user, 1 == sys, 2 == intr * - Stores current process in checkstate_curproc[cpuid] * * - Signals its receipt by setting bit cpuid in checkstate_probed_cpus. 
* * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags */ .text SUPERALIGN_TEXT .globl _Xcpucheckstate .globl _checkstate_cpustate .globl _checkstate_curproc .globl _checkstate_pc _Xcpucheckstate: pushl %eax pushl %ebx pushl %ds /* save current data segment */ pushl %fs movl $KDSEL, %eax mov %ax, %ds /* use KERNEL data segment */ movl $KPSEL, %eax mov %ax, %fs - movl $0, lapic_eoi /* End Of Interrupt to APIC */ + movl $0, _lapic+LA_EOI /* End Of Interrupt to APIC */ movl $0, %ebx movl 20(%esp), %eax andl $3, %eax cmpl $3, %eax je 1f testl $PSL_VM, 24(%esp) jne 1f incl %ebx /* system or interrupt */ 1: movl PCPU(CPUID), %eax movl %ebx, _checkstate_cpustate(,%eax,4) movl PCPU(CURPROC), %ebx movl %ebx, _checkstate_curproc(,%eax,4) movl 16(%esp), %ebx movl %ebx, _checkstate_pc(,%eax,4) lock /* checkstate_probed_cpus |= (1< #include #include #include #include #include #include #include #include #include #include /* XXX */ #ifdef KTR_PERCPU #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif #include #include #include #include ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace)); ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap)); ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active)); ASSYM(P_ADDR, offsetof(struct proc, p_addr)); ASSYM(P_STAT, offsetof(struct proc, p_stat)); ASSYM(P_WCHAN, offsetof(struct proc, p_wchan)); #ifdef SMP ASSYM(P_ONCPU, offsetof(struct proc, p_oncpu)); ASSYM(P_LASTCPU, offsetof(struct proc, p_lastcpu)); #endif ASSYM(SSLEEP, SSLEEP); ASSYM(SRUN, SRUN); ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap)); ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall)); ASSYM(V_INTR, offsetof(struct vmmeter, v_intr)); ASSYM(UPAGES, UPAGES); ASSYM(PAGE_SIZE, PAGE_SIZE); ASSYM(NPTEPG, NPTEPG); ASSYM(NPDEPG, NPDEPG); ASSYM(PDESIZE, PDESIZE); ASSYM(PTESIZE, PTESIZE); ASSYM(PAGE_SHIFT, PAGE_SHIFT); ASSYM(PAGE_MASK, PAGE_MASK); ASSYM(PDRSHIFT, PDRSHIFT); ASSYM(USRSTACK, USRSTACK); ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS); ASSYM(KERNBASE, KERNBASE); ASSYM(MCLBYTES, MCLBYTES); ASSYM(PCB_CR3, offsetof(struct pcb, pcb_cr3)); ASSYM(PCB_EDI, offsetof(struct pcb, pcb_edi)); ASSYM(PCB_ESI, offsetof(struct pcb, pcb_esi)); ASSYM(PCB_EBP, offsetof(struct pcb, pcb_ebp)); ASSYM(PCB_ESP, offsetof(struct pcb, pcb_esp)); ASSYM(PCB_EBX, offsetof(struct pcb, pcb_ebx)); ASSYM(PCB_EIP, offsetof(struct pcb, pcb_eip)); ASSYM(TSS_ESP0, offsetof(struct i386tss, tss_esp0)); #ifdef USER_LDT ASSYM(PCB_USERLDT, offsetof(struct pcb, pcb_ldt)); #endif ASSYM(PCB_GS, offsetof(struct pcb, pcb_gs)); ASSYM(PCB_DR0, offsetof(struct pcb, pcb_dr0)); ASSYM(PCB_DR1, offsetof(struct pcb, pcb_dr1)); ASSYM(PCB_DR2, offsetof(struct pcb, pcb_dr2)); ASSYM(PCB_DR3, offsetof(struct pcb, pcb_dr3)); ASSYM(PCB_DR6, offsetof(struct pcb, pcb_dr6)); ASSYM(PCB_DR7, offsetof(struct pcb, pcb_dr7)); ASSYM(PCB_DBREGS, PCB_DBREGS); ASSYM(PCB_EXT, offsetof(struct pcb, pcb_ext)); ASSYM(PCB_SCHEDNEST, offsetof(struct pcb, pcb_schednest)); ASSYM(PCB_SPARE, offsetof(struct pcb, __pcb_spare)); ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags)); ASSYM(PCB_SAVEFPU, offsetof(struct pcb, pcb_savefpu)); ASSYM(PCB_SAVEFPU_SIZE, sizeof(struct save87)); ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); #ifdef SMP ASSYM(PCB_SIZE, sizeof(struct pcb)); #endif ASSYM(TF_TRAPNO, offsetof(struct trapframe, tf_trapno)); ASSYM(TF_ERR, offsetof(struct trapframe, tf_err)); ASSYM(TF_CS, offsetof(struct trapframe, tf_cs)); ASSYM(TF_EFLAGS, 
offsetof(struct trapframe, tf_eflags)); ASSYM(SIGF_HANDLER, offsetof(struct sigframe, sf_ahu.sf_handler)); ASSYM(SIGF_SC, offsetof(struct osigframe, sf_siginfo.si_sc)); ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc)); ASSYM(SC_PS, offsetof(struct osigcontext, sc_ps)); ASSYM(SC_FS, offsetof(struct osigcontext, sc_fs)); ASSYM(SC_GS, offsetof(struct osigcontext, sc_gs)); ASSYM(SC_TRAPNO, offsetof(struct osigcontext, sc_trapno)); ASSYM(UC_EFLAGS, offsetof(ucontext_t, uc_mcontext.mc_eflags)); ASSYM(UC_GS, offsetof(ucontext_t, uc_mcontext.mc_gs)); ASSYM(ENOENT, ENOENT); ASSYM(EFAULT, EFAULT); ASSYM(ENAMETOOLONG, ENAMETOOLONG); ASSYM(MAXPATHLEN, MAXPATHLEN); ASSYM(BOOTINFO_SIZE, sizeof(struct bootinfo)); ASSYM(BI_VERSION, offsetof(struct bootinfo, bi_version)); ASSYM(BI_KERNELNAME, offsetof(struct bootinfo, bi_kernelname)); ASSYM(BI_NFS_DISKLESS, offsetof(struct bootinfo, bi_nfs_diskless)); ASSYM(BI_ENDCOMMON, offsetof(struct bootinfo, bi_endcommon)); ASSYM(NFSDISKLESS_SIZE, sizeof(struct nfs_diskless)); ASSYM(BI_SIZE, offsetof(struct bootinfo, bi_size)); ASSYM(BI_SYMTAB, offsetof(struct bootinfo, bi_symtab)); ASSYM(BI_ESYMTAB, offsetof(struct bootinfo, bi_esymtab)); ASSYM(BI_KERNEND, offsetof(struct bootinfo, bi_kernend)); ASSYM(GD_SIZEOF, sizeof(struct globaldata)); ASSYM(GD_CURPROC, offsetof(struct globaldata, gd_curproc)); ASSYM(GD_NPXPROC, offsetof(struct globaldata, gd_npxproc)); ASSYM(GD_IDLEPROC, offsetof(struct globaldata, gd_idleproc)); ASSYM(GD_CURPCB, offsetof(struct globaldata, gd_curpcb)); ASSYM(GD_COMMON_TSS, offsetof(struct globaldata, gd_common_tss)); ASSYM(GD_SWITCHTIME, offsetof(struct globaldata, gd_switchtime)); ASSYM(GD_SWITCHTICKS, offsetof(struct globaldata, gd_switchticks)); ASSYM(GD_COMMON_TSSD, offsetof(struct globaldata, gd_common_tssd)); ASSYM(GD_TSS_GDT, offsetof(struct globaldata, gd_tss_gdt)); ASSYM(GD_ASTPENDING, offsetof(struct globaldata, gd_astpending)); ASSYM(AST_PENDING, AST_PENDING); ASSYM(AST_RESCHED, AST_RESCHED); ASSYM(GD_INTR_NESTING_LEVEL, offsetof(struct globaldata, gd_intr_nesting_level)); #ifdef USER_LDT ASSYM(GD_CURRENTLDT, offsetof(struct globaldata, gd_currentldt)); #endif ASSYM(GD_WITNESS_SPIN_CHECK, offsetof(struct globaldata, gd_witness_spin_check)); /* XXX */ #ifdef KTR_PERCPU ASSYM(GD_KTR_IDX, offsetof(struct globaldata, gd_ktr_idx)); ASSYM(GD_KTR_BUF, offsetof(struct globaldata, gd_ktr_buf)); ASSYM(GD_KTR_BUF_DATA, offsetof(struct globaldata, gd_ktr_buf_data)); #endif #ifdef SMP ASSYM(GD_CPUID, offsetof(struct globaldata, gd_cpuid)); ASSYM(GD_CPU_LOCKID, offsetof(struct globaldata, gd_cpu_lockid)); ASSYM(GD_OTHER_CPUS, offsetof(struct globaldata, gd_other_cpus)); ASSYM(GD_SS_EFLAGS, offsetof(struct globaldata, gd_ss_eflags)); ASSYM(GD_INSIDE_INTR, offsetof(struct globaldata, gd_inside_intr)); ASSYM(GD_PRV_CMAP1, offsetof(struct globaldata, gd_prv_CMAP1)); ASSYM(GD_PRV_CMAP2, offsetof(struct globaldata, gd_prv_CMAP2)); ASSYM(GD_PRV_CMAP3, offsetof(struct globaldata, gd_prv_CMAP3)); ASSYM(GD_PRV_PMAP1, offsetof(struct globaldata, gd_prv_PMAP1)); ASSYM(GD_PRV_CADDR1, offsetof(struct globaldata, gd_prv_CADDR1)); ASSYM(GD_PRV_CADDR2, offsetof(struct globaldata, gd_prv_CADDR2)); ASSYM(GD_PRV_CADDR3, offsetof(struct globaldata, gd_prv_CADDR3)); ASSYM(GD_PRV_PADDR1, offsetof(struct globaldata, gd_prv_PADDR1)); ASSYM(PS_IDLESTACK, offsetof(struct privatespace, idlestack)); ASSYM(PS_IDLESTACK_TOP, sizeof(struct privatespace)); + +ASSYM(LA_VER, offsetof(struct LAPIC, version)); +ASSYM(LA_TPR, offsetof(struct LAPIC, tpr)); +ASSYM(LA_EOI, 
offsetof(struct LAPIC, eoi)); +ASSYM(LA_SVR, offsetof(struct LAPIC, svr)); +ASSYM(LA_ICR_LO, offsetof(struct LAPIC, icr_lo)); +ASSYM(LA_ICR_HI, offsetof(struct LAPIC, icr_hi)); #endif ASSYM(KCSEL, GSEL(GCODE_SEL, SEL_KPL)); ASSYM(KDSEL, GSEL(GDATA_SEL, SEL_KPL)); #ifdef SMP ASSYM(KPSEL, GSEL(GPRIV_SEL, SEL_KPL)); #endif ASSYM(BC32SEL, GSEL(GBIOSCODE32_SEL, SEL_KPL)); ASSYM(GPROC0_SEL, GPROC0_SEL); ASSYM(VM86_FRAMESIZE, sizeof(struct vm86frame)); ASSYM(MTX_LOCK, offsetof(struct mtx, mtx_lock)); ASSYM(MTX_RECURSE, offsetof(struct mtx, mtx_recurse)); ASSYM(MTX_SAVEINTR, offsetof(struct mtx, mtx_saveintr)); #ifdef WITNESS ASSYM(MTX_DEBUG, offsetof(struct mtx, mtx_debug)); ASSYM(MTXD_WITNESS, offsetof(struct mtx_debug, mtxd_witness)); #endif ASSYM(MTX_UNOWNED, MTX_UNOWNED); ASSYM(MTX_SPIN, MTX_SPIN); Index: head/sys/i386/i386/mpboot.s =================================================================== --- head/sys/i386/i386/mpboot.s (revision 70005) +++ head/sys/i386/i386/mpboot.s (revision 70006) @@ -1,272 +1,272 @@ /* * Copyright (c) 1995, Jack F. Vogel * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Jack F. Vogel * 4. The name of the developer may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * mpboot.s: FreeBSD machine support for the Intel MP Spec * multiprocessor systems. * * $FreeBSD$ */ #include /* miscellaneous asm macros */ #include #include #include "assym.s" /* * this code MUST be enabled here and in mp_machdep.c * it follows the very early stages of AP boot by placing values in CMOS ram. * it NORMALLY will never be needed and thus the primitive method for enabling. 
/*
 * the APs enter here from their trampoline code (bootMP, below)
 */
	.p2align 4
NON_GPROF_ENTRY(MPentry)
	CHECKPOINT(0x36, 3)
	/* Now enable paging mode */
	movl	_IdlePTD-KERNBASE, %eax
	movl	%eax,%cr3
	movl	%cr0,%eax
	orl	$CR0_PE|CR0_PG,%eax		/* enable paging */
	movl	%eax,%cr0			/* let the games begin! */
	movl	_bootSTK,%esp			/* boot stack end loc. */

	pushl	$mp_begin			/* jump to high mem */
	ret

/*
 * Wait for the booting CPU to signal startup
 */
mp_begin:	/* now running relocated at KERNBASE */
	CHECKPOINT(0x37, 4)
	call	_init_secondary			/* load i386 tables */
	CHECKPOINT(0x38, 5)

	/*
	 * If the [BSP] CPU has support for VME, turn it on.
	 */
	testl	$CPUID_VME, _cpu_feature	/* XXX WRONG! BSP! */
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_VME, %eax
	movl	%eax, %cr4
1:

	/* disable the APIC, just to be SURE */
-	movl	lapic_svr, %eax			/* get spurious vector reg. */
+	movl	_lapic+LA_SVR, %eax		/* get spurious vector reg. */
	andl	$~APIC_SVR_SWEN, %eax		/* clear software enable bit */
-	movl	%eax, lapic_svr
+	movl	%eax, _lapic+LA_SVR

	/* signal our startup to the BSP */
-	movl	lapic_ver, %eax			/* our version reg contents */
+	movl	_lapic+LA_VER, %eax		/* our version reg contents */
	movl	%eax, _cpu_apic_versions	/* into [ 0 ] */
	incl	_mp_ncpus			/* signal BSP */

	CHECKPOINT(0x39, 6)

	/* Now, let's prepare for some REAL WORK :-) This doesn't return. */
	call	_ap_init
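LA_ICR_LO and LA_ICR_HI serve the other half of this handshake: the BSP wakes each AP by writing the target's APIC ID and an INIT/STARTUP command into the local APIC's interrupt command register, which is what eventually drops the AP into the bootMP trampoline below. The real sequencing, with its delays and retries, lives in mp_machdep.c and is not part of this diff; this is only a schematic sketch, taking the two register words as pointers (e.g. computed from the lapic mapping plus LA_ICR_HI/LA_ICR_LO), with hex encodings as given in the Intel local APIC documentation:

	static void
	start_ap_sketch(volatile unsigned int *icr_hi, volatile unsigned int *icr_lo,
	    unsigned int apic_id, unsigned int boot_addr)
	{
		/* destination field: target APIC ID lives in ICR_HI bits 24..31 */
		*icr_hi = apic_id << 24;
		*icr_lo = 0x00004500;		/* level assert | INIT delivery mode */
		while (*icr_lo & 0x00001000)
			;			/* spin while delivery status is pending */

		/* STARTUP IPI: the vector field is the trampoline's page number */
		*icr_hi = apic_id << 24;
		*icr_lo = 0x00000600 | (boot_addr >> 12);
	}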
/*
 * This is the embedded trampoline or bootstrap that is
 * copied into 'real-mode' low memory; it is where the
 * secondary processor "wakes up". When it is executed
 * the processor will eventually jump into the routine
 * MPentry, which resides in normal kernel text above
 * 1Meg.		-jackv
 */

	.data
	ALIGN_DATA				/* just to be sure */

BOOTMP1:

NON_GPROF_ENTRY(bootMP)
	.code16
	cli
	CHECKPOINT(0x34, 1)
	/* First guarantee a 'clean slate' */
	xorl	%eax, %eax
	movl	%eax, %ebx
	movl	%eax, %ecx
	movl	%eax, %edx
	movl	%eax, %esi
	movl	%eax, %edi

	/* set up data segments */
	mov	%cs, %ax
	mov	%ax, %ds
	mov	%ax, %es
	mov	%ax, %fs
	mov	%ax, %gs
	mov	%ax, %ss
	mov	$(boot_stk-_bootMP), %esp

	/* Now load the global descriptor table */
	lgdt	MP_GDTptr-_bootMP

	/* Enable protected mode */
	movl	%cr0, %eax
	orl	$CR0_PE, %eax
	movl	%eax, %cr0

	/*
	 * make intrasegment jump to flush the processor pipeline and
	 * reload CS register
	 */
	pushl	$0x18
	pushl	$(protmode-_bootMP)
	lretl

	.code32
protmode:
	CHECKPOINT(0x35, 2)

	/*
	 * we are NOW running for the first time with %eip
	 * having the full physical address, BUT we still
	 * are using a segment descriptor with the origin
	 * not matching the booting kernel.
	 *
	 * SO NOW... for the BIG Jump into kernel's segment
	 * and physical text above 1 Meg.
	 */
	mov	$0x10, %ebx
	movw	%bx, %ds
	movw	%bx, %es
	movw	%bx, %fs
	movw	%bx, %gs
	movw	%bx, %ss

	.globl	_bigJump
_bigJump:
	/* this will be modified by mpInstallTramp() */
	ljmp	$0x08, $0			/* far jmp to MPentry() */

dead:	hlt					/* We should never get here */
	jmp	dead

/*
 * MP boot strap Global Descriptor Table
 */
	.p2align 4
	.globl	_MP_GDT
	.globl	_bootCodeSeg
	.globl	_bootDataSeg
_MP_GDT:

nulldesc:		/* offset = 0x0 */
	.word	0x0
	.word	0x0
	.byte	0x0
	.byte	0x0
	.byte	0x0
	.byte	0x0

kernelcode:		/* offset = 0x08 */
	.word	0xffff	/* segment limit 0..15 */
	.word	0x0000	/* segment base 0..15 */
	.byte	0x0	/* segment base 16..23; set for 0K */
	.byte	0x9f	/* flags; Type */
	.byte	0xcf	/* flags; Limit */
	.byte	0x0	/* segment base 24..31 */

kerneldata:		/* offset = 0x10 */
	.word	0xffff	/* segment limit 0..15 */
	.word	0x0000	/* segment base 0..15 */
	.byte	0x0	/* segment base 16..23; set for 0k */
	.byte	0x93	/* flags; Type */
	.byte	0xcf	/* flags; Limit */
	.byte	0x0	/* segment base 24..31 */

bootcode:		/* offset = 0x18 */
	.word	0xffff	/* segment limit 0..15 */
_bootCodeSeg:		/* this will be modified by mpInstallTramp() */
	.word	0x0000	/* segment base 0..15 */
	.byte	0x00	/* segment base 16..23; set for 0x000xx000 */
	.byte	0x9e	/* flags; Type */
	.byte	0xcf	/* flags; Limit */
	.byte	0x0	/* segment base 24..31 */

bootdata:		/* offset = 0x20 */
	.word	0xffff
_bootDataSeg:		/* this will be modified by mpInstallTramp() */
	.word	0x0000	/* segment base 0..15 */
	.byte	0x00	/* segment base 16..23; set for 0x000xx000 */
	.byte	0x92	/* flags; Type */
	.byte	0xcf	/* flags; Limit */
	.byte	0x0	/* segment base 24..31 */

/*
 * GDT pointer for the lgdt call
 */
	.globl	_mp_gdtbase

MP_GDTptr:
_mp_gdtlimit:
	.word	0x0028
_mp_gdtbase:		/* this will be modified by mpInstallTramp() */
	.long	0

	.space	0x100	/* space for boot_stk - 1st temporary stack */
boot_stk:

BOOTMP2:
	.globl	_bootMP_size
_bootMP_size:
	.long	BOOTMP2 - BOOTMP1
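Three entries above carry the note "this will be modified by mpInstallTramp()". That routine is outside this diff, but the fixup it implies follows directly from the descriptor format: a GDT entry scatters its 32-bit base across bytes 2, 3, 4 and 7, so once the trampoline has been copied into its real-mode page, the bootcode/bootdata bases (and _mp_gdtbase, which lgdt needs as a linear address) must be rewritten to point at the copy. A sketch of that fixup, with illustrative names:

	static void
	set_descriptor_base(unsigned char desc[8], unsigned int base)
	{
		desc[2] = base & 0xff;			/* segment base 0..7 */
		desc[3] = (base >> 8) & 0xff;		/* segment base 8..15 */
		desc[4] = (base >> 16) & 0xff;		/* segment base 16..23 */
		desc[7] = (base >> 24) & 0xff;		/* segment base 24..31 */
	}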
Index: head/sys/i386/i386/swtch.s
===================================================================
--- head/sys/i386/i386/swtch.s	(revision 70005)
+++ head/sys/i386/i386/swtch.s	(revision 70006)
@@ -1,402 +1,402 @@
/*-
 * Copyright (c) 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    This product includes software developed by the University of
 *    California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "npx.h"
#include "opt_user_ldt.h"

#include
#include
#include

#ifdef SMP
#include
#include
#include		/** GRAB_LOPRIO */
#include
#endif /* SMP */

#include "assym.s"

/*****************************************************************************/
/* Scheduling                                                                */
/*****************************************************************************/

	.data

	.globl	_panic

#if defined(SWTCH_OPTIM_STATS)
	.globl	_swtch_optim_stats, _tlb_flush_count
_swtch_optim_stats:	.long	0		/* number of _swtch_optims */
_tlb_flush_count:	.long	0
#endif

	.text

/*
 * cpu_throw()
 */
ENTRY(cpu_throw)
	jmp	sw1

/*
 * cpu_switch()
 */
ENTRY(cpu_switch)

	/* switch to new process. first, save context as needed */
	movl	PCPU(CURPROC),%ecx

	/* if no process to save, don't bother */
	testl	%ecx,%ecx
	jz	sw1

#ifdef SMP
	movb	P_ONCPU(%ecx), %al		/* save "last" cpu */
	movb	%al, P_LASTCPU(%ecx)
	movb	$0xff, P_ONCPU(%ecx)		/* "leave" the cpu */
#endif /* SMP */
	movl	P_VMSPACE(%ecx), %edx
#ifdef SMP
	movl	PCPU(CPUID), %eax
#else
	xorl	%eax, %eax
#endif /* SMP */
	btrl	%eax, VM_PMAP+PM_ACTIVE(%edx)

	movl	P_ADDR(%ecx),%edx

	movl	(%esp),%eax			/* Hardware registers */
	movl	%eax,PCB_EIP(%edx)
	movl	%ebx,PCB_EBX(%edx)
	movl	%esp,PCB_ESP(%edx)
	movl	%ebp,PCB_EBP(%edx)
	movl	%esi,PCB_ESI(%edx)
	movl	%edi,PCB_EDI(%edx)
	movl	%gs,PCB_GS(%edx)

	/* test if debug registers should be saved */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f				/* no, skip over */
	movl	%dr7,%eax			/* yes, do the save */
	movl	%eax,PCB_DR7(%edx)
	andl	$0x0000ff00, %eax		/* disable all watchpoints */
	movl	%eax,%dr7
	movl	%dr6,%eax
	movl	%eax,PCB_DR6(%edx)
	movl	%dr3,%eax
	movl	%eax,PCB_DR3(%edx)
	movl	%dr2,%eax
	movl	%eax,PCB_DR2(%edx)
	movl	%dr1,%eax
	movl	%eax,PCB_DR1(%edx)
	movl	%dr0,%eax
	movl	%eax,PCB_DR0(%edx)
1:

	/* save sched_lock recursion count */
	movl	_sched_lock+MTX_RECURSE,%eax
	movl	%eax,PCB_SCHEDNEST(%edx)

#ifdef SMP
	/* XXX FIXME: we should be saving the local APIC TPR */
#endif /* SMP */

#if NNPX > 0
	/* have we used fp, and need a save? */
	cmpl	%ecx,PCPU(NPXPROC)
	jne	1f
	addl	$PCB_SAVEFPU,%edx		/* h/w bugs make saving complicated */
	pushl	%edx
	call	_npxsave			/* do it in a big C function */
	popl	%eax
1:
#endif /* NNPX > 0 */

	/* save is done, now choose a new process */
sw1:

#ifdef SMP
	/* Stop scheduling if smp_active goes zero and we are not BSP */
	cmpl	$0,_smp_active
	jne	1f
	cmpl	$0,PCPU(CPUID)
	je	1f
	movl	PCPU(IDLEPROC), %eax
	jmp	sw1b
1:
#endif

	/*
	 * Choose a new process to schedule.  chooseproc() returns idleproc
	 * if it cannot find another process to run.
	 */
sw1a:
	call	_chooseproc			/* trash ecx, edx, ret eax*/

#ifdef INVARIANTS
	testl	%eax,%eax			/* no process? */
	jz	badsw3				/* no, panic */
#endif

sw1b:
	movl	%eax,%ecx

	xorl	%eax,%eax
	andl	$~AST_RESCHED,PCPU(ASTPENDING)

#ifdef INVARIANTS
	cmpb	$SRUN,P_STAT(%ecx)
	jne	badsw2
#endif

	movl	P_ADDR(%ecx),%edx

#if defined(SWTCH_OPTIM_STATS)
	incl	_swtch_optim_stats
#endif

	/* switch address space */
	movl	%cr3,%ebx
	cmpl	PCB_CR3(%edx),%ebx
	je	4f
#if defined(SWTCH_OPTIM_STATS)
	decl	_swtch_optim_stats
	incl	_tlb_flush_count
#endif
	movl	PCB_CR3(%edx),%ebx
	movl	%ebx,%cr3
4:

#ifdef SMP
	movl	PCPU(CPUID), %esi
#else
	xorl	%esi, %esi
#endif
	cmpl	$0, PCB_EXT(%edx)		/* has pcb extension? */
	je	1f
	btsl	%esi, _private_tss		/* mark use of private tss */
	movl	PCB_EXT(%edx), %edi		/* new tss descriptor */
	jmp	2f
1:

	/* update common_tss.tss_esp0 pointer */
	movl	%edx, %ebx			/* pcb */
	addl	$(UPAGES * PAGE_SIZE - 16), %ebx
	movl	%ebx, PCPU(COMMON_TSS) + TSS_ESP0

	btrl	%esi, _private_tss
	jae	3f
#ifdef SMP
	movl	$GD_COMMON_TSSD, %edi
	addl	%fs:0, %edi
#else
	movl	$PCPU(COMMON_TSSD), %edi
#endif
2:
	/* move correct tss descriptor into GDT slot, then reload tr */
	movl	PCPU(TSS_GDT), %ebx		/* entry in GDT */
	movl	0(%edi), %eax
	movl	%eax, 0(%ebx)
	movl	4(%edi), %eax
	movl	%eax, 4(%ebx)
	movl	$GPROC0_SEL*8, %esi		/* GSEL(entry, SEL_KPL) */
	ltr	%si
3:
	movl	P_VMSPACE(%ecx), %ebx
#ifdef SMP
	movl	PCPU(CPUID), %eax
#else
	xorl	%eax, %eax
#endif
	btsl	%eax, VM_PMAP+PM_ACTIVE(%ebx)

	/* restore context */
	movl	PCB_EBX(%edx),%ebx
	movl	PCB_ESP(%edx),%esp
	movl	PCB_EBP(%edx),%ebp
	movl	PCB_ESI(%edx),%esi
	movl	PCB_EDI(%edx),%edi
	movl	PCB_EIP(%edx),%eax
	movl	%eax,(%esp)

#ifdef SMP
#ifdef GRAB_LOPRIO				/* hold LOPRIO for INTs */
#ifdef CHEAP_TPR
-	movl	$0, lapic_tpr
+	movl	$0, _lapic+LA_TPR
#else
-	andl	$~APIC_TPR_PRIO, lapic_tpr
+	andl	$~APIC_TPR_PRIO, _lapic+LA_TPR
#endif /** CHEAP_TPR */
#endif /** GRAB_LOPRIO */
	movl	PCPU(CPUID),%eax
	movb	%al, P_ONCPU(%ecx)
#endif /* SMP */
	movl	%edx, PCPU(CURPCB)
	movl	%ecx, PCPU(CURPROC)		/* into next process */

#ifdef SMP
	/* XXX FIXME: we should be restoring the local APIC TPR */
#endif /* SMP */

#ifdef USER_LDT
	cmpl	$0, PCB_USERLDT(%edx)
	jnz	1f
	movl	__default_ldt,%eax
	cmpl	PCPU(CURRENTLDT),%eax
	je	2f
	lldt	__default_ldt
	movl	%eax,PCPU(CURRENTLDT)
	jmp	2f
1:	pushl	%edx
	call	_set_user_ldt
	popl	%edx
2:
#endif

	/* This must be done after loading the user LDT. */
	.globl	cpu_switch_load_gs
cpu_switch_load_gs:
	movl	PCB_GS(%edx),%gs

	/* test if debug registers should be restored */
	movb	PCB_FLAGS(%edx),%al
	andb	$PCB_DBREGS,%al
	jz	1f				/* no, skip over */
	movl	PCB_DR6(%edx),%eax		/* yes, do the restore */
	movl	%eax,%dr6
	movl	PCB_DR3(%edx),%eax
	movl	%eax,%dr3
	movl	PCB_DR2(%edx),%eax
	movl	%eax,%dr2
	movl	PCB_DR1(%edx),%eax
	movl	%eax,%dr1
	movl	PCB_DR0(%edx),%eax
	movl	%eax,%dr0
	movl	PCB_DR7(%edx),%eax
	movl	%eax,%dr7
1:

	/*
	 * restore sched_lock recursion count and transfer ownership to
	 * new process
	 */
	movl	PCB_SCHEDNEST(%edx),%eax
	movl	%eax,_sched_lock+MTX_RECURSE

	movl	PCPU(CURPROC),%eax
	movl	%eax,_sched_lock+MTX_LOCK

	ret

CROSSJUMPTARGET(sw1a)

#ifdef INVARIANTS
badsw2:
	pushl	$sw0_2
	call	_panic

sw0_2:	.asciz	"cpu_switch: not SRUN"

badsw3:
	pushl	$sw0_3
	call	_panic

sw0_3:	.asciz	"cpu_switch: chooseproc returned NULL"
#endif
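For orientation, here is the set of pcb fields cpu_switch() traffics in, restated as a C structure. The layout mirrors the PCB_* assym constants used above, but this is a trimmed sketch rather than the kernel's full struct pcb (which also carries FPU, LDT, TSS-extension and debug-register state):

	struct pcb_sketch {
		unsigned int	pcb_cr3;	/* page directory: selects the address space */
		unsigned int	pcb_ebx, pcb_esp, pcb_ebp, pcb_esi, pcb_edi;
		unsigned int	pcb_eip;	/* resume point, stuffed back into (%esp) */
		unsigned int	pcb_gs;		/* reloaded only after the user LDT */
		unsigned int	pcb_schednest;	/* parked sched_lock recursion count */
	};

The pcb_schednest round trip is the subtle part: the outgoing process parks its sched_lock recursion count in its pcb, and the incoming one restores its own count and writes itself into mtx_lock, so ownership of the scheduler lock is handed across the switch without the lock ever being released.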
/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	/* fetch PCB */
	movl	4(%esp),%ecx

	/* caller's return address - child won't execute this routine */
	movl	(%esp),%eax
	movl	%eax,PCB_EIP(%ecx)

	movl	%cr3,%eax
	movl	%eax,PCB_CR3(%ecx)

	movl	%ebx,PCB_EBX(%ecx)
	movl	%esp,PCB_ESP(%ecx)
	movl	%ebp,PCB_EBP(%ecx)
	movl	%esi,PCB_ESI(%ecx)
	movl	%edi,PCB_EDI(%ecx)
	movl	%gs,PCB_GS(%ecx)

#if NNPX > 0
	/*
	 * If npxproc == NULL, then the npx h/w state is irrelevant and the
	 * state had better already be in the pcb.  This is true for forks
	 * but not for dumps (the old book-keeping with FP flags in the pcb
	 * always lost for dumps because the dump pcb has 0 flags).
	 *
	 * If npxproc != NULL, then we have to save the npx h/w state to
	 * npxproc's pcb and copy it to the requested pcb, or save to the
	 * requested pcb and reload.  Copying is easier because we would
	 * have to handle h/w bugs for reloading.  We used to lose the
	 * parent's npx state for forks by forgetting to reload.
	 */
	movl	PCPU(NPXPROC),%eax
	testl	%eax,%eax
	je	1f

	pushl	%ecx
	movl	P_ADDR(%eax),%eax
	leal	PCB_SAVEFPU(%eax),%eax
	pushl	%eax
	pushl	%eax
	call	_npxsave
	addl	$4,%esp
	popl	%eax
	popl	%ecx

	pushl	$PCB_SAVEFPU_SIZE
	leal	PCB_SAVEFPU(%ecx),%ecx
	pushl	%ecx
	pushl	%eax
	call	_bcopy
	addl	$12,%esp
#endif /* NNPX > 0 */

1:
	ret
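The NNPX comment above compresses a subtle policy; restated in C it reads roughly as below. fpu_state, proc_sketch, npxproc and npxsave_hw are hypothetical stand-ins for the kernel's types, the real npxproc variable and the real _npxsave entry point:

	struct fpu_state { unsigned char fp_regs[108]; };	/* 387-style save area */
	struct proc_sketch { struct fpu_state p_savefpu; };

	extern struct proc_sketch *npxproc;		/* current owner of the FPU, if any */
	extern void npxsave_hw(struct fpu_state *);	/* stands in for _npxsave */
	extern void bcopy(const void *, void *, unsigned int);

	static void
	savectx_npx_sketch(struct fpu_state *dst)
	{
		if (npxproc == NULL)
			return;		/* FP state already lives in the pcb */
		npxsave_hw(&npxproc->p_savefpu);		/* dump the live h/w state */
		bcopy(&npxproc->p_savefpu, dst, sizeof(*dst));	/* then copy it out */
	}

Saving and copying is preferred over saving and reloading for exactly the reason the comment gives: a reload would mean handling the FPU hardware bugs a second time.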
Index: head/sys/i386/isa/apic_vector.s
===================================================================
--- head/sys/i386/isa/apic_vector.s	(revision 70005)
+++ head/sys/i386/isa/apic_vector.s	(revision 70006)
@@ -1,692 +1,692 @@
/*
 * from: vector.s, 386BSD 0.1 unknown origin
 * $FreeBSD$
 */

#include
#include

#include "i386/isa/intr_machdep.h"

/* convert an absolute IRQ# into a bitmask */
#define IRQ_BIT(irq_num)	(1 << (irq_num))

/* make an index into the IO APIC from the IRQ# */
#define REDTBL_IDX(irq_num)	(0x10 + ((irq_num) * 2))

/*
 *
 */
#define PUSH_FRAME							\
	pushl	$0 ;		/* dummy error code */			\
	pushl	$0 ;		/* dummy trap type */			\
	pushal ;							\
	pushl	%ds ;		/* save data and extra segments ... */	\
	pushl	%es ;							\
	pushl	%fs

#define POP_FRAME							\
	popl	%fs ;							\
	popl	%es ;							\
	popl	%ds ;							\
	popal ;								\
	addl	$4+4,%esp

/*
 * Macros for interrupt entry, call to handler, and exit.
 */

#define	FAST_INTR(irq_num, vec_name)					\
	.text ;								\
	SUPERALIGN_TEXT ;						\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL,%eax ;						\
	mov	%ax,%ds ;						\
	mov	%ax,%es ;						\
	movl	$KPSEL,%eax ;						\
	mov	%ax,%fs ;						\
	FAKE_MCOUNT(13*4(%esp)) ;					\
	incb	PCPU(INTR_NESTING_LEVEL) ;				\
	pushl	_intr_unit + (irq_num) * 4 ;				\
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	addl	$4, %esp ;						\
-	movl	$0, lapic_eoi ;						\
+	movl	$0, _lapic+LA_EOI ;					\
	lock ;								\
	incl	_cnt+V_INTR ;	/* book-keeping can wait */		\
	movl	_intr_countp + (irq_num) * 4, %eax ;			\
	lock ;								\
	incl	(%eax) ;						\
	MEXITCOUNT ;							\
	jmp	_doreti

#define IOAPICADDR(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 8
#define REDIRIDX(irq_num) CNAME(int_to_apicintpin) + 16 * (irq_num) + 12

#define MASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	jne	7f ;			/* masked, don't mask */	\
	orl	$IRQ_BIT(irq_num), _apic_imen ;	/* set the mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	orl	$IOART_INTMASK, %eax ;		/* set the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already masked */	\
	IMASK_UNLOCK

/*
 * Test to see whether we are handling an edge or level triggered INT.
 * Level-triggered INTs must still be masked as we don't clear the source,
 * and the EOI cycle would cause redundant INTs to occur.
 */
#define MASK_LEVEL_IRQ(irq_num)						\
	testl	$IRQ_BIT(irq_num), _apic_pin_trigger ;			\
	jz	9f ;			/* edge, don't mask */		\
	MASK_IRQ(irq_num) ;						\
9:

#ifdef APIC_INTR_REORDER
#define EOI_IRQ(irq_num)						\
	movl	_apic_isrbit_location + 8 * (irq_num), %eax ;		\
	movl	(%eax), %eax ;						\
	testl	_apic_isrbit_location + 4 + 8 * (irq_num), %eax ;	\
	jz	9f ;				/* not active */	\
-	movl	$0, lapic_eoi ;						\
+	movl	$0, _lapic+LA_EOI ;					\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:

#else
#define EOI_IRQ(irq_num)						\
-	testl	$IRQ_BIT(irq_num), lapic_isr1;				\
+	testl	$IRQ_BIT(irq_num), _lapic+LA_ISR1;			\
	jz	9f ;				/* not active */	\
-	movl	$0, lapic_eoi;						\
+	movl	$0, _lapic+LA_EOI;					\
	APIC_ITRACE(apic_itrace_eoi, irq_num, APIC_ITRACE_EOI) ;	\
9:
#endif

/*
 * Test to see if the source is currently masked, clear if so.
 */
#define UNMASK_IRQ(irq_num)						\
	IMASK_LOCK ;				/* into critical reg */	\
	testl	$IRQ_BIT(irq_num), _apic_imen ;				\
	je	7f ;			/* bit clear, not masked */	\
	andl	$~IRQ_BIT(irq_num), _apic_imen ;/* clear mask bit */	\
	movl	IOAPICADDR(irq_num), %ecx ;	/* ioapic addr */	\
	movl	REDIRIDX(irq_num), %eax ;	/* get the index */	\
	movl	%eax, (%ecx) ;			/* write the index */	\
	movl	IOAPIC_WINDOW(%ecx), %eax ;	/* current value */	\
	andl	$~IOART_INTMASK, %eax ;		/* clear the mask */	\
	movl	%eax, IOAPIC_WINDOW(%ecx) ;	/* new value */		\
7: ;						/* already unmasked */	\
	IMASK_UNLOCK

#ifdef APIC_INTR_DIAGNOSTIC
#ifdef APIC_INTR_DIAGNOSTIC_IRQ
log_intr_event:
	pushf
	cli
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_lock_np)
	addl	$4, %esp
	movl	CNAME(apic_itrace_debugbuffer_idx), %ecx
	andl	$32767, %ecx
	movl	PCPU(CPUID), %eax
	shll	$8, %eax
	orl	8(%esp), %eax
	movw	%ax, CNAME(apic_itrace_debugbuffer)(,%ecx,2)
	incl	%ecx
	andl	$32767, %ecx
	movl	%ecx, CNAME(apic_itrace_debugbuffer_idx)
	pushl	$CNAME(apic_itrace_debuglock)
	call	CNAME(s_unlock_np)
	addl	$4, %esp
	popf
	ret

#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4 ;				\
	pushl	%eax ;							\
	pushl	%ecx ;							\
	pushl	%edx ;							\
	movl	$(irq_num), %eax ;					\
	cmpl	$APIC_INTR_DIAGNOSTIC_IRQ, %eax ;			\
	jne	7f ;							\
	pushl	$id ;							\
	call	log_intr_event ;					\
	addl	$4, %esp ;						\
7: ;									\
	popl	%edx ;							\
	popl	%ecx ;							\
	popl	%eax
#else
#define APIC_ITRACE(name, irq_num, id)					\
	lock ;					/* MP-safe */		\
	incl	CNAME(name) + (irq_num) * 4
#endif

#define APIC_ITRACE_ENTER 1
#define APIC_ITRACE_EOI 2
#define APIC_ITRACE_TRYISRLOCK 3
#define APIC_ITRACE_GOTISRLOCK 4
#define APIC_ITRACE_ENTER2 5
#define APIC_ITRACE_LEAVE 6
#define APIC_ITRACE_UNMASK 7
#define APIC_ITRACE_ACTIVE 8
#define APIC_ITRACE_MASKED 9
#define APIC_ITRACE_NOISRLOCK 10
#define APIC_ITRACE_MASKED2 11
#define APIC_ITRACE_SPLZ 12
#define APIC_ITRACE_DORETI 13

#else
#define APIC_ITRACE(name, irq_num, id)
#endif
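Before the INTR() macro below puts them to use, it helps to restate the MASK_LEVEL_IRQ()/EOI_IRQ() pair in C, because the ordering is the whole point: a level-triggered line stays asserted until the device itself is serviced, so the I/O APIC pin has to be masked before the local APIC is given its EOI, or the same interrupt would re-fire immediately. In this sketch the two pointers stand for the fields the assembly reaches as _lapic+LA_ISR1 and _lapic+LA_EOI, and mask_ioapic_pin() is a stand-in for the MASK_IRQ() sequence:

	extern void mask_ioapic_pin(unsigned int irq_bit);	/* stand-in for MASK_IRQ() */

	static void
	ack_irq_sketch(volatile unsigned int *isr1, volatile unsigned int *eoi,
	    unsigned int irq_bit, int level_triggered)
	{
		if (level_triggered)
			mask_ioapic_pin(irq_bit);	/* under IMASK_LOCK in the real code */
		if (*isr1 & irq_bit)			/* interrupt actually in service? */
			*eoi = 0;			/* any value written signals EOI */
	}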
/*
 * Slow, threaded interrupts.
 *
 * XXX Most of the parameters here are obsolete.  Fix this when we're
 * done.
 * XXX we really shouldn't return via doreti if we just schedule the
 * interrupt handler and don't run anything.  We could just do an
 * iret.  FIXME.
 */
#define	INTR(irq_num, vec_name, maybe_extra_ipending)			\
	.text ;								\
	SUPERALIGN_TEXT ;						\
/* _XintrNN: entry point used by IDT/HWIs & splz_unpend via _vec[]. */	\
IDTVEC(vec_name) ;							\
	PUSH_FRAME ;							\
	movl	$KDSEL, %eax ;	/* reload with kernel's data segment */	\
	mov	%ax, %ds ;						\
	mov	%ax, %es ;						\
	movl	$KPSEL, %eax ;						\
	mov	%ax, %fs ;						\
;									\
	maybe_extra_ipending ;						\
;									\
	APIC_ITRACE(apic_itrace_enter, irq_num, APIC_ITRACE_ENTER) ;	\
;									\
	MASK_LEVEL_IRQ(irq_num) ;					\
	EOI_IRQ(irq_num) ;						\
0: ;									\
	incb	PCPU(INTR_NESTING_LEVEL) ;				\
;									\
/* entry point used by doreti_unpend for HWIs. */			\
__CONCAT(Xresume,irq_num): ;						\
	FAKE_MCOUNT(13*4(%esp)) ;	/* XXX avoid dbl cnt */		\
	pushl	$irq_num;		/* pass the IRQ */		\
	APIC_ITRACE(apic_itrace_enter2, irq_num, APIC_ITRACE_ENTER2) ;	\
	sti ;								\
	call	_sched_ithd ;						\
	addl	$4, %esp ;		/* discard the parameter */	\
	APIC_ITRACE(apic_itrace_leave, irq_num, APIC_ITRACE_LEAVE) ;	\
;									\
	MEXITCOUNT ;							\
	jmp	_doreti

/*
 * Handle "spurious INTerrupts".
 * Notes:
 *  This is different from the "spurious INTerrupt" generated by an
 *  8259 PIC for missing INTs.  See the APIC documentation for details.
 *  This routine should NOT do an 'EOI' cycle.
 */
	.text
	SUPERALIGN_TEXT
	.globl _Xspuriousint
_Xspuriousint:

	/* No EOI cycle used here */

	iret

/*
 * Handle TLB shootdowns.
 */
	.text
	SUPERALIGN_TEXT
	.globl	_Xinvltlb
_Xinvltlb:
	pushl	%eax

#ifdef COUNT_XINVLTLB_HITS
	pushl	%fs
	movl	$KPSEL, %eax
	mov	%ax, %fs
	movl	PCPU(CPUID), %eax
	popl	%fs
	ss
	incl	_xhits(,%eax,4)
#endif /* COUNT_XINVLTLB_HITS */

	movl	%cr3, %eax		/* invalidate the TLB */
	movl	%eax, %cr3
	ss				/* stack segment, avoid %ds load */
-	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
+	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */
	popl	%eax
	iret

#ifdef BETTER_CLOCK

/*
 * Executed by a CPU when it receives an Xcpucheckstate IPI from another CPU,
 *
 *  - Stores current cpu state in checkstate_cpustate[cpuid]
 *      0 == user, 1 == sys, 2 == intr
 *  - Stores current process in checkstate_curproc[cpuid]
 *
 *  - Signals its receipt by setting bit cpuid in checkstate_probed_cpus.
 *
 * stack: 0->ds, 4->fs, 8->ebx, 12->eax, 16->eip, 20->cs, 24->eflags
 */

	.text
	SUPERALIGN_TEXT
	.globl _Xcpucheckstate
	.globl _checkstate_cpustate
	.globl _checkstate_curproc
	.globl _checkstate_pc
_Xcpucheckstate:
	pushl	%eax
	pushl	%ebx
	pushl	%ds			/* save current data segment */
	pushl	%fs

	movl	$KDSEL, %eax
	mov	%ax, %ds		/* use KERNEL data segment */
	movl	$KPSEL, %eax
	mov	%ax, %fs

-	movl	$0, lapic_eoi		/* End Of Interrupt to APIC */
+	movl	$0, _lapic+LA_EOI	/* End Of Interrupt to APIC */

	movl	$0, %ebx
	movl	20(%esp), %eax
	andl	$3, %eax
	cmpl	$3, %eax
	je	1f
	testl	$PSL_VM, 24(%esp)
	jne	1f
	incl	%ebx			/* system or interrupt */
1:
	movl	PCPU(CPUID), %eax
	movl	%ebx, _checkstate_cpustate(,%eax,4)
	movl	PCPU(CURPROC), %ebx
	movl	%ebx, _checkstate_curproc(,%eax,4)

	movl	16(%esp), %ebx
	movl	%ebx, _checkstate_pc(,%eax,4)

	lock				/* checkstate_probed_cpus |= (1<