Index: stable/4/sys/i386/i386/initcpu.c =================================================================== --- stable/4/sys/i386/i386/initcpu.c (revision 82390) +++ stable/4/sys/i386/i386/initcpu.c (revision 82391) @@ -1,848 +1,852 @@ /* * Copyright (c) KATO Takenori, 1997, 1998. * * All rights reserved. Unpublished rights reserved under the copyright * laws of Japan. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #include "opt_cpu.h" #include #include #include #include #include #include #include void initializecpu(void); #if defined(I586_CPU) && defined(CPU_WT_ALLOC) void enable_K5_wt_alloc(void); void enable_K6_wt_alloc(void); void enable_K6_2_wt_alloc(void); #endif #ifdef I486_CPU static void init_5x86(void); static void init_bluelightning(void); static void init_486dlc(void); static void init_cy486dx(void); #ifdef CPU_I486_ON_386 static void init_i486_on_386(void); #endif static void init_6x86(void); #endif /* I486_CPU */ #ifdef I686_CPU static void init_6x86MX(void); static void init_ppro(void); static void init_mendocino(void); #endif void enable_sse(void); int hw_instruction_sse = 0; SYSCTL_INT(_hw, OID_AUTO, instruction_sse, CTLFLAG_RD, &hw_instruction_sse, 0, "SIMD/MMX2 instructions available in CPU"); +#ifdef CPU_ENABLE_SSE +u_int cpu_fxsr; /* SSE enabled */ +#endif + #ifdef I486_CPU /* * IBM Blue Lightning */ static void init_bluelightning(void) { u_long eflags; #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE) need_post_dma_flush = 1; #endif eflags = read_eflags(); disable_intr(); load_cr0(rcr0() | CR0_CD | CR0_NW); invd(); #ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE wrmsr(0x1000, 0x9c92LL); /* FP operand can be cacheable on Cyrix FPU */ #else wrmsr(0x1000, 0x1c92LL); /* Intel FPU */ #endif /* Enables 13MB and 0-640KB cache. */ wrmsr(0x1001, (0xd0LL << 32) | 0x3ff); #ifdef CPU_BLUELIGHTNING_3X wrmsr(0x1002, 0x04000000LL); /* Enables triple-clock mode. */ #else wrmsr(0x1002, 0x03000000LL); /* Enables double-clock mode. */ #endif /* Enable caching in CR0. 
*/ load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0 and NW = 0 */ invd(); write_eflags(eflags); } /* * Cyrix 486SLC/DLC/SR/DR series */ static void init_486dlc(void) { u_long eflags; u_char ccr0; eflags = read_eflags(); disable_intr(); invd(); ccr0 = read_cyrix_reg(CCR0); #ifndef CYRIX_CACHE_WORKS ccr0 |= CCR0_NC1 | CCR0_BARB; write_cyrix_reg(CCR0, ccr0); invd(); #else ccr0 &= ~CCR0_NC0; #ifndef CYRIX_CACHE_REALLY_WORKS ccr0 |= CCR0_NC1 | CCR0_BARB; #else ccr0 |= CCR0_NC1; #endif #ifdef CPU_DIRECT_MAPPED_CACHE ccr0 |= CCR0_CO; /* Direct mapped mode. */ #endif write_cyrix_reg(CCR0, ccr0); /* Clear non-cacheable region. */ write_cyrix_reg(NCR1+2, NCR_SIZE_0K); write_cyrix_reg(NCR2+2, NCR_SIZE_0K); write_cyrix_reg(NCR3+2, NCR_SIZE_0K); write_cyrix_reg(NCR4+2, NCR_SIZE_0K); write_cyrix_reg(0, 0); /* dummy write */ /* Enable caching in CR0. */ load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0 and NW = 0 */ invd(); #endif /* !CYRIX_CACHE_WORKS */ write_eflags(eflags); } /* * Cyrix 486S/DX series */ static void init_cy486dx(void) { u_long eflags; u_char ccr2; eflags = read_eflags(); disable_intr(); invd(); ccr2 = read_cyrix_reg(CCR2); #ifdef CPU_SUSP_HLT ccr2 |= CCR2_SUSP_HLT; #endif #ifdef PC98 /* Enables WB cache interface pin and Lock NW bit in CR0. */ ccr2 |= CCR2_WB | CCR2_LOCK_NW; /* Unlock NW bit in CR0. */ write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW); load_cr0((rcr0() & ~CR0_CD) | CR0_NW); /* CD = 0, NW = 1 */ #endif write_cyrix_reg(CCR2, ccr2); write_eflags(eflags); } /* * Cyrix 5x86 */ static void init_5x86(void) { u_long eflags; u_char ccr2, ccr3, ccr4, pcr0; eflags = read_eflags(); disable_intr(); load_cr0(rcr0() | CR0_CD | CR0_NW); wbinvd(); (void)read_cyrix_reg(CCR3); /* dummy */ /* Initialize CCR2. */ ccr2 = read_cyrix_reg(CCR2); ccr2 |= CCR2_WB; #ifdef CPU_SUSP_HLT ccr2 |= CCR2_SUSP_HLT; #else ccr2 &= ~CCR2_SUSP_HLT; #endif ccr2 |= CCR2_WT1; write_cyrix_reg(CCR2, ccr2); /* Initialize CCR4. 
*/ ccr3 = read_cyrix_reg(CCR3); write_cyrix_reg(CCR3, CCR3_MAPEN0); ccr4 = read_cyrix_reg(CCR4); ccr4 |= CCR4_DTE; ccr4 |= CCR4_MEM; #ifdef CPU_FASTER_5X86_FPU ccr4 |= CCR4_FASTFPE; #else ccr4 &= ~CCR4_FASTFPE; #endif ccr4 &= ~CCR4_IOMASK; /******************************************************************** * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time * should be 0 for errata fix. ********************************************************************/ #ifdef CPU_IORT ccr4 |= CPU_IORT & CCR4_IOMASK; #endif write_cyrix_reg(CCR4, ccr4); /* Initialize PCR0. */ /**************************************************************** * WARNING: RSTK_EN and LOOP_EN could make your system unstable. * BTB_EN might make your system unstable. ****************************************************************/ pcr0 = read_cyrix_reg(PCR0); #ifdef CPU_RSTK_EN pcr0 |= PCR0_RSTK; #else pcr0 &= ~PCR0_RSTK; #endif #ifdef CPU_BTB_EN pcr0 |= PCR0_BTB; #else pcr0 &= ~PCR0_BTB; #endif #ifdef CPU_LOOP_EN pcr0 |= PCR0_LOOP; #else pcr0 &= ~PCR0_LOOP; #endif /**************************************************************** * WARNING: if you use a memory mapped I/O device, don't use * DISABLE_5X86_LSSER option, which may reorder memory mapped * I/O access. * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER. ****************************************************************/ #ifdef CPU_DISABLE_5X86_LSSER pcr0 &= ~PCR0_LSSER; #else pcr0 |= PCR0_LSSER; #endif write_cyrix_reg(PCR0, pcr0); /* Restore CCR3. */ write_cyrix_reg(CCR3, ccr3); (void)read_cyrix_reg(0x80); /* dummy */ /* Unlock NW bit in CR0. */ write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW); load_cr0((rcr0() & ~CR0_CD) | CR0_NW); /* CD = 0, NW = 1 */ /* Lock NW bit in CR0. */ write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW); write_eflags(eflags); } #ifdef CPU_I486_ON_386 /* * There are i486 based upgrade products for i386 machines. * In this case, BIOS doesn't enables CPU cache. 
*/ void init_i486_on_386(void) { u_long eflags; #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE) need_post_dma_flush = 1; #endif eflags = read_eflags(); disable_intr(); load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0, NW = 0 */ write_eflags(eflags); } #endif /* * Cyrix 6x86 * * XXX - What should I do here? Please let me know. */ static void init_6x86(void) { u_long eflags; u_char ccr3, ccr4; eflags = read_eflags(); disable_intr(); load_cr0(rcr0() | CR0_CD | CR0_NW); wbinvd(); /* Initialize CCR0. */ write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1); /* Initialize CCR1. */ #ifdef CPU_CYRIX_NO_LOCK write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK); #else write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK); #endif /* Initialize CCR2. */ #ifdef CPU_SUSP_HLT write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT); #else write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT); #endif ccr3 = read_cyrix_reg(CCR3); write_cyrix_reg(CCR3, CCR3_MAPEN0); /* Initialize CCR4. */ ccr4 = read_cyrix_reg(CCR4); ccr4 |= CCR4_DTE; ccr4 &= ~CCR4_IOMASK; #ifdef CPU_IORT write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK)); #else write_cyrix_reg(CCR4, ccr4 | 7); #endif /* Initialize CCR5. */ #ifdef CPU_WT_ALLOC write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC); #endif /* Restore CCR3. */ write_cyrix_reg(CCR3, ccr3); /* Unlock NW bit in CR0. */ write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW); /* * Earlier revision of the 6x86 CPU could crash the system if * L1 cache is in write-back mode. */ if ((cyrix_did & 0xff00) > 0x1600) load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0 and NW = 0 */ else { /* Revision 2.6 and lower. */ #ifdef CYRIX_CACHE_REALLY_WORKS load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0 and NW = 0 */ #else load_cr0((rcr0() & ~CR0_CD) | CR0_NW); /* CD = 0 and NW = 1 */ #endif } /* Lock NW bit in CR0. 
*/ write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW); write_eflags(eflags); } #endif /* I486_CPU */ #ifdef I686_CPU /* * Cyrix 6x86MX (code-named M2) * * XXX - What should I do here? Please let me know. */ static void init_6x86MX(void) { u_long eflags; u_char ccr3, ccr4; eflags = read_eflags(); disable_intr(); load_cr0(rcr0() | CR0_CD | CR0_NW); wbinvd(); /* Initialize CCR0. */ write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1); /* Initialize CCR1. */ #ifdef CPU_CYRIX_NO_LOCK write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK); #else write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK); #endif /* Initialize CCR2. */ #ifdef CPU_SUSP_HLT write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT); #else write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT); #endif ccr3 = read_cyrix_reg(CCR3); write_cyrix_reg(CCR3, CCR3_MAPEN0); /* Initialize CCR4. */ ccr4 = read_cyrix_reg(CCR4); ccr4 &= ~CCR4_IOMASK; #ifdef CPU_IORT write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK)); #else write_cyrix_reg(CCR4, ccr4 | 7); #endif /* Initialize CCR5. */ #ifdef CPU_WT_ALLOC write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC); #endif /* Restore CCR3. */ write_cyrix_reg(CCR3, ccr3); /* Unlock NW bit in CR0. */ write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW); load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); /* CD = 0 and NW = 0 */ /* Lock NW bit in CR0. */ write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW); write_eflags(eflags); } static void init_ppro(void) { #ifndef SMP u_int64_t apicbase; /* * Local APIC should be diabled in UP kernel. */ apicbase = rdmsr(0x1b); apicbase &= ~0x800LL; wrmsr(0x1b, apicbase); #endif } /* * Initialize BBL_CR_CTL3 (Control register 3: used to configure the * L2 cache). 
*/ void init_mendocino(void) { #ifdef CPU_PPRO2CELERON u_long eflags; u_int64_t bbl_cr_ctl3; eflags = read_eflags(); disable_intr(); load_cr0(rcr0() | CR0_CD | CR0_NW); wbinvd(); bbl_cr_ctl3 = rdmsr(0x11e); /* If the L2 cache is configured, do nothing. */ if (!(bbl_cr_ctl3 & 1)) { bbl_cr_ctl3 = 0x134052bLL; /* Set L2 Cache Latency (Default: 5). */ #ifdef CPU_CELERON_L2_LATENCY #if CPU_L2_LATENCY > 15 #error invalid CPU_L2_LATENCY. #endif bbl_cr_ctl3 |= CPU_L2_LATENCY << 1; #else bbl_cr_ctl3 |= 5 << 1; #endif wrmsr(0x11e, bbl_cr_ctl3); } load_cr0(rcr0() & ~(CR0_CD | CR0_NW)); write_eflags(eflags); #endif /* CPU_PPRO2CELERON */ } #endif /* I686_CPU */ /* * Initialize CR4 (Control register 4) to enable SSE instructions. */ void enable_sse(void) { #if defined(CPU_ENABLE_SSE) if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) { load_cr4(rcr4() | CR4_FXSR | CR4_XMM); cpu_fxsr = hw_instruction_sse = 1; } #endif } void initializecpu(void) { switch (cpu) { #ifdef I486_CPU case CPU_BLUE: init_bluelightning(); break; case CPU_486DLC: init_486dlc(); break; case CPU_CY486DX: init_cy486dx(); break; case CPU_M1SC: init_5x86(); break; #ifdef CPU_I486_ON_386 case CPU_486: init_i486_on_386(); break; #endif case CPU_M1: init_6x86(); break; #endif /* I486_CPU */ #ifdef I686_CPU case CPU_M2: init_6x86MX(); break; case CPU_686: if (strcmp(cpu_vendor, "GenuineIntel") == 0) { switch (cpu_id & 0xff0) { case 0x610: init_ppro(); break; case 0x660: init_mendocino(); break; } } break; #endif default: break; } enable_sse(); #if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE) /* * OS should flush L1 cahce by itself because no PC-98 supports * non-Intel CPUs. Use wbinvd instruction before DMA transfer * when need_pre_dma_flush = 1, use invd instruction after DMA * transfer when need_post_dma_flush = 1. If your CPU upgrade * product support hardware cache control, you can add * UPGRADE_CPU_HW_CACHE option in your kernel configuration file. 
* This option elminate unneeded cache flush instruction. */ if (strcmp(cpu_vendor, "CyrixInstead") == 0) { switch (cpu) { #ifdef I486_CPU case CPU_486DLC: need_post_dma_flush = 1; break; case CPU_M1SC: need_pre_dma_flush = 1; break; case CPU_CY486DX: need_pre_dma_flush = 1; #ifdef CPU_I486_ON_386 need_post_dma_flush = 1; #endif break; #endif default: break; } } else if (strcmp(cpu_vendor, "AuthenticAMD") == 0) { switch (cpu_id & 0xFF0) { case 0x470: /* Enhanced Am486DX2 WB */ case 0x490: /* Enhanced Am486DX4 WB */ case 0x4F0: /* Am5x86 WB */ need_pre_dma_flush = 1; break; } } else if (strcmp(cpu_vendor, "IBM") == 0) { need_post_dma_flush = 1; } else { #ifdef CPU_I486_ON_386 need_pre_dma_flush = 1; #endif } #endif /* PC98 && !UPGRADE_CPU_HW_CACHE */ } #if defined(I586_CPU) && defined(CPU_WT_ALLOC) /* * Enable write allocate feature of AMD processors. * Following two functions require the Maxmem variable being set. */ void enable_K5_wt_alloc(void) { u_int64_t msr; /* * Write allocate is supported only on models 1, 2, and 3, with * a stepping of 4 or greater. */ if (((cpu_id & 0xf0) > 0) && ((cpu_id & 0x0f) > 3)) { disable_intr(); msr = rdmsr(0x83); /* HWCR */ wrmsr(0x83, msr & !(0x10)); /* * We have to tell the chip where the top of memory is, * since video cards could have frame bufferes there, * memory-mapped I/O could be there, etc. */ if(Maxmem > 0) msr = Maxmem / 16; else msr = 0; msr |= AMD_WT_ALLOC_TME | AMD_WT_ALLOC_FRE; #ifdef PC98 if (!(inb(0x43b) & 4)) { wrmsr(0x86, 0x0ff00f0); msr |= AMD_WT_ALLOC_PRE; } #else /* * There is no way to know wheter 15-16M hole exists or not. * Therefore, we disable write allocate for this range. 
*/ wrmsr(0x86, 0x0ff00f0); msr |= AMD_WT_ALLOC_PRE; #endif wrmsr(0x85, msr); msr=rdmsr(0x83); wrmsr(0x83, msr|0x10); /* enable write allocate */ enable_intr(); } } void enable_K6_wt_alloc(void) { quad_t size; u_int64_t whcr; u_long eflags; eflags = read_eflags(); disable_intr(); wbinvd(); #ifdef CPU_DISABLE_CACHE /* * Certain K6-2 box becomes unstable when write allocation is * enabled. */ /* * The AMD-K6 processer provides the 64-bit Test Register 12(TR12), * but only the Cache Inhibit(CI) (bit 3 of TR12) is suppported. * All other bits in TR12 have no effect on the processer's operation. * The I/O Trap Restart function (bit 9 of TR12) is always enabled * on the AMD-K6. */ wrmsr(0x0000000e, (u_int64_t)0x0008); #endif /* Don't assume that memory size is aligned with 4M. */ if (Maxmem > 0) size = ((Maxmem >> 8) + 3) >> 2; else size = 0; /* Limit is 508M bytes. */ if (size > 0x7f) size = 0x7f; whcr = (rdmsr(0xc0000082) & ~(0x7fLL << 1)) | (size << 1); #if defined(PC98) || defined(NO_MEMORY_HOLE) if (whcr & (0x7fLL << 1)) { #ifdef PC98 /* * If bit 2 of port 0x43b is 0, disable wrte allocate for the * 15-16M range. */ if (!(inb(0x43b) & 4)) whcr &= ~0x0001LL; else #endif whcr |= 0x0001LL; } #else /* * There is no way to know wheter 15-16M hole exists or not. * Therefore, we disable write allocate for this range. */ whcr &= ~0x0001LL; #endif wrmsr(0x0c0000082, whcr); write_eflags(eflags); enable_intr(); } void enable_K6_2_wt_alloc(void) { quad_t size; u_int64_t whcr; u_long eflags; eflags = read_eflags(); disable_intr(); wbinvd(); #ifdef CPU_DISABLE_CACHE /* * Certain K6-2 box becomes unstable when write allocation is * enabled. */ /* * The AMD-K6 processer provides the 64-bit Test Register 12(TR12), * but only the Cache Inhibit(CI) (bit 3 of TR12) is suppported. * All other bits in TR12 have no effect on the processer's operation. * The I/O Trap Restart function (bit 9 of TR12) is always enabled * on the AMD-K6. 
*/ wrmsr(0x0000000e, (u_int64_t)0x0008); #endif /* Don't assume that memory size is aligned with 4M. */ if (Maxmem > 0) size = ((Maxmem >> 8) + 3) >> 2; else size = 0; /* Limit is 4092M bytes. */ if (size > 0x3fff) size = 0x3ff; whcr = (rdmsr(0xc0000082) & ~(0x3ffLL << 22)) | (size << 22); #if defined(PC98) || defined(NO_MEMORY_HOLE) if (whcr & (0x3ffLL << 22)) { #ifdef PC98 /* * If bit 2 of port 0x43b is 0, disable wrte allocate for the * 15-16M range. */ if (!(inb(0x43b) & 4)) whcr &= ~(1LL << 16); else #endif whcr |= 1LL << 16; } #else /* * There is no way to know wheter 15-16M hole exists or not. * Therefore, we disable write allocate for this range. */ whcr &= ~(1LL << 16); #endif wrmsr(0x0c0000082, whcr); write_eflags(eflags); enable_intr(); } #endif /* I585_CPU && CPU_WT_ALLOC */ #include "opt_ddb.h" #ifdef DDB #include DB_SHOW_COMMAND(cyrixreg, cyrixreg) { u_long eflags; u_int cr0; u_char ccr1, ccr2, ccr3; u_char ccr0 = 0, ccr4 = 0, ccr5 = 0, pcr0 = 0; cr0 = rcr0(); if (strcmp(cpu_vendor,"CyrixInstead") == 0) { eflags = read_eflags(); disable_intr(); if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) { ccr0 = read_cyrix_reg(CCR0); } ccr1 = read_cyrix_reg(CCR1); ccr2 = read_cyrix_reg(CCR2); ccr3 = read_cyrix_reg(CCR3); if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) { write_cyrix_reg(CCR3, CCR3_MAPEN0); ccr4 = read_cyrix_reg(CCR4); if ((cpu == CPU_M1) || (cpu == CPU_M2)) ccr5 = read_cyrix_reg(CCR5); else pcr0 = read_cyrix_reg(PCR0); write_cyrix_reg(CCR3, ccr3); /* Restore CCR3. 
*/ } write_eflags(eflags); if ((cpu != CPU_M1SC) && (cpu != CPU_CY486DX)) printf("CCR0=%x, ", (u_int)ccr0); printf("CCR1=%x, CCR2=%x, CCR3=%x", (u_int)ccr1, (u_int)ccr2, (u_int)ccr3); if ((cpu == CPU_M1SC) || (cpu == CPU_M1) || (cpu == CPU_M2)) { printf(", CCR4=%x, ", (u_int)ccr4); if (cpu == CPU_M1SC) printf("PCR0=%x\n", pcr0); else printf("CCR5=%x\n", ccr5); } } printf("CR0=%x\n", cr0); } #endif /* DDB */ Index: stable/4/sys/i386/i386/locore.s =================================================================== --- stable/4/sys/i386/i386/locore.s (revision 82390) +++ stable/4/sys/i386/i386/locore.s (revision 82391) @@ -1,969 +1,968 @@ /*- * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)locore.s 7.3 (Berkeley) 5/13/91 * $FreeBSD$ * * originally from: locore.s, by William F. Jolitz * * Substantially rewritten by David Greenman, Rod Grimes, * Bruce Evans, Wolfgang Solfrank, Poul-Henning Kamp * and many others. */ #include "opt_bootp.h" #include "opt_nfsroot.h" #include #include #include #include #include #include #include #include "assym.s" /* * XXX * * Note: This version greatly munged to avoid various assembler errors * that may be fixed in newer versions of gas. Perhaps newer versions * will have more pleasant appearance. */ /* * PTmap is recursive pagemap at top of virtual address space. * Within PTmap, the page directory can be found (third indirection). */ .globl _PTmap,_PTD,_PTDpde .set _PTmap,(PTDPTDI << PDRSHIFT) .set _PTD,_PTmap + (PTDPTDI * PAGE_SIZE) .set _PTDpde,_PTD + (PTDPTDI * PDESIZE) /* * APTmap, APTD is the alternate recursive pagemap. * It's used when modifying another process's page tables. 
*/ .globl _APTmap,_APTD,_APTDpde .set _APTmap,APTDPTDI << PDRSHIFT .set _APTD,_APTmap + (APTDPTDI * PAGE_SIZE) .set _APTDpde,_PTD + (APTDPTDI * PDESIZE) /* * Globals */ .data ALIGN_DATA /* just to be sure */ .globl HIDENAME(tmpstk) .space 0x2000 /* space for tmpstk - temporary stack */ HIDENAME(tmpstk): .globl _boothowto,_bootdev .globl _cpu,_cpu_vendor,_cpu_id,_bootinfo - .globl _cpu_high, _cpu_feature, _cpu_fxsr + .globl _cpu_high, _cpu_feature _cpu: .long 0 /* are we 386, 386sx, or 486 */ _cpu_id: .long 0 /* stepping ID */ _cpu_high: .long 0 /* highest arg to CPUID */ _cpu_feature: .long 0 /* features */ -_cpu_fxsr: .long 0 /* use fxsave/fxrstor instruction */ _cpu_vendor: .space 20 /* CPU origin code */ _bootinfo: .space BOOTINFO_SIZE /* bootinfo that we can handle */ _KERNend: .long 0 /* phys addr end of kernel (just after bss) */ physfree: .long 0 /* phys addr of next free page */ #ifdef SMP .globl _cpu0prvpage cpu0pp: .long 0 /* phys addr cpu0 private pg */ _cpu0prvpage: .long 0 /* relocated version */ .globl _SMPpt SMPptpa: .long 0 /* phys addr SMP page table */ _SMPpt: .long 0 /* relocated version */ #endif /* SMP */ .globl _IdlePTD _IdlePTD: .long 0 /* phys addr of kernel PTD */ #ifdef SMP .globl _KPTphys #endif _KPTphys: .long 0 /* phys addr of kernel page tables */ .globl _proc0paddr _proc0paddr: .long 0 /* address of proc 0 address space */ p0upa: .long 0 /* phys addr of proc0's UPAGES */ vm86phystk: .long 0 /* PA of vm86/bios stack */ .globl _vm86paddr, _vm86pa _vm86paddr: .long 0 /* address of vm86 region */ _vm86pa: .long 0 /* phys addr of vm86 region */ #ifdef BDE_DEBUGGER .globl _bdb_exists /* flag to indicate BDE debugger is present */ _bdb_exists: .long 0 #endif #ifdef PC98 .globl _pc98_system_parameter _pc98_system_parameter: .space 0x240 #endif /********************************************************************** * * Some handy macros * */ #define R(foo) ((foo)-KERNBASE) #define ALLOCPAGES(foo) \ movl R(physfree), %esi ; \ movl 
$((foo)*PAGE_SIZE), %eax ; \ addl %esi, %eax ; \ movl %eax, R(physfree) ; \ movl %esi, %edi ; \ movl $((foo)*PAGE_SIZE),%ecx ; \ xorl %eax,%eax ; \ cld ; \ rep ; \ stosb /* * fillkpt * eax = page frame address * ebx = index into page table * ecx = how many pages to map * base = base address of page dir/table * prot = protection bits */ #define fillkpt(base, prot) \ shll $2,%ebx ; \ addl base,%ebx ; \ orl $PG_V,%eax ; \ orl prot,%eax ; \ 1: movl %eax,(%ebx) ; \ addl $PAGE_SIZE,%eax ; /* increment physical address */ \ addl $4,%ebx ; /* next pte */ \ loop 1b /* * fillkptphys(prot) * eax = physical address * ecx = how many pages to map * prot = protection bits */ #define fillkptphys(prot) \ movl %eax, %ebx ; \ shrl $PAGE_SHIFT, %ebx ; \ fillkpt(R(_KPTphys), prot) .text /********************************************************************** * * This is where the bootblocks start us, set the ball rolling... * */ NON_GPROF_ENTRY(btext) #ifdef PC98 /* save SYSTEM PARAMETER for resume (NS/T or other) */ movl $0xa1400,%esi movl $R(_pc98_system_parameter),%edi movl $0x0240,%ecx cld rep movsb #else /* IBM-PC */ #ifdef BDE_DEBUGGER #ifdef BIOS_STEALS_3K cmpl $0x0375c339,0x95504 #else cmpl $0x0375c339,0x96104 /* XXX - debugger signature */ #endif jne 1f movb $1,R(_bdb_exists) 1: #endif /* Tell the bios to warmboot next time */ movw $0x1234,0x472 #endif /* PC98 */ /* Set up a real frame in case the double return in newboot is executed. */ pushl %ebp movl %esp, %ebp /* Don't trust what the BIOS gives for eflags. */ pushl $PSL_KERNEL popfl /* * Don't trust what the BIOS gives for %fs and %gs. Trust the bootstrap * to set %cs, %ds, %es and %ss. */ mov %ds, %ax mov %ax, %fs mov %ax, %gs call recover_bootinfo /* Get onto a stack that we can trust. */ /* * XXX this step is delayed in case recover_bootinfo needs to return via * the old stack, but it need not be, since recover_bootinfo actually * returns via the old frame. 
*/ movl $R(HIDENAME(tmpstk)),%esp #ifdef PC98 /* pc98_machine_type & M_EPSON_PC98 */ testb $0x02,R(_pc98_system_parameter)+220 jz 3f /* epson_machine_id <= 0x0b */ cmpb $0x0b,R(_pc98_system_parameter)+224 ja 3f /* count up memory */ movl $0x100000,%eax /* next, talley remaining memory */ movl $0xFFF-0x100,%ecx 1: movl 0(%eax),%ebx /* save location to check */ movl $0xa55a5aa5,0(%eax) /* write test pattern */ cmpl $0xa55a5aa5,0(%eax) /* does not check yet for rollover */ jne 2f movl %ebx,0(%eax) /* restore memory */ addl $PAGE_SIZE,%eax loop 1b 2: subl $0x100000,%eax shrl $17,%eax movb %al,R(_pc98_system_parameter)+1 3: movw R(_pc98_system_parameter+0x86),%ax movw %ax,R(_cpu_id) #endif call identify_cpu /* clear bss */ /* * XXX this should be done a little earlier. * * XXX we don't check that there is memory for our bss and page tables * before using it. * * XXX the boot program somewhat bogusly clears the bss. We still have * to do it in case we were unzipped by kzipboot. Then the boot program * only clears kzipboot's bss. * * XXX the gdt and idt are still somewhere in the boot program. We * depend on the convention that the boot program is below 1MB and we * are above 1MB to keep the gdt and idt away from the bss and page * tables. The idt is only used if BDE_DEBUGGER is enabled. */ movl $R(_end),%ecx movl $R(_edata),%edi subl %edi,%ecx xorl %eax,%eax cld rep stosb call create_pagetables /* * If the CPU has support for VME, turn it on. */ testl $CPUID_VME, R(_cpu_feature) jz 1f movl %cr4, %eax orl $CR4_VME, %eax movl %eax, %cr4 1: #ifdef BDE_DEBUGGER /* * Adjust as much as possible for paging before enabling paging so that the * adjustments can be traced. */ call bdb_prepare_paging #endif /* Now enable paging */ movl R(_IdlePTD), %eax movl %eax,%cr3 /* load ptd addr into mmu */ movl %cr0,%eax /* get control word */ orl $CR0_PE|CR0_PG,%eax /* enable paging */ movl %eax,%cr0 /* and let's page NOW! 
*/ #ifdef BDE_DEBUGGER /* * Complete the adjustments for paging so that we can keep tracing through * initi386() after the low (physical) addresses for the gdt and idt become * invalid. */ call bdb_commit_paging #endif pushl $begin /* jump to high virtualized address */ ret /* now running relocated at KERNBASE where the system is linked to run */ begin: /* set up bootstrap stack */ movl _proc0paddr,%esp /* location of in-kernel pages */ addl $UPAGES*PAGE_SIZE,%esp /* bootstrap stack end location */ xorl %eax,%eax /* mark end of frames */ movl %eax,%ebp movl _proc0paddr,%eax movl _IdlePTD, %esi movl %esi,PCB_CR3(%eax) movl physfree, %esi pushl %esi /* value of first for init386(first) */ call _init386 /* wire 386 chip for unix operation */ popl %esi call _mi_startup /* autoconfiguration, mountroot etc */ hlt /* never returns to here */ /* * Signal trampoline, copied to top of user stack */ NON_GPROF_ENTRY(sigcode) call *SIGF_HANDLER(%esp) /* call signal handler */ lea SIGF_UC(%esp),%eax /* get ucontext_t */ pushl %eax testl $PSL_VM,UC_EFLAGS(%eax) jne 9f movl UC_GS(%eax),%gs /* restore %gs */ 9: movl $SYS_sigreturn,%eax pushl %eax /* junk to fake return addr. */ int $0x80 /* enter kernel with args */ 0: jmp 0b ALIGN_TEXT _osigcode: call *SIGF_HANDLER(%esp) /* call signal handler */ lea SIGF_SC(%esp),%eax /* get sigcontext */ pushl %eax testl $PSL_VM,SC_PS(%eax) jne 9f movl SC_GS(%eax),%gs /* restore %gs */ 9: movl $0x01d516,SC_TRAPNO(%eax) /* magic: 0ldSiG */ movl $SYS_sigreturn,%eax pushl %eax /* junk to fake return addr. */ int $0x80 /* enter kernel with args */ 0: jmp 0b ALIGN_TEXT _esigcode: .data .globl _szsigcode, _szosigcode _szsigcode: .long _esigcode-_sigcode _szosigcode: .long _esigcode-_osigcode .text /********************************************************************** * * Recover the bootinfo passed to us from the boot program * */ recover_bootinfo: /* * This code is called in different ways depending on what loaded * and started the kernel. 
This is used to detect how we get the * arguments from the other code and what we do with them. * * Old disk boot blocks: * (*btext)(howto, bootdev, cyloffset, esym); * [return address == 0, and can NOT be returned to] * [cyloffset was not supported by the FreeBSD boot code * and always passed in as 0] * [esym is also known as total in the boot code, and * was never properly supported by the FreeBSD boot code] * * Old diskless netboot code: * (*btext)(0,0,0,0,&nfsdiskless,0,0,0); * [return address != 0, and can NOT be returned to] * If we are being booted by this code it will NOT work, * so we are just going to halt if we find this case. * * New uniform boot code: * (*btext)(howto, bootdev, 0, 0, 0, &bootinfo) * [return address != 0, and can be returned to] * * There may seem to be a lot of wasted arguments in here, but * that is so the newer boot code can still load very old kernels * and old boot code can load new kernels. */ /* * The old style disk boot blocks fake a frame on the stack and * did an lret to get here. The frame on the stack has a return * address of 0. */ cmpl $0,4(%ebp) je olddiskboot /* * We have some form of return address, so this is either the * old diskless netboot code, or the new uniform code. That can * be detected by looking at the 5th argument, if it is 0 * we are being booted by the new uniform boot code. */ cmpl $0,24(%ebp) je newboot /* * Seems we have been loaded by the old diskless boot code, we * don't stand a chance of running as the diskless structure * changed considerably between the two, so just halt. */ hlt /* * We have been loaded by the new uniform boot code. 
* Let's check the bootinfo version, and if we do not understand * it we return to the loader with a status of 1 to indicate this error */ newboot: movl 28(%ebp),%ebx /* &bootinfo.version */ movl BI_VERSION(%ebx),%eax cmpl $1,%eax /* We only understand version 1 */ je 1f movl $1,%eax /* Return status */ leave /* * XXX this returns to our caller's caller (as is required) since * we didn't set up a frame and our caller did. */ ret 1: /* * If we have a kernelname copy it in */ movl BI_KERNELNAME(%ebx),%esi cmpl $0,%esi je 2f /* No kernelname */ movl $MAXPATHLEN,%ecx /* Brute force!!! */ movl $R(_kernelname),%edi cmpb $'/',(%esi) /* Make sure it starts with a slash */ je 1f movb $'/',(%edi) incl %edi decl %ecx 1: cld rep movsb 2: /* * Determine the size of the boot loader's copy of the bootinfo * struct. This is impossible to do properly because old versions * of the struct don't contain a size field and there are 2 old * versions with the same version number. */ movl $BI_ENDCOMMON,%ecx /* prepare for sizeless version */ testl $RB_BOOTINFO,8(%ebp) /* bi_size (and bootinfo) valid? */ je got_bi_size /* no, sizeless version */ movl BI_SIZE(%ebx),%ecx got_bi_size: /* * Copy the common part of the bootinfo struct */ movl %ebx,%esi movl $R(_bootinfo),%edi cmpl $BOOTINFO_SIZE,%ecx jbe got_common_bi_size movl $BOOTINFO_SIZE,%ecx got_common_bi_size: cld rep movsb #ifdef NFS_ROOT #ifndef BOOTP_NFSV3 /* * If we have a nfs_diskless structure copy it in */ movl BI_NFS_DISKLESS(%ebx),%esi cmpl $0,%esi je olddiskboot movl $R(_nfs_diskless),%edi movl $NFSDISKLESS_SIZE,%ecx cld rep movsb movl $R(_nfs_diskless_valid),%edi movl $1,(%edi) #endif #endif /* * The old style disk boot. 
	 *	(*btext)(howto, bootdev, cyloffset, esym);
	 * Note that the newer boot code just falls into here to pick
	 * up howto and bootdev, cyloffset and esym are no longer used
	 */
olddiskboot:
	movl	8(%ebp),%eax
	movl	%eax,R(_boothowto)
	movl	12(%ebp),%eax
	movl	%eax,R(_bootdev)

	ret

/**********************************************************************
 *
 * Identify the CPU and initialize anything special about it
 *
 */
identify_cpu:

	/* Try to toggle alignment check flag; does not exist on 386. */
	pushfl
	popl	%eax
	movl	%eax,%ecx		/* save the original EFLAGS */
	orl	$PSL_AC,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax		/* which bits actually changed? */
	andl	$PSL_AC,%eax		/* did AC stick? */
	pushl	%ecx
	popfl				/* restore the original EFLAGS */

	testl	%eax,%eax
	jnz	try486			/* AC toggled: at least a 486 */

	/* NexGen CPU does not have alignment check flag. */
	pushfl				/* preserve flags across the test */
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc
	divl	%ecx			/* flags after divl are undefined... */
	jz	trynexgen		/* ...NexGen leaves ZF set here */
	popfl
	movl	$CPU_386,R(_cpu)
	jmp	3f

trynexgen:
	popfl
	movl	$CPU_NX586,R(_cpu)
	movl	$0x4778654e,R(_cpu_vendor)	# store vendor string:
	movl	$0x72446e65,R(_cpu_vendor+4)	# "NexGenDriven", as
	movl	$0x6e657669,R(_cpu_vendor+8)	# little-endian longs
	movl	$0,R(_cpu_vendor+12)		# NUL terminate
	jmp	3f

try486:	/* Try to toggle identification flag; does not exist on early 486s. */
	pushfl
	popl	%eax
	movl	%eax,%ecx		/* save the original EFLAGS */
	xorl	$PSL_ID,%eax
	pushl	%eax
	popfl
	pushfl
	popl	%eax
	xorl	%ecx,%eax		/* which bits actually changed? */
	andl	$PSL_ID,%eax		/* did ID toggle? */
	pushl	%ecx
	popfl				/* restore the original EFLAGS */

	testl	%eax,%eax
	jnz	trycpuid		/* ID toggled: cpuid is usable */
	movl	$CPU_486,R(_cpu)

	/*
	 * Check Cyrix CPU
	 * Cyrix CPUs do not change the undefined flags following
	 * execution of the divide instruction which divides 5 by 2.
	 *
	 * Note: CPUID is enabled on M2, so it passes another way.
	 */
	pushfl
	movl	$0x5555, %eax
	xorl	%edx, %edx
	movl	$2, %ecx
	clc				/* clear CF before the divide */
	divl	%ecx
	jnc	trycyrix		/* CF untouched by divl: Cyrix-like */
	popfl
	jmp	3f		/* You may use Intel CPU. */

trycyrix:
	popfl
	/*
	 * IBM Bluelighting CPU also doesn't change the undefined flags.
	 * Because IBM doesn't disclose the information for Bluelighting
	 * CPU, we couldn't distinguish it from Cyrix's (including IBM
	 * brand of Cyrix CPUs).
	 */
	movl	$0x69727943,R(_cpu_vendor)	# store vendor string:
	movl	$0x736e4978,R(_cpu_vendor+4)	# "CyrixInstead", as
	movl	$0x64616574,R(_cpu_vendor+8)	# little-endian longs
	jmp	3f

trycpuid:	/* Use the `cpuid' instruction. */
	xorl	%eax,%eax
	.byte	0x0f,0xa2			# cpuid 0
	movl	%eax,R(_cpu_high)		# highest capability
	movl	%ebx,R(_cpu_vendor)		# store vendor string
	movl	%edx,R(_cpu_vendor+4)
	movl	%ecx,R(_cpu_vendor+8)
	movb	$0,R(_cpu_vendor+12)		# NUL terminate

	movl	$1,%eax
	.byte	0x0f,0xa2			# cpuid 1
	movl	%eax,R(_cpu_id)			# store cpu_id
	movl	%edx,R(_cpu_feature)		# store cpu_feature
	rorl	$8,%eax				# extract family type
	andl	$15,%eax
	cmpl	$5,%eax
	jae	1f

	/* less than Pentium; must be 486 */
	movl	$CPU_486,R(_cpu)
	jmp	3f
1:
	/* a Pentium? */
	cmpl	$5,%eax
	jne	2f
	movl	$CPU_586,R(_cpu)
	jmp	3f
2:
	/* Greater than Pentium...call it a Pentium Pro */
	movl	$CPU_686,R(_cpu)
3:
	ret


/**********************************************************************
 *
 * Create the first page directory and its page tables.
 *
 */

create_pagetables:

	/* Turn on global-page support early if the CPU advertises PGE. */
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	movl	%cr4, %eax
	orl	$CR4_PGE, %eax
	movl	%eax, %cr4
1:

/* Find end of kernel image (rounded up to a page boundary). */
	movl	$R(_end),%esi

/* Include symbols, if any. */
	movl	R(_bootinfo+BI_ESYMTAB),%edi
	testl	%edi,%edi
	je	over_symalloc
	movl	%edi,%esi		/* symbols extend past _end */
	movl	$KERNBASE,%edi
	addl	%edi,R(_bootinfo+BI_SYMTAB)	/* rebase to virtual addrs */
	addl	%edi,R(_bootinfo+BI_ESYMTAB)
over_symalloc:

/* If we are told where the end of the kernel space is, believe it.
 */
	movl	R(_bootinfo+BI_KERNEND),%edi
	testl	%edi,%edi
	je	no_kernend
	movl	%edi,%esi
no_kernend:

	addl	$PAGE_MASK,%esi		/* round up to a page boundary */
	andl	$~PAGE_MASK,%esi
	movl	%esi,R(_KERNend)	/* save end of kernel */
	movl	%esi,R(physfree)	/* next free page is at end of kernel */

/* Allocate Kernel Page Tables */
	ALLOCPAGES(NKPT)
	movl	%esi,R(_KPTphys)

/* Allocate Page Table Directory */
	ALLOCPAGES(1)
	movl	%esi,R(_IdlePTD)

/* Allocate UPAGES */
	ALLOCPAGES(UPAGES)
	movl	%esi,R(p0upa)		/* physical address of proc0's UPAGES */
	addl	$KERNBASE, %esi
	movl	%esi, R(_proc0paddr)	/* ... and its kernel virtual address */

	ALLOCPAGES(1)			/* vm86/bios stack */
	movl	%esi,R(vm86phystk)

	ALLOCPAGES(3)			/* pgtable + ext + IOPAGES */
	movl	%esi,R(_vm86pa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_vm86paddr)

#ifdef SMP
/* Allocate cpu0's private data page */
	ALLOCPAGES(1)
	movl	%esi,R(cpu0pp)
	addl	$KERNBASE, %esi
	movl	%esi, R(_cpu0prvpage)	/* relocated to KVM space */

/* Allocate SMP page table page */
	ALLOCPAGES(1)
	movl	%esi,R(SMPptpa)
	addl	$KERNBASE, %esi
	movl	%esi, R(_SMPpt)		/* relocated to KVM space */
#endif	/* SMP */

/* Map read-only from zero to the end of the kernel text section */
	xorl	%eax, %eax		/* start at physical address 0 */
#ifdef BDE_DEBUGGER
/* If the debugger is present, actually map everything read-write. */
	cmpl	$0,R(_bdb_exists)
	jne	map_read_write
#endif
	xorl	%edx,%edx		/* pte protection bits: read-only */

#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	2f
	orl	$PG_G,%edx		/* mark kernel text global if PGE */
#endif

2:	movl	$R(_etext),%ecx
	addl	$PAGE_MASK,%ecx
	shrl	$PAGE_SHIFT,%ecx	/* page count covering [0, etext) */
	fillkptphys(%edx)

/* Map read-write, data, bss and symbols */
	movl	$R(_etext),%eax
	addl	$PAGE_MASK, %eax
	andl	$~PAGE_MASK, %eax	/* first page after the text section */
map_read_write:
	movl	$PG_RW,%edx
#if !defined(SMP)
	testl	$CPUID_PGE, R(_cpu_feature)
	jz	1f
	orl	$PG_G,%edx
#endif

1:	movl	R(_KERNend),%ecx
	subl	%eax,%ecx
	shrl	$PAGE_SHIFT,%ecx	/* pages from end-of-text to KERNend */
	fillkptphys(%edx)

/* Map page directory. */
	movl	R(_IdlePTD), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map proc0's UPAGES in the physical way ... */
	movl	R(p0upa), %eax
	movl	$UPAGES, %ecx
	fillkptphys($PG_RW)

/* Map ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkptphys($PG_RW)

/* Map space for the vm86 region */
	movl	R(vm86phystk), %eax
	movl	$4, %ecx		/* stack page + 3 following pages */
	fillkptphys($PG_RW)

/* Map page 0 into the vm86 page table */
	movl	$0, %eax
	movl	$0, %ebx
	movl	$1, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)	/* user-accessible for vm86 */

/* ...likewise for the ISA hole */
	movl	$ISA_HOLE_START, %eax
	movl	$ISA_HOLE_START>>PAGE_SHIFT, %ebx
	movl	$ISA_HOLE_LENGTH>>PAGE_SHIFT, %ecx
	fillkpt(R(_vm86pa), $PG_RW|PG_U)

#ifdef SMP
/* Map cpu0's private page into global kmem (4K @ cpu0prvpage) */
	movl	R(cpu0pp), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map SMP page table page into global kmem FWIW */
	movl	R(SMPptpa), %eax
	movl	$1, %ecx
	fillkptphys($PG_RW)

/* Map the private page into the SMP page table */
	movl	R(cpu0pp), %eax
	movl	$0, %ebx		/* pte offset = 0 */
	movl	$1, %ecx		/* one private page coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* ... and put the page table table in the pde. */
	movl	R(SMPptpa), %eax
	movl	$MPPTDI, %ebx
	movl	$1, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* Fakeup VA for the local apic to allow early traps.
 */
	ALLOCPAGES(1)
	movl	%esi, %eax
	movl	$(NPTEPG-1), %ebx	/* pte offset = NPTEPG-1 (last slot) */
	movl	$1, %ecx		/* one private pt coming right up */
	fillkpt(R(SMPptpa), $PG_RW)

/* Initialize mp lock to allow early traps */
	movl	$1, R(_mp_lock)
#endif	/* SMP */

/* install a pde for temporary double map of bottom of VA */
	movl	R(_KPTphys), %eax
	xorl	%ebx, %ebx		/* pde index 0: low VA identity map */
	movl	$NKPT, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install pde's for pt's */
	movl	R(_KPTphys), %eax
	movl	$KPTDI, %ebx		/* kernel's normal slots at KERNBASE */
	movl	$NKPT, %ecx
	fillkpt(R(_IdlePTD), $PG_RW)

/* install a pde recursively mapping page directory as a page table */
	movl	R(_IdlePTD), %eax
	movl	$PTDPTDI, %ebx
	movl	$1,%ecx
	fillkpt(R(_IdlePTD), $PG_RW)

	ret

#ifdef BDE_DEBUGGER
bdb_prepare_paging:
	cmpl	$0,R(_bdb_exists)
	je	bdb_prepare_paging_exit

	subl	$6,%esp			/* room for a 6-byte gdt/idt pointer */

	/*
	 * Copy and convert debugger entries from the bootstrap gdt and idt
	 * to the kernel gdt and idt.  Everything is still in low memory.
	 * Tracing continues to work after paging is enabled because the
	 * low memory addresses remain valid until everything is relocated.
	 * However, tracing through the setidt() that initializes the trace
	 * trap will crash.
	 */
	sgdt	(%esp)
	movl	2(%esp),%esi		/* base address of bootstrap gdt */
	movl	$R(_gdt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel gdt */
	movl	$8*18/4,%ecx		/* 18 descriptors, copied as longs */
	cld
	rep				/* copy gdt */
	movsl
	movl	$R(_gdt),-8+2(%edi)	/* adjust gdt self-ptr */
	movb	$0x92,-8+5(%edi)	/* access byte: present ring-0 data */
	lgdt	(%esp)

	sidt	(%esp)
	movl	2(%esp),%esi		/* base address of current idt */
	movl	8+4(%esi),%eax		/* convert dbg descriptor to ... */
	movw	8(%esi),%ax
	movl	%eax,R(bdb_dbg_ljmp+1)	/* ... immediate offset ... */
	movl	8+2(%esi),%eax
	movw	%ax,R(bdb_dbg_ljmp+5)	/* ... and selector for ljmp */
	movl	24+4(%esi),%eax		/* same for bpt descriptor */
	movw	24(%esi),%ax
	movl	%eax,R(bdb_bpt_ljmp+1)
	movl	24+2(%esi),%eax
	movw	%ax,R(bdb_bpt_ljmp+5)

	movl	R(_idt),%edi
	movl	%edi,2(%esp)		/* prepare to load kernel idt */
	movl	$8*4/4,%ecx		/* 4 descriptors, copied as longs */
	cld
	rep				/* copy idt */
	movsl
	lidt	(%esp)

	addl	$6,%esp			/* discard the descriptor pointer */

bdb_prepare_paging_exit:
	ret

/* Relocate debugger gdt entries and gdt and idt pointers. */
bdb_commit_paging:
	cmpl	$0,_bdb_exists
	je	bdb_commit_paging_exit

	movl	$_gdt+8*9,%eax		/* adjust slots 9-17 */
	movl	$9,%ecx
reloc_gdt:
	movb	$KERNBASE>>24,7(%eax)	/* top byte of base addresses, was 0, */
	addl	$8,%eax			/* now KERNBASE>>24 */
	loop	reloc_gdt

	subl	$6,%esp
	sgdt	(%esp)
	addl	$KERNBASE,2(%esp)	/* rebase gdt pointer into KVM */
	lgdt	(%esp)
	sidt	(%esp)
	addl	$KERNBASE,2(%esp)	/* rebase idt pointer into KVM */
	lidt	(%esp)
	addl	$6,%esp

	int	$3			/* drop back into the debugger */

bdb_commit_paging_exit:
	ret

#endif /* BDE_DEBUGGER */