Changeset View
Changeset View
Standalone View
Standalone View
sys/i386/i386/machdep.c
Show First 20 Lines • Show All 169 Lines • ▼ Show 20 Lines | |||||
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); | SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); | ||||
/* Intel ICH registers */
#define ICH_PMBASE	0x400
/*
 * Parenthesize the expansion so the macro is safe in any expression
 * context (e.g. `2 * ICH_SMI_EN` would otherwise expand to
 * `2 * ICH_PMBASE + 0x30` and bind wrongly).  CERT PRE01-C.
 */
#define ICH_SMI_EN	(ICH_PMBASE + 0x30)
int _udatasel, _ucodesel; | int _udatasel, _ucodesel; | ||||
u_int basemem; | u_int basemem; | ||||
static int above4g_allow = 1; | |||||
static int above24g_allow = 0; | |||||
int cold = 1;		/* nonzero during early boot; NOTE(review): cleared elsewhere — not visible in this chunk */

#ifdef COMPAT_43
static void osendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask);
#endif
#ifdef COMPAT_FREEBSD4 | #ifdef COMPAT_FREEBSD4 | ||||
static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask); | static void freebsd4_sendsig(sig_t catcher, ksiginfo_t *, sigset_t *mask); | ||||
▲ Show 20 Lines • Show All 1,484 Lines • ▼ Show 20 Lines | sdtossd(sd, ssd) | ||||
ssd->ssd_def32 = sd->sd_def32; | ssd->ssd_def32 = sd->sd_def32; | ||||
ssd->ssd_gran = sd->sd_gran; | ssd->ssd_gran = sd->sd_gran; | ||||
} | } | ||||
static int | static int | ||||
add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap, | add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap, | ||||
int *physmap_idxp) | int *physmap_idxp) | ||||
{ | { | ||||
uint64_t lim, ign; | |||||
int i, insert_idx, physmap_idx; | int i, insert_idx, physmap_idx; | ||||
physmap_idx = *physmap_idxp; | physmap_idx = *physmap_idxp; | ||||
if (length == 0) | if (length == 0) | ||||
return (1); | return (1); | ||||
#ifndef PAE | lim = 0x100000000; /* 4G */ | ||||
if (base > 0xffffffff) { | if (pae_mode && above4g_allow) | ||||
printf("%uK of memory above 4GB ignored\n", | lim = above24g_allow ? -1ULL : 0x600000000; /* 24G */ | ||||
(u_int)(length / 1024)); | if (base >= lim) { | ||||
printf("%uK of memory above %uGB ignored, pae %d " | |||||
"above4g_allow %d above24g_allow %d\n", | |||||
(u_int)(length / 1024), (u_int)(lim >> 30), pae_mode, | |||||
above4g_allow, above24g_allow); | |||||
return (1); | return (1); | ||||
} | } | ||||
#endif | if (base + length >= lim) { | ||||
ign = base + length - lim; | |||||
length -= ign; | |||||
printf("%uK of memory above %uGB ignored, pae %d " | |||||
"above4g_allow %d above24g_allow %d\n", | |||||
(u_int)(ign / 1024), (u_int)(lim >> 30), pae_mode, | |||||
above4g_allow, above24g_allow); | |||||
} | |||||
/* | /* | ||||
* Find insertion point while checking for overlap. Start off by | * Find insertion point while checking for overlap. Start off by | ||||
* assuming the new entry will be added to the end. | * assuming the new entry will be added to the end. | ||||
*/ | */ | ||||
insert_idx = physmap_idx + 2; | insert_idx = physmap_idx + 2; | ||||
for (i = 0; i <= physmap_idx; i += 2) { | for (i = 0; i <= physmap_idx; i += 2) { | ||||
if (base < physmap[i + 1]) { | if (base < physmap[i + 1]) { | ||||
▲ Show 20 Lines • Show All 76 Lines • ▼ Show 20 Lines | add_smap_entries(struct bios_smap *smapbase, vm_paddr_t *physmap, | ||||
for (smap = smapbase; smap < smapend; smap++) | for (smap = smapbase; smap < smapend; smap++) | ||||
if (!add_smap_entry(smap, physmap, physmap_idxp)) | if (!add_smap_entry(smap, physmap, physmap_idxp)) | ||||
break; | break; | ||||
} | } | ||||
static void | static void | ||||
basemem_setup(void) | basemem_setup(void) | ||||
{ | { | ||||
pt_entry_t *pte; | |||||
int i; | |||||
if (basemem > 640) { | if (basemem > 640) { | ||||
printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", | printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", | ||||
basemem); | basemem); | ||||
basemem = 640; | basemem = 640; | ||||
} | } | ||||
/* | pmap_basemem_setup(basemem); | ||||
* Map pages between basemem and ISA_HOLE_START, if any, r/w into | |||||
* the vm86 page table so that vm86 can scribble on them using | |||||
* the vm86 map too. XXX: why 2 ways for this and only 1 way for | |||||
* page 0, at least as initialized here? | |||||
*/ | |||||
pte = (pt_entry_t *)vm86paddr; | |||||
for (i = basemem / 4; i < 160; i++) | |||||
pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; | |||||
} | } | ||||
/* | /* | ||||
* Populate the (physmap) array with base/bound pairs describing the | * Populate the (physmap) array with base/bound pairs describing the | ||||
* available physical memory in the system, then test this memory and | * available physical memory in the system, then test this memory and | ||||
* build the phys_avail array describing the actually-available memory. | * build the phys_avail array describing the actually-available memory. | ||||
* | * | ||||
* If we cannot accurately determine the physical memory map, then use | * If we cannot accurately determine the physical memory map, then use | ||||
* value from the 0xE801 call, and failing that, the RTC. | * value from the 0xE801 call, and failing that, the RTC. | ||||
* | * | ||||
* Total memory size may be set by the kernel environment variable | * Total memory size may be set by the kernel environment variable | ||||
* hw.physmem or the compile-time define MAXMEM. | * hw.physmem or the compile-time define MAXMEM. | ||||
* | * | ||||
* XXX first should be vm_paddr_t. | * XXX first should be vm_paddr_t. | ||||
*/ | */ | ||||
static void | static void | ||||
getmemsize(int first) | getmemsize(int first) | ||||
{ | { | ||||
int has_smap, off, physmap_idx, pa_indx, da_indx; | int has_smap, off, physmap_idx, pa_indx, da_indx; | ||||
u_long memtest; | u_long memtest; | ||||
vm_paddr_t physmap[PHYSMAP_SIZE]; | vm_paddr_t physmap[PHYSMAP_SIZE]; | ||||
pt_entry_t *pte; | |||||
quad_t dcons_addr, dcons_size, physmem_tunable; | quad_t dcons_addr, dcons_size, physmem_tunable; | ||||
int hasbrokenint12, i, res; | int hasbrokenint12, i, res; | ||||
u_int extmem; | u_int extmem; | ||||
struct vm86frame vmf; | struct vm86frame vmf; | ||||
struct vm86context vmc; | struct vm86context vmc; | ||||
vm_paddr_t pa; | vm_paddr_t pa; | ||||
struct bios_smap *smap, *smapbase; | struct bios_smap *smap, *smapbase; | ||||
caddr_t kmdp; | caddr_t kmdp; | ||||
has_smap = 0; | has_smap = 0; | ||||
bzero(&vmf, sizeof(vmf)); | bzero(&vmf, sizeof(vmf)); | ||||
bzero(physmap, sizeof(physmap)); | bzero(physmap, sizeof(physmap)); | ||||
basemem = 0; | basemem = 0; | ||||
/* | /* | ||||
* Tell the physical memory allocator about pages used to store | * Tell the physical memory allocator about pages used to store | ||||
* the kernel and preloaded data. See kmem_bootstrap_free(). | * the kernel and preloaded data. See kmem_bootstrap_free(). | ||||
*/ | */ | ||||
vm_phys_add_seg((vm_paddr_t)KERNLOAD, trunc_page(first)); | vm_phys_add_seg((vm_paddr_t)KERNLOAD, trunc_page(first)); | ||||
TUNABLE_INT_FETCH("hw.above4g_allow", &above4g_allow); | |||||
TUNABLE_INT_FETCH("hw.above24g_allow", &above24g_allow); | |||||
/* | /* | ||||
* Check if the loader supplied an SMAP memory map. If so, | * Check if the loader supplied an SMAP memory map. If so, | ||||
* use that and do not make any VM86 calls. | * use that and do not make any VM86 calls. | ||||
*/ | */ | ||||
physmap_idx = 0; | physmap_idx = 0; | ||||
kmdp = preload_search_by_type("elf kernel"); | kmdp = preload_search_by_type("elf kernel"); | ||||
if (kmdp == NULL) | if (kmdp == NULL) | ||||
kmdp = preload_search_by_type("elf32 kernel"); | kmdp = preload_search_by_type("elf32 kernel"); | ||||
▲ Show 20 Lines • Show All 174 Lines • ▼ Show 20 Lines | #endif | ||||
* Size up each available chunk of physical memory. | * Size up each available chunk of physical memory. | ||||
*/ | */ | ||||
physmap[0] = PAGE_SIZE; /* mask off page 0 */ | physmap[0] = PAGE_SIZE; /* mask off page 0 */ | ||||
pa_indx = 0; | pa_indx = 0; | ||||
da_indx = 1; | da_indx = 1; | ||||
phys_avail[pa_indx++] = physmap[0]; | phys_avail[pa_indx++] = physmap[0]; | ||||
phys_avail[pa_indx] = physmap[0]; | phys_avail[pa_indx] = physmap[0]; | ||||
dump_avail[da_indx] = physmap[0]; | dump_avail[da_indx] = physmap[0]; | ||||
pte = CMAP3; | |||||
/* | /* | ||||
* Get dcons buffer address | * Get dcons buffer address | ||||
*/ | */ | ||||
if (getenv_quad("dcons.addr", &dcons_addr) == 0 || | if (getenv_quad("dcons.addr", &dcons_addr) == 0 || | ||||
getenv_quad("dcons.size", &dcons_size) == 0) | getenv_quad("dcons.size", &dcons_size) == 0) | ||||
dcons_addr = 0; | dcons_addr = 0; | ||||
/* | /* | ||||
* physmap is in bytes, so when converting to page boundaries, | * physmap is in bytes, so when converting to page boundaries, | ||||
* round up the start address and round down the end address. | * round up the start address and round down the end address. | ||||
*/ | */ | ||||
for (i = 0; i <= physmap_idx; i += 2) { | for (i = 0; i <= physmap_idx; i += 2) { | ||||
vm_paddr_t end; | vm_paddr_t end; | ||||
end = ptoa((vm_paddr_t)Maxmem); | end = ptoa((vm_paddr_t)Maxmem); | ||||
if (physmap[i + 1] < end) | if (physmap[i + 1] < end) | ||||
end = trunc_page(physmap[i + 1]); | end = trunc_page(physmap[i + 1]); | ||||
for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { | for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { | ||||
int tmp, page_bad, full; | int tmp, page_bad, full; | ||||
int *ptr = (int *)CADDR3; | int *ptr; | ||||
full = FALSE; | full = FALSE; | ||||
/* | /* | ||||
* block out kernel memory as not available. | * block out kernel memory as not available. | ||||
*/ | */ | ||||
if (pa >= KERNLOAD && pa < first) | if (pa >= KERNLOAD && pa < first) | ||||
goto do_dump_avail; | goto do_dump_avail; | ||||
/* | /* | ||||
* block out dcons buffer | * block out dcons buffer | ||||
*/ | */ | ||||
if (dcons_addr > 0 | if (dcons_addr > 0 | ||||
&& pa >= trunc_page(dcons_addr) | && pa >= trunc_page(dcons_addr) | ||||
&& pa < dcons_addr + dcons_size) | && pa < dcons_addr + dcons_size) | ||||
goto do_dump_avail; | goto do_dump_avail; | ||||
page_bad = FALSE; | page_bad = FALSE; | ||||
if (memtest == 0) | if (memtest == 0) | ||||
goto skip_memtest; | goto skip_memtest; | ||||
/* | /* | ||||
* map page into kernel: valid, read/write,non-cacheable | * map page into kernel: valid, read/write,non-cacheable | ||||
*/ | */ | ||||
*pte = pa | PG_V | PG_RW | PG_N; | ptr = (int *)pmap_cmap3(pa, PG_V | PG_RW | PG_N); | ||||
invltlb(); | |||||
tmp = *(int *)ptr; | tmp = *(int *)ptr; | ||||
/* | /* | ||||
* Test for alternating 1's and 0's | * Test for alternating 1's and 0's | ||||
*/ | */ | ||||
*(volatile int *)ptr = 0xaaaaaaaa; | *(volatile int *)ptr = 0xaaaaaaaa; | ||||
if (*(volatile int *)ptr != 0xaaaaaaaa) | if (*(volatile int *)ptr != 0xaaaaaaaa) | ||||
page_bad = TRUE; | page_bad = TRUE; | ||||
▲ Show 20 Lines • Show All 64 Lines • ▼ Show 20 Lines | do_dump_avail: | ||||
dump_avail[da_indx++] = pa; /* start */ | dump_avail[da_indx++] = pa; /* start */ | ||||
dump_avail[da_indx] = pa + PAGE_SIZE; /* end */ | dump_avail[da_indx] = pa + PAGE_SIZE; /* end */ | ||||
} | } | ||||
do_next: | do_next: | ||||
if (full) | if (full) | ||||
break; | break; | ||||
} | } | ||||
} | } | ||||
*pte = 0; | pmap_cmap3(0, 0); | ||||
invltlb(); | |||||
/* | /* | ||||
* XXX | * XXX | ||||
* The last chunk must contain at least one page plus the message | * The last chunk must contain at least one page plus the message | ||||
* buffer to avoid complicating other code (message buffer address | * buffer to avoid complicating other code (message buffer address | ||||
* calculation, etc.). | * calculation, etc.). | ||||
*/ | */ | ||||
while (phys_avail[pa_indx - 1] + PAGE_SIZE + | while (phys_avail[pa_indx - 1] + PAGE_SIZE + | ||||
▲ Show 20 Lines • Show All 238 Lines • ▼ Show 20 Lines | init386(int first) | ||||
/* | /* | ||||
* Initialize the clock before the console so that console | * Initialize the clock before the console so that console | ||||
* initialization can use DELAY(). | * initialization can use DELAY(). | ||||
*/ | */ | ||||
clock_init(); | clock_init(); | ||||
finishidentcpu(); /* Final stage of CPU initialization */ | finishidentcpu(); /* Final stage of CPU initialization */ | ||||
i386_setidt2(); | i386_setidt2(); | ||||
pmap_set_nx(); | |||||
initializecpu(); /* Initialize CPU registers */ | initializecpu(); /* Initialize CPU registers */ | ||||
initializecpucache(); | initializecpucache(); | ||||
/* pointer to selector slot for %fs/%gs */ | /* pointer to selector slot for %fs/%gs */ | ||||
PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd); | PCPU_SET(fsgs_gdt, &gdt[GUFS_SEL].sd); | ||||
/* Initialize the tss (except for the final esp0) early for vm86. */ | /* Initialize the tss (except for the final esp0) early for vm86. */ | ||||
common_tss0.tss_esp0 = thread0.td_kstack + thread0.td_kstack_pages * | common_tss0.tss_esp0 = thread0.td_kstack + thread0.td_kstack_pages * | ||||
▲ Show 20 Lines • Show All 78 Lines • ▼ Show 20 Lines | #endif | ||||
/* transfer to user mode */ | /* transfer to user mode */ | ||||
_ucodesel = GSEL(GUCODE_SEL, SEL_UPL); | _ucodesel = GSEL(GUCODE_SEL, SEL_UPL); | ||||
_udatasel = GSEL(GUDATA_SEL, SEL_UPL); | _udatasel = GSEL(GUDATA_SEL, SEL_UPL); | ||||
/* setup proc 0's pcb */ | /* setup proc 0's pcb */ | ||||
thread0.td_pcb->pcb_flags = 0; | thread0.td_pcb->pcb_flags = 0; | ||||
#if defined(PAE) || defined(PAE_TABLES) | thread0.td_pcb->pcb_cr3 = pmap_get_kcr3(); | ||||
thread0.td_pcb->pcb_cr3 = (int)IdlePDPT; | |||||
#else | |||||
thread0.td_pcb->pcb_cr3 = (int)IdlePTD; | |||||
#endif | |||||
thread0.td_pcb->pcb_ext = 0; | thread0.td_pcb->pcb_ext = 0; | ||||
thread0.td_frame = &proc0_tf; | thread0.td_frame = &proc0_tf; | ||||
cpu_probe_amdc1e(); | cpu_probe_amdc1e(); | ||||
#ifdef FDT | #ifdef FDT | ||||
x86_init_fdt(); | x86_init_fdt(); | ||||
#endif | #endif | ||||
▲ Show 20 Lines • Show All 52 Lines • ▼ Show 20 Lines | machdep_init_trampoline(void) | ||||
/* dblfault TSS */ | /* dblfault TSS */ | ||||
dblfault_tss = pmap_trm_alloc(sizeof(struct i386tss), M_NOWAIT | M_ZERO); | dblfault_tss = pmap_trm_alloc(sizeof(struct i386tss), M_NOWAIT | M_ZERO); | ||||
dblfault_stack = pmap_trm_alloc(PAGE_SIZE, M_NOWAIT); | dblfault_stack = pmap_trm_alloc(PAGE_SIZE, M_NOWAIT); | ||||
dblfault_tss->tss_esp = dblfault_tss->tss_esp0 = | dblfault_tss->tss_esp = dblfault_tss->tss_esp0 = | ||||
dblfault_tss->tss_esp1 = dblfault_tss->tss_esp2 = | dblfault_tss->tss_esp1 = dblfault_tss->tss_esp2 = | ||||
(int)dblfault_stack + PAGE_SIZE; | (int)dblfault_stack + PAGE_SIZE; | ||||
dblfault_tss->tss_ss = dblfault_tss->tss_ss0 = dblfault_tss->tss_ss1 = | dblfault_tss->tss_ss = dblfault_tss->tss_ss0 = dblfault_tss->tss_ss1 = | ||||
dblfault_tss->tss_ss2 = GSEL(GDATA_SEL, SEL_KPL); | dblfault_tss->tss_ss2 = GSEL(GDATA_SEL, SEL_KPL); | ||||
#if defined(PAE) || defined(PAE_TABLES) | dblfault_tss->tss_cr3 = pmap_get_kcr3(); | ||||
dblfault_tss->tss_cr3 = (int)IdlePDPT; | |||||
#else | |||||
dblfault_tss->tss_cr3 = (int)IdlePTD; | |||||
#endif | |||||
dblfault_tss->tss_eip = (int)dblfault_handler; | dblfault_tss->tss_eip = (int)dblfault_handler; | ||||
dblfault_tss->tss_eflags = PSL_KERNEL; | dblfault_tss->tss_eflags = PSL_KERNEL; | ||||
dblfault_tss->tss_ds = dblfault_tss->tss_es = | dblfault_tss->tss_ds = dblfault_tss->tss_es = | ||||
dblfault_tss->tss_gs = GSEL(GDATA_SEL, SEL_KPL); | dblfault_tss->tss_gs = GSEL(GDATA_SEL, SEL_KPL); | ||||
dblfault_tss->tss_fs = GSEL(GPRIV_SEL, SEL_KPL); | dblfault_tss->tss_fs = GSEL(GPRIV_SEL, SEL_KPL); | ||||
dblfault_tss->tss_cs = GSEL(GCODE_SEL, SEL_KPL); | dblfault_tss->tss_cs = GSEL(GCODE_SEL, SEL_KPL); | ||||
dblfault_tss->tss_ldt = GSEL(GLDT_SEL, SEL_KPL); | dblfault_tss->tss_ldt = GSEL(GLDT_SEL, SEL_KPL); | ||||
gdt[GPANIC_SEL].sd.sd_lobase = (int)dblfault_tss; | gdt[GPANIC_SEL].sd.sd_lobase = (int)dblfault_tss; | ||||
▲ Show 20 Lines • Show All 674 Lines • Show Last 20 Lines |