Index: head/stand/efi/loader/copy.c =================================================================== --- head/stand/efi/loader/copy.c (revision 368040) +++ head/stand/efi/loader/copy.c (revision 368041) @@ -1,370 +1,368 @@ /*- * Copyright (c) 2013 The FreeBSD Foundation * All rights reserved. * * This software was developed by Benno Rice under sponsorship from * the FreeBSD Foundation. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include "loader_efi.h" #if defined(__i386__) || defined(__amd64__) #include #include +#include /* * The code is excerpted from sys/x86/x86/identcpu.c: identify_cpu(), * identify_hypervisor(), and dev/hyperv/vmbus/hyperv.c: hyperv_identify(). 
*/ #define CPUID_LEAF_HV_MAXLEAF 0x40000000 #define CPUID_LEAF_HV_INTERFACE 0x40000001 #define CPUID_LEAF_HV_FEATURES 0x40000003 #define CPUID_LEAF_HV_LIMITS 0x40000005 #define CPUID_HV_IFACE_HYPERV 0x31237648 /* HV#1 */ #define CPUID_HV_MSR_TIME_REFCNT 0x0002 /* MSR_HV_TIME_REF_COUNT */ #define CPUID_HV_MSR_HYPERCALL 0x0020 static int running_on_hyperv(void) { char hv_vendor[16]; uint32_t regs[4]; do_cpuid(1, regs); if ((regs[2] & CPUID2_HV) == 0) return (0); do_cpuid(CPUID_LEAF_HV_MAXLEAF, regs); if (regs[0] < CPUID_LEAF_HV_LIMITS) return (0); ((uint32_t *)&hv_vendor)[0] = regs[1]; ((uint32_t *)&hv_vendor)[1] = regs[2]; ((uint32_t *)&hv_vendor)[2] = regs[3]; hv_vendor[12] = '\0'; if (strcmp(hv_vendor, "Microsoft Hv") != 0) return (0); do_cpuid(CPUID_LEAF_HV_INTERFACE, regs); if (regs[0] != CPUID_HV_IFACE_HYPERV) return (0); do_cpuid(CPUID_LEAF_HV_FEATURES, regs); if ((regs[0] & CPUID_HV_MSR_HYPERCALL) == 0) return (0); if ((regs[0] & CPUID_HV_MSR_TIME_REFCNT) == 0) return (0); return (1); } -#define KERNEL_PHYSICAL_BASE (2*1024*1024) - static void efi_verify_staging_size(unsigned long *nr_pages) { UINTN sz; EFI_MEMORY_DESCRIPTOR *map = NULL, *p; EFI_PHYSICAL_ADDRESS start, end; UINTN key, dsz; UINT32 dver; EFI_STATUS status; int i, ndesc; unsigned long available_pages = 0; sz = 0; for (;;) { status = BS->GetMemoryMap(&sz, map, &key, &dsz, &dver); if (!EFI_ERROR(status)) break; if (status != EFI_BUFFER_TOO_SMALL) { printf("Can't read memory map: %lu\n", EFI_ERROR_CODE(status)); goto out; } free(map); /* Allocate 10 descriptors more than the size reported, * to allow for any fragmentation caused by calling * malloc */ map = malloc(sz + (10 * dsz)); if (map == NULL) { printf("Unable to allocate memory\n"); goto out; } } ndesc = sz / dsz; for (i = 0, p = map; i < ndesc; i++, p = NextMemoryDescriptor(p, dsz)) { start = p->PhysicalStart; end = start + p->NumberOfPages * EFI_PAGE_SIZE; - if (KERNEL_PHYSICAL_BASE < start || - KERNEL_PHYSICAL_BASE >= end) + if (KERNLOAD < start || KERNLOAD >= end) continue; available_pages = p->NumberOfPages - - ((KERNEL_PHYSICAL_BASE - start) >> EFI_PAGE_SHIFT); + ((KERNLOAD - start) >> EFI_PAGE_SHIFT); break; } if (available_pages == 0) { printf("Can't find valid memory map for staging area!\n"); goto out; } i++; p = NextMemoryDescriptor(p, dsz); for ( ; i < ndesc; i++, p = NextMemoryDescriptor(p, dsz)) { if (p->Type != EfiConventionalMemory && p->Type != EfiLoaderData) break; if (p->PhysicalStart != end) break; end = p->PhysicalStart + p->NumberOfPages * EFI_PAGE_SIZE; available_pages += p->NumberOfPages; } if (*nr_pages > available_pages) { printf("Staging area's size is reduced: %ld -> %ld!\n", *nr_pages, available_pages); *nr_pages = available_pages; } out: free(map); } #endif /* __i386__ || __amd64__ */ #ifndef EFI_STAGING_SIZE #if defined(__amd64__) #define EFI_STAGING_SIZE 100 #elif defined(__arm__) #define EFI_STAGING_SIZE 32 #else #define EFI_STAGING_SIZE 64 #endif #endif EFI_PHYSICAL_ADDRESS staging, staging_end, staging_base; int stage_offset_set = 0; ssize_t stage_offset; int efi_copy_init(void) { EFI_STATUS status; unsigned long nr_pages; nr_pages = EFI_SIZE_TO_PAGES((EFI_STAGING_SIZE) * 1024 * 1024); #if defined(__i386__) || defined(__amd64__) /* * We'll decrease nr_pages, if it's too big. Currently we only * apply this to FreeBSD VM running on Hyper-V. Why? 
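For readers following efi_verify_staging_size() above, the sketch below models the same accounting against a plain array of simplified descriptors: find the region that contains KERNLOAD, then extend the page count across physically contiguous usable regions. The struct, constants, and sample map are hypothetical (the real code walks EFI_MEMORY_DESCRIPTORs from GetMemoryMap()); only the walk mirrors the loader logic, with KERNLOAD defaulting to the 2MB value defined in vmparam.h further down.

#include <stdint.h>
#include <stdio.h>

#define KERNLOAD	0x200000UL	/* 2MB, the default from vmparam.h below */
#define PAGE_SIZE	4096UL

/* Simplified, hypothetical stand-in for EFI_MEMORY_DESCRIPTOR. */
struct mem_desc {
	uint64_t start;		/* PhysicalStart */
	uint64_t npages;	/* NumberOfPages */
	int	 usable;	/* EfiConventionalMemory or EfiLoaderData */
};

static unsigned long
avail_pages_at_kernload(const struct mem_desc *d, int ndesc)
{
	unsigned long avail = 0;
	uint64_t end = 0;
	int i;

	/* Find the descriptor that contains KERNLOAD. */
	for (i = 0; i < ndesc; i++) {
		end = d[i].start + d[i].npages * PAGE_SIZE;
		if (KERNLOAD < d[i].start || KERNLOAD >= end)
			continue;
		avail = d[i].npages - (KERNLOAD - d[i].start) / PAGE_SIZE;
		break;
	}
	if (avail == 0)
		return (0);

	/* Extend the count across physically contiguous usable descriptors. */
	for (i++; i < ndesc; i++) {
		if (!d[i].usable || d[i].start != end)
			break;
		end = d[i].start + d[i].npages * PAGE_SIZE;
		avail += d[i].npages;
	}
	return (avail);
}

int
main(void)
{
	/* Hypothetical map: low 1MB reserved, then usable RAM up to 768MB. */
	struct mem_desc map[] = {
		{ 0x000000, 0x100, 0 },
		{ 0x100000, (0x30000000UL - 0x100000) / PAGE_SIZE, 1 },
	};

	printf("%lu pages available at KERNLOAD\n",
	    avail_pages_at_kernload(map, 2));
	return (0);
}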
Please see * https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=211746#c28 */ if (running_on_hyperv()) efi_verify_staging_size(&nr_pages); /* * The staging area must reside in the the first 1GB physical * memory: see elf64_exec() in * boot/efi/loader/arch/amd64/elf64_freebsd.c. */ staging = 1024*1024*1024; status = BS->AllocatePages(AllocateMaxAddress, EfiLoaderData, nr_pages, &staging); #else status = BS->AllocatePages(AllocateAnyPages, EfiLoaderData, nr_pages, &staging); #endif if (EFI_ERROR(status)) { printf("failed to allocate staging area: %lu\n", EFI_ERROR_CODE(status)); return (status); } staging_base = staging; staging_end = staging + nr_pages * EFI_PAGE_SIZE; #if defined(__aarch64__) || defined(__arm__) || defined(__riscv) /* * Round the kernel load address to a 2MiB value. This is needed * because the kernel builds a page table based on where it has * been loaded in physical address space. As the kernel will use * either a 1MiB or 2MiB page for this we need to make sure it * is correctly aligned for both cases. */ staging = roundup2(staging, 2 * 1024 * 1024); #endif return (0); } static bool efi_check_space(vm_offset_t end) { EFI_PHYSICAL_ADDRESS addr; EFI_STATUS status; unsigned long nr_pages; /* There is already enough space */ if (end <= staging_end) return (true); end = roundup2(end, EFI_PAGE_SIZE); nr_pages = EFI_SIZE_TO_PAGES(end - staging_end); #if defined(__i386__) || defined(__amd64__) /* X86 needs all memory to be allocated under the 1G boundary */ if (end > 1024*1024*1024) goto before_staging; #endif /* Try to allocate more space after the previous allocation */ addr = staging_end; status = BS->AllocatePages(AllocateAddress, EfiLoaderData, nr_pages, &addr); if (!EFI_ERROR(status)) { staging_end = staging_end + nr_pages * EFI_PAGE_SIZE; return (true); } before_staging: /* Try allocating space before the previous allocation */ if (staging < nr_pages * EFI_PAGE_SIZE) { printf("Not enough space before allocation\n"); return (false); } addr = staging - nr_pages * EFI_PAGE_SIZE; #if defined(__aarch64__) || defined(__arm__) || defined(__riscv) /* See efi_copy_init for why this is needed */ addr = rounddown2(addr, 2 * 1024 * 1024); #endif nr_pages = EFI_SIZE_TO_PAGES(staging_base - addr); status = BS->AllocatePages(AllocateAddress, EfiLoaderData, nr_pages, &addr); if (!EFI_ERROR(status)) { /* * Move the old allocation and update the state so * translation still works. */ staging_base = addr; memmove((void *)(uintptr_t)staging_base, (void *)(uintptr_t)staging, staging_end - staging); stage_offset -= (staging - staging_base); staging = staging_base; return (true); } printf("efi_check_space: Unable to expand staging area\n"); return (false); } void * efi_translate(vm_offset_t ptr) { return ((void *)(ptr + stage_offset)); } ssize_t efi_copyin(const void *src, vm_offset_t dest, const size_t len) { if (!stage_offset_set) { stage_offset = (vm_offset_t)staging - dest; stage_offset_set = 1; } /* XXX: Callers do not check for failure. */ if (!efi_check_space(dest + stage_offset + len)) { errno = ENOMEM; return (-1); } bcopy(src, (void *)(dest + stage_offset), len); return (len); } ssize_t efi_copyout(const vm_offset_t src, void *dest, const size_t len) { /* XXX: Callers do not check for failure. 
*/ if (src + stage_offset + len > staging_end) { errno = ENOMEM; return (-1); } bcopy((void *)(src + stage_offset), dest, len); return (len); } ssize_t efi_readin(readin_handle_t fd, vm_offset_t dest, const size_t len) { if (!stage_offset_set) { stage_offset = (vm_offset_t)staging - dest; stage_offset_set = 1; } if (!efi_check_space(dest + stage_offset + len)) { errno = ENOMEM; return (-1); } return (VECTX_READ(fd, (void *)(dest + stage_offset), len)); } void efi_copy_finish(void) { uint64_t *src, *dst, *last; src = (uint64_t *)(uintptr_t)staging; dst = (uint64_t *)(uintptr_t)(staging - stage_offset); last = (uint64_t *)(uintptr_t)staging_end; while (src < last) *dst++ = *src++; } Index: head/sys/amd64/amd64/genassym.c =================================================================== --- head/sys/amd64/amd64/genassym.c (revision 368040) +++ head/sys/amd64/amd64/genassym.c (revision 368041) @@ -1,277 +1,278 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 1982, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
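The staging translation in copy.c above hangs off a single offset: the first efi_copyin() fixes stage_offset = staging - dest, later copies and reads land at dest + stage_offset inside the staging buffer, and efi_copy_finish() slides everything back down by stage_offset so the kernel ends up at the physical address it was linked for. A minimal sketch of that arithmetic with made-up addresses (the real staging base comes from AllocatePages()):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t staging = 0x40000000;	/* hypothetical staging buffer below 1GB */
	uint64_t dest = 0x200000;	/* first copyin destination (KERNLOAD) */
	int64_t stage_offset = (int64_t)(staging - dest);

	/* efi_copyin(src, dest, len) really writes to dest + stage_offset. */
	uint64_t write_at = dest + stage_offset;

	/* efi_copy_finish() copies staging down to staging - stage_offset. */
	uint64_t final_at = staging - stage_offset;

	printf("copyin writes at %#jx, copy_finish lands it at %#jx\n",
	    (uintmax_t)write_at, (uintmax_t)final_at);
	return (0);
}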
* * from: @(#)genassym.c 5.11 (Berkeley) 5/10/91 */ #include __FBSDID("$FreeBSD$"); #include "opt_hwpmc_hooks.h" #include "opt_kstack_pages.h" #include #include #include #include #include #include #ifdef HWPMC_HOOKS #include #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include ASSYM(P_VMSPACE, offsetof(struct proc, p_vmspace)); ASSYM(VM_PMAP, offsetof(struct vmspace, vm_pmap)); ASSYM(PM_ACTIVE, offsetof(struct pmap, pm_active)); ASSYM(P_MD, offsetof(struct proc, p_md)); ASSYM(MD_LDT, offsetof(struct mdproc, md_ldt)); ASSYM(MD_LDT_SD, offsetof(struct mdproc, md_ldt_sd)); ASSYM(MD_EFIRT_TMP, offsetof(struct mdthread, md_efirt_tmp)); ASSYM(TD_LOCK, offsetof(struct thread, td_lock)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_PFLAGS, offsetof(struct thread, td_pflags)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); ASSYM(TD_FRAME, offsetof(struct thread, td_frame)); ASSYM(TD_MD, offsetof(struct thread, td_md)); ASSYM(TD_MD_PCB, offsetof(struct thread, td_md.md_pcb)); ASSYM(TD_MD_STACK_BASE, offsetof(struct thread, td_md.md_stack_base)); ASSYM(TDF_ASTPENDING, TDF_ASTPENDING); ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); ASSYM(TDP_CALLCHAIN, TDP_CALLCHAIN); ASSYM(TDP_KTHREAD, TDP_KTHREAD); ASSYM(PAGE_SIZE, PAGE_SIZE); ASSYM(NPTEPG, NPTEPG); ASSYM(NPDEPG, NPDEPG); ASSYM(addr_P4Tmap, addr_P4Tmap); ASSYM(addr_P4Dmap, addr_P4Dmap); ASSYM(addr_P5Tmap, addr_P5Tmap); ASSYM(addr_P5Dmap, addr_P5Dmap); ASSYM(PDESIZE, sizeof(pd_entry_t)); ASSYM(PTESIZE, sizeof(pt_entry_t)); ASSYM(PAGE_SHIFT, PAGE_SHIFT); ASSYM(PAGE_MASK, PAGE_MASK); ASSYM(PDRSHIFT, PDRSHIFT); ASSYM(PDPSHIFT, PDPSHIFT); ASSYM(PML4SHIFT, PML4SHIFT); ASSYM(val_KPDPI, KPDPI); ASSYM(val_KPML4I, KPML4I); ASSYM(val_PML4PML4I, PML4PML4I); ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS); ASSYM(KERNBASE, KERNBASE); +ASSYM(KERNLOAD, KERNLOAD); ASSYM(DMAP_MIN_ADDRESS, DMAP_MIN_ADDRESS); ASSYM(DMAP_MAX_ADDRESS, DMAP_MAX_ADDRESS); ASSYM(PCB_R15, offsetof(struct pcb, pcb_r15)); ASSYM(PCB_R14, offsetof(struct pcb, pcb_r14)); ASSYM(PCB_R13, offsetof(struct pcb, pcb_r13)); ASSYM(PCB_R12, offsetof(struct pcb, pcb_r12)); ASSYM(PCB_RBP, offsetof(struct pcb, pcb_rbp)); ASSYM(PCB_RSP, offsetof(struct pcb, pcb_rsp)); ASSYM(PCB_RBX, offsetof(struct pcb, pcb_rbx)); ASSYM(PCB_RIP, offsetof(struct pcb, pcb_rip)); ASSYM(PCB_FSBASE, offsetof(struct pcb, pcb_fsbase)); ASSYM(PCB_GSBASE, offsetof(struct pcb, pcb_gsbase)); ASSYM(PCB_KGSBASE, offsetof(struct pcb, pcb_kgsbase)); ASSYM(PCB_CR0, offsetof(struct pcb, pcb_cr0)); ASSYM(PCB_CR2, offsetof(struct pcb, pcb_cr2)); ASSYM(PCB_CR3, offsetof(struct pcb, pcb_cr3)); ASSYM(PCB_CR4, offsetof(struct pcb, pcb_cr4)); ASSYM(PCB_DR0, offsetof(struct pcb, pcb_dr0)); ASSYM(PCB_DR1, offsetof(struct pcb, pcb_dr1)); ASSYM(PCB_DR2, offsetof(struct pcb, pcb_dr2)); ASSYM(PCB_DR3, offsetof(struct pcb, pcb_dr3)); ASSYM(PCB_DR6, offsetof(struct pcb, pcb_dr6)); ASSYM(PCB_DR7, offsetof(struct pcb, pcb_dr7)); ASSYM(PCB_GDT, offsetof(struct pcb, pcb_gdt)); ASSYM(PCB_IDT, offsetof(struct pcb, pcb_idt)); ASSYM(PCB_LDT, offsetof(struct pcb, pcb_ldt)); ASSYM(PCB_TR, offsetof(struct pcb, pcb_tr)); ASSYM(PCB_FLAGS, offsetof(struct pcb, pcb_flags)); ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); ASSYM(PCB_SAVED_UCR3, offsetof(struct pcb, pcb_saved_ucr3)); ASSYM(PCB_TSSP, offsetof(struct pcb, pcb_tssp)); ASSYM(PCB_SAVEFPU, offsetof(struct pcb, 
pcb_save)); ASSYM(PCB_EFER, offsetof(struct pcb, pcb_efer)); ASSYM(PCB_STAR, offsetof(struct pcb, pcb_star)); ASSYM(PCB_LSTAR, offsetof(struct pcb, pcb_lstar)); ASSYM(PCB_CSTAR, offsetof(struct pcb, pcb_cstar)); ASSYM(PCB_SFMASK, offsetof(struct pcb, pcb_sfmask)); ASSYM(PCB_SIZE, sizeof(struct pcb)); ASSYM(PCB_FULL_IRET, PCB_FULL_IRET); ASSYM(PCB_DBREGS, PCB_DBREGS); ASSYM(PCB_32BIT, PCB_32BIT); ASSYM(TSS_RSP0, offsetof(struct amd64tss, tss_rsp0)); ASSYM(TF_R15, offsetof(struct trapframe, tf_r15)); ASSYM(TF_R14, offsetof(struct trapframe, tf_r14)); ASSYM(TF_R13, offsetof(struct trapframe, tf_r13)); ASSYM(TF_R12, offsetof(struct trapframe, tf_r12)); ASSYM(TF_R11, offsetof(struct trapframe, tf_r11)); ASSYM(TF_R10, offsetof(struct trapframe, tf_r10)); ASSYM(TF_R9, offsetof(struct trapframe, tf_r9)); ASSYM(TF_R8, offsetof(struct trapframe, tf_r8)); ASSYM(TF_RDI, offsetof(struct trapframe, tf_rdi)); ASSYM(TF_RSI, offsetof(struct trapframe, tf_rsi)); ASSYM(TF_RBP, offsetof(struct trapframe, tf_rbp)); ASSYM(TF_RBX, offsetof(struct trapframe, tf_rbx)); ASSYM(TF_RDX, offsetof(struct trapframe, tf_rdx)); ASSYM(TF_RCX, offsetof(struct trapframe, tf_rcx)); ASSYM(TF_RAX, offsetof(struct trapframe, tf_rax)); ASSYM(TF_TRAPNO, offsetof(struct trapframe, tf_trapno)); ASSYM(TF_ADDR, offsetof(struct trapframe, tf_addr)); ASSYM(TF_ERR, offsetof(struct trapframe, tf_err)); ASSYM(TF_RIP, offsetof(struct trapframe, tf_rip)); ASSYM(TF_CS, offsetof(struct trapframe, tf_cs)); ASSYM(TF_RFLAGS, offsetof(struct trapframe, tf_rflags)); ASSYM(TF_RSP, offsetof(struct trapframe, tf_rsp)); ASSYM(TF_SS, offsetof(struct trapframe, tf_ss)); ASSYM(TF_DS, offsetof(struct trapframe, tf_ds)); ASSYM(TF_ES, offsetof(struct trapframe, tf_es)); ASSYM(TF_FS, offsetof(struct trapframe, tf_fs)); ASSYM(TF_GS, offsetof(struct trapframe, tf_gs)); ASSYM(TF_FLAGS, offsetof(struct trapframe, tf_flags)); ASSYM(TF_SIZE, sizeof(struct trapframe)); ASSYM(TF_HASSEGS, TF_HASSEGS); ASSYM(PTI_RDX, offsetof(struct pti_frame, pti_rdx)); ASSYM(PTI_RAX, offsetof(struct pti_frame, pti_rax)); ASSYM(PTI_ERR, offsetof(struct pti_frame, pti_err)); ASSYM(PTI_RIP, offsetof(struct pti_frame, pti_rip)); ASSYM(PTI_CS, offsetof(struct pti_frame, pti_cs)); ASSYM(PTI_RFLAGS, offsetof(struct pti_frame, pti_rflags)); ASSYM(PTI_RSP, offsetof(struct pti_frame, pti_rsp)); ASSYM(PTI_SS, offsetof(struct pti_frame, pti_ss)); ASSYM(PTI_SIZE, sizeof(struct pti_frame)); ASSYM(SIGF_HANDLER, offsetof(struct sigframe, sf_ahu.sf_handler)); ASSYM(SIGF_UC, offsetof(struct sigframe, sf_uc)); ASSYM(UC_EFLAGS, offsetof(ucontext_t, uc_mcontext.mc_rflags)); ASSYM(ENOENT, ENOENT); ASSYM(EFAULT, EFAULT); ASSYM(ENAMETOOLONG, ENAMETOOLONG); ASSYM(MAXCOMLEN, MAXCOMLEN); ASSYM(MAXPATHLEN, MAXPATHLEN); ASSYM(PC_SIZEOF, sizeof(struct pcpu)); ASSYM(PC_PRVSPACE, offsetof(struct pcpu, pc_prvspace)); ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); ASSYM(PC_FPCURTHREAD, offsetof(struct pcpu, pc_fpcurthread)); ASSYM(PC_IDLETHREAD, offsetof(struct pcpu, pc_idlethread)); ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb)); ASSYM(PC_CPUID, offsetof(struct pcpu, pc_cpuid)); ASSYM(PC_SCRATCH_RSP, offsetof(struct pcpu, pc_scratch_rsp)); ASSYM(PC_SCRATCH_RAX, offsetof(struct pcpu, pc_scratch_rax)); ASSYM(PC_CURPMAP, offsetof(struct pcpu, pc_curpmap)); ASSYM(PC_TSSP, offsetof(struct pcpu, pc_tssp)); ASSYM(PC_RSP0, offsetof(struct pcpu, pc_rsp0)); ASSYM(PC_FS32P, offsetof(struct pcpu, pc_fs32p)); ASSYM(PC_GS32P, offsetof(struct pcpu, pc_gs32p)); ASSYM(PC_LDT, offsetof(struct pcpu, pc_ldt)); 
ASSYM(PC_COMMONTSS, offsetof(struct pcpu, pc_common_tss)); ASSYM(PC_TSS, offsetof(struct pcpu, pc_tss)); ASSYM(PC_PM_SAVE_CNT, offsetof(struct pcpu, pc_pm_save_cnt)); ASSYM(PC_KCR3, offsetof(struct pcpu, pc_kcr3)); ASSYM(PC_UCR3, offsetof(struct pcpu, pc_ucr3)); ASSYM(PC_UCR3_LOAD_MASK, offsetof(struct pcpu, pc_ucr3_load_mask)); ASSYM(PC_SAVED_UCR3, offsetof(struct pcpu, pc_saved_ucr3)); ASSYM(PC_PTI_STACK, offsetof(struct pcpu, pc_pti_stack)); ASSYM(PC_PTI_STACK_SZ, PC_PTI_STACK_SZ); ASSYM(PC_PTI_RSP0, offsetof(struct pcpu, pc_pti_rsp0)); ASSYM(PC_IBPB_SET, offsetof(struct pcpu, pc_ibpb_set)); ASSYM(PC_MDS_TMP, offsetof(struct pcpu, pc_mds_tmp)); ASSYM(PC_MDS_BUF, offsetof(struct pcpu, pc_mds_buf)); ASSYM(PC_MDS_BUF64, offsetof(struct pcpu, pc_mds_buf64)); ASSYM(LA_EOI, LAPIC_EOI * LAPIC_MEM_MUL); ASSYM(LA_ISR, LAPIC_ISR0 * LAPIC_MEM_MUL); ASSYM(KCSEL, GSEL(GCODE_SEL, SEL_KPL)); ASSYM(KDSEL, GSEL(GDATA_SEL, SEL_KPL)); ASSYM(KUCSEL, GSEL(GUCODE_SEL, SEL_UPL)); ASSYM(KUDSEL, GSEL(GUDATA_SEL, SEL_UPL)); ASSYM(KUC32SEL, GSEL(GUCODE32_SEL, SEL_UPL)); ASSYM(KUF32SEL, GSEL(GUFS32_SEL, SEL_UPL)); ASSYM(KUG32SEL, GSEL(GUGS32_SEL, SEL_UPL)); ASSYM(TSSSEL, GSEL(GPROC0_SEL, SEL_KPL)); ASSYM(LDTSEL, GSEL(GUSERLDT_SEL, SEL_KPL)); ASSYM(SEL_RPL_MASK, SEL_RPL_MASK); ASSYM(__FreeBSD_version, __FreeBSD_version); #ifdef HWPMC_HOOKS ASSYM(PMC_FN_USER_CALLCHAIN, PMC_FN_USER_CALLCHAIN); #endif ASSYM(EC_EFI_STATUS, offsetof(struct efirt_callinfo, ec_efi_status)); ASSYM(EC_FPTR, offsetof(struct efirt_callinfo, ec_fptr)); ASSYM(EC_ARGCNT, offsetof(struct efirt_callinfo, ec_argcnt)); ASSYM(EC_ARG1, offsetof(struct efirt_callinfo, ec_arg1)); ASSYM(EC_ARG2, offsetof(struct efirt_callinfo, ec_arg2)); ASSYM(EC_ARG3, offsetof(struct efirt_callinfo, ec_arg3)); ASSYM(EC_ARG4, offsetof(struct efirt_callinfo, ec_arg4)); ASSYM(EC_ARG5, offsetof(struct efirt_callinfo, ec_arg5)); ASSYM(EC_RBX, offsetof(struct efirt_callinfo, ec_rbx)); ASSYM(EC_RSP, offsetof(struct efirt_callinfo, ec_rsp)); ASSYM(EC_RBP, offsetof(struct efirt_callinfo, ec_rbp)); ASSYM(EC_R12, offsetof(struct efirt_callinfo, ec_r12)); ASSYM(EC_R13, offsetof(struct efirt_callinfo, ec_r13)); ASSYM(EC_R14, offsetof(struct efirt_callinfo, ec_r14)); ASSYM(EC_R15, offsetof(struct efirt_callinfo, ec_r15)); ASSYM(EC_RFLAGS, offsetof(struct efirt_callinfo, ec_rflags)); Index: head/sys/amd64/amd64/locore.S =================================================================== --- head/sys/amd64/amd64/locore.S (revision 368040) +++ head/sys/amd64/amd64/locore.S (revision 368041) @@ -1,140 +1,141 @@ /*- * Copyright (c) 2003 Peter Wemm * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include "assym.inc" /* * Compiled KERNBASE location */ - .globl kernbase, loc_PTmap, loc_PDmap, loc_PDPmap, dmapbase, dmapend + .globl kernbase, kernload, loc_PTmap, loc_PDmap, loc_PDPmap, dmapbase, dmapend .set kernbase,KERNBASE + .set kernload,KERNLOAD .set dmapbase,DMAP_MIN_ADDRESS .set dmapend,DMAP_MAX_ADDRESS .text /********************************************************************** * * This is where the loader trampoline start us, set the ball rolling... * * We are called with the stack looking like this: * 0(%rsp) = 32 bit return address (cannot be used) * 4(%rsp) = 32 bit modulep * 8(%rsp) = 32 bit kernend * * We are already in long mode, on a 64 bit %cs and running at KERNBASE. */ NON_GPROF_ENTRY(btext) /* Tell the bios to warmboot next time */ movw $0x1234,0x472 /* Don't trust what the loader gives for rflags. */ pushq $PSL_KERNEL popfq /* Find the metadata pointers before we lose them */ movq %rsp, %rbp movl 4(%rbp),%edi /* modulep (arg 1) */ movl 8(%rbp),%esi /* kernend (arg 2) */ /* Get onto a stack that we can trust - there is no going back now. */ movq $bootstack,%rsp xorl %ebp, %ebp call hammer_time /* set up cpu for unix operation */ movq %rax,%rsp /* set up kstack for mi_startup() */ call mi_startup /* autoconfiguration, mountroot etc */ 0: hlt jmp 0b /* la57_trampoline(%rdi pml5) */ NON_GPROF_ENTRY(la57_trampoline) movq %rsp,%r11 movq %rbx,%r10 leaq la57_trampoline_end(%rip),%rsp movq %cr0,%rdx lgdtq la57_trampoline_gdt_desc(%rip) pushq $(2<<3) leaq l1(%rip),%rax leaq l2(%rip),%rbx pushq %rax lretq .code32 l1: movl $(3<<3),%eax movl %eax,%ss movl %edx,%eax andl $~CR0_PG,%eax movl %eax,%cr0 movl %cr4,%eax orl $CR4_LA57,%eax movl %eax,%cr4 movl %edi,%cr3 movl %edx,%cr0 pushl $(1<<3) pushl %ebx lretl .code64 l2: movq %r11,%rsp movq %r10,%rbx retq .p2align 4,0 NON_GPROF_ENTRY(la57_trampoline_gdt_desc) .word la57_trampoline_end - la57_trampoline_gdt .long 0 /* filled by pmap_bootstrap_la57 */ .p2align 4,0 NON_GPROF_ENTRY(la57_trampoline_gdt) .long 0x00000000 /* null desc */ .long 0x00000000 .long 0x00000000 /* 64bit code */ .long 0x00209800 .long 0x0000ffff /* 32bit code */ .long 0x00cf9b00 .long 0x0000ffff /* universal data */ .long 0x00cf9300 .dcb.l 16,0 NON_GPROF_ENTRY(la57_trampoline_end) .bss ALIGN_DATA /* just to be sure */ .globl bootstack .space 0x1000 /* space for bootstack - temporary stack */ bootstack: Index: head/sys/amd64/include/vmparam.h =================================================================== --- head/sys/amd64/include/vmparam.h (revision 368040) +++ head/sys/amd64/include/vmparam.h (revision 368041) @@ -1,266 +1,274 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 1990 The Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 2003 Peter Wemm * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)vmparam.h 5.9 (Berkeley) 5/12/91 * $FreeBSD$ */ #ifndef _MACHINE_VMPARAM_H_ #define _MACHINE_VMPARAM_H_ 1 /* * Machine dependent constants for AMD64. */ /* * Virtual memory related constants, all in bytes */ #define MAXTSIZ (32768UL*1024*1024) /* max text size */ #ifndef DFLDSIZ #define DFLDSIZ (32768UL*1024*1024) /* initial data size limit */ #endif #ifndef MAXDSIZ #define MAXDSIZ (32768UL*1024*1024) /* max data size */ #endif #ifndef DFLSSIZ #define DFLSSIZ (8UL*1024*1024) /* initial stack size limit */ #endif #ifndef MAXSSIZ #define MAXSSIZ (512UL*1024*1024) /* max stack size */ #endif #ifndef SGROWSIZ #define SGROWSIZ (128UL*1024) /* amount to grow stack */ #endif /* * We provide a machine specific single page allocator through the use * of the direct mapped segment. This uses 2MB pages for reduced * TLB pressure. */ #define UMA_MD_SMALL_ALLOC /* * The physical address space is densely populated. */ #define VM_PHYSSEG_DENSE /* * The number of PHYSSEG entries must be one greater than the number * of phys_avail entries because the phys_avail entry that spans the * largest physical address that is accessible by ISA DMA is split * into two PHYSSEG entries. */ #define VM_PHYSSEG_MAX 63 /* * Create two free page pools: VM_FREEPOOL_DEFAULT is the default pool * from which physical pages are allocated and VM_FREEPOOL_DIRECT is * the pool from which physical pages for page tables and small UMA * objects are allocated. */ #define VM_NFREEPOOL 2 #define VM_FREEPOOL_DEFAULT 0 #define VM_FREEPOOL_DIRECT 1 /* * Create up to three free page lists: VM_FREELIST_DMA32 is for physical pages * that have physical addresses below 4G but are not accessible by ISA DMA, * and VM_FREELIST_ISADMA is for physical pages that are accessible by ISA * DMA. 
*/ #define VM_NFREELIST 3 #define VM_FREELIST_DEFAULT 0 #define VM_FREELIST_DMA32 1 #define VM_FREELIST_LOWMEM 2 #define VM_LOWMEM_BOUNDARY (16 << 20) /* 16MB ISA DMA limit */ /* * Create the DMA32 free list only if the number of physical pages above * physical address 4G is at least 16M, which amounts to 64GB of physical * memory. */ #define VM_DMA32_NPAGES_THRESHOLD 16777216 /* * An allocation size of 16MB is supported in order to optimize the * use of the direct map by UMA. Specifically, a cache line contains * at most 8 PDEs, collectively mapping 16MB of physical memory. By * reducing the number of distinct 16MB "pages" that are used by UMA, * the physical memory allocator reduces the likelihood of both 2MB * page TLB misses and cache misses caused by 2MB page TLB misses. */ #define VM_NFREEORDER 13 /* * Enable superpage reservations: 1 level. */ #ifndef VM_NRESERVLEVEL #define VM_NRESERVLEVEL 1 #endif /* * Level 0 reservations consist of 512 pages. */ #ifndef VM_LEVEL_0_ORDER #define VM_LEVEL_0_ORDER 9 #endif #ifdef SMP #define PA_LOCK_COUNT 256 #endif /* + * Kernel physical load address. Needs to be aligned at 2MB superpage + * boundary. + */ +#ifndef KERNLOAD +#define KERNLOAD 0x200000 +#endif + +/* * Virtual addresses of things. Derived from the page directory and * page table indexes from pmap.h for precision. * * 0x0000000000000000 - 0x00007fffffffffff user map * 0x0000800000000000 - 0xffff7fffffffffff does not exist (hole) * 0xffff800000000000 - 0xffff804020100fff recursive page table (512GB slot) * 0xffff804020100fff - 0xffff807fffffffff unused * 0xffff808000000000 - 0xffff847fffffffff large map (can be tuned up) * 0xffff848000000000 - 0xfffff7ffffffffff unused (large map extends there) * 0xfffff80000000000 - 0xfffffbffffffffff 4TB direct map * 0xfffffc0000000000 - 0xfffffdffffffffff unused * 0xfffffe0000000000 - 0xffffffffffffffff 2TB kernel map * * Within the kernel map: * * 0xfffffe0000000000 vm_page_array * 0xffffffff80000000 KERNBASE */ #define VM_MIN_KERNEL_ADDRESS KV4ADDR(KPML4BASE, 0, 0, 0) #define VM_MAX_KERNEL_ADDRESS KV4ADDR(KPML4BASE + NKPML4E - 1, \ NPDPEPG-1, NPDEPG-1, NPTEPG-1) #define DMAP_MIN_ADDRESS KV4ADDR(DMPML4I, 0, 0, 0) #define DMAP_MAX_ADDRESS KV4ADDR(DMPML4I + NDMPML4E, 0, 0, 0) #define LARGEMAP_MIN_ADDRESS KV4ADDR(LMSPML4I, 0, 0, 0) #define LARGEMAP_MAX_ADDRESS KV4ADDR(LMEPML4I + 1, 0, 0, 0) #define KERNBASE KV4ADDR(KPML4I, KPDPI, 0, 0) #define UPT_MAX_ADDRESS KV4ADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I) #define UPT_MIN_ADDRESS KV4ADDR(PML4PML4I, 0, 0, 0) #define VM_MAXUSER_ADDRESS_LA57 UVADDR(NUPML5E, 0, 0, 0, 0) #define VM_MAXUSER_ADDRESS_LA48 UVADDR(0, NUP4ML4E, 0, 0, 0) #define VM_MAXUSER_ADDRESS VM_MAXUSER_ADDRESS_LA57 #define SHAREDPAGE_LA57 (VM_MAXUSER_ADDRESS_LA57 - PAGE_SIZE) #define SHAREDPAGE_LA48 (VM_MAXUSER_ADDRESS_LA48 - PAGE_SIZE) #define USRSTACK_LA57 SHAREDPAGE_LA57 #define USRSTACK_LA48 SHAREDPAGE_LA48 #define USRSTACK USRSTACK_LA48 #define PS_STRINGS_LA57 (USRSTACK_LA57 - sizeof(struct ps_strings)) #define PS_STRINGS_LA48 (USRSTACK_LA48 - sizeof(struct ps_strings)) #define VM_MAX_ADDRESS UPT_MAX_ADDRESS #define VM_MIN_ADDRESS (0) /* * XXX Allowing dmaplimit == 0 is a temporary workaround for vt(4) efifb's * early use of PHYS_TO_DMAP before the mapping is actually setup. This works * because the result is not actually accessed until later, but the early * vt fb startup needs to be reworked. 
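The new KERNLOAD definition above is the same 2MB value the loader previously hard-coded as KERNEL_PHYSICAL_BASE in copy.c (removed in the first hunk); it is now the single definition the other hunks consume. Because it is guarded by #ifndef, a build could in principle override it, provided the replacement keeps the 2MB superpage alignment the comment calls for. How such an override would be wired in (for example a -DKERNLOAD=... compile flag) is an assumption here, not something this diff adds; a hypothetical compile-time guard for it might look like:

/*
 * Hypothetical sanity check, not part of this change: reject a KERNLOAD
 * override that is not aligned to a 2MB superpage boundary.
 */
#ifndef KERNLOAD
#define	KERNLOAD	0x200000	/* default from vmparam.h */
#endif

_Static_assert((KERNLOAD & (2UL * 1024 * 1024 - 1)) == 0,
    "KERNLOAD must be aligned to a 2MB superpage boundary");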
*/ #define PMAP_HAS_DMAP 1 #define PHYS_TO_DMAP(x) ({ \ KASSERT(dmaplimit == 0 || (x) < dmaplimit, \ ("physical address %#jx not covered by the DMAP", \ (uintmax_t)x)); \ (x) | DMAP_MIN_ADDRESS; }) #define DMAP_TO_PHYS(x) ({ \ KASSERT((x) < (DMAP_MIN_ADDRESS + dmaplimit) && \ (x) >= DMAP_MIN_ADDRESS, \ ("virtual address %#jx not covered by the DMAP", \ (uintmax_t)x)); \ (x) & ~DMAP_MIN_ADDRESS; }) /* * amd64 maps the page array into KVA so that it can be more easily * allocated on the correct memory domains. */ #define PMAP_HAS_PAGE_ARRAY 1 /* * How many physical pages per kmem arena virtual page. */ #ifndef VM_KMEM_SIZE_SCALE #define VM_KMEM_SIZE_SCALE (1) #endif /* * Optional ceiling (in bytes) on the size of the kmem arena: 60% of the * kernel map. */ #ifndef VM_KMEM_SIZE_MAX #define VM_KMEM_SIZE_MAX ((VM_MAX_KERNEL_ADDRESS - \ VM_MIN_KERNEL_ADDRESS + 1) * 3 / 5) #endif /* initial pagein size of beginning of executable file */ #ifndef VM_INITIAL_PAGEIN #define VM_INITIAL_PAGEIN 16 #endif #define ZERO_REGION_SIZE (2 * 1024 * 1024) /* 2MB */ /* * Use a fairly large batch size since we expect amd64 systems to have lots of * memory. */ #define VM_BATCHQUEUE_SIZE 31 /* * The pmap can create non-transparent large page mappings. */ #define PMAP_HAS_LARGEPAGES 1 /* * Need a page dump array for minidump. */ #define MINIDUMP_PAGE_TRACKING 1 #endif /* _MACHINE_VMPARAM_H_ */ Index: head/sys/conf/ldscript.amd64 =================================================================== --- head/sys/conf/ldscript.amd64 (revision 368040) +++ head/sys/conf/ldscript.amd64 (revision 368041) @@ -1,220 +1,220 @@ /* $FreeBSD$ */ OUTPUT_FORMAT("elf64-x86-64-freebsd", "elf64-x86-64-freebsd", "elf64-x86-64-freebsd") OUTPUT_ARCH(i386:x86-64) ENTRY(btext) SEARCH_DIR("/usr/lib"); SECTIONS { + kernphys = kernload; /* Read-only sections, merged into text segment: */ - kernphys = 0x200000; /* 2MB superpage size */ . = kernbase + kernphys + SIZEOF_HEADERS; /* * Use the AT keyword in order to set the right LMA that contains * the physical address where the section should be loaded. This is * needed for the Xen loader which honours the LMA. 
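The ldscript hunk above closes the loop: vmparam.h defines KERNLOAD, genassym.c's new ASSYM(KERNLOAD, KERNLOAD) exports it into assym.inc, locore.S's new .globl/.set publishes it as the absolute symbol kernload, and the linker script's "kernphys = kernload;" replaces the old hard-coded "kernphys = 0x200000;", giving the physical load address a single source of truth. With the default values nothing moves; a small sketch of the resulting link addresses (KERNBASE taken from the vmparam.h address-map comment above, and the AT() directives keep the LMA at kernphys for loaders such as Xen that honour it):

#include <stdint.h>
#include <stdio.h>

#define	KERNBASE	UINT64_C(0xffffffff80000000)	/* from the vmparam.h map above */
#define	KERNLOAD	UINT64_C(0x200000)		/* default physical load address */

int
main(void)
{
	/* kernphys = kernload; ". = kernbase + kernphys + SIZEOF_HEADERS" */
	printf("LMA (physical load address): %#jx\n", (uintmax_t)KERNLOAD);
	printf("VMA (kernel link address)  : %#jx\n",
	    (uintmax_t)(KERNBASE + KERNLOAD));
	return (0);
}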
*/ .interp : AT (kernphys + SIZEOF_HEADERS) { *(.interp) } .hash : { *(.hash) } .gnu.hash : { *(.gnu.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .rel.init : { *(.rel.init) } .rela.init : { *(.rela.init) } .rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) } .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } .rel.fini : { *(.rel.fini) } .rela.fini : { *(.rela.fini) } .rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) } .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } .rel.data.rel.ro : { *(.rel.data.rel.ro* .rel.gnu.linkonce.d.rel.ro.*) } .rela.data.rel.ro : { *(.rela.data.rel.ro* .rela.gnu.linkonce.d.rel.ro.*) } .rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) } .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } .rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) } .rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) } .rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) } .rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) } .rel.ctors : { *(.rel.ctors) } .rela.ctors : { *(.rela.ctors) } .rel.dtors : { *(.rel.dtors) } .rela.dtors : { *(.rela.dtors) } .rel.got : { *(.rel.got) } .rela.got : { *(.rela.got) } .rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) } .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } .rel.plt : { *(.rel.plt) } .rela.plt : { *(.rela.plt) } .init : { KEEP (*(.init)) } =0xCCCCCCCC .plt : { *(.plt) } .text : { *(.text .stub .text.* .gnu.linkonce.t.*) KEEP (*(.text.*personality*)) /* .gnu.warning sections are handled specially by elf32.em. */ *(.gnu.warning) } =0xCCCCCCCC .fini : { KEEP (*(.fini)) } =0xCCCCCCCC PROVIDE (__etext = .); PROVIDE (_etext = .); PROVIDE (etext = .); .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } .rodata1 : { *(.rodata1) } .note.gnu.build-id : { PROVIDE (__build_id_start = .); *(.note.gnu.build-id) PROVIDE (__build_id_end = .); } .eh_frame_hdr : { *(.eh_frame_hdr) } .eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) } .gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) } /* Adjust the address for the data segment. We want to adjust up to the same address within the page on the next page up. */ . = ALIGN (CONSTANT (MAXPAGESIZE)) - ((CONSTANT (MAXPAGESIZE) - .) & (CONSTANT (MAXPAGESIZE) - 1)); . = DATA_SEGMENT_ALIGN (CONSTANT (MAXPAGESIZE), CONSTANT (COMMONPAGESIZE)); PROVIDE (brwsection = .); /* Exception handling */ .eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) } .gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) } /* Thread Local Storage sections */ .tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) } .tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) } .preinit_array : { PROVIDE_HIDDEN (__preinit_array_start = .); KEEP (*(.preinit_array)) PROVIDE_HIDDEN (__preinit_array_end = .); } .init_array : { PROVIDE_HIDDEN (__init_array_start = .); KEEP (*(SORT(.init_array.*))) KEEP (*(.init_array)) PROVIDE_HIDDEN (__init_array_end = .); } .fini_array : { PROVIDE_HIDDEN (__fini_array_start = .); KEEP (*(.fini_array)) KEEP (*(SORT(.fini_array.*))) PROVIDE_HIDDEN (__fini_array_end = .); } _start_ctors = .; PROVIDE (start_ctors = .); .ctors : { /* gcc uses crtbegin.o to find the start of the constructors, so we make sure it is first. 
Because this is a wildcard, it doesn't matter if the user does not actually link against crtbegin.o; the linker won't look for a file to match a wildcard. The wildcard also means that it doesn't matter which directory crtbegin.o is in. */ KEEP (*crtbegin.o(.ctors)) KEEP (*crtbegin?.o(.ctors)) /* We don't want to include the .ctor section from the crtend.o file until after the sorted ctors. The .ctor section from the crtend file contains the end of ctors marker and it must be last */ KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .ctors)) KEEP (*(SORT(.ctors.*))) KEEP (*(.ctors)) } _stop_ctors = .; PROVIDE (stop_ctors = .); .dtors : { KEEP (*crtbegin.o(.dtors)) KEEP (*crtbegin?.o(.dtors)) KEEP (*(EXCLUDE_FILE (*crtend.o *crtend?.o ) .dtors)) KEEP (*(SORT(.dtors.*))) KEEP (*(.dtors)) } .jcr : { KEEP (*(.jcr)) } .data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*) } .dynamic : { *(.dynamic) } .got : { *(.got) } . = DATA_SEGMENT_RELRO_END (24, .); .got.plt : { *(.got.plt) } . = ALIGN(128); .data.read_frequently : { *(SORT_BY_ALIGNMENT(.data.read_frequently)) } .data.read_mostly : { *(.data.read_mostly) } . = ALIGN(128); .data.exclusive_cache_line : { *(.data.exclusive_cache_line) } . = ALIGN(128); .data : { *(.data .data.* .gnu.linkonce.d.*) KEEP (*(.gnu.linkonce.d.*personality*)) } .data1 : { *(.data1) } _edata = .; PROVIDE (edata = .); __bss_start = .; .bss : { *(.dynbss) *(.bss .bss.* .gnu.linkonce.b.*) *(COMMON) /* Ensure that the .bss section ends at a superpage boundary. This way it can be mapped using non-executable large pages. */ . = ALIGN(0x200000); } _end = .; PROVIDE (end = .); . = DATA_SEGMENT_END (.); /* Stabs debugging sections. */ .stab 0 : { *(.stab) } .stabstr 0 : { *(.stabstr) } .stab.excl 0 : { *(.stab.excl) } .stab.exclstr 0 : { *(.stab.exclstr) } .stab.index 0 : { *(.stab.index) } .stab.indexstr 0 : { *(.stab.indexstr) } .comment 0 : { *(.comment) } /* DWARF debug sections. Symbols in the DWARF debugging sections are relative to the beginning of the section so we begin them at 0. */ /* DWARF 1 */ .debug 0 : { *(.debug) } .line 0 : { *(.line) } /* GNU DWARF 1 extensions */ .debug_srcinfo 0 : { *(.debug_srcinfo) } .debug_sfnames 0 : { *(.debug_sfnames) } /* DWARF 1.1 and DWARF 2 */ .debug_aranges 0 : { *(.debug_aranges) } .debug_pubnames 0 : { *(.debug_pubnames) } /* DWARF 2 */ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } .debug_abbrev 0 : { *(.debug_abbrev) } .debug_line 0 : { *(.debug_line) } .debug_frame 0 : { *(.debug_frame) } .debug_str 0 : { *(.debug_str) } .debug_loc 0 : { *(.debug_loc) } .debug_macinfo 0 : { *(.debug_macinfo) } /* SGI/MIPS DWARF 2 extensions */ .debug_weaknames 0 : { *(.debug_weaknames) } .debug_funcnames 0 : { *(.debug_funcnames) } .debug_typenames 0 : { *(.debug_typenames) } .debug_varnames 0 : { *(.debug_varnames) } /* DWARF 3 */ .debug_pubtypes 0 : { *(.debug_pubtypes) } .debug_ranges 0 : { *(.debug_ranges) } .gnu.attributes 0 : { KEEP (*(.gnu.attributes)) } /DISCARD/ : { *(.note.GNU-stack) } }