Index: head/sys/conf/files.ia64
===================================================================
--- head/sys/conf/files.ia64 (revision 221270)
+++ head/sys/conf/files.ia64 (revision 221271)
@@ -1,137 +1,138 @@
# This file tells config what files go into building a kernel,
# files marked standard are always included.
#
# $FreeBSD$
#
# The long compile-with and dependency lines are required because of
# limitations in config: backslash-newline doesn't work in strings, and
# dependency lines other than the first are silently ignored.
#
#
font8x16.o			optional	std8x16font \
	compile-with	"uudecode < /usr/share/syscons/fonts/${STD8X16FONT}-8x16.fnt && file2c 'unsigned char font_16[16*256] = {' '};' < ${STD8X16FONT}-8x16 > font8x16.c && ${CC} -c ${CFLAGS} font8x16.c" \
	no-implicit-rule before-depend \
	clean		"${STD8X16FONT}-8x16 font8x16.c"
#
atkbdmap.h			optional	atkbd_dflt_keymap \
	compile-with	"/usr/sbin/kbdcontrol -L ${ATKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > atkbdmap.h" \
	no-obj no-implicit-rule before-depend \
	clean		"atkbdmap.h"
#
font.h				optional	sc_dflt_font \
	compile-with	"uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x16.fnt && file2c 'static u_char dflt_font_16[16*256] = {' '};' < ${SC_DFLT_FONT}-8x16 > font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x14.fnt && file2c 'static u_char dflt_font_14[14*256] = {' '};' < ${SC_DFLT_FONT}-8x14 >> font.h && uudecode < /usr/share/syscons/fonts/${SC_DFLT_FONT}-8x8.fnt && file2c 'static u_char dflt_font_8[8*256] = {' '};' < ${SC_DFLT_FONT}-8x8 >> font.h" \
	no-obj no-implicit-rule before-depend \
	clean		"font.h ${SC_DFLT_FONT}-8x14 ${SC_DFLT_FONT}-8x16 ${SC_DFLT_FONT}-8x8"
#
ukbdmap.h			optional	ukbd_dflt_keymap \
	compile-with	"/usr/sbin/kbdcontrol -L ${UKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > ukbdmap.h" \
	no-obj no-implicit-rule before-depend \
	clean		"ukbdmap.h"
#
compat/freebsd32/freebsd32_ioctl.c	optional	compat_freebsd32
compat/freebsd32/freebsd32_misc.c	optional	compat_freebsd32
compat/freebsd32/freebsd32_syscalls.c	optional	compat_freebsd32
compat/freebsd32/freebsd32_sysent.c	optional	compat_freebsd32
compat/ia32/ia32_sysvec.c		optional	compat_freebsd32
contrib/ia64/libuwx/src/uwx_bstream.c	standard
contrib/ia64/libuwx/src/uwx_context.c	standard
contrib/ia64/libuwx/src/uwx_env.c	standard
contrib/ia64/libuwx/src/uwx_scoreboard.c	standard
contrib/ia64/libuwx/src/uwx_step.c	standard
contrib/ia64/libuwx/src/uwx_str.c	standard
contrib/ia64/libuwx/src/uwx_swap.c	standard
contrib/ia64/libuwx/src/uwx_trace.c	standard
contrib/ia64/libuwx/src/uwx_uinfo.c	standard
contrib/ia64/libuwx/src/uwx_utable.c	standard
crypto/blowfish/bf_enc.c	optional	crypto | ipsec
crypto/des/des_enc.c		optional	crypto | ipsec | netsmb
dev/atkbdc/atkbd.c		optional	atkbd atkbdc
dev/atkbdc/atkbd_atkbdc.c	optional	atkbd atkbdc
dev/atkbdc/atkbdc.c		optional	atkbdc
dev/atkbdc/atkbdc_isa.c		optional	atkbdc isa
dev/atkbdc/atkbdc_subr.c	optional	atkbdc
dev/atkbdc/psm.c		optional	psm atkbdc
dev/fb/fb.c			optional	fb | vga
dev/fb/vga.c			optional	vga
dev/hwpmc/hwpmc_ia64.c		optional	hwpmc
dev/io/iodev.c			optional	io
dev/kbd/kbd.c			optional	atkbd | sc | ukbd
dev/syscons/scterm-teken.c	optional	sc
dev/syscons/scvgarndr.c		optional	sc vga
dev/syscons/scvtb.c		optional	sc
dev/uart/uart_cpu_ia64.c	optional	uart
dev/acpica/acpi_if.m		standard
ia64/acpica/OsdEnvironment.c	optional	acpi
ia64/acpica/acpi_machdep.c	optional	acpi
ia64/acpica/acpi_wakeup.c	optional	acpi
ia64/acpica/madt.c		optional	acpi
ia64/disasm/disasm_decode.c	standard
ia64/disasm/disasm_extract.c	standard
ia64/disasm/disasm_format.c	standard
ia64/ia32/ia32_misc.c		optional	compat_freebsd32
ia64/ia32/ia32_reg.c		optional	compat_freebsd32
ia64/ia32/ia32_signal.c		optional	compat_freebsd32
ia64/ia32/ia32_trap.c		optional	compat_freebsd32
ia64/ia64/autoconf.c		standard
ia64/ia64/bus_machdep.c		standard
ia64/ia64/busdma_machdep.c	standard
ia64/ia64/clock.c		standard
ia64/ia64/context.S		standard
ia64/ia64/db_machdep.c		optional	ddb
ia64/ia64/dump_machdep.c	standard
ia64/ia64/efi.c			standard
ia64/ia64/elf_machdep.c		standard
ia64/ia64/emulate.c		standard
ia64/ia64/exception.S		standard
ia64/ia64/gdb_machdep.c		optional	gdb
ia64/ia64/highfp.c		standard
ia64/ia64/in_cksum.c		optional	inet | inet6
ia64/ia64/interrupt.c		standard
ia64/ia64/iodev_machdep.c	optional	io
ia64/ia64/locore.S		standard	no-obj
ia64/ia64/machdep.c		standard
ia64/ia64/mca.c			standard
ia64/ia64/mem.c			optional	mem
+ia64/ia64/mp_locore.S		optional	smp
ia64/ia64/mp_machdep.c		optional	smp
ia64/ia64/nexus.c		standard
ia64/ia64/pal.S			standard
ia64/ia64/physical.S		standard
ia64/ia64/pmap.c		standard
ia64/ia64/ptrace_machdep.c	standard
ia64/ia64/sal.c			standard
ia64/ia64/sapic.c		standard
ia64/ia64/setjmp.S		standard
ia64/ia64/ssc.c			optional	ski
ia64/ia64/sscdisk.c		optional	ski
ia64/ia64/stack_machdep.c	optional	ddb | stack
ia64/ia64/support.S		standard
ia64/ia64/sys_machdep.c		standard
ia64/ia64/syscall.S		standard
ia64/ia64/trap.c		standard
ia64/ia64/uio_machdep.c		standard
ia64/ia64/uma_machdep.c		standard
ia64/ia64/unaligned.c		standard
ia64/ia64/unwind.c		standard
ia64/ia64/vm_machdep.c		standard
ia64/isa/isa.c			optional	isa
ia64/isa/isa_dma.c		optional	isa
ia64/pci/pci_cfgreg.c		optional	pci
isa/syscons_isa.c		optional	sc
isa/vga_isa.c			optional	vga
kern/imgact_elf32.c		optional	compat_freebsd32
libkern/bcmp.c			standard
libkern/ffsl.c			standard
libkern/fls.c			standard
libkern/flsl.c			standard
libkern/ia64/__divdi3.S		standard
libkern/ia64/__divsi3.S		standard
libkern/ia64/__moddi3.S		standard
libkern/ia64/__modsi3.S		standard
libkern/ia64/__udivdi3.S	standard
libkern/ia64/__udivsi3.S	standard
libkern/ia64/__umoddi3.S	standard
libkern/ia64/__umodsi3.S	standard
libkern/ia64/bswap16.S		standard
libkern/ia64/bswap32.S		standard
libkern/memmove.c		standard
libkern/memset.c		standard
Index: head/sys/conf/ldscript.ia64
===================================================================
--- head/sys/conf/ldscript.ia64 (revision 221270)
+++ head/sys/conf/ldscript.ia64 (revision 221271)
@@ -1,150 +1,152 @@
/* $FreeBSD$ */
OUTPUT_FORMAT("elf64-ia64-freebsd", "elf64-ia64-freebsd", "elf64-ia64-freebsd")
OUTPUT_ARCH(ia64)
ENTRY(__start)
SEARCH_DIR(/usr/lib);
-kernel_text = 0xe000000004000000;
+kernel_text = 0x9ffc000000000000;
SECTIONS
{
  /* Read-only sections, merged into text segment: */
  . = kernel_text + SIZEOF_HEADERS;
  .interp	: { *(.interp) }
  PROVIDE (btext = .);
-  .ivt		: { *(.ivt) }
  .text		:
  {
-    *(.text.ivt)
+    *(.ivt)
+    *(.ivt.text)
    *(.text .stub .text.* .gnu.linkonce.t.*)
    /* .gnu.warning sections are handled specially by elf32.em.
*/ *(.gnu.warning) } = 0x00300000010070000002000001000400 .init : { *(.init) } = 0x00300000010070000002000001000400 .plt : { *(.plt) } .fini : { *(.fini) } = 0x00300000010070000002000001000400 _etext = .; PROVIDE (etext = .); .hash : { *(.hash) } .dynsym : { *(.dynsym) } .dynstr : { *(.dynstr) } .gnu.version : { *(.gnu.version) } .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } .rela.init : { *(.rela.init) } .rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) } .rela.fini : { *(.rela.fini) } .rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) } .rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) } .rela.ctors : { *(.rela.ctors) } .rela.dtors : { *(.rela.dtors) } .rela.got : { *(.rela.got) } .rela.sdata : { *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*) } .rela.sbss : { *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*) } .rela.sdata2 : { *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*) } .rela.sbss2 : { *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*) } .rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) } .rela.plt : { *(.rela.plt) } .rela.IA_64.pltoff : { *(.rela.IA_64.pltoff) } .IA_64.unwind_info : { *(.IA_64.unwind_info* .gnu.linkonce.ia64unwi.*) } .IA_64.unwind : { *(.IA_64.unwind* .gnu.linkonce.ia64unw.*) } .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } .rodata1 : { *(.rodata1) } .sdata2 : { *(.sdata2 .sdata2.* .gnu.linkonce.s2.*) } .sbss2 : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) } .opd : { *(.opd) } /* Adjust the address for the data segment. We want to start in the next page in the loader virtual memory. */ . = ALIGN(65536); + PROVIDE (bdata = .); .data : { - *(.data.kstack .data .data.* .gnu.linkonce.d.*) + *(.ivt.data) + *(.data .data.* .gnu.linkonce.d.*) SORT(CONSTRUCTORS) } .data1 : { *(.data1) } .dynamic : { *(.dynamic) } .ctors : { *(.ctors) *(SORT(.ctors.*)) } .dtors : { *(.dtors) *(SORT(.dtors.*)) } . = ALIGN(16); __gp = . + 0x200000; .got : { *(.got.plt) *(.got) } .IA_64.pltoff : { *(.IA_64.pltoff) } /* We want the small data sections together, so single-instruction offsets can access them all, and initialized data all before uninitialized, so we can shorten the on-disk segment size. */ .sdata : { *(.sdata .sdata.* .gnu.linkonce.s.*) } _edata = .; PROVIDE (edata = .); __bss_start = .; .sbss : { PROVIDE (__sbss_start = .); PROVIDE (___sbss_start = .); *(.dynsbss) *(.sbss .sbss.* .gnu.linkonce.sb.*) *(.scommon) PROVIDE (__sbss_end = .); PROVIDE (___sbss_end = .); } .bss : { *(.dynbss) *(.bss .bss.* .gnu.linkonce.b.*) *(COMMON) /* Align here to ensure that the .bss section occupies space up to _end. Align after .bss to ensure correct alignment even if the .bss section disappears because there are no input sections. */ . = ALIGN(64 / 8); } . = ALIGN(64 / 8); _end = .; PROVIDE (end = .); /* Stabs debugging sections. */ .stab 0 : { *(.stab) } .stabstr 0 : { *(.stabstr) } .stab.excl 0 : { *(.stab.excl) } .stab.exclstr 0 : { *(.stab.exclstr) } .stab.index 0 : { *(.stab.index) } .stab.indexstr 0 : { *(.stab.indexstr) } .comment 0 : { *(.comment) } /* DWARF debug sections. Symbols in the DWARF debugging sections are relative to the beginning of the section so we begin them at 0. 
*/ /* DWARF 1 */ .debug 0 : { *(.debug) } .line 0 : { *(.line) } /* GNU DWARF 1 extensions */ .debug_srcinfo 0 : { *(.debug_srcinfo) } .debug_sfnames 0 : { *(.debug_sfnames) } /* DWARF 1.1 and DWARF 2 */ .debug_aranges 0 : { *(.debug_aranges) } .debug_pubnames 0 : { *(.debug_pubnames) } /* DWARF 2 */ .debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) } .debug_abbrev 0 : { *(.debug_abbrev) } .debug_line 0 : { *(.debug_line) } .debug_frame 0 : { *(.debug_frame) } .debug_str 0 : { *(.debug_str) } .debug_loc 0 : { *(.debug_loc) } .debug_macinfo 0 : { *(.debug_macinfo) } /* SGI/MIPS DWARF 2 extensions */ .debug_weaknames 0 : { *(.debug_weaknames) } .debug_funcnames 0 : { *(.debug_funcnames) } .debug_typenames 0 : { *(.debug_typenames) } .debug_varnames 0 : { *(.debug_varnames) } } Index: head/sys/ia64/ia64/efi.c =================================================================== --- head/sys/ia64/ia64/efi.c (revision 221270) +++ head/sys/ia64/ia64/efi.c (revision 221271) @@ -1,223 +1,223 @@ /*- * Copyright (c) 2004 Marcel Moolenaar * Copyright (c) 2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include static struct efi_systbl *efi_systbl; static struct efi_cfgtbl *efi_cfgtbl; static struct efi_rt *efi_runtime; static int efi_status2err[25] = { 0, /* EFI_SUCCESS */ ENOEXEC, /* EFI_LOAD_ERROR */ EINVAL, /* EFI_INVALID_PARAMETER */ ENOSYS, /* EFI_UNSUPPORTED */ EMSGSIZE, /* EFI_BAD_BUFFER_SIZE */ EOVERFLOW, /* EFI_BUFFER_TOO_SMALL */ EBUSY, /* EFI_NOT_READY */ EIO, /* EFI_DEVICE_ERROR */ EROFS, /* EFI_WRITE_PROTECTED */ EAGAIN, /* EFI_OUT_OF_RESOURCES */ EIO, /* EFI_VOLUME_CORRUPTED */ ENOSPC, /* EFI_VOLUME_FULL */ ENXIO, /* EFI_NO_MEDIA */ ESTALE, /* EFI_MEDIA_CHANGED */ ENOENT, /* EFI_NOT_FOUND */ EACCES, /* EFI_ACCESS_DENIED */ ETIMEDOUT, /* EFI_NO_RESPONSE */ EADDRNOTAVAIL, /* EFI_NO_MAPPING */ ETIMEDOUT, /* EFI_TIMEOUT */ EDOOFUS, /* EFI_NOT_STARTED */ EALREADY, /* EFI_ALREADY_STARTED */ ECANCELED, /* EFI_ABORTED */ EPROTO, /* EFI_ICMP_ERROR */ EPROTO, /* EFI_TFTP_ERROR */ EPROTO /* EFI_PROTOCOL_ERROR */ }; static int efi_status_to_errno(efi_status status) { u_long code; int error; code = status & 0x3ffffffffffffffful; error = (code < 25) ? 
efi_status2err[code] : EDOOFUS; return (error); } void efi_boot_finish(void) { } /* * Collect the entry points for PAL and SAL. Be extra careful about NULL * pointer values. We're running pre-console, so it's better to return * error values than to cause panics, machine checks and other traps and * faults. Keep this minimal... */ int efi_boot_minimal(uint64_t systbl) { ia64_efi_f setvirt; struct efi_md *md; efi_status status; if (systbl == 0) return (EINVAL); efi_systbl = (struct efi_systbl *)IA64_PHYS_TO_RR7(systbl); if (efi_systbl->st_hdr.th_sig != EFI_SYSTBL_SIG) { efi_systbl = NULL; return (EFAULT); } efi_cfgtbl = (efi_systbl->st_cfgtbl == 0) ? NULL : (struct efi_cfgtbl *)IA64_PHYS_TO_RR7(efi_systbl->st_cfgtbl); if (efi_cfgtbl == NULL) return (ENOENT); efi_runtime = (efi_systbl->st_rt == 0) ? NULL : (struct efi_rt *)IA64_PHYS_TO_RR7(efi_systbl->st_rt); if (efi_runtime == NULL) return (ENOENT); /* * Relocate runtime memory segments for firmware. */ md = efi_md_first(); while (md != NULL) { if (md->md_attr & EFI_MD_ATTR_RT) { md->md_virt = (md->md_attr & EFI_MD_ATTR_WB) ? (void *)IA64_PHYS_TO_RR7(md->md_phys) : (void *)IA64_PHYS_TO_RR6(md->md_phys); } md = efi_md_next(md); } setvirt = (void *)IA64_PHYS_TO_RR7((u_long)efi_runtime->rt_setvirtual); status = ia64_efi_physical(setvirt, bootinfo->bi_memmap_size, bootinfo->bi_memdesc_size, bootinfo->bi_memdesc_version, - bootinfo->bi_memmap); + ia64_tpa(bootinfo->bi_memmap)); return ((status < 0) ? EFAULT : 0); } void * efi_get_table(struct uuid *uuid) { struct efi_cfgtbl *ct; u_long count; if (efi_cfgtbl == NULL) return (NULL); count = efi_systbl->st_entries; ct = efi_cfgtbl; while (count--) { if (!bcmp(&ct->ct_uuid, uuid, sizeof(*uuid))) return ((void *)IA64_PHYS_TO_RR7(ct->ct_data)); ct++; } return (NULL); } void efi_get_time(struct efi_tm *tm) { efi_runtime->rt_gettime(tm, NULL); } struct efi_md * efi_md_first(void) { if (bootinfo->bi_memmap == 0) return (NULL); - return ((struct efi_md *)IA64_PHYS_TO_RR7(bootinfo->bi_memmap)); + return ((struct efi_md *)bootinfo->bi_memmap); } struct efi_md * efi_md_next(struct efi_md *md) { uint64_t plim; - plim = IA64_PHYS_TO_RR7(bootinfo->bi_memmap + bootinfo->bi_memmap_size); + plim = bootinfo->bi_memmap + bootinfo->bi_memmap_size; md = (struct efi_md *)((uintptr_t)md + bootinfo->bi_memdesc_size); return ((md >= (struct efi_md *)plim) ? 
NULL : md); } void efi_reset_system(void) { if (efi_runtime != NULL) efi_runtime->rt_reset(EFI_RESET_WARM, 0, 0, NULL); panic("%s: unable to reset the machine", __func__); } int efi_set_time(struct efi_tm *tm) { return (efi_status_to_errno(efi_runtime->rt_settime(tm))); } int efi_var_get(efi_char *name, struct uuid *vendor, uint32_t *attrib, size_t *datasize, void *data) { efi_status status; status = efi_runtime->rt_getvar(name, vendor, attrib, datasize, data); return (efi_status_to_errno(status)); } int efi_var_nextname(size_t *namesize, efi_char *name, struct uuid *vendor) { efi_status status; status = efi_runtime->rt_scanvar(namesize, name, vendor); return (efi_status_to_errno(status)); } int efi_var_set(efi_char *name, struct uuid *vendor, uint32_t attrib, size_t datasize, void *data) { efi_status status; status = efi_runtime->rt_setvar(name, vendor, attrib, datasize, data); return (efi_status_to_errno(status)); } Index: head/sys/ia64/ia64/exception.S =================================================================== --- head/sys/ia64/ia64/exception.S (revision 221270) +++ head/sys/ia64/ia64/exception.S (revision 221271) @@ -1,1617 +1,1625 @@ /*- * Copyright (c) 2003,2004 Marcel Moolenaar * Copyright (c) 2000 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_xtrace.h" #include #include /* * Nested TLB restart tokens. These are used by the * nested TLB handler for jumping back to the code * where the nested TLB was caused. 
*/ #define NTLBRT_SAVE 0x12c12c #define NTLBRT_RESTORE 0x12c12d /* * ar.k7 = kernel memory stack * ar.k6 = kernel register stack * ar.k5 = EPC gateway page * ar.k4 = PCPU data */ + .section .ivt.data, "aw" + + .global pmap_ptc_g_sem +pmap_ptc_g_sem: data8 0 + + .global ia64_kptdir +ia64_kptdir: data8 0 + #ifdef EXCEPTION_TRACING - .data .global xtrace, xhead xtrace: .space 1024*5*8 xhead: data8 xtrace #define XTRACE(offset) \ { .mmi ; \ mov r24=ar.itc ; \ mov r25=cr.iip ; \ mov r27=offset ; \ } ; \ { .mlx ; \ mov r28=cr.ifa ; \ movl r29=xhead ;; \ } ; \ { .mmi ; \ ld8 r29=[r29] ;; \ st8 [r29]=r24,8 ; \ nop 0 ;; \ } ; \ { .mmi ; \ st8 [r29]=r27,8 ;; \ mov r24=cr.isr ; \ add r27=8,r29 ;; \ } ; \ { .mmi ; \ st8 [r29]=r25,16 ;; \ st8 [r27]=r28,16 ; \ mov r25=pr ;; \ } ; \ { .mlx ; \ st8 [r29]=r24 ; \ movl r28=xhead ;; \ } ; \ { .mii ; \ cmp.eq p15,p0=r27,r28 ; \ addl r29=1024*5*8,r0 ;; \ (p15) sub r27=r28,r29 ;; \ } ; \ { .mmi ; \ st8 [r28]=r27 ; \ nop 0 ; \ mov pr=r25,0x1ffff ;; \ } #else #define XTRACE(offset) #endif - .section .text.ivt, "ax" + .section .ivt.text, "ax" /* * exception_save: save interrupted state * * Arguments: * r16 address of bundle that contains the branch. The * return address will be the next bundle. * r17 the value to save as ifa in the trapframe. This * normally is cr.ifa, but some interruptions set * set cr.iim and not cr.ifa. * * Returns: * p15 interrupted from user stack * p14 interrupted from kernel stack * p13 interrupted from user backing store * p12 interrupted from kernel backing store * p11 interrupts were enabled * p10 interrupts were disabled */ ENTRY_NOPROFILE(exception_save, 0) { .mii mov r20=ar.unat extr.u r31=sp,61,3 mov r18=pr ;; } { .mmi - cmp.le p14,p15=5,r31 + cmp.le p14,p15=IA64_VM_MINKERN_REGION,r31 ;; (p15) mov r23=ar.k7 // kernel memory stack (p14) mov r23=sp ;; } { .mii mov r21=ar.rsc add r30=-SIZEOF_TRAPFRAME,r23 ;; dep r30=0,r30,0,10 ;; } { .mmi mov ar.rsc=0 sub r19=r23,r30 add r31=8,r30 ;; } { .mmi mov r22=cr.iip nop 0 addl r29=NTLBRT_SAVE,r0 // 22-bit restart token. ;; } /* * We have a 1KB aligned trapframe, pointed to by sp. If we write * to the trapframe, we may trigger a data nested TLB fault. By * aligning the trapframe on a 1KB boundary, we guarantee that if * we get a data nested TLB fault, it will be on the very first * write. Since the data nested TLB fault does not preserve any * state, we have to be careful what we clobber. Consequently, we * have to be careful what we use here. 
Below a list of registers * that are currently alive: * r16,r17=arguments * r18=pr, r19=length, r20=unat, r21=rsc, r22=iip, r23=TOS * r29=restart point * r30,r31=trapframe pointers * p14,p15=memory stack switch */ /* PTC.G enter non-exclusive */ mov r24 = ar.ccv movl r25 = pmap_ptc_g_sem ;; .ptc_g_0: ld8.acq r26 = [r25] ;; tbit.nz p12, p0 = r26, 63 (p12) br.cond.spnt.few .ptc_g_0 ;; mov ar.ccv = r26 adds r27 = 1, r26 ;; cmpxchg8.rel r27 = [r25], r27, ar.ccv ;; cmp.ne p12, p0 = r26, r27 (p12) br.cond.spnt.few .ptc_g_0 ;; mov ar.ccv = r24 exception_save_restart: { .mmi st8 [r30]=r19,16 // length st8 [r31]=r0,16 // flags add r19=16,r19 ;; } { .mmi st8.spill [r30]=sp,16 // sp st8 [r31]=r20,16 // unat sub sp=r23,r19 ;; } { .mmi mov r19=ar.rnat mov r20=ar.bspstore mov r23=rp ;; } // r18=pr, r19=rnat, r20=bspstore, r21=rsc, r22=iip, r23=rp { .mmi st8 [r30]=r23,16 // rp st8 [r31]=r18,16 // pr mov r24=ar.pfs ;; } { .mmb st8 [r30]=r24,16 // pfs st8 [r31]=r20,16 // bspstore cover ;; } { .mmi mov r18=ar.fpsr mov r23=cr.ipsr extr.u r24=r20,61,3 ;; } // r18=fpsr, r19=rnat, r20=bspstore, r21=rsc, r22=iip, r23=ipsr { .mmi st8 [r30]=r19,16 // rnat st8 [r31]=r0,16 // __spare - cmp.le p12,p13=5,r24 + cmp.le p12,p13=IA64_VM_MINKERN_REGION,r24 ;; } { .mmi st8.spill [r30]=r13,16 // tp st8 [r31]=r21,16 // rsc tbit.nz p11,p10=r23,14 // p11=interrupts enabled ;; } { .mmi (p13) mov r21=ar.k6 // kernel register stack ;; st8 [r30]=r18,16 // fpsr (p13) dep r20=r20,r21,0,9 // align dirty registers ;; } // r19=rnat, r20=bspstore, r22=iip, r23=ipsr { .mmi st8 [r31]=r23,16 // psr (p13) mov ar.bspstore=r20 nop 0 ;; } { .mmi (p13) mov ar.rnat=r19 mov r18=ar.bsp nop 0 ;; } { .mmi mov r19=cr.ifs st8.spill [r30]=gp,16 // gp sub r18=r18,r20 ;; } // r19=ifs, r22=iip { .mmi st8 [r31]=r18,16 // ndirty st8 [r30]=r19,16 // cfm nop 0 ;; } { .mmi mov r18=cr.isr st8 [r31]=r22,16 // iip add r29=16,r30 ;; } { .mmi st8 [r30]=r17,24 // ifa st8 [r31]=r18,24 // isr nop 0 ;; } { .mmi .mem.offset 0,0 st8.spill [r30]=r2,16 // r2 .mem.offset 8,0 st8.spill [r31]=r3,16 // r3 add r2=9*8,r29 ;; } { .mmi .mem.offset 0,0 st8.spill [r30]=r8,16 // r8 .mem.offset 8,0 st8.spill [r31]=r9,16 // r9 add r3=8,r2 ;; } { .mmi .mem.offset 0,0 st8.spill [r30]=r10,16 // r10 .mem.offset 8,0 st8.spill [r31]=r11,16 // r11 add r8=16,r16 ;; } { .mmi .mem.offset 0,0 st8.spill [r30]=r14 // r14 .mem.offset 8,0 st8.spill [r31]=r15 // r15 mov r9=r29 } { .mmb mov r10=ar.csd mov r11=ar.ssd bsw.1 ;; } { .mmi .mem.offset 0,0 st8.spill [r2]=r16,16 // r16 .mem.offset 8,0 st8.spill [r3]=r17,16 // r17 mov r14=b6 ;; } { .mmi .mem.offset 0,0 st8.spill [r2]=r18,16 // r18 .mem.offset 8,0 st8.spill [r3]=r19,16 // r19 mov r15=b7 ;; } { .mmi .mem.offset 0,0 st8.spill [r2]=r20,16 // r20 .mem.offset 8,0 st8.spill [r3]=r21,16 // r21 mov b7=r8 ;; } { .mmi .mem.offset 0,0 st8.spill [r2]=r22,16 // r22 .mem.offset 8,0 st8.spill [r3]=r23,16 // r23 ;; } .mem.offset 0,0 st8.spill [r2]=r24,16 // r24 .mem.offset 8,0 st8.spill [r3]=r25,16 // r25 ;; .mem.offset 0,0 st8.spill [r2]=r26,16 // r26 .mem.offset 8,0 st8.spill [r3]=r27,16 // r27 ;; .mem.offset 0,0 st8.spill [r2]=r28,16 // r28 .mem.offset 8,0 st8.spill [r3]=r29,16 // r29 ;; .mem.offset 0,0 st8.spill [r2]=r30,16 // r30 .mem.offset 8,0 st8.spill [r3]=r31,16 // r31 ;; { .mmi st8 [r2]=r14,16 // b6 mov r17=ar.unat nop 0 ;; } { .mmi st8 [r3]=r15,16 // b7 mov r16=ar.ccv nop 0 ;; } { .mmi st8 [r2]=r16,16 // ccv st8 [r3]=r10,16 // csd nop 0 ;; } { .mmi st8 [r2]=r11,24 // ssd st8 [r9]=r17 nop 0 ;; } stf.spill [r3]=f6,32 // f6 stf.spill [r2]=f7,32 // f7 ;; 
stf.spill [r3]=f8,32 // f8 stf.spill [r2]=f9,32 // f9 ;; stf.spill [r3]=f10,32 // f10 stf.spill [r2]=f11,32 // f11 ;; stf.spill [r3]=f12,32 // f12 stf.spill [r2]=f13,32 // f13 ;; stf.spill [r3]=f14 // f14 stf.spill [r2]=f15 // f15 ;; { .mmi mov ar.rsc=3 mov r13=ar.k4 nop 0 ;; } { .mlx ssm psr.ic|psr.dfh movl gp=__gp ;; } /* PTC.G leave non-exclusive */ srlz.d movl r25 = pmap_ptc_g_sem ;; .ptc_g_1: ld8.acq r26 = [r25] ;; mov ar.ccv = r26 adds r27 = -1, r26 ;; cmpxchg8.rel r27 = [r25], r27, ar.ccv ;; cmp.ne p12, p0 = r26, r27 (p12) br.cond.spnt.few .ptc_g_1 ;; { .mib srlz.d nop 0 br.sptk b7 ;; } END(exception_save) /* * exception_restore: restore interrupted state * * Arguments: * sp+16 trapframe pointer */ ENTRY_NOPROFILE(exception_restore, 0) { .mmi rsm psr.i add r3=SIZEOF_TRAPFRAME-16,sp add r2=SIZEOF_TRAPFRAME,sp ;; } { .mmi srlz.d add r8=SIZEOF_SPECIAL+32,sp nop 0 ;; } // The next load can trap. Let it be... ldf.fill f15=[r2],-32 // f15 ldf.fill f14=[r3],-32 // f14 add sp=16,sp ;; ldf.fill f13=[r2],-32 // f13 ldf.fill f12=[r3],-32 // f12 ;; ldf.fill f11=[r2],-32 // f11 ldf.fill f10=[r3],-32 // f10 ;; ldf.fill f9=[r2],-32 // f9 ldf.fill f8=[r3],-32 // f8 ;; ldf.fill f7=[r2],-24 // f7 ldf.fill f6=[r3],-16 // f6 ;; { .mmi ld8 r8=[r8] // unat (after) ;; mov ar.unat=r8 nop 0 ;; } ld8 r10=[r2],-16 // ssd ld8 r11=[r3],-16 // csd ;; mov ar.ssd=r10 mov ar.csd=r11 ld8 r14=[r2],-16 // ccv ld8 r15=[r3],-16 // b7 ;; { .mmi mov ar.ccv=r14 ld8 r8=[r2],-16 // b6 mov b7=r15 ;; } { .mmi ld8.fill r31=[r3],-16 // r31 ld8.fill r30=[r2],-16 // r30 mov b6=r8 ;; } ld8.fill r29=[r3],-16 // r29 ld8.fill r28=[r2],-16 // r28 ;; ld8.fill r27=[r3],-16 // r27 ld8.fill r26=[r2],-16 // r26 ;; ld8.fill r25=[r3],-16 // r25 ld8.fill r24=[r2],-16 // r24 ;; ld8.fill r23=[r3],-16 // r23 ld8.fill r22=[r2],-16 // r22 ;; ld8.fill r21=[r3],-16 // r21 ld8.fill r20=[r2],-16 // r20 ;; ld8.fill r19=[r3],-16 // r19 ld8.fill r18=[r2],-16 // r18 ;; { .mmb ld8.fill r17=[r3],-16 // r17 ld8.fill r16=[r2],-16 // r16 bsw.0 ;; } { .mmi ld8.fill r15=[r3],-16 // r15 ld8.fill r14=[r2],-16 // r14 add r31=16,sp ;; } { .mmi ld8 r16=[sp] // tf_length ld8.fill r11=[r3],-16 // r11 add r30=24,sp ;; } { .mmi ld8.fill r10=[r2],-16 // r10 ld8.fill r9=[r3],-16 // r9 add r16=r16,sp // ar.k7 ;; } { .mmi ld8.fill r8=[r2],-16 // r8 ld8.fill r3=[r3] // r3 ;; } // We want nested TLB faults from here on... rsm psr.ic|psr.i ld8.fill r2=[r2] // r2 nop 0 ;; srlz.d ld8.fill sp=[r31],16 // sp nop 0 ;; ld8 r17=[r30],16 // unat ld8 r29=[r31],16 // rp ;; ld8 r18=[r30],16 // pr ld8 r28=[r31],16 // pfs mov rp=r29 ;; ld8 r20=[r30],24 // bspstore ld8 r21=[r31],24 // rnat mov ar.pfs=r28 ;; ld8.fill r26=[r30],16 // tp ld8 r22=[r31],16 // rsc ;; { .mmi ld8 r23=[r30],16 // fpsr ld8 r24=[r31],16 // psr extr.u r28=r20,61,3 ;; } { .mmi ld8.fill r1=[r30],16 // gp ld8 r27=[r31],16 // ndirty - cmp.le p14,p15=5,r28 + cmp.le p14,p15=IA64_VM_MINKERN_REGION,r28 ;; } { .mmi ld8 r25=[r30] // cfm ld8 r19=[r31] // ip nop 0 ;; } { .mii // Switch register stack alloc r30=ar.pfs,0,0,0,0 // discard current frame shl r31=r27,16 // value for ar.rsc (p15) mov r13=r26 ;; } // The loadrs can fault if the backing store is not currently // mapped. We assured forward progress by getting everything we // need from the trapframe so that we don't care if the CPU // purges that translation when it needs to insert a new one for // the backing store. 
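Note on the .ptc_g_0/.ptc_g_1 loops in exception_save above: pmap_ptc_g_sem (the data8 word placed in .ivt.data earlier in this file) is used as a counting semaphore whose top bit is reserved for the CPU issuing a global TLB purge (ptc.g), so the low-level save path only proceeds when no purge is in flight. A minimal C11-atomics sketch of just the counting protocol follows; bit 63 as the exclusive flag is an assumption read off the tbit.nz test, and this is an illustration, not the kernel's implementation (the exclusive side lives in pmap.c and the real code relies on ld8.acq/cmpxchg8.rel ordering):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t ptc_g_sem;		/* stand-in for pmap_ptc_g_sem */
#define	PTC_G_EXCL	(1ULL << 63)		/* assumed: held by the ptc.g issuer */

static void
ptc_g_enter(void)				/* mirrors .ptc_g_0 */
{
	uint64_t old;

	for (;;) {
		old = atomic_load_explicit(&ptc_g_sem, memory_order_acquire);
		if (old & PTC_G_EXCL)
			continue;		/* global purge in progress; retry */
		if (atomic_compare_exchange_weak_explicit(&ptc_g_sem, &old,
		    old + 1, memory_order_acq_rel, memory_order_relaxed))
			return;			/* counted as a non-exclusive user */
	}
}

static void
ptc_g_leave(void)				/* mirrors .ptc_g_1 */
{
	atomic_fetch_sub_explicit(&ptc_g_sem, 1, memory_order_release);
}

The assembly decrements with a cmpxchg loop rather than an atomic subtract, but the effect is the same: the count of non-exclusive users drops by one, and the ptc.g issuer presumably sets bit 63 and waits for that count to drain before purging.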
{ .mmi mov ar.rsc=r31 // setup for loadrs mov ar.k7=r16 addl r29=NTLBRT_RESTORE,r0 // 22-bit restart token ;; } exception_restore_restart: { .mmi mov r30=ar.bspstore ;; loadrs // load user regs nop 0 ;; } { .mmi mov r31=ar.bspstore ;; mov ar.bspstore=r20 dep r31=0,r31,0,13 // 8KB aligned ;; } { .mmi mov ar.k6=r31 mov ar.rnat=r21 nop 0 ;; } { .mmi mov ar.unat=r17 mov cr.iip=r19 nop 0 } { .mmi mov cr.ipsr=r24 mov cr.ifs=r25 mov pr=r18,0x1ffff ;; } { .mmb mov ar.rsc=r22 mov ar.fpsr=r23 rfi ;; } END(exception_restore) /* * Call exception_save_regs to preserve the interrupted state in a * trapframe. Note that we don't use a call instruction because we * must be careful not to lose track of the RSE state. We then call * trap() with the value of _n_ as an argument to handle the * exception. We arrange for trap() to return to exception_restore * which will restore the interrupted state before executing an rfi to * resume it. */ #define CALL(_func_, _n_, _ifa_) \ { .mib ; \ mov r17=_ifa_ ; \ mov r16=ip ; \ br.sptk exception_save ;; \ } ; \ { .mmi ; \ alloc r15=ar.pfs,0,0,2,0 ;; \ (p11) ssm psr.i ; \ mov out0=_n_ ;; \ } ; \ { .mib ; \ (p11) srlz.d ; \ add out1=16,sp ; \ br.call.sptk rp=_func_ ;; \ } ; \ { .mib ; \ nop 0 ; \ nop 0 ; \ br.sptk exception_restore ;; \ } #define IVT_ENTRY(name, offset) \ .org ia64_vector_table + offset; \ .global ivt_##name; \ .proc ivt_##name; \ .prologue; \ .unwabi @svr4, 'I'; \ .save rp, r0; \ .body; \ ivt_##name: \ XTRACE(offset) #define IVT_END(name) \ .endp ivt_##name #ifdef COMPAT_FREEBSD32 #define IA32_TRAP ia32_trap #else #define IA32_TRAP trap #endif /* * The IA64 Interrupt Vector Table (IVT) contains 20 slots with 64 * bundles per vector and 48 slots with 16 bundles per vector. */ .section .ivt, "ax" .align 32768 .global ia64_vector_table .size ia64_vector_table, 32768 ia64_vector_table: IVT_ENTRY(VHPT_Translation, 0x0000) CALL(trap, 0, cr.ifa) IVT_END(VHPT_Translation) IVT_ENTRY(Instruction_TLB, 0x0400) mov r16=cr.ifa mov r17=pr ;; thash r18=r16 ttag r19=r16 ;; add r21=16,r18 // tag add r20=24,r18 // collision chain ;; ld8 r21=[r21] // check VHPT tag ld8 r20=[r20] // bucket head ;; cmp.ne p15,p0=r21,r19 (p15) br.dpnt.few 1f ;; ld8 r21=[r18] // read pte ;; itc.i r21 // insert pte mov pr=r17,0x1ffff ;; rfi // done ;; 1: rsm psr.dt // turn off data translations dep r20=0,r20,61,3 // convert vhpt ptr to physical ;; srlz.d // serialize ld8 r20=[r20] // first entry ;; 2: cmp.eq p15,p0=r0,r20 // done? 
(p15) br.cond.spnt.few 9f // bail if done ;; add r21=16,r20 // tag location ;; ld8 r21=[r21] // read tag ;; cmp.ne p15,p0=r21,r19 // compare tags (p15) br.cond.sptk.few 3f // if not, read next in chain ;; ld8 r21=[r20] // read pte mov r22=PTE_ACCESSED ;; or r21=r21,r22 ;; st8 [r20]=r21,8 ;; ld8 r22=[r20] // read rest of pte ;; dep r18=0,r18,61,3 // convert vhpt ptr to physical ;; add r20=16,r18 // address of tag ;; ld8.acq r23=[r20] // read old tag ;; dep r23=-1,r23,63,1 // set ti bit ;; st8.rel [r20]=r23 // store old tag + ti ;; mf // make sure everyone sees ;; st8 [r18]=r21,8 // store pte ;; st8 [r18]=r22,8 ;; st8.rel [r18]=r19 // store new tag ;; itc.i r21 // and place in TLB ssm psr.dt ;; srlz.d mov pr=r17,0x1ffff // restore predicates rfi ;; 3: add r20=24,r20 // next in chain ;; ld8 r20=[r20] // read chain br.sptk 2b // loop ;; 9: ssm psr.dt mov pr=r17,0x1ffff // restore predicates ;; srlz.d ;; CALL(trap, 20, cr.ifa) // Page Not Present trap IVT_END(Instruction_TLB) IVT_ENTRY(Data_TLB, 0x0800) mov r16=cr.ifa mov r17=pr ;; thash r18=r16 ttag r19=r16 ;; add r21=16,r18 // tag add r20=24,r18 // collision chain ;; ld8 r21=[r21] // check VHPT tag ld8 r20=[r20] // bucket head ;; cmp.ne p15,p0=r21,r19 (p15) br.dpnt.few 1f ;; ld8 r21=[r18] // read pte ;; itc.d r21 // insert pte mov pr=r17,0x1ffff ;; rfi // done ;; 1: rsm psr.dt // turn off data translations dep r20=0,r20,61,3 // convert vhpt ptr to physical ;; srlz.d // serialize ld8 r20=[r20] // first entry ;; 2: cmp.eq p15,p0=r0,r20 // done? (p15) br.cond.spnt.few 9f // bail if done ;; add r21=16,r20 // tag location ;; ld8 r21=[r21] // read tag ;; cmp.ne p15,p0=r21,r19 // compare tags (p15) br.cond.sptk.few 3f // if not, read next in chain ;; ld8 r21=[r20] // read pte mov r22=PTE_ACCESSED ;; or r21=r21,r22 ;; st8 [r20]=r21,8 ;; ld8 r22=[r20] // read rest of pte ;; dep r18=0,r18,61,3 // convert vhpt ptr to physical ;; add r20=16,r18 // address of tag ;; ld8.acq r23=[r20] // read old tag ;; dep r23=-1,r23,63,1 // set ti bit ;; st8.rel [r20]=r23 // store old tag + ti ;; mf // make sure everyone sees ;; st8 [r18]=r21,8 // store pte ;; st8 [r18]=r22,8 ;; st8.rel [r18]=r19 // store new tag ;; itc.d r21 // and place in TLB ssm psr.dt ;; srlz.d mov pr=r17,0x1ffff // restore predicates rfi ;; 3: add r20=24,r20 // next in chain ;; ld8 r20=[r20] // read chain br.sptk 2b // loop ;; 9: ssm psr.dt mov pr=r17,0x1ffff // restore predicates ;; srlz.d ;; CALL(trap, 20, cr.ifa) // Page Not Present trap IVT_END(Data_TLB) IVT_ENTRY(Alternate_Instruction_TLB, 0x0c00) mov r16=cr.ifa // where did it happen mov r18=pr // save predicates ;; extr.u r17=r16,61,3 // get region number mov r19=PTE_PRESENT+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+PTE_AR_RWX ;; - cmp.eq p13,p0=4,r17 // RR4? + cmp.eq p13,p0=IA64_PBVM_RR,r17 // RR4? (p13) br.cond.sptk.few 4f ;; cmp.ge p13,p0=5,r17 // RR0-RR5? cmp.eq p14,p15=7,r17 // RR7? 
(p13) br.cond.spnt.few 9f ;; (p14) add r19=PTE_MA_WB,r19 (p15) add r19=PTE_MA_UC,r19 dep r17=0,r16,50,14 // clear bits above PPN ;; 1: dep r16=r19,r17,0,12 // put pte bits in 0..11 ;; itc.i r16 mov pr=r18,0x1ffff // restore predicates ;; rfi ;; 4: add r19=PTE_MA_WB,r19 movl r17=IA64_PBVM_BASE ;; sub r17=r16,r17 movl r16=IA64_PBVM_PGTBL ;; extr.u r17=r17,IA64_PBVM_PAGE_SHIFT,61-IA64_PBVM_PAGE_SHIFT ;; shladd r16=r17,3,r16 ;; ld8 r17=[r16] br.sptk 1b ;; 9: mov pr=r18,0x1ffff // restore predicates CALL(trap, 3, cr.ifa) IVT_END(Alternate_Instruction_TLB) IVT_ENTRY(Alternate_Data_TLB, 0x1000) mov r16=cr.ifa // where did it happen mov r18=pr // save predicates ;; extr.u r17=r16,61,3 // get region number mov r19=PTE_PRESENT+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+PTE_AR_RWX ;; - cmp.eq p13,p0=4,r17 // RR4? + cmp.eq p13,p0=IA64_PBVM_RR,r17 // RR4? (p13) br.cond.sptk.few 4f ;; cmp.ge p13,p0=5,r17 // RR0-RR5? cmp.eq p14,p15=7,r17 // RR7? (p13) br.cond.spnt.few 9f ;; (p14) add r19=PTE_MA_WB,r19 (p15) add r19=PTE_MA_UC,r19 dep r17=0,r16,50,14 // clear bits above PPN ;; 1: dep r16=r19,r17,0,12 // put pte bits in 0..11 ;; itc.d r16 mov pr=r18,0x1ffff // restore predicates ;; rfi ;; 4: add r19=PTE_MA_WB,r19 movl r17=IA64_PBVM_BASE ;; sub r17=r16,r17 movl r16=IA64_PBVM_PGTBL ;; extr.u r17=r17,IA64_PBVM_PAGE_SHIFT,61-IA64_PBVM_PAGE_SHIFT ;; shladd r16=r17,3,r16 ;; ld8 r17=[r16] br.sptk 1b ;; 9: mov pr=r18,0x1ffff // restore predicates CALL(trap, 4, cr.ifa) IVT_END(Alternate_Data_TLB) IVT_ENTRY(Data_Nested_TLB, 0x1400) // See exception_save_restart and exception_restore_restart for the // contexts that may cause a data nested TLB. We can only use the // banked general registers and predicates, but don't use: // p14 & p15 - Set in exception save // r16 & r17 - Arguments to exception save // r30 - Faulting address (modulo page size) // We assume r30 has the virtual addresses that relate to the data // nested TLB fault. The address does not have to be exact, as long // as it's in the same page. We use physical addressing to avoid // double nested faults. Since all virtual addresses we encounter // here are direct mapped region 7 addresses, we have no problem // constructing physical addresses. 
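The rewritten handler below no longer derives the pte address with fixed region-7 arithmetic; it loads ia64_kptdir (the second data8 word placed in .ivt.data above) and walks the kernel page-table directory. Roughly, in C, the walk looks like the sketch below. PAGE_SHIFT, the PTE bit values and the ia64_lpte layout are assumptions inferred from the assembly (the extr.u index widths and the 32-byte entry stride implied by "shl r28=r28,5"), so treat this as an illustration rather than the kernel's definitions:

#include <stdint.h>

#define	PAGE_SHIFT	13			/* assumed 8KB kernel pages */
#define	PTE_ACCESSED	0x0000000000000020UL	/* illustrative bit values */
#define	PTE_DIRTY	0x0000000000000040UL

struct ia64_lpte { uint64_t pte, itir, tag, chain; };	/* 32 bytes, hence "shl 5" */

extern uint64_t ia64_kptdir;	/* the data8 slot in .ivt.data; real declaration is in pmap.c */

#define	KPT_L0_IDX(va)	(((va) >> (3 * PAGE_SHIFT - 8)) & ((1UL << (PAGE_SHIFT - 3)) - 1))
#define	KPT_L1_IDX(va)	(((va) >> (2 * PAGE_SHIFT - 5)) & ((1UL << (PAGE_SHIFT - 3)) - 1))
#define	KPT_PTE_IDX(va)	(((va) >> PAGE_SHIFT) & ((1UL << (PAGE_SHIFT - 5)) - 1))

static struct ia64_lpte *
kpt_find_pte(uint64_t va)
{
	uint64_t *l0dir, *l1dir;
	struct ia64_lpte *ptepage;

	l0dir = (uint64_t *)ia64_kptdir;			/* root directory page */
	l1dir = (uint64_t *)l0dir[KPT_L0_IDX(va)];		/* L0 entry -> L1 directory page */
	ptepage = (struct ia64_lpte *)l1dir[KPT_L1_IDX(va)];	/* L1 entry -> pte page */
	return (ptepage + KPT_PTE_IDX(va));
}

The handler then ORs PTE_DIRTY|PTE_ACCESSED into the entry, stores it back, inserts the translation with itc.d, and uses the NTLBRT_SAVE/NTLBRT_RESTORE tokens in r29 to branch back to whichever restart point took the nested fault.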
+ { .mlx - rsm psr.dt + nop 0 movl r27=ia64_kptdir ;; } { .mii - srlz.d - dep r27=0,r27,61,3 - ;; + ld8 r27=[r27] extr.u r28=r30,3*PAGE_SHIFT-8, PAGE_SHIFT-3 // dir L0 index -} -{ .mii - ld8 r27=[r27] // dir L0 page extr.u r26=r30,2*PAGE_SHIFT-5, PAGE_SHIFT-3 // dir L1 index ;; +} +{ .mmi + rsm psr.dt + ;; + srlz.d dep r27=0,r27,61,3 ;; } { .mmi shladd r27=r28,3,r27 ;; ld8 r27=[r27] // dir L1 page extr.u r28=r30,PAGE_SHIFT,PAGE_SHIFT-5 // pte index ;; } { .mmi shladd r27=r26,3,r27 ;; mov r26=rr[r30] dep r27=0,r27,61,3 ;; } { .mii ld8 r27=[r27] // pte page shl r28=r28,5 dep r26=0,r26,0,2 ;; } { .mmi add r27=r28,r27 ;; mov cr.ifa=r30 dep r27=0,r27,61,3 ;; } { .mmi ld8 r28=[r27] // pte ;; mov cr.itir=r26 or r28=PTE_DIRTY+PTE_ACCESSED,r28 ;; } { .mmi st8 [r27]=r28 ;; addl r26=NTLBRT_SAVE,r0 addl r27=NTLBRT_RESTORE,r0 } { .mmi itc.d r28 ;; ssm psr.dt cmp.eq p12,p0=r29,r26 ;; } { .mib srlz.d cmp.eq p13,p0=r29,r27 (p12) br.cond.sptk.few exception_save_restart ;; } { .mib nop 0 nop 0 (p13) br.cond.sptk.few exception_restore_restart ;; } { .mlx mov r26=ar.bsp movl r27=kstack ;; } { .mmi mov r28=sp nop 0 addl r27=KSTACK_PAGES*PAGE_SIZE-16,r0 ;; } { .mmi mov sp=r27 ;; mov r27=ar.bspstore nop 0 ;; } CALL(trap, 5, r30) IVT_END(Data_Nested_TLB) IVT_ENTRY(Instruction_Key_Miss, 0x1800) CALL(trap, 6, cr.ifa) IVT_END(Instruction_Key_Miss) IVT_ENTRY(Data_Key_Miss, 0x1c00) CALL(trap, 7, cr.ifa) IVT_END(Data_Key_Miss) IVT_ENTRY(Dirty_Bit, 0x2000) mov r16=cr.ifa mov r17=pr ;; thash r18=r16 ;; ttag r19=r16 add r20=24,r18 // collision chain ;; ld8 r20=[r20] // bucket head ;; rsm psr.dt // turn off data translations dep r20=0,r20,61,3 // convert vhpt ptr to physical ;; srlz.d // serialize ld8 r20=[r20] // first entry ;; 1: cmp.eq p15,p0=r0,r20 // done? (p15) br.cond.spnt.few 9f // bail if done ;; add r21=16,r20 // tag location ;; ld8 r21=[r21] // read tag ;; cmp.ne p15,p0=r21,r19 // compare tags (p15) br.cond.sptk.few 2f // if not, read next in chain ;; ld8 r21=[r20] // read pte mov r22=PTE_DIRTY+PTE_ACCESSED ;; or r21=r22,r21 // set dirty & access bit ;; st8 [r20]=r21,8 // store back ;; ld8 r22=[r20] // read rest of pte ;; dep r18=0,r18,61,3 // convert vhpt ptr to physical ;; add r20=16,r18 // address of tag ;; ld8.acq r23=[r20] // read old tag ;; dep r23=-1,r23,63,1 // set ti bit ;; st8.rel [r20]=r23 // store old tag + ti ;; mf // make sure everyone sees ;; st8 [r18]=r21,8 // store pte ;; st8 [r18]=r22,8 ;; st8.rel [r18]=r19 // store new tag ;; itc.d r21 // and place in TLB ssm psr.dt ;; srlz.d mov pr=r17,0x1ffff // restore predicates rfi ;; 2: add r20=24,r20 // next in chain ;; ld8 r20=[r20] // read chain br.sptk 1b // loop ;; 9: ssm psr.dt mov pr=r17,0x1ffff // restore predicates ;; srlz.d ;; CALL(trap, 8, cr.ifa) // die horribly IVT_END(Dirty_Bit) IVT_ENTRY(Instruction_Access_Bit, 0x2400) mov r16=cr.ifa mov r17=pr ;; thash r18=r16 ;; ttag r19=r16 add r20=24,r18 // collision chain ;; ld8 r20=[r20] // bucket head ;; rsm psr.dt // turn off data translations dep r20=0,r20,61,3 // convert vhpt ptr to physical ;; srlz.d // serialize ld8 r20=[r20] // first entry ;; 1: cmp.eq p15,p0=r0,r20 // done? 
(p15) br.cond.spnt.few 9f // bail if done ;; add r21=16,r20 // tag location ;; ld8 r21=[r21] // read tag ;; cmp.ne p15,p0=r21,r19 // compare tags (p15) br.cond.sptk.few 2f // if not, read next in chain ;; ld8 r21=[r20] // read pte mov r22=PTE_ACCESSED ;; or r21=r22,r21 // set accessed bit ;; st8 [r20]=r21,8 // store back ;; ld8 r22=[r20] // read rest of pte ;; dep r18=0,r18,61,3 // convert vhpt ptr to physical ;; add r20=16,r18 // address of tag ;; ld8.acq r23=[r20] // read old tag ;; dep r23=-1,r23,63,1 // set ti bit ;; st8.rel [r20]=r23 // store old tag + ti ;; mf // make sure everyone sees ;; st8 [r18]=r21,8 // store pte ;; st8 [r18]=r22,8 ;; st8.rel [r18]=r19 // store new tag ;; itc.i r21 // and place in TLB ssm psr.dt ;; srlz.d mov pr=r17,0x1ffff // restore predicates rfi // walker will retry the access ;; 2: add r20=24,r20 // next in chain ;; ld8 r20=[r20] // read chain br.sptk 1b // loop ;; 9: ssm psr.dt mov pr=r17,0x1ffff // restore predicates ;; srlz.d ;; CALL(trap, 9, cr.ifa) IVT_END(Instruction_Access_Bit) IVT_ENTRY(Data_Access_Bit, 0x2800) mov r16=cr.ifa mov r17=pr ;; thash r18=r16 ;; ttag r19=r16 add r20=24,r18 // collision chain ;; ld8 r20=[r20] // bucket head ;; rsm psr.dt // turn off data translations dep r20=0,r20,61,3 // convert vhpt ptr to physical ;; srlz.d // serialize ld8 r20=[r20] // first entry ;; 1: cmp.eq p15,p0=r0,r20 // done? (p15) br.cond.spnt.few 9f // bail if done ;; add r21=16,r20 // tag location ;; ld8 r21=[r21] // read tag ;; cmp.ne p15,p0=r21,r19 // compare tags (p15) br.cond.sptk.few 2f // if not, read next in chain ;; ld8 r21=[r20] // read pte mov r22=PTE_ACCESSED ;; or r21=r22,r21 // set accessed bit ;; st8 [r20]=r21,8 // store back ;; ld8 r22=[r20] // read rest of pte ;; dep r18=0,r18,61,3 // convert vhpt ptr to physical ;; add r20=16,r18 // address of tag ;; ld8.acq r23=[r20] // read old tag ;; dep r23=-1,r23,63,1 // set ti bit ;; st8.rel [r20]=r23 // store old tag + ti ;; mf // make sure everyone sees ;; st8 [r18]=r21,8 // store pte ;; st8 [r18]=r22,8 ;; st8.rel [r18]=r19 // store new tag ;; itc.d r21 // and place in TLB ssm psr.dt ;; srlz.d mov pr=r17,0x1ffff // restore predicates rfi // walker will retry the access ;; 2: add r20=24,r20 // next in chain ;; ld8 r20=[r20] // read chain br.sptk 1b // loop ;; 9: ssm psr.dt mov pr=r17,0x1ffff // restore predicates ;; srlz.d ;; CALL(trap, 10, cr.ifa) IVT_END(Data_Access_Bit) IVT_ENTRY(Break_Instruction, 0x2c00) { .mib mov r17=cr.iim mov r16=ip br.sptk exception_save ;; } { .mmi alloc r15=ar.pfs,0,0,2,0 ;; (p11) ssm psr.i mov out0=11 ;; } { .mmi flushrs ;; (p11) srlz.d add out1=16,sp } { .mib nop 0 nop 0 br.call.sptk rp=trap ;; } { .mib nop 0 nop 0 br.sptk exception_restore ;; } IVT_END(Break_Instruction) IVT_ENTRY(External_Interrupt, 0x3000) { .mib mov r17=ar.itc // Put the ITC in the trapframe. 
mov r16=ip br.sptk exception_save ;; } { .mmi alloc r15=ar.pfs,0,0,1,0 nop 0 nop 0 ;; } { .mib add out0=16,sp nop 0 br.call.sptk rp=ia64_handle_intr ;; } { .mib nop 0 nop 0 br.sptk exception_restore ;; } IVT_END(External_Interrupt) IVT_ENTRY(Reserved_3400, 0x3400) CALL(trap, 13, cr.ifa) IVT_END(Reserved_3400) IVT_ENTRY(Reserved_3800, 0x3800) CALL(trap, 14, cr.ifa) IVT_END(Reserved_3800) IVT_ENTRY(Reserved_3c00, 0x3c00) CALL(trap, 15, cr.ifa) IVT_END(Reserved_3c00) IVT_ENTRY(Reserved_4000, 0x4000) CALL(trap, 16, cr.ifa) IVT_END(Reserved_4000) IVT_ENTRY(Reserved_4400, 0x4400) CALL(trap, 17, cr.ifa) IVT_END(Reserved_4400) IVT_ENTRY(Reserved_4800, 0x4800) CALL(trap, 18, cr.ifa) IVT_END(Reserved_4800) IVT_ENTRY(Reserved_4c00, 0x4c00) CALL(trap, 19, cr.ifa) IVT_END(Reserved_4c00) IVT_ENTRY(Page_Not_Present, 0x5000) CALL(trap, 20, cr.ifa) IVT_END(Page_Not_Present) IVT_ENTRY(Key_Permission, 0x5100) CALL(trap, 21, cr.ifa) IVT_END(Key_Permission) IVT_ENTRY(Instruction_Access_Rights, 0x5200) CALL(trap, 22, cr.ifa) IVT_END(Instruction_Access_Rights) IVT_ENTRY(Data_Access_Rights, 0x5300) CALL(trap, 23, cr.ifa) IVT_END(Data_Access_Rights) IVT_ENTRY(General_Exception, 0x5400) CALL(trap, 24, cr.ifa) IVT_END(General_Exception) IVT_ENTRY(Disabled_FP_Register, 0x5500) CALL(trap, 25, cr.ifa) IVT_END(Disabled_FP_Register) IVT_ENTRY(NaT_Consumption, 0x5600) CALL(trap, 26, cr.ifa) IVT_END(NaT_Consumption) IVT_ENTRY(Speculation, 0x5700) CALL(trap, 27, cr.iim) IVT_END(Speculation) IVT_ENTRY(Reserved_5800, 0x5800) CALL(trap, 28, cr.ifa) IVT_END(Reserved_5800) IVT_ENTRY(Debug, 0x5900) CALL(trap, 29, cr.ifa) IVT_END(Debug) IVT_ENTRY(Unaligned_Reference, 0x5a00) CALL(trap, 30, cr.ifa) IVT_END(Unaligned_Reference) IVT_ENTRY(Unsupported_Data_Reference, 0x5b00) CALL(trap, 31, cr.ifa) IVT_END(Unsupported_Data_Reference) IVT_ENTRY(Floating_Point_Fault, 0x5c00) CALL(trap, 32, cr.ifa) IVT_END(Floating_Point_Fault) IVT_ENTRY(Floating_Point_Trap, 0x5d00) CALL(trap, 33, cr.ifa) IVT_END(Floating_Point_Trap) IVT_ENTRY(Lower_Privilege_Transfer_Trap, 0x5e00) CALL(trap, 34, cr.ifa) IVT_END(Lower_Privilege_Transfer_Trap) IVT_ENTRY(Taken_Branch_Trap, 0x5f00) CALL(trap, 35, cr.ifa) IVT_END(Taken_Branch_Trap) IVT_ENTRY(Single_Step_Trap, 0x6000) CALL(trap, 36, cr.ifa) IVT_END(Single_Step_Trap) IVT_ENTRY(Reserved_6100, 0x6100) CALL(trap, 37, cr.ifa) IVT_END(Reserved_6100) IVT_ENTRY(Reserved_6200, 0x6200) CALL(trap, 38, cr.ifa) IVT_END(Reserved_6200) IVT_ENTRY(Reserved_6300, 0x6300) CALL(trap, 39, cr.ifa) IVT_END(Reserved_6300) IVT_ENTRY(Reserved_6400, 0x6400) CALL(trap, 40, cr.ifa) IVT_END(Reserved_6400) IVT_ENTRY(Reserved_6500, 0x6500) CALL(trap, 41, cr.ifa) IVT_END(Reserved_6500) IVT_ENTRY(Reserved_6600, 0x6600) CALL(trap, 42, cr.ifa) IVT_END(Reserved_6600) IVT_ENTRY(Reserved_6700, 0x6700) CALL(trap, 43, cr.ifa) IVT_END(Reserved_6700) IVT_ENTRY(Reserved_6800, 0x6800) CALL(trap, 44, cr.ifa) IVT_END(Reserved_6800) IVT_ENTRY(IA_32_Exception, 0x6900) CALL(IA32_TRAP, 45, cr.ifa) IVT_END(IA_32_Exception) IVT_ENTRY(IA_32_Intercept, 0x6a00) CALL(IA32_TRAP, 46, cr.iim) IVT_END(IA_32_Intercept) IVT_ENTRY(IA_32_Interrupt, 0x6b00) CALL(IA32_TRAP, 47, cr.ifa) IVT_END(IA_32_Interrupt) IVT_ENTRY(Reserved_6c00, 0x6c00) CALL(trap, 48, cr.ifa) IVT_END(Reserved_6c00) IVT_ENTRY(Reserved_6d00, 0x6d00) CALL(trap, 49, cr.ifa) IVT_END(Reserved_6d00) IVT_ENTRY(Reserved_6e00, 0x6e00) CALL(trap, 50, cr.ifa) IVT_END(Reserved_6e00) IVT_ENTRY(Reserved_6f00, 0x6f00) CALL(trap, 51, cr.ifa) IVT_END(Reserved_6f00) IVT_ENTRY(Reserved_7000, 0x7000) CALL(trap, 52, cr.ifa) 
IVT_END(Reserved_7000) IVT_ENTRY(Reserved_7100, 0x7100) CALL(trap, 53, cr.ifa) IVT_END(Reserved_7100) IVT_ENTRY(Reserved_7200, 0x7200) CALL(trap, 54, cr.ifa) IVT_END(Reserved_7200) IVT_ENTRY(Reserved_7300, 0x7300) CALL(trap, 55, cr.ifa) IVT_END(Reserved_7300) IVT_ENTRY(Reserved_7400, 0x7400) CALL(trap, 56, cr.ifa) IVT_END(Reserved_7400) IVT_ENTRY(Reserved_7500, 0x7500) CALL(trap, 57, cr.ifa) IVT_END(Reserved_7500) IVT_ENTRY(Reserved_7600, 0x7600) CALL(trap, 58, cr.ifa) IVT_END(Reserved_7600) IVT_ENTRY(Reserved_7700, 0x7700) CALL(trap, 59, cr.ifa) IVT_END(Reserved_7700) IVT_ENTRY(Reserved_7800, 0x7800) CALL(trap, 60, cr.ifa) IVT_END(Reserved_7800) IVT_ENTRY(Reserved_7900, 0x7900) CALL(trap, 61, cr.ifa) IVT_END(Reserved_7900) IVT_ENTRY(Reserved_7a00, 0x7a00) CALL(trap, 62, cr.ifa) IVT_END(Reserved_7a00) IVT_ENTRY(Reserved_7b00, 0x7b00) CALL(trap, 63, cr.ifa) IVT_END(Reserved_7b00) IVT_ENTRY(Reserved_7c00, 0x7c00) CALL(trap, 64, cr.ifa) IVT_END(Reserved_7c00) IVT_ENTRY(Reserved_7d00, 0x7d00) CALL(trap, 65, cr.ifa) IVT_END(Reserved_7d00) IVT_ENTRY(Reserved_7e00, 0x7e00) CALL(trap, 66, cr.ifa) IVT_END(Reserved_7e00) IVT_ENTRY(Reserved_7f00, 0x7f00) CALL(trap, 67, cr.ifa) IVT_END(Reserved_7f00) Index: head/sys/ia64/ia64/genassym.c =================================================================== --- head/sys/ia64/ia64/genassym.c (revision 221270) +++ head/sys/ia64/ia64/genassym.c (revision 221271) @@ -1,124 +1,124 @@ /*- * Copyright (c) 1982, 1990 The Regents of the University of California. * All rights reserved. * * This code is derived from software contributed to Berkeley by * William Jolitz. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #include "opt_compat.h" #include "opt_kstack_pages.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef COMPAT_FREEBSD32 ASSYM(COMPAT_FREEBSD32, COMPAT_FREEBSD32); #endif ASSYM(DT_NULL, DT_NULL); ASSYM(DT_RELA, DT_RELA); ASSYM(DT_RELAENT, DT_RELAENT); ASSYM(DT_RELASZ, DT_RELASZ); ASSYM(DT_SYMTAB, DT_SYMTAB); ASSYM(DT_SYMENT, DT_SYMENT); ASSYM(EFAULT, EFAULT); ASSYM(ENAMETOOLONG, ENAMETOOLONG); ASSYM(ERESTART, ERESTART); ASSYM(FRAME_SYSCALL, FRAME_SYSCALL); -ASSYM(IA64_ID_PAGE_SHIFT, IA64_ID_PAGE_SHIFT); - ASSYM(IA64_PBVM_BASE, IA64_PBVM_BASE); ASSYM(IA64_PBVM_PAGE_SHIFT, IA64_PBVM_PAGE_SHIFT); ASSYM(IA64_PBVM_PGTBL, IA64_PBVM_PGTBL); ASSYM(IA64_PBVM_RR, IA64_PBVM_RR); + +ASSYM(IA64_VM_MINKERN_REGION, IA64_VM_MINKERN_REGION); ASSYM(KSTACK_PAGES, KSTACK_PAGES); ASSYM(MC_PRESERVED, offsetof(mcontext_t, mc_preserved)); ASSYM(MC_PRESERVED_FP, offsetof(mcontext_t, mc_preserved_fp)); ASSYM(MC_SPECIAL, offsetof(mcontext_t, mc_special)); ASSYM(MC_SPECIAL_BSPSTORE, offsetof(mcontext_t, mc_special.bspstore)); ASSYM(MC_SPECIAL_RNAT, offsetof(mcontext_t, mc_special.rnat)); ASSYM(PAGE_SHIFT, PAGE_SHIFT); ASSYM(PAGE_SIZE, PAGE_SIZE); ASSYM(PC_CURRENT_PMAP, offsetof(struct pcpu, pc_md.current_pmap)); ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread)); ASSYM(PC_IDLETHREAD, offsetof(struct pcpu, pc_idlethread)); ASSYM(PCB_CURRENT_PMAP, offsetof(struct pcb, pcb_current_pmap)); ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault)); ASSYM(PCB_SPECIAL_RP, offsetof(struct pcb, pcb_special.rp)); ASSYM(R_IA_64_DIR64LSB, R_IA_64_DIR64LSB); ASSYM(R_IA_64_FPTR64LSB, R_IA_64_FPTR64LSB); ASSYM(R_IA_64_NONE, R_IA_64_NONE); ASSYM(R_IA_64_REL64LSB, R_IA_64_REL64LSB); ASSYM(SIZEOF_PCB, sizeof(struct pcb)); ASSYM(SIZEOF_SPECIAL, sizeof(struct _special)); ASSYM(SIZEOF_TRAPFRAME, sizeof(struct trapframe)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TD_KSTACK, offsetof(struct thread, td_kstack)); ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TDF_ASTPENDING, TDF_ASTPENDING); ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); ASSYM(UC_MCONTEXT, offsetof(ucontext_t, uc_mcontext)); ASSYM(VM_MAXUSER_ADDRESS, VM_MAXUSER_ADDRESS); Index: head/sys/ia64/ia64/locore.S =================================================================== --- head/sys/ia64/ia64/locore.S (revision 221270) +++ head/sys/ia64/ia64/locore.S (revision 221271) @@ -1,477 +1,358 @@ /*- + * Copyright (c) 2001-2011 Marcel Moolenaar * Copyright (c) 1998 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ -#include #include #include #include #include -#include #include /* * The Altix 350 needs more than the architected 16KB (8KB for stack and * 8KB for RSE backing store) when calling EFI to setup virtual mode. */ #define FW_STACK_SIZE 3*PAGE_SIZE - .section .data.kstack, "aw" + .section .ivt.data, "aw" .align PAGE_SIZE .global kstack kstack: .space FW_STACK_SIZE .global kstack_top kstack_top: .text /* * Not really a leaf but we can't return. * The EFI loader passes the physical address of the bootinfo block in * register r8. */ ENTRY_NOPROFILE(__start, 1) .prologue .save rp,r0 .body { .mlx mov ar.rsc=0 movl r16=ia64_vector_table // set up IVT early ;; } { .mlx mov cr.iva=r16 movl r16=kstack ;; } { .mmi srlz.i ;; ssm IA64_PSR_DFH mov r17=FW_STACK_SIZE-16 ;; } { .mlx add sp=r16,r17 // proc0's stack movl gp=__gp // find kernel globals ;; } { .mlx mov ar.bspstore=r16 // switch backing store - movl r16=pa_bootinfo + movl r16=bootinfo ;; } { .mmi st8 [r16]=r8 // save the PA of the bootinfo block loadrs // invalidate regs mov r17=IA64_DCR_DEFAULT ;; } { .mmi mov cr.dcr=r17 mov ar.rsc=3 // turn rse back on nop 0 ;; } { .mmi srlz.d alloc r16=ar.pfs,0,0,1,0 mov out0=r0 // we are linked at the right address ;; // we just need to process fptrs } { .mib nop 0 nop 0 br.call.sptk.many rp=_reloc ;; } { .mib nop 0 nop 0 br.call.sptk.many rp=ia64_init ;; } // We have the new bspstore in r8 and the new sp in r9. // Switch onto the new stack and call mi_startup(). { .mmi mov ar.rsc = 0 ;; mov ar.bspstore = r8 mov sp = r9 ;; } { .mmi loadrs ;; mov ar.rsc = 3 nop 0 ;; } { .mib nop 0 nop 0 br.call.sptk.many rp=mi_startup ;; } /* NOTREACHED */ 1: br.cond.sptk.few 1b END(__start) /* * fork_trampoline() * * Arrange for a function to be invoked neatly, after a cpu_switch(). * * Invokes fork_exit() passing in three arguments: a callout function, an * argument to the callout, and a trapframe pointer. For child processes * returning from fork(2), the argument is a pointer to the child process. * * The callout function and its argument is in the trapframe in scratch * registers r2 and r3. */ ENTRY(fork_trampoline, 0) .prologue .save rp,r0 .body { .mmi alloc r14=ar.pfs,0,0,3,0 add r15=32+SIZEOF_SPECIAL+8,sp add r16=32+SIZEOF_SPECIAL+16,sp ;; } { .mmi ld8 out0=[r15] ld8 out1=[r16] nop 0 } { .mib add out2=16,sp nop 0 br.call.sptk rp=fork_exit ;; } // If we get back here, it means we're a user space process that's // the immediate result of fork(2). .global enter_userland .type enter_userland, @function enter_userland: { .mib nop 0 nop 0 br.sptk epc_syscall_return ;; } END(fork_trampoline) - -#ifdef SMP -/* - * AP wake-up entry point. The handoff state is similar as for the BSP, - * as described on page 3-9 of the IPF SAL Specification. The difference - * lies in the contents of register b0. For APs this register holds the - * return address into the SAL rendezvous routine. - * - * Note that we're responsible for clearing the IRR bit by reading cr.ivr - * and issuing the EOI to the local SAPIC. 
- */ - .align 32 -ENTRY_NOPROFILE(os_boot_rendez,0) - mov r16=cr.ivr // clear IRR bit - ;; - srlz.d - mov cr.eoi=r0 // ACK the wake-up - ;; - srlz.d - rsm IA64_PSR_IC|IA64_PSR_I - ;; - mov r16 = (5<<8)|(PAGE_SHIFT<<2)|1 - movl r17 = 5<<61 - ;; - mov rr[r17] = r16 - ;; - srlz.d - mov r16 = (6<<8)|(IA64_ID_PAGE_SHIFT<<2) - movl r17 = 6<<61 - ;; - mov rr[r17] = r16 - ;; - srlz.d - mov r16 = (7<<8)|(IA64_ID_PAGE_SHIFT<<2) - movl r17 = 7<<61 - ;; - mov rr[r17] = r16 - ;; - srlz.d - mov r18 = 28<<2 - movl r16 = PTE_PRESENT+PTE_MA_WB+PTE_ACCESSED+PTE_DIRTY+ \ - PTE_PL_KERN+PTE_AR_RWX+PTE_ED - ;; - mov cr.ifa = r17 - mov cr.itir = r18 - ptr.d r17, r18 - ptr.i r17, r18 - ;; - srlz.i - ;; - itr.d dtr[r0] = r16 - mov r18 = IA64_DCR_DEFAULT - ;; - itr.i itr[r0] = r16 - mov cr.dcr = r18 - ;; - srlz.i - ;; -1: mov r16 = ip - add r17 = 2f-1b, r17 - movl r18 = (IA64_PSR_AC|IA64_PSR_BN|IA64_PSR_DFH|IA64_PSR_DT|IA64_PSR_IC|IA64_PSR_IT|IA64_PSR_RT) - ;; - add r17 = r17, r16 - mov cr.ipsr = r18 - mov cr.ifs = r0 - ;; - mov cr.iip = r17 - ;; - rfi - - .align 32 -2: -{ .mlx - mov ar.rsc = 0 - movl r16 = ia64_vector_table // set up IVT early - ;; -} -{ .mlx - mov cr.iva = r16 - movl r16 = ap_stack - ;; -} -{ .mmi - srlz.i - ;; - ld8 r16 = [r16] - mov r18 = KSTACK_PAGES*PAGE_SIZE-SIZEOF_PCB-SIZEOF_TRAPFRAME-16 - ;; -} -{ .mlx - mov ar.bspstore = r16 - movl gp = __gp - ;; -} -{ .mmi - loadrs - ;; - alloc r17 = ar.pfs, 0, 0, 0, 0 - add sp = r18, r16 - ;; -} -{ .mib - mov ar.rsc = 3 - nop 0 - br.call.sptk.few rp = ia64_ap_startup - ;; -} - /* NOT REACHED */ -9: -{ .mib - nop 0 - nop 0 - br.sptk 9b - ;; -} -END(os_boot_rendez) - -#endif /* !SMP */ /* * Create a default interrupt name table. The first entry (vector 0) is * hardwaired to the clock interrupt. */ .data .align 8 EXPORT(intrnames) .ascii "clock" .fill INTRNAME_LEN - 5 - 1, 1, ' ' .byte 0 intr_n = 1 .rept INTRCNT_COUNT - 1 .ascii "#" .byte intr_n / 100 + '0' .byte (intr_n % 100) / 10 + '0' .byte intr_n % 10 + '0' .fill INTRNAME_LEN - 1 - 3 - 1, 1, ' ' .byte 0 intr_n = intr_n + 1 .endr EXPORT(eintrnames) .align 8 EXPORT(intrcnt) .fill INTRCNT_COUNT, 8, 0 EXPORT(eintrcnt) .text // in0: image base STATIC_ENTRY(_reloc, 1) alloc loc0=ar.pfs,1,2,0,0 mov loc1=rp ;; movl r15=@gprel(_DYNAMIC) // find _DYNAMIC etc. movl r2=@gprel(fptr_storage) movl r3=@gprel(fptr_storage_end) ;; add r15=r15,gp // relocate _DYNAMIC etc. add r2=r2,gp add r3=r3,gp ;; 1: ld8 r16=[r15],8 // read r15->d_tag ;; ld8 r17=[r15],8 // and r15->d_val ;; cmp.eq p6,p0=DT_NULL,r16 // done? 
(p6) br.cond.dpnt.few 2f ;; cmp.eq p6,p0=DT_RELA,r16 ;; (p6) add r18=r17,in0 // found rela section ;; cmp.eq p6,p0=DT_RELASZ,r16 ;; (p6) mov r19=r17 // found rela size ;; cmp.eq p6,p0=DT_SYMTAB,r16 ;; (p6) add r20=r17,in0 // found symbol table ;; (p6) setf.sig f8=r20 ;; cmp.eq p6,p0=DT_SYMENT,r16 ;; (p6) setf.sig f9=r17 // found symbol entry size ;; cmp.eq p6,p0=DT_RELAENT,r16 ;; (p6) mov r22=r17 // found rela entry size ;; br.sptk.few 1b 2: ld8 r15=[r18],8 // read r_offset ;; ld8 r16=[r18],8 // read r_info add r15=r15,in0 // relocate r_offset ;; ld8 r17=[r18],8 // read r_addend sub r19=r19,r22 // update relasz extr.u r23=r16,0,32 // ELF64_R_TYPE(r16) ;; cmp.eq p6,p0=R_IA_64_NONE,r23 (p6) br.cond.dpnt.few 3f ;; cmp.eq p6,p0=R_IA_64_REL64LSB,r23 (p6) br.cond.dptk.few 4f ;; extr.u r16=r16,32,32 // ELF64_R_SYM(r16) ;; setf.sig f10=r16 // so we can multiply ;; xma.lu f10=f10,f9,f8 // f10=symtab + r_sym*syment ;; getf.sig r16=f10 ;; add r16=8,r16 // address of st_value ;; ld8 r16=[r16] // read symbol value ;; add r16=r16,in0 // relocate symbol value ;; cmp.eq p6,p0=R_IA_64_DIR64LSB,r23 (p6) br.cond.dptk.few 5f ;; cmp.eq p6,p0=R_IA_64_FPTR64LSB,r23 (p6) br.cond.dptk.few 6f ;; 3: cmp.ltu p6,p0=0,r19 // more? (p6) br.cond.dptk.few 2b // loop mov r8=0 // success return value br.cond.sptk.few 9f // done 4: add r16=in0,r17 // BD + A ;; st8 [r15]=r16 // word64 (LSB) br.cond.sptk.few 3b 5: add r16=r16,r17 // S + A ;; st8 [r15]=r16 // word64 (LSB) br.cond.sptk.few 3b 6: movl r17=@gprel(fptr_storage) ;; add r17=r17,gp // start of fptrs ;; 7: cmp.geu p6,p0=r17,r2 // end of fptrs? (p6) br.cond.dpnt.few 8f // can't find existing fptr ld8 r20=[r17] // read function from fptr ;; cmp.eq p6,p0=r16,r20 // same function? ;; (p6) st8 [r15]=r17 // reuse fptr (p6) br.cond.sptk.few 3b // done add r17=16,r17 // next fptr br.cond.sptk.few 7b 8: // allocate new fptr mov r8=1 // failure return value cmp.geu p6,p0=r2,r3 // space left? (p6) br.cond.dpnt.few 9f // bail out st8 [r15]=r2 // install fptr st8 [r2]=r16,8 // write fptr address ;; st8 [r2]=gp,8 // write fptr gp br.cond.sptk.few 3b 9: mov ar.pfs=loc0 mov rp=loc1 ;; br.ret.sptk.few rp END(_reloc) .data .align 16 .global fptr_storage fptr_storage: .space 4096*16 // XXX fptr_storage_end: Index: head/sys/ia64/ia64/machdep.c =================================================================== --- head/sys/ia64/ia64/machdep.c (revision 221270) +++ head/sys/ia64/ia64/machdep.c (revision 221271) @@ -1,1554 +1,1555 @@ /*- * Copyright (c) 2003,2004 Marcel Moolenaar * Copyright (c) 2000,2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_compat.h" #include "opt_ddb.h" #include "opt_kstack_pages.h" #include "opt_sched.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef SMP #include #endif #include #include SYSCTL_NODE(_hw, OID_AUTO, freq, CTLFLAG_RD, 0, ""); SYSCTL_NODE(_machdep, OID_AUTO, cpu, CTLFLAG_RD, 0, ""); static u_int bus_freq; SYSCTL_UINT(_hw_freq, OID_AUTO, bus, CTLFLAG_RD, &bus_freq, 0, "Bus clock frequency"); static u_int cpu_freq; SYSCTL_UINT(_hw_freq, OID_AUTO, cpu, CTLFLAG_RD, &cpu_freq, 0, "CPU clock frequency"); static u_int itc_freq; SYSCTL_UINT(_hw_freq, OID_AUTO, itc, CTLFLAG_RD, &itc_freq, 0, "ITC frequency"); int cold = 1; -u_int64_t pa_bootinfo; struct bootinfo *bootinfo; struct pcpu pcpu0; extern u_int64_t kernel_text[], _end[]; extern u_int64_t ia64_gateway_page[]; extern u_int64_t break_sigtramp[]; extern u_int64_t epc_sigtramp[]; struct fpswa_iface *fpswa_iface; -u_int64_t ia64_pal_base; -u_int64_t ia64_port_base; +vm_size_t ia64_pal_size; +vm_paddr_t ia64_pal_base; +vm_offset_t ia64_port_base; u_int64_t ia64_lapic_addr = PAL_PIB_DEFAULT_ADDR; struct ia64_pib *ia64_pib; static int ia64_sync_icache_needed; char machine[] = MACHINE; SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, ""); static char cpu_model[64]; SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0, "The CPU model name"); static char cpu_family[64]; SYSCTL_STRING(_hw, OID_AUTO, family, CTLFLAG_RD, cpu_family, 0, "The CPU family name"); #ifdef DDB extern vm_offset_t ksym_start, ksym_end; #endif struct msgbuf *msgbufp = NULL; /* Other subsystems (e.g., ACPI) can hook this later. */ void (*cpu_idle_hook)(void) = NULL; long Maxmem = 0; long realmem = 0; #define PHYSMAP_SIZE (2 * VM_PHYSSEG_MAX) vm_paddr_t phys_avail[PHYSMAP_SIZE + 2]; /* must be 2 less so 0 0 can signal end of chunks */ #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2) struct kva_md_info kmi; #define Mhz 1000000L #define Ghz (1000L*Mhz) static void identifycpu(void) { char vendor[17]; char *family_name, *model_name; u_int64_t features, tmp; int number, revision, model, family, archrev; /* * Assumes little-endian. 
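 * CPUID registers 0 and 1 return the 16-byte vendor string as two 64-bit
 * chunks; storing them verbatim only yields readable text on a little-endian
 * CPU (typically "GenuineIntel" on Itanium).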
*/ *(u_int64_t *) &vendor[0] = ia64_get_cpuid(0); *(u_int64_t *) &vendor[8] = ia64_get_cpuid(1); vendor[16] = '\0'; tmp = ia64_get_cpuid(3); number = (tmp >> 0) & 0xff; revision = (tmp >> 8) & 0xff; model = (tmp >> 16) & 0xff; family = (tmp >> 24) & 0xff; archrev = (tmp >> 32) & 0xff; family_name = model_name = "unknown"; switch (family) { case 0x07: family_name = "Itanium"; model_name = "Merced"; break; case 0x1f: family_name = "Itanium 2"; switch (model) { case 0x00: model_name = "McKinley"; break; case 0x01: /* * Deerfield is a low-voltage variant based on the * Madison core. We need circumstantial evidence * (i.e. the clock frequency) to identify those. * Allow for roughly 1% error margin. */ if (cpu_freq > 990 && cpu_freq < 1010) model_name = "Deerfield"; else model_name = "Madison"; break; case 0x02: model_name = "Madison II"; break; } break; case 0x20: ia64_sync_icache_needed = 1; family_name = "Itanium 2"; switch (model) { case 0x00: model_name = "Montecito"; break; } break; } snprintf(cpu_family, sizeof(cpu_family), "%s", family_name); snprintf(cpu_model, sizeof(cpu_model), "%s", model_name); features = ia64_get_cpuid(4); printf("CPU: %s (", model_name); if (cpu_freq) printf("%u Mhz ", cpu_freq); printf("%s)\n", family_name); printf(" Origin = \"%s\" Revision = %d\n", vendor, revision); printf(" Features = 0x%b\n", (u_int32_t) features, "\020" "\001LB" /* long branch (brl) instruction. */ "\002SD" /* Spontaneous deferral. */ "\003AO" /* 16-byte atomic operations (ld, st, cmpxchg). */ ); } static void cpu_startup(void *dummy) { char nodename[16]; struct pcpu *pc; struct pcpu_stats *pcs; /* * Good {morning,afternoon,evening,night}. */ identifycpu(); #ifdef PERFMON perfmon_init(); #endif printf("real memory = %ld (%ld MB)\n", ia64_ptob(Maxmem), ia64_ptob(Maxmem) / 1048576); realmem = Maxmem; /* * Display any holes after the first chunk of extended memory. */ if (bootverbose) { int indx; printf("Physical memory chunk(s):\n"); for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { long size1 = phys_avail[indx + 1] - phys_avail[indx]; printf("0x%08lx - 0x%08lx, %ld bytes (%ld pages)\n", phys_avail[indx], phys_avail[indx + 1] - 1, size1, size1 >> PAGE_SHIFT); } } vm_ksubmap_init(&kmi); printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count), ptoa(cnt.v_free_count) / 1048576); if (fpswa_iface == NULL) printf("Warning: no FPSWA package supplied\n"); else printf("FPSWA Revision = 0x%lx, Entry = %p\n", (long)fpswa_iface->if_rev, (void *)fpswa_iface->if_fpswa); /* * Set up buffers, so they can be used to read disk labels. */ bufinit(); vm_pager_bufferinit(); /* * Traverse the MADT to discover IOSAPIC and Local SAPIC * information. */ ia64_probe_sapics(); ia64_pib = pmap_mapdev(ia64_lapic_addr, sizeof(*ia64_pib)); ia64_mca_init(); /* * Create sysctl tree for per-CPU information. 
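 * Each CPU gets a machdep.cpu.<cpuid> node whose children count the various
 * interrupt and IPI sources, so e.g. "sysctl machdep.cpu.0.nclks" reports the
 * clock interrupts taken on cpu0.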
*/ SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { snprintf(nodename, sizeof(nodename), "%u", pc->pc_cpuid); sysctl_ctx_init(&pc->pc_md.sysctl_ctx); pc->pc_md.sysctl_tree = SYSCTL_ADD_NODE(&pc->pc_md.sysctl_ctx, SYSCTL_STATIC_CHILDREN(_machdep_cpu), OID_AUTO, nodename, CTLFLAG_RD, NULL, ""); if (pc->pc_md.sysctl_tree == NULL) continue; pcs = &pc->pc_md.stats; SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx, SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO, "nasts", CTLFLAG_RD, &pcs->pcs_nasts, "Number of IPI_AST interrupts"); SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx, SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO, "nclks", CTLFLAG_RD, &pcs->pcs_nclks, "Number of clock interrupts"); SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx, SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO, "nextints", CTLFLAG_RD, &pcs->pcs_nextints, "Number of ExtINT interrupts"); SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx, SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO, "nhighfps", CTLFLAG_RD, &pcs->pcs_nhighfps, "Number of IPI_HIGH_FP interrupts"); SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx, SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO, "nhwints", CTLFLAG_RD, &pcs->pcs_nhwints, "Number of hardware (device) interrupts"); SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx, SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO, "npreempts", CTLFLAG_RD, &pcs->pcs_npreempts, "Number of IPI_PREEMPT interrupts"); SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx, SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO, "nrdvs", CTLFLAG_RD, &pcs->pcs_nrdvs, "Number of IPI_RENDEZVOUS interrupts"); SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx, SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO, "nstops", CTLFLAG_RD, &pcs->pcs_nstops, "Number of IPI_STOP interrupts"); SYSCTL_ADD_ULONG(&pc->pc_md.sysctl_ctx, SYSCTL_CHILDREN(pc->pc_md.sysctl_tree), OID_AUTO, "nstrays", CTLFLAG_RD, &pcs->pcs_nstrays, "Number of stray interrupts"); } } SYSINIT(cpu_startup, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); void cpu_flush_dcache(void *ptr, size_t len) { vm_offset_t lim, va; va = (uintptr_t)ptr & ~31; lim = (uintptr_t)ptr + len; while (va < lim) { ia64_fc(va); va += 32; } ia64_srlz_d(); } /* Get current clock frequency for the given cpu id. */ int cpu_est_clockrate(int cpu_id, uint64_t *rate) { if (pcpu_find(cpu_id) == NULL || rate == NULL) return (EINVAL); *rate = (u_long)cpu_freq * 1000000ul; return (0); } void cpu_halt() { efi_reset_system(); } void cpu_idle(int busy) { struct ia64_pal_result res; if (cpu_idle_hook != NULL) (*cpu_idle_hook)(); else res = ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0); } int cpu_idle_wakeup(int cpu) { return (0); } void cpu_reset() { efi_reset_system(); } void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx) { struct pcb *oldpcb, *newpcb; oldpcb = old->td_pcb; #ifdef COMPAT_FREEBSD32 ia32_savectx(oldpcb); #endif if (PCPU_GET(fpcurthread) == old) old->td_frame->tf_special.psr |= IA64_PSR_DFH; if (!savectx(oldpcb)) { atomic_store_rel_ptr(&old->td_lock, mtx); newpcb = new->td_pcb; oldpcb->pcb_current_pmap = pmap_switch(newpcb->pcb_current_pmap); #if defined(SCHED_ULE) && defined(SMP) while (atomic_load_acq_ptr(&new->td_lock) == &blocked_lock) cpu_spinwait(); #endif PCPU_SET(curthread, new); #ifdef COMPAT_FREEBSD32 ia32_restorectx(newpcb); #endif if (PCPU_GET(fpcurthread) == new) new->td_frame->tf_special.psr &= ~IA64_PSR_DFH; restorectx(newpcb); /* We should not get here. 
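 * restorectx() resumes the incoming thread inside its own savectx() call
 * (which then returns non-zero), so control never reaches the panic below.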
*/ panic("cpu_switch: restorectx() returned"); /* NOTREACHED */ } } void cpu_throw(struct thread *old __unused, struct thread *new) { struct pcb *newpcb; newpcb = new->td_pcb; (void)pmap_switch(newpcb->pcb_current_pmap); #if defined(SCHED_ULE) && defined(SMP) while (atomic_load_acq_ptr(&new->td_lock) == &blocked_lock) cpu_spinwait(); #endif PCPU_SET(curthread, new); #ifdef COMPAT_FREEBSD32 ia32_restorectx(newpcb); #endif restorectx(newpcb); /* We should not get here. */ panic("cpu_throw: restorectx() returned"); /* NOTREACHED */ } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { /* * Set pc_acpi_id to "uninitialized". * See sys/dev/acpica/acpi_cpu.c */ pcpu->pc_acpi_id = 0xffffffff; } void spinlock_enter(void) { struct thread *td; int intr; td = curthread; if (td->td_md.md_spinlock_count == 0) { intr = intr_disable(); td->td_md.md_spinlock_count = 1; td->td_md.md_saved_intr = intr; } else td->td_md.md_spinlock_count++; critical_enter(); } void spinlock_exit(void) { struct thread *td; int intr; td = curthread; critical_exit(); intr = td->td_md.md_saved_intr; td->td_md.md_spinlock_count--; if (td->td_md.md_spinlock_count == 0) intr_restore(intr); } void map_vhpt(uintptr_t vhpt) { pt_entry_t pte; uint64_t psr; pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RW; pte |= vhpt & PTE_PPN_MASK; __asm __volatile("ptr.d %0,%1" :: "r"(vhpt), - "r"(IA64_ID_PAGE_SHIFT<<2)); + "r"(pmap_vhpt_log2size << 2)); __asm __volatile("mov %0=psr" : "=r"(psr)); __asm __volatile("rsm psr.ic|psr.i"); ia64_srlz_i(); ia64_set_ifa(vhpt); - ia64_set_itir(IA64_ID_PAGE_SHIFT << 2); + ia64_set_itir(pmap_vhpt_log2size << 2); ia64_srlz_d(); - __asm __volatile("itr.d dtr[%0]=%1" :: "r"(2), "r"(pte)); + __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte)); __asm __volatile("mov psr.l=%0" :: "r" (psr)); ia64_srlz_i(); } void map_pal_code(void) { pt_entry_t pte; + vm_offset_t va; + vm_size_t sz; uint64_t psr; + u_int shft; - if (ia64_pal_base == 0) + if (ia64_pal_size == 0) return; + va = IA64_PHYS_TO_RR7(ia64_pal_base); + + sz = ia64_pal_size; + shft = 0; + while (sz > 1) { + shft++; + sz >>= 1; + } + pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RWX; pte |= ia64_pal_base & PTE_PPN_MASK; - __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" :: - "r"(IA64_PHYS_TO_RR7(ia64_pal_base)), "r"(IA64_ID_PAGE_SHIFT<<2)); + __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" :: "r"(va), "r"(shft<<2)); __asm __volatile("mov %0=psr" : "=r"(psr)); __asm __volatile("rsm psr.ic|psr.i"); ia64_srlz_i(); - ia64_set_ifa(IA64_PHYS_TO_RR7(ia64_pal_base)); - ia64_set_itir(IA64_ID_PAGE_SHIFT << 2); + ia64_set_ifa(va); + ia64_set_itir(shft << 2); ia64_srlz_d(); - __asm __volatile("itr.d dtr[%0]=%1" :: "r"(1), "r"(pte)); + __asm __volatile("itr.d dtr[%0]=%1" :: "r"(4), "r"(pte)); ia64_srlz_d(); __asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(pte)); __asm __volatile("mov psr.l=%0" :: "r" (psr)); ia64_srlz_i(); } void map_gateway_page(void) { pt_entry_t pte; uint64_t psr; pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_X_RX; - pte |= (uint64_t)ia64_gateway_page & PTE_PPN_MASK; + pte |= ia64_tpa((uint64_t)ia64_gateway_page) & PTE_PPN_MASK; __asm __volatile("ptr.d %0,%1; ptr.i %0,%1" :: "r"(VM_MAXUSER_ADDRESS), "r"(PAGE_SHIFT << 2)); __asm __volatile("mov %0=psr" : "=r"(psr)); __asm __volatile("rsm psr.ic|psr.i"); ia64_srlz_i(); ia64_set_ifa(VM_MAXUSER_ADDRESS); ia64_set_itir(PAGE_SHIFT << 2); ia64_srlz_d(); - __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), 
"r"(pte)); + __asm __volatile("itr.d dtr[%0]=%1" :: "r"(5), "r"(pte)); ia64_srlz_d(); - __asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(pte)); + __asm __volatile("itr.i itr[%0]=%1" :: "r"(2), "r"(pte)); __asm __volatile("mov psr.l=%0" :: "r" (psr)); ia64_srlz_i(); /* Expose the mapping to userland in ar.k5 */ ia64_set_k5(VM_MAXUSER_ADDRESS); } static u_int freq_ratio(u_long base, u_long ratio) { u_long f; f = (base * (ratio >> 32)) / (ratio & 0xfffffffful); return ((f + 500000) / 1000000); } static void calculate_frequencies(void) { struct ia64_sal_result sal; struct ia64_pal_result pal; sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0); pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0); if (sal.sal_status == 0 && pal.pal_status == 0) { if (bootverbose) { printf("Platform clock frequency %ld Hz\n", sal.sal_result[0]); printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, " "ITC ratio %ld/%ld\n", pal.pal_result[0] >> 32, pal.pal_result[0] & ((1L << 32) - 1), pal.pal_result[1] >> 32, pal.pal_result[1] & ((1L << 32) - 1), pal.pal_result[2] >> 32, pal.pal_result[2] & ((1L << 32) - 1)); } cpu_freq = freq_ratio(sal.sal_result[0], pal.pal_result[0]); bus_freq = freq_ratio(sal.sal_result[0], pal.pal_result[1]); itc_freq = freq_ratio(sal.sal_result[0], pal.pal_result[2]); } } struct ia64_init_return ia64_init(void) { struct ia64_init_return ret; int phys_avail_cnt; vm_offset_t kernstart, kernend; vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1; char *p; struct efi_md *md; int metadata_missing; /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */ /* * TODO: Disable interrupts, floating point etc. * Maybe flush cache and tlb */ ia64_set_fpsr(IA64_FPSR_DEFAULT); /* * TODO: Get critical system information (if possible, from the * information provided by the boot program). */ /* - * pa_bootinfo is the physical address of the bootinfo block as - * passed to us by the loader and set in locore.s. - */ - bootinfo = (struct bootinfo *)(IA64_PHYS_TO_RR7(pa_bootinfo)); - - if (bootinfo->bi_magic != BOOTINFO_MAGIC || bootinfo->bi_version != 1) { - bzero(bootinfo, sizeof(*bootinfo)); - bootinfo->bi_kernend = (vm_offset_t)round_page(_end); - } - - /* * Look for the I/O ports first - we need them for console * probing. */ for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) { switch (md->md_type) { case EFI_MD_TYPE_IOPORT: ia64_port_base = (uintptr_t)pmap_mapdev(md->md_phys, md->md_pages * EFI_PAGE_SIZE); break; case EFI_MD_TYPE_PALCODE: + ia64_pal_size = md->md_pages * EFI_PAGE_SIZE; ia64_pal_base = md->md_phys; break; } } metadata_missing = 0; if (bootinfo->bi_modulep) preload_metadata = (caddr_t)bootinfo->bi_modulep; else metadata_missing = 1; if (envmode == 0 && bootinfo->bi_envp) kern_envp = (caddr_t)bootinfo->bi_envp; else kern_envp = static_env; /* * Look at arguments passed to us and compute boothowto. */ boothowto = bootinfo->bi_boothowto; if (boothowto & RB_VERBOSE) bootverbose = 1; /* * Find the beginning and end of the kernel. */ kernstart = trunc_page(kernel_text); #ifdef DDB ksym_start = bootinfo->bi_symtab; ksym_end = bootinfo->bi_esymtab; kernend = (vm_offset_t)round_page(ksym_end); #else kernend = (vm_offset_t)round_page(_end); #endif /* But if the bootstrap tells us otherwise, believe it! */ if (bootinfo->bi_kernend) kernend = round_page(bootinfo->bi_kernend); /* + * Region 6 is direct mapped UC and region 7 is direct mapped + * WC. The details of this is controlled by the Alt {I,D}TLB + * handlers. 
Here we just make sure that they have the largest + * possible page size to minimise TLB usage. + */ + ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (PAGE_SHIFT << 2)); + ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (PAGE_SHIFT << 2)); + ia64_srlz_d(); + + /* + * Wire things up so we can call the firmware. + */ + map_pal_code(); + efi_boot_minimal(bootinfo->bi_systab); + ia64_xiv_init(); + ia64_sal_init(); + calculate_frequencies(); + + /* * Setup the PCPU data for the bootstrap processor. It is needed * by printf(). Also, since printf() has critical sections, we * need to initialize at least pc_curthread. */ pcpup = &pcpu0; ia64_set_k4((u_int64_t)pcpup); pcpu_init(pcpup, 0, sizeof(pcpu0)); dpcpu_init((void *)kernend, 0); + PCPU_SET(md.lid, ia64_get_lid()); kernend += DPCPU_SIZE; PCPU_SET(curthread, &thread0); /* * Initialize the console before we print anything out. */ cninit(); /* OUTPUT NOW ALLOWED */ - - if (ia64_pal_base != 0) { - ia64_pal_base &= ~IA64_ID_PAGE_MASK; - /* - * We use a TR to map the first 256M of memory - this might - * cover the palcode too. - */ - if (ia64_pal_base == 0) - printf("PAL code mapped by the kernel's TR\n"); - } else - printf("PAL code not found\n"); - - /* - * Wire things up so we can call the firmware. - */ - map_pal_code(); - efi_boot_minimal(bootinfo->bi_systab); - ia64_xiv_init(); - ia64_sal_init(); - calculate_frequencies(); if (metadata_missing) printf("WARNING: loader(8) metadata is missing!\n"); /* Get FPSWA interface */ fpswa_iface = (bootinfo->bi_fpswa == 0) ? NULL : (struct fpswa_iface *)IA64_PHYS_TO_RR7(bootinfo->bi_fpswa); /* Init basic tunables, including hz */ init_param1(); p = getenv("kernelname"); if (p != NULL) { strlcpy(kernelname, p, sizeof(kernelname)); freeenv(p); } kernstartpfn = atop(IA64_RR_MASK(kernstart)); kernendpfn = atop(IA64_RR_MASK(kernend)); /* * Size the memory regions and load phys_avail[] with the results. */ /* * Find out how much memory is available, by looking at * the memory descriptors. */ #ifdef DEBUG_MD printf("Memory descriptor count: %d\n", mdcount); #endif phys_avail_cnt = 0; for (md = efi_md_first(); md != NULL; md = efi_md_next(md)) { #ifdef DEBUG_MD printf("MD %p: type %d pa 0x%lx cnt 0x%lx\n", md, md->md_type, md->md_phys, md->md_pages); #endif pfn0 = ia64_btop(round_page(md->md_phys)); pfn1 = ia64_btop(trunc_page(md->md_phys + md->md_pages * 4096)); if (pfn1 <= pfn0) continue; if (md->md_type != EFI_MD_TYPE_FREE) continue; /* * We have a memory descriptor that describes conventional * memory that is for general use. We must determine if the * loader has put the kernel in this region. */ physmem += (pfn1 - pfn0); if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) { /* * Must compute the location of the kernel * within the segment. */ #ifdef DEBUG_MD printf("Descriptor %p contains kernel\n", mp); #endif if (pfn0 < kernstartpfn) { /* * There is a chunk before the kernel. */ #ifdef DEBUG_MD printf("Loading chunk before kernel: " "0x%lx / 0x%lx\n", pfn0, kernstartpfn); #endif phys_avail[phys_avail_cnt] = ia64_ptob(pfn0); phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn); phys_avail_cnt += 2; } if (kernendpfn < pfn1) { /* * There is a chunk after the kernel. */ #ifdef DEBUG_MD printf("Loading chunk after kernel: " "0x%lx / 0x%lx\n", kernendpfn, pfn1); #endif phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn); phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1); phys_avail_cnt += 2; } } else { /* * Just load this cluster as one chunk. 
*/ #ifdef DEBUG_MD printf("Loading descriptor %d: 0x%lx / 0x%lx\n", i, pfn0, pfn1); #endif phys_avail[phys_avail_cnt] = ia64_ptob(pfn0); phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1); phys_avail_cnt += 2; } } phys_avail[phys_avail_cnt] = 0; Maxmem = physmem; init_param2(physmem); /* * Initialize error message buffer (at end of core). */ msgbufp = (struct msgbuf *)pmap_steal_memory(msgbufsize); msgbufinit(msgbufp, msgbufsize); proc_linkup0(&proc0, &thread0); /* * Init mapping for kernel stack for proc 0 */ thread0.td_kstack = pmap_steal_memory(KSTACK_PAGES * PAGE_SIZE); thread0.td_kstack_pages = KSTACK_PAGES; mutex_init(); /* * Initialize the rest of proc 0's PCB. * * Set the kernel sp, reserving space for an (empty) trapframe, * and make proc0's trapframe pointer point to it for sanity. * Initialise proc0's backing store to start after u area. */ cpu_thread_alloc(&thread0); thread0.td_frame->tf_flags = FRAME_SYSCALL; thread0.td_pcb->pcb_special.sp = (u_int64_t)thread0.td_frame - 16; thread0.td_pcb->pcb_special.bspstore = thread0.td_kstack; /* * Initialize the virtual memory system. */ pmap_bootstrap(); /* * Initialize debuggers, and break into them if appropriate. */ kdb_init(); #ifdef KDB if (boothowto & RB_KDB) kdb_enter(KDB_WHY_BOOTFLAGS, "Boot flags requested debugger\n"); #endif ia64_set_tpr(0); ia64_srlz_d(); ret.bspstore = thread0.td_pcb->pcb_special.bspstore; ret.sp = thread0.td_pcb->pcb_special.sp; return (ret); } uint64_t ia64_get_hcdp(void) { return (bootinfo->bi_hcdp); } void bzero(void *buf, size_t len) { caddr_t p = buf; while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) { *p++ = 0; len--; } while (len >= sizeof(u_long) * 8) { *(u_long*) p = 0; *((u_long*) p + 1) = 0; *((u_long*) p + 2) = 0; *((u_long*) p + 3) = 0; len -= sizeof(u_long) * 8; *((u_long*) p + 4) = 0; *((u_long*) p + 5) = 0; *((u_long*) p + 6) = 0; *((u_long*) p + 7) = 0; p += sizeof(u_long) * 8; } while (len >= sizeof(u_long)) { *(u_long*) p = 0; len -= sizeof(u_long); p += sizeof(u_long); } while (len) { *p++ = 0; len--; } } u_int ia64_itc_freq(void) { return (itc_freq); } void DELAY(int n) { u_int64_t start, end, now; sched_pin(); start = ia64_get_itc(); end = start + itc_freq * n; /* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */ do { now = ia64_get_itc(); } while (now < end || (now > start && end < start)); sched_unpin(); } /* * Send an interrupt (signal) to a process. */ void sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) { struct proc *p; struct thread *td; struct trapframe *tf; struct sigacts *psp; struct sigframe sf, *sfp; u_int64_t sbs, sp; int oonstack; int sig; u_long code; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); sig = ksi->ksi_signo; code = ksi->ksi_code; psp = p->p_sigacts; mtx_assert(&psp->ps_mtx, MA_OWNED); tf = td->td_frame; sp = tf->tf_special.sp; oonstack = sigonstack(sp); sbs = 0; /* save user context */ bzero(&sf, sizeof(struct sigframe)); sf.sf_uc.uc_sigmask = *mask; sf.sf_uc.uc_stack = td->td_sigstk; sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; /* * Allocate and validate space for the signal handler * context. Note that if the stack is in P0 space, the * call to grow() is a nop, and the useracc() check * will fail if the process has not already allocated * the space with a `brk'. 
*/ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sbs = (u_int64_t)td->td_sigstk.ss_sp; sbs = (sbs + 15) & ~15; sfp = (struct sigframe *)(sbs + td->td_sigstk.ss_size); #if defined(COMPAT_43) td->td_sigstk.ss_flags |= SS_ONSTACK; #endif } else sfp = (struct sigframe *)sp; sfp = (struct sigframe *)((u_int64_t)(sfp - 1) & ~15); /* Fill in the siginfo structure for POSIX handlers. */ if (SIGISMEMBER(psp->ps_siginfo, sig)) { sf.sf_si = ksi->ksi_info; sf.sf_si.si_signo = sig; /* * XXX this shouldn't be here after code in trap.c * is fixed */ sf.sf_si.si_addr = (void*)tf->tf_special.ifa; code = (u_int64_t)&sfp->sf_si; } mtx_unlock(&psp->ps_mtx); PROC_UNLOCK(p); get_mcontext(td, &sf.sf_uc.uc_mcontext, 0); /* Copy the frame out to userland. */ if (copyout(&sf, sfp, sizeof(sf)) != 0) { /* * Process has trashed its stack; give it an illegal * instruction to halt it in its tracks. */ PROC_LOCK(p); sigexit(td, SIGILL); return; } if ((tf->tf_flags & FRAME_SYSCALL) == 0) { tf->tf_special.psr &= ~IA64_PSR_RI; tf->tf_special.iip = ia64_get_k5() + ((uint64_t)break_sigtramp - (uint64_t)ia64_gateway_page); } else tf->tf_special.iip = ia64_get_k5() + ((uint64_t)epc_sigtramp - (uint64_t)ia64_gateway_page); /* * Setup the trapframe to return to the signal trampoline. We pass * information to the trampoline in the following registers: * * gp new backing store or NULL * r8 signal number * r9 signal code or siginfo pointer * r10 signal handler (function descriptor) */ tf->tf_special.sp = (u_int64_t)sfp - 16; tf->tf_special.gp = sbs; tf->tf_special.bspstore = sf.sf_uc.uc_mcontext.mc_special.bspstore; tf->tf_special.ndirty = 0; tf->tf_special.rnat = sf.sf_uc.uc_mcontext.mc_special.rnat; tf->tf_scratch.gr8 = sig; tf->tf_scratch.gr9 = code; tf->tf_scratch.gr10 = (u_int64_t)catcher; PROC_LOCK(p); mtx_lock(&psp->ps_mtx); } /* * System call to cleanup state after a signal * has been taken. Reset signal mask and * stack state from context left by sendsig (above). * Return to previous pc and psl as specified by * context left by sendsig. Check carefully to * make sure that the user has not modified the * state to gain improper privileges. * * MPSAFE */ int sigreturn(struct thread *td, struct sigreturn_args /* { ucontext_t *sigcntxp; } */ *uap) { ucontext_t uc; struct trapframe *tf; struct pcb *pcb; tf = td->td_frame; pcb = td->td_pcb; /* * Fetch the entire context structure at once for speed. * We don't use a normal argument to simplify RSE handling. */ if (copyin(uap->sigcntxp, (caddr_t)&uc, sizeof(uc))) return (EFAULT); set_mcontext(td, &uc.uc_mcontext); #if defined(COMPAT_43) if (sigonstack(tf->tf_special.sp)) td->td_sigstk.ss_flags |= SS_ONSTACK; else td->td_sigstk.ss_flags &= ~SS_ONSTACK; #endif kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0); return (EJUSTRETURN); } #ifdef COMPAT_FREEBSD4 int freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap) { return sigreturn(td, (struct sigreturn_args *)uap); } #endif /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter * the debugger. We have the context in the trapframe, but base the trace * on the PCB. The PCB doesn't have to be perfect, as long as it contains * enough for a backtrace. 
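 * Concretely, only the special registers and the callee-saved (preserved)
 * state are copied; __spare is set to ~0UL as a marker for the unwinder
 * (see unwind.c).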
*/ void makectx(struct trapframe *tf, struct pcb *pcb) { pcb->pcb_special = tf->tf_special; pcb->pcb_special.__spare = ~0UL; /* XXX see unwind.c */ save_callee_saved(&pcb->pcb_preserved); save_callee_saved_fp(&pcb->pcb_preserved_fp); } int ia64_flush_dirty(struct thread *td, struct _special *r) { struct iovec iov; struct uio uio; uint64_t bspst, kstk, rnat; int error, locked; if (r->ndirty == 0) return (0); kstk = td->td_kstack + (r->bspstore & 0x1ffUL); if (td == curthread) { __asm __volatile("mov ar.rsc=0;;"); __asm __volatile("mov %0=ar.bspstore" : "=r"(bspst)); /* Make sure we have all the user registers written out. */ if (bspst - kstk < r->ndirty) { __asm __volatile("flushrs;;"); __asm __volatile("mov %0=ar.bspstore" : "=r"(bspst)); } __asm __volatile("mov %0=ar.rnat;;" : "=r"(rnat)); __asm __volatile("mov ar.rsc=3"); error = copyout((void*)kstk, (void*)r->bspstore, r->ndirty); kstk += r->ndirty; r->rnat = (bspst > kstk && (bspst & 0x1ffL) < (kstk & 0x1ffL)) ? *(uint64_t*)(kstk | 0x1f8L) : rnat; } else { locked = PROC_LOCKED(td->td_proc); if (!locked) PHOLD(td->td_proc); iov.iov_base = (void*)(uintptr_t)kstk; iov.iov_len = r->ndirty; uio.uio_iov = &iov; uio.uio_iovcnt = 1; uio.uio_offset = r->bspstore; uio.uio_resid = r->ndirty; uio.uio_segflg = UIO_SYSSPACE; uio.uio_rw = UIO_WRITE; uio.uio_td = td; error = proc_rwmem(td->td_proc, &uio); /* * XXX proc_rwmem() doesn't currently return ENOSPC, * so I think it can bogusly return 0. Neither do * we allow short writes. */ if (uio.uio_resid != 0 && error == 0) error = ENOSPC; if (!locked) PRELE(td->td_proc); } r->bspstore += r->ndirty; r->ndirty = 0; return (error); } int get_mcontext(struct thread *td, mcontext_t *mc, int flags) { struct trapframe *tf; int error; tf = td->td_frame; bzero(mc, sizeof(*mc)); mc->mc_special = tf->tf_special; error = ia64_flush_dirty(td, &mc->mc_special); if (tf->tf_flags & FRAME_SYSCALL) { mc->mc_flags |= _MC_FLAGS_SYSCALL_CONTEXT; mc->mc_scratch = tf->tf_scratch; if (flags & GET_MC_CLEAR_RET) { mc->mc_scratch.gr8 = 0; mc->mc_scratch.gr9 = 0; mc->mc_scratch.gr10 = 0; mc->mc_scratch.gr11 = 0; } } else { mc->mc_flags |= _MC_FLAGS_ASYNC_CONTEXT; mc->mc_scratch = tf->tf_scratch; mc->mc_scratch_fp = tf->tf_scratch_fp; /* * XXX If the thread never used the high FP registers, we * probably shouldn't waste time saving them. */ ia64_highfp_save(td); mc->mc_flags |= _MC_FLAGS_HIGHFP_VALID; mc->mc_high_fp = td->td_pcb->pcb_high_fp; } save_callee_saved(&mc->mc_preserved); save_callee_saved_fp(&mc->mc_preserved_fp); return (error); } int set_mcontext(struct thread *td, const mcontext_t *mc) { struct _special s; struct trapframe *tf; uint64_t psrmask; tf = td->td_frame; KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0, ("Whoa there! We have more than 8KB of dirty registers!")); s = mc->mc_special; /* * Only copy the user mask and the restart instruction bit from * the new context. */ psrmask = IA64_PSR_BE | IA64_PSR_UP | IA64_PSR_AC | IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_RI; s.psr = (tf->tf_special.psr & ~psrmask) | (s.psr & psrmask); /* We don't have any dirty registers of the new context. */ s.ndirty = 0; if (mc->mc_flags & _MC_FLAGS_ASYNC_CONTEXT) { /* * We can get an async context passed to us while we * entered the kernel through a syscall: sigreturn(2) * takes contexts that could previously be the result of * a trap or interrupt. * Hence, we cannot assert that the trapframe is not * a syscall frame, but we can assert that it's at * least an expected syscall. 
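 * In practice: if this is a syscall frame, gr15 (the syscall number) must be
 * SYS_sigreturn, and FRAME_SYSCALL is cleared so the async context gets
 * restored in full.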
*/ if (tf->tf_flags & FRAME_SYSCALL) { KASSERT(tf->tf_scratch.gr15 == SYS_sigreturn, ("foo")); tf->tf_flags &= ~FRAME_SYSCALL; } tf->tf_scratch = mc->mc_scratch; tf->tf_scratch_fp = mc->mc_scratch_fp; if (mc->mc_flags & _MC_FLAGS_HIGHFP_VALID) td->td_pcb->pcb_high_fp = mc->mc_high_fp; } else { KASSERT((tf->tf_flags & FRAME_SYSCALL) != 0, ("foo")); if ((mc->mc_flags & _MC_FLAGS_SYSCALL_CONTEXT) == 0) { s.cfm = tf->tf_special.cfm; s.iip = tf->tf_special.iip; tf->tf_scratch.gr15 = 0; /* Clear syscall nr. */ } else tf->tf_scratch = mc->mc_scratch; } tf->tf_special = s; restore_callee_saved(&mc->mc_preserved); restore_callee_saved_fp(&mc->mc_preserved_fp); return (0); } /* * Clear registers on exec. */ void exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) { struct trapframe *tf; uint64_t *ksttop, *kst; tf = td->td_frame; ksttop = (uint64_t*)(td->td_kstack + tf->tf_special.ndirty + (tf->tf_special.bspstore & 0x1ffUL)); /* * We can ignore up to 8KB of dirty registers by masking off the * lower 13 bits in exception_restore() or epc_syscall(). This * should be enough for a couple of years, but if there are more * than 8KB of dirty registers, we lose track of the bottom of * the kernel stack. The solution is to copy the active part of * the kernel stack down 1 page (or 2, but not more than that) * so that we always have less than 8KB of dirty registers. */ KASSERT((tf->tf_special.ndirty & ~PAGE_MASK) == 0, ("Whoa there! We have more than 8KB of dirty registers!")); bzero(&tf->tf_special, sizeof(tf->tf_special)); if ((tf->tf_flags & FRAME_SYSCALL) == 0) { /* break syscalls. */ bzero(&tf->tf_scratch, sizeof(tf->tf_scratch)); bzero(&tf->tf_scratch_fp, sizeof(tf->tf_scratch_fp)); tf->tf_special.cfm = (1UL<<63) | (3UL<<7) | 3UL; tf->tf_special.bspstore = IA64_BACKINGSTORE; /* * Copy the arguments onto the kernel register stack so that * they get loaded by the loadrs instruction. Skip over the * NaT collection points. */ kst = ksttop - 1; if (((uintptr_t)kst & 0x1ff) == 0x1f8) *kst-- = 0; *kst-- = 0; if (((uintptr_t)kst & 0x1ff) == 0x1f8) *kst-- = 0; *kst-- = imgp->ps_strings; if (((uintptr_t)kst & 0x1ff) == 0x1f8) *kst-- = 0; *kst = stack; tf->tf_special.ndirty = (ksttop - kst) << 3; } else { /* epc syscalls (default). */ tf->tf_special.cfm = (3UL<<62) | (3UL<<7) | 3UL; tf->tf_special.bspstore = IA64_BACKINGSTORE + 24; /* * Write values for out0, out1 and out2 to the user's backing * store and arrange for them to be restored into the user's * initial register frame. * Assumes that (bspstore & 0x1f8) < 0x1e0. 
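 * (The RSE plants a NaT collection word at every address whose offset within
 * a 512-byte block is 0x1f8; the assumption is there to keep the three
 * argument slots written below clear of such a word.)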
*/ suword((caddr_t)tf->tf_special.bspstore - 24, stack); suword((caddr_t)tf->tf_special.bspstore - 16, imgp->ps_strings); suword((caddr_t)tf->tf_special.bspstore - 8, 0); } tf->tf_special.iip = imgp->entry_addr; tf->tf_special.sp = (stack & ~15) - 16; tf->tf_special.rsc = 0xf; tf->tf_special.fpsr = IA64_FPSR_DEFAULT; tf->tf_special.psr = IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN | IA64_PSR_CPL_USER; } int ptrace_set_pc(struct thread *td, unsigned long addr) { uint64_t slot; switch (addr & 0xFUL) { case 0: slot = IA64_PSR_RI_0; break; case 1: /* XXX we need to deal with MLX bundles here */ slot = IA64_PSR_RI_1; break; case 2: slot = IA64_PSR_RI_2; break; default: return (EINVAL); } td->td_frame->tf_special.iip = addr & ~0x0FULL; td->td_frame->tf_special.psr = (td->td_frame->tf_special.psr & ~IA64_PSR_RI) | slot; return (0); } int ptrace_single_step(struct thread *td) { struct trapframe *tf; /* * There's no way to set single stepping when we're leaving the * kernel through the EPC syscall path. The way we solve this is * by enabling the lower-privilege trap so that we re-enter the * kernel as soon as the privilege level changes. See trap.c for * how we proceed from there. */ tf = td->td_frame; if (tf->tf_flags & FRAME_SYSCALL) tf->tf_special.psr |= IA64_PSR_LP; else tf->tf_special.psr |= IA64_PSR_SS; return (0); } int ptrace_clear_single_step(struct thread *td) { struct trapframe *tf; /* * Clear any and all status bits we may use to implement single * stepping. */ tf = td->td_frame; tf->tf_special.psr &= ~IA64_PSR_SS; tf->tf_special.psr &= ~IA64_PSR_LP; tf->tf_special.psr &= ~IA64_PSR_TB; return (0); } int fill_regs(struct thread *td, struct reg *regs) { struct trapframe *tf; tf = td->td_frame; regs->r_special = tf->tf_special; regs->r_scratch = tf->tf_scratch; save_callee_saved(®s->r_preserved); return (0); } int set_regs(struct thread *td, struct reg *regs) { struct trapframe *tf; int error; tf = td->td_frame; error = ia64_flush_dirty(td, &tf->tf_special); if (!error) { tf->tf_special = regs->r_special; tf->tf_special.bspstore += tf->tf_special.ndirty; tf->tf_special.ndirty = 0; tf->tf_scratch = regs->r_scratch; restore_callee_saved(®s->r_preserved); } return (error); } int fill_dbregs(struct thread *td, struct dbreg *dbregs) { return (ENOSYS); } int set_dbregs(struct thread *td, struct dbreg *dbregs) { return (ENOSYS); } int fill_fpregs(struct thread *td, struct fpreg *fpregs) { struct trapframe *frame = td->td_frame; struct pcb *pcb = td->td_pcb; /* Save the high FP registers. */ ia64_highfp_save(td); fpregs->fpr_scratch = frame->tf_scratch_fp; save_callee_saved_fp(&fpregs->fpr_preserved); fpregs->fpr_high = pcb->pcb_high_fp; return (0); } int set_fpregs(struct thread *td, struct fpreg *fpregs) { struct trapframe *frame = td->td_frame; struct pcb *pcb = td->td_pcb; /* Throw away the high FP registers (should be redundant). 
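 * pcb_high_fp (f32-f127) is overwritten from fpregs->fpr_high just below, so
 * any lazily saved copy of the high partition can simply be dropped.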
*/ ia64_highfp_drop(td); frame->tf_scratch_fp = fpregs->fpr_scratch; restore_callee_saved_fp(&fpregs->fpr_preserved); pcb->pcb_high_fp = fpregs->fpr_high; return (0); } void ia64_sync_icache(vm_offset_t va, vm_offset_t sz) { vm_offset_t lim; if (!ia64_sync_icache_needed) return; lim = va + sz; while (va < lim) { ia64_fc_i(va); va += 32; /* XXX */ } ia64_sync_i(); ia64_srlz_i(); } Index: head/sys/ia64/ia64/mp_locore.S =================================================================== --- head/sys/ia64/ia64/mp_locore.S (nonexistent) +++ head/sys/ia64/ia64/mp_locore.S (revision 221271) @@ -0,0 +1,275 @@ +/*- + * Copyright (c) 2011 Marcel Moolenaar + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include +#include + +/* + * AP wake-up entry point. The handoff state is similar as for the BSP, + * as described on page 3-9 of the IPF SAL Specification. The difference + * lies in the contents of register b0. For APs this register holds the + * return address into the SAL rendezvous routine. + * + * Note that we're responsible for clearing the IRR bit by reading cr.ivr + * and issuing the EOI to the local SAPIC. 
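+ * In addition, gp holds the physical address of ia64_ap_state: cpu_mp_start()
+ * registers os_boot_rendez via SAL_SET_VECTORS and passes ia64_tpa() of the
+ * structure as the handler's gp. The st8 [gp] stores below leave a breadcrumb
+ * in the first field (as_trace) so the BSP can report how far an AP got if it
+ * never wakes up, and the ld8 pairs consume the mapping descriptors the BSP
+ * prepared. Byte offsets assumed by this code: 0 as_trace, 8 as_pgtbl_pte,
+ * 16 as_pgtbl_itir, 24 as_text_va, 32 as_text_pte, 40 as_text_itir,
+ * 48 as_data_va, 56 as_data_pte, 64 as_data_itir, 72 as_kstack,
+ * 80 as_kstack_top.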
+ */ + .align 32 +ENTRY_NOPROFILE(os_boot_rendez,0) +{ .mmi + st8 [gp] = gp // trace = 0x00 + mov r8 = cr.ivr // clear IRR bit + add r2 = 8, gp + ;; +} +{ .mmi + srlz.d + mov cr.eoi = r0 // ACK the wake-up + add r3 = 16, gp + ;; +} +{ .mmi + srlz.d + rsm IA64_PSR_IC | IA64_PSR_I + mov r16 = (IA64_PBVM_RR << 8) | (IA64_PBVM_PAGE_SHIFT << 2) + ;; +} +{ .mmi + srlz.d + st8 [gp] = r2 // trace = 0x08 + dep.z r17 = IA64_PBVM_RR, 61, 3 + ;; +} +{ .mlx + mov rr[r17] = r16 + movl r18 = IA64_PBVM_PGTBL + ;; +} +{ .mmi + srlz.i + ;; + st8 [gp] = r3 // trace = 0x10 + nop 0 + ;; +} +{ .mmi + ld8 r16 = [r2], 16 // as_pgtbl_pte + ld8 r17 = [r3], 16 // as_pgtbl_itir + nop 0 + ;; +} +{ .mmi + mov cr.itir = r17 + mov cr.ifa = r18 + nop 0 + ;; +} +{ .mmi + srlz.d + ptr.d r18, r17 + nop 0 + ;; +} +{ .mmi + srlz.d + st8 [gp] = r2 // trace = 0x18 + mov r8 = r0 + ;; +} +{ .mmi + itr.d dtr[r8] = r16 + ;; + srlz.d + mov r9 = r0 + ;; +} +{ .mmi + ld8 r16 = [r2], 16 // as_text_va + st8 [gp] = r3 // trace = 0x20 + add r8 = 1, r8 + ;; +} +{ .mmi + ld8 r17 = [r3], 16 // as_text_pte + ld8 r18 = [r2], 16 // as_text_itir + nop 0 + ;; +} +{ .mmi + mov cr.ifa = r16 + mov cr.itir = r18 + nop 0 + ;; +} +{ .mmi + srlz.d + ptr.d r16, r18 + nop 0 + ;; +} +{ .mmi + srlz.d + st8 [gp] = r3 // trace = 0x30 + nop 0 + ;; +} +{ .mmi + itr.d dtr[r8] = r17 + ;; + srlz.d + nop 0 +} +{ .mmi + st8 [gp] = r2 // trace = 0x38 + ptr.i r16, r18 + add r8 = 1, r8 + ;; +} +{ .mmi + srlz.i + ;; + itr.i itr[r9] = r17 + nop 0 + ;; +} +{ .mmi + srlz.i + ;; + ld8 r16 = [r3], 16 // as_data_va + add r9 = 1, r9 + ;; +} +{ .mmi + st8 [gp] = r3 // trace = 0x40 + ld8 r17 = [r2], 16 // as_data_pte + nop 0 + ;; +} +{ .mmi + mov cr.ifa = r16 + ld8 r18 = [r3], 16 // as_data_itir + nop 0 + ;; +} +{ .mmi + mov cr.itir = r18 + ;; + srlz.d + nop 0 + ;; +} +{ .mmi + ptr.d r16, r18 + ;; + srlz.d + mov r19 = IA64_DCR_DEFAULT + ;; +} +{ .mmi + itr.d dtr[r8] = r17 + ;; + srlz.d + add r8 = 1, r8 + ;; +} +{ .mmi + st8 [gp] = r2 // trace = 0x48 + ;; + ld8 r16 = [r2], 16 // as_kstack + nop 0 +} +{ .mmi + ld8 r17 = [r3], 16 // as_kstack_top + mov cr.dcr = r19 + nop 0 + ;; +} +{ .mlx + srlz.i + movl r18 = IA64_PSR_BN | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_IC | \ + IA64_PSR_RT | IA64_PSR_DFH + ;; +} +{ .mlx + mov cr.ipsr = r18 + movl r19 = ia64_vector_table // set up IVT early + ;; +} +{ .mlx + mov cr.iva = r19 + movl r18 = 1f + ;; +} +{ .mmi + mov cr.iip = r18 + mov cr.ifs = r0 + nop 0 + ;; +} +{ .mmb + srlz.d + st8 [gp] = r2 // trace = 0x58 + rfi + ;; +} + + .align 32 +1: +{ .mlx + mov ar.bspstore = r16 + movl gp = __gp + ;; +} +{ .mmi + loadrs + add sp = -16, r17 + nop 0 + ;; +} +{ .mmi + mov ar.rsc = 3 + ;; + alloc r18 = ar.pfs, 0, 0, 0, 0 + ;; +} +{ .mib + nop 0 + nop 0 + br.call.sptk.few rp = ia64_ap_startup + ;; +} + /* NOT REACHED */ +9: +{ .mib + nop 0 + nop 0 + br.sptk 9b + ;; +} +END(os_boot_rendez) Property changes on: head/sys/ia64/ia64/mp_locore.S ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/ia64/ia64/mp_machdep.c =================================================================== --- head/sys/ia64/ia64/mp_machdep.c (revision 221270) +++ head/sys/ia64/ia64/mp_machdep.c (revision 221271) @@ -1,454 +1,513 @@ /*- * Copyright (c) 2001-2005 Marcel Moolenaar * Copyright (c) 2000 Doug Rabson * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include "opt_kstack_pages.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include #include #include #include #include #include #include #include #include #include #include #include #include #include +extern uint64_t bdata[]; + MALLOC_DEFINE(M_SMP, "SMP", "SMP related allocations"); void ia64_ap_startup(void); -#define LID_SAPIC(x) ((u_int)((x) >> 16)) -#define LID_SAPIC_ID(x) ((u_int)((x) >> 24) & 0xff) -#define LID_SAPIC_EID(x) ((u_int)((x) >> 16) & 0xff) -#define LID_SAPIC_SET(id,eid) (((id & 0xff) << 8 | (eid & 0xff)) << 16); -#define LID_SAPIC_MASK 0xffff0000UL +#define SAPIC_ID_GET_ID(x) ((u_int)((x) >> 8) & 0xff) +#define SAPIC_ID_GET_EID(x) ((u_int)(x) & 0xff) +#define SAPIC_ID_SET(id, eid) ((u_int)(((id) & 0xff) << 8) | ((eid) & 0xff)) -/* Variables used by os_boot_rendez and ia64_ap_startup */ -struct pcpu *ap_pcpu; -void *ap_stack; -volatile int ap_delay; -volatile int ap_awake; -volatile int ap_spin; +/* State used to wake and bootstrap APs. 
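+ * A single structure replaces the old ap_pcpu/ap_stack/ap_awake/ap_delay/
+ * ap_spin globals: the BSP fills it in from cpu_mp_start() and the AP reads
+ * it through its physical address in os_boot_rendez (see mp_locore.S). The
+ * SAPIC_ID_* macros above pack the 8-bit id and eid (bits 31:24 and 23:16 of
+ * cr.lid) into the 16-bit (id << 8 | eid) form used to index the processor
+ * interrupt block in ipi_send().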
*/ +struct ia64_ap_state ia64_ap_state; int ia64_ipi_ast; int ia64_ipi_highfp; int ia64_ipi_nmi; int ia64_ipi_preempt; int ia64_ipi_rndzvs; int ia64_ipi_stop; static u_int +sz2shft(uint64_t sz) +{ + uint64_t s; + u_int shft; + + shft = 12; /* Start with 4K */ + s = 1 << shft; + while (s < sz) { + shft++; + s <<= 1; + } + return (shft); +} + +static u_int ia64_ih_ast(struct thread *td, u_int xiv, struct trapframe *tf) { PCPU_INC(md.stats.pcs_nasts); CTR1(KTR_SMP, "IPI_AST, cpuid=%d", PCPU_GET(cpuid)); return (0); } static u_int ia64_ih_highfp(struct thread *td, u_int xiv, struct trapframe *tf) { PCPU_INC(md.stats.pcs_nhighfps); ia64_highfp_save_ipi(); return (0); } static u_int ia64_ih_preempt(struct thread *td, u_int xiv, struct trapframe *tf) { PCPU_INC(md.stats.pcs_npreempts); CTR1(KTR_SMP, "IPI_PREEMPT, cpuid=%d", PCPU_GET(cpuid)); sched_preempt(curthread); return (0); } static u_int ia64_ih_rndzvs(struct thread *td, u_int xiv, struct trapframe *tf) { PCPU_INC(md.stats.pcs_nrdvs); CTR1(KTR_SMP, "IPI_RENDEZVOUS, cpuid=%d", PCPU_GET(cpuid)); smp_rendezvous_action(); return (0); } static u_int ia64_ih_stop(struct thread *td, u_int xiv, struct trapframe *tf) { cpumask_t mybit; PCPU_INC(md.stats.pcs_nstops); mybit = PCPU_GET(cpumask); savectx(PCPU_PTR(md.pcb)); atomic_set_int(&stopped_cpus, mybit); while ((started_cpus & mybit) == 0) cpu_spinwait(); atomic_clear_int(&started_cpus, mybit); atomic_clear_int(&stopped_cpus, mybit); return (0); } struct cpu_group * cpu_topo(void) { return smp_topo_none(); } static void ia64_store_mca_state(void* arg) { struct pcpu *pc = arg; struct thread *td = curthread; /* * ia64_mca_save_state() is CPU-sensitive, so bind ourself to our * target CPU. */ thread_lock(td); sched_bind(td, pc->pc_cpuid); thread_unlock(td); ia64_mca_init_ap(); /* * Get and save the CPU specific MCA records. Should we get the * MCA state for each processor, or just the CMC state? */ ia64_mca_save_state(SAL_INFO_MCA); ia64_mca_save_state(SAL_INFO_CMC); kproc_exit(0); } void ia64_ap_startup(void) { uint64_t vhpt; - pcpup = ap_pcpu; + ia64_ap_state.as_trace = 0x100; + + ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1); + ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (PAGE_SHIFT << 2)); + ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (PAGE_SHIFT << 2)); + ia64_srlz_d(); + + pcpup = ia64_ap_state.as_pcpu; ia64_set_k4((intptr_t)pcpup); + ia64_ap_state.as_trace = 0x108; + vhpt = PCPU_GET(md.vhpt); map_vhpt(vhpt); ia64_set_pta(vhpt + (1 << 8) + (pmap_vhpt_log2size << 2) + 1); ia64_srlz_i(); - ap_awake = 1; - ap_delay = 0; + ia64_ap_state.as_trace = 0x110; + ia64_ap_state.as_awake = 1; + ia64_ap_state.as_delay = 0; + map_pal_code(); map_gateway_page(); ia64_set_fpsr(IA64_FPSR_DEFAULT); /* Wait until it's time for us to be unleashed */ - while (ap_spin) + while (ia64_ap_state.as_spin) cpu_spinwait(); /* Initialize curthread. */ KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread")); PCPU_SET(curthread, PCPU_GET(idlethread)); - atomic_add_int(&ap_awake, 1); + atomic_add_int(&ia64_ap_state.as_awake, 1); while (!smp_started) cpu_spinwait(); CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid)); /* Mask interval timer interrupts on APs. */ ia64_set_itv(0x10000); ia64_set_tpr(0); ia64_srlz_d(); ia64_enable_intr(); sched_throw(NULL); /* NOTREACHED */ } void cpu_mp_setmaxid(void) { /* * Count the number of processors in the system by walking the ACPI * tables. Note that we record the actual number of processors, even * if this is larger than MAXCPU. We only activate MAXCPU processors. 
*/ mp_ncpus = ia64_count_cpus(); /* * Set the largest cpuid we're going to use. This is necessary for * VM initialization. */ mp_maxid = min(mp_ncpus, MAXCPU) - 1; } int cpu_mp_probe(void) { /* * If there's only 1 processor, or we don't have a wake-up vector, * we're not going to enable SMP. Note that no wake-up vector can * also mean that the wake-up mechanism is not supported. In this * case we can have multiple processors, but we simply can't wake * them up... */ return (mp_ncpus > 1 && ia64_ipi_wakeup != 0); } void -cpu_mp_add(u_int acpiid, u_int apicid, u_int apiceid) +cpu_mp_add(u_int acpi_id, u_int id, u_int eid) { struct pcpu *pc; - u_int64_t lid; void *dpcpu; - u_int cpuid; + u_int cpuid, sapic_id; - lid = LID_SAPIC_SET(apicid, apiceid); - cpuid = ((ia64_get_lid() & LID_SAPIC_MASK) == lid) ? 0 : smp_cpus++; + sapic_id = SAPIC_ID_SET(id, eid); + cpuid = (IA64_LID_GET_SAPIC_ID(ia64_get_lid()) == sapic_id) + ? 0 : smp_cpus++; KASSERT((all_cpus & (1UL << cpuid)) == 0, - ("%s: cpu%d already in CPU map", __func__, acpiid)); + ("%s: cpu%d already in CPU map", __func__, acpi_id)); if (cpuid != 0) { pc = (struct pcpu *)malloc(sizeof(*pc), M_SMP, M_WAITOK); pcpu_init(pc, cpuid, sizeof(*pc)); dpcpu = (void *)kmem_alloc(kernel_map, DPCPU_SIZE); dpcpu_init(dpcpu, cpuid); } else pc = pcpup; - pc->pc_acpi_id = acpiid; - pc->pc_md.lid = lid; - all_cpus |= (1UL << cpuid); + pc->pc_acpi_id = acpi_id; + pc->pc_md.lid = IA64_LID_SET_SAPIC_ID(sapic_id); + + all_cpus |= (1UL << pc->pc_cpuid); } void cpu_mp_announce() { struct pcpu *pc; + uint32_t sapic_id; int i; for (i = 0; i <= mp_maxid; i++) { pc = pcpu_find(i); if (pc != NULL) { + sapic_id = IA64_LID_GET_SAPIC_ID(pc->pc_md.lid); printf("cpu%d: ACPI Id=%x, SAPIC Id=%x, SAPIC Eid=%x", - i, pc->pc_acpi_id, LID_SAPIC_ID(pc->pc_md.lid), - LID_SAPIC_EID(pc->pc_md.lid)); + i, pc->pc_acpi_id, SAPIC_ID_GET_ID(sapic_id), + SAPIC_ID_GET_EID(sapic_id)); if (i == 0) printf(" (BSP)\n"); else printf("\n"); } } } void cpu_mp_start() { + struct ia64_sal_result result; + struct ia64_fdesc *fd; struct pcpu *pc; + uintptr_t state; + u_char *stp; - ap_spin = 1; + state = ia64_tpa((uintptr_t)&ia64_ap_state); + fd = (struct ia64_fdesc *) os_boot_rendez; + result = ia64_sal_entry(SAL_SET_VECTORS, SAL_OS_BOOT_RENDEZ, + ia64_tpa(fd->func), state, 0, 0, 0, 0); + ia64_ap_state.as_pgtbl_pte = PTE_PRESENT | PTE_MA_WB | + PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RW | + (bootinfo->bi_pbvm_pgtbl & PTE_PPN_MASK); + ia64_ap_state.as_pgtbl_itir = sz2shft(bootinfo->bi_pbvm_pgtblsz) << 2; + ia64_ap_state.as_text_va = IA64_PBVM_BASE; + ia64_ap_state.as_text_pte = PTE_PRESENT | PTE_MA_WB | + PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RX | + (ia64_tpa(IA64_PBVM_BASE) & PTE_PPN_MASK); + ia64_ap_state.as_text_itir = bootinfo->bi_text_mapped << 2; + ia64_ap_state.as_data_va = (uintptr_t)bdata; + ia64_ap_state.as_data_pte = PTE_PRESENT | PTE_MA_WB | + PTE_ACCESSED | PTE_DIRTY | PTE_PL_KERN | PTE_AR_RW | + (ia64_tpa((uintptr_t)bdata) & PTE_PPN_MASK); + ia64_ap_state.as_data_itir = bootinfo->bi_data_mapped << 2; + + /* Keep 'em spinning until we unleash them... 
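+ * as_spin is cleared again by cpu_mp_unleash() once the IPI vectors and MCA
+ * kprocs are set up; until then every woken AP sits in the cpu_spinwait()
+ * loop in ia64_ap_startup().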
*/ + ia64_ap_state.as_spin = 1; + SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { pc->pc_md.current_pmap = kernel_pmap; pc->pc_other_cpus = all_cpus & ~pc->pc_cpumask; - if (pc->pc_cpuid > 0) { - ap_pcpu = pc; - pc->pc_md.vhpt = pmap_alloc_vhpt(); - if (pc->pc_md.vhpt == 0) { - printf("SMP: WARNING: unable to allocate VHPT" - " for cpu%d", pc->pc_cpuid); - continue; - } - ap_stack = malloc(KSTACK_PAGES * PAGE_SIZE, M_SMP, - M_WAITOK); - ap_delay = 2000; - ap_awake = 0; + /* The BSP is obviously running already. */ + if (pc->pc_cpuid == 0) { + pc->pc_md.awake = 1; + continue; + } - if (bootverbose) - printf("SMP: waking up cpu%d\n", pc->pc_cpuid); + ia64_ap_state.as_pcpu = pc; + pc->pc_md.vhpt = pmap_alloc_vhpt(); + if (pc->pc_md.vhpt == 0) { + printf("SMP: WARNING: unable to allocate VHPT" + " for cpu%d", pc->pc_cpuid); + continue; + } - ipi_send(pc, ia64_ipi_wakeup); + stp = malloc(KSTACK_PAGES * PAGE_SIZE, M_SMP, M_WAITOK); + ia64_ap_state.as_kstack = stp; + ia64_ap_state.as_kstack_top = stp + KSTACK_PAGES * PAGE_SIZE; - do { - DELAY(1000); - } while (--ap_delay > 0); - pc->pc_md.awake = ap_awake; + ia64_ap_state.as_trace = 0; + ia64_ap_state.as_delay = 2000; + ia64_ap_state.as_awake = 0; - if (!ap_awake) - printf("SMP: WARNING: cpu%d did not wake up\n", - pc->pc_cpuid); - } else - pc->pc_md.awake = 1; + if (bootverbose) + printf("SMP: waking up cpu%d\n", pc->pc_cpuid); + + /* Here she goes... */ + ipi_send(pc, ia64_ipi_wakeup); + do { + DELAY(1000); + } while (--ia64_ap_state.as_delay > 0); + + pc->pc_md.awake = ia64_ap_state.as_awake; + + if (!ia64_ap_state.as_awake) { + printf("SMP: WARNING: cpu%d did not wake up (code " + "%#lx)\n", pc->pc_cpuid, + ia64_ap_state.as_trace - state); + } } } static void cpu_mp_unleash(void *dummy) { struct pcpu *pc; int cpus; if (mp_ncpus <= 1) return; /* Allocate XIVs for IPIs */ ia64_ipi_ast = ia64_xiv_alloc(PI_DULL, IA64_XIV_IPI, ia64_ih_ast); ia64_ipi_highfp = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_highfp); ia64_ipi_preempt = ia64_xiv_alloc(PI_SOFT, IA64_XIV_IPI, ia64_ih_preempt); ia64_ipi_rndzvs = ia64_xiv_alloc(PI_AV, IA64_XIV_IPI, ia64_ih_rndzvs); ia64_ipi_stop = ia64_xiv_alloc(PI_REALTIME, IA64_XIV_IPI, ia64_ih_stop); /* Reserve the NMI vector for IPI_STOP_HARD if possible */ ia64_ipi_nmi = (ia64_xiv_reserve(2, IA64_XIV_IPI, ia64_ih_stop) != 0) ? ia64_ipi_stop : 0x400; /* DM=NMI, Vector=n/a */ cpus = 0; smp_cpus = 0; SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { cpus++; if (pc->pc_md.awake) { kproc_create(ia64_store_mca_state, pc, NULL, 0, 0, "mca %u", pc->pc_cpuid); smp_cpus++; } } - ap_awake = 1; - ap_spin = 0; + ia64_ap_state.as_awake = 1; + ia64_ap_state.as_spin = 0; - while (ap_awake != smp_cpus) + while (ia64_ap_state.as_awake != smp_cpus) cpu_spinwait(); if (smp_cpus != cpus || cpus != mp_ncpus) { printf("SMP: %d CPUs found; %d CPUs usable; %d CPUs woken\n", mp_ncpus, cpus, smp_cpus); } smp_active = 1; smp_started = 1; /* * Now that all CPUs are up and running, bind interrupts to each of * them. */ ia64_bind_intr(); } /* * send an IPI to a set of cpus. */ void ipi_selected(cpumask_t cpus, int ipi) { struct pcpu *pc; SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { if (cpus & pc->pc_cpumask) ipi_send(pc, ipi); } } /* * send an IPI to a specific CPU. */ void ipi_cpu(int cpu, u_int ipi) { ipi_send(cpuid_to_pcpu[cpu], ipi); } /* * send an IPI to all CPUs EXCEPT myself. */ void ipi_all_but_self(int ipi) { struct pcpu *pc; SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { if (pc != pcpup) ipi_send(pc, ipi); } } /* - * Send an IPI to the specified processor. 
The lid parameter holds the - * cr.lid (CR64) contents of the target processor. Only the id and eid - * fields are used here. + * Send an IPI to the specified processor. */ void ipi_send(struct pcpu *cpu, int xiv) { - u_int lid; + u_int sapic_id; KASSERT(xiv != 0, ("ipi_send")); - lid = LID_SAPIC(cpu->pc_md.lid); + sapic_id = IA64_LID_GET_SAPIC_ID(cpu->pc_md.lid); ia64_mf(); - ia64_st8(&(ia64_pib->ib_ipi[lid][0]), xiv); + ia64_st8(&(ia64_pib->ib_ipi[sapic_id][0]), xiv); ia64_mf_a(); CTR3(KTR_SMP, "ipi_send(%p, %d): cpuid=%d", cpu, xiv, PCPU_GET(cpuid)); } SYSINIT(start_aps, SI_SUB_SMP, SI_ORDER_FIRST, cpu_mp_unleash, NULL); Index: head/sys/ia64/ia64/pmap.c =================================================================== --- head/sys/ia64/ia64/pmap.c (revision 221270) +++ head/sys/ia64/ia64/pmap.c (revision 221271) @@ -1,2469 +1,2461 @@ /*- * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * Copyright (c) 1998,2000 Doug Rabson * All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 * from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp * with some ideas from NetBSD's alpha pmap */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Manages physical address maps. * * In addition to hardware address maps, this * module is called upon to provide software-use-only * maps which may or may not be stored in the same * form as hardware maps. 
These pseudo-maps are * used to store intermediate results from copy * operations to and from address spaces. * * Since the information managed by this module is * also stored by the logical address mapping module, * this module may throw away valid virtual-to-physical * mappings at almost any time. However, invalidations * of virtual-to-physical mappings must be done as * requested. * * In order to cope with hardware architectures which * make virtual-to-physical map invalidates expensive, * this module may delay invalidate or reduced protection * operations until such time as they are actually * necessary. This module is given full information as * to which processors are currently using which maps, * and to when physical maps must be made correct. */ /* * Following the Linux model, region IDs are allocated in groups of * eight so that a single region ID can be used for as many RRs as we * want by encoding the RR number into the low bits of the ID. * * We reserve region ID 0 for the kernel and allocate the remaining * IDs for user pmaps. * * Region 0-3: User virtually mapped * Region 4: PBVM and special mappings * Region 5: Kernel virtual memory * Region 6: Direct-mapped uncacheable * Region 7: Direct-mapped cacheable */ /* XXX move to a header. */ extern uint64_t ia64_gateway_page[]; #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif #if !defined(DIAGNOSTIC) #define PMAP_INLINE __inline #else #define PMAP_INLINE #endif #define pmap_accessed(lpte) ((lpte)->pte & PTE_ACCESSED) #define pmap_dirty(lpte) ((lpte)->pte & PTE_DIRTY) #define pmap_exec(lpte) ((lpte)->pte & PTE_AR_RX) #define pmap_managed(lpte) ((lpte)->pte & PTE_MANAGED) #define pmap_ppn(lpte) ((lpte)->pte & PTE_PPN_MASK) #define pmap_present(lpte) ((lpte)->pte & PTE_PRESENT) #define pmap_prot(lpte) (((lpte)->pte & PTE_PROT_MASK) >> 56) #define pmap_wired(lpte) ((lpte)->pte & PTE_WIRED) #define pmap_clear_accessed(lpte) (lpte)->pte &= ~PTE_ACCESSED #define pmap_clear_dirty(lpte) (lpte)->pte &= ~PTE_DIRTY #define pmap_clear_present(lpte) (lpte)->pte &= ~PTE_PRESENT #define pmap_clear_wired(lpte) (lpte)->pte &= ~PTE_WIRED #define pmap_set_wired(lpte) (lpte)->pte |= PTE_WIRED /* * The VHPT bucket head structure. */ struct ia64_bucket { uint64_t chain; struct mtx mutex; u_int length; }; /* * Statically allocated kernel pmap */ struct pmap kernel_pmap_store; vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ /* * Kernel virtual memory management. */ static int nkpt; -struct ia64_lpte ***ia64_kptdir; +extern struct ia64_lpte ***ia64_kptdir; + #define KPTE_DIR0_INDEX(va) \ (((va) >> (3*PAGE_SHIFT-8)) & ((1<<(PAGE_SHIFT-3))-1)) #define KPTE_DIR1_INDEX(va) \ (((va) >> (2*PAGE_SHIFT-5)) & ((1<<(PAGE_SHIFT-3))-1)) #define KPTE_PTE_INDEX(va) \ (((va) >> PAGE_SHIFT) & ((1<<(PAGE_SHIFT-5))-1)) #define NKPTEPG (PAGE_SIZE / sizeof(struct ia64_lpte)) vm_offset_t kernel_vm_end; /* Values for ptc.e. XXX values for SKI. 
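The KPTE_*_INDEX macros above split a region-5 kernel virtual address into a directory-0 slot, a directory-1 slot and a leaf slot for the kernel's two-level-plus-leaf 'page tables'; pmap_growkernel() later fills those levels in one page at a time, which is where the PAGE_SIZE * NKPTEPG growth step comes from. A standalone sketch of the same index arithmetic, assuming the usual ia64 PAGE_SHIFT of 13 (8 KB pages); the sample address is made up.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT             13      /* assumed: 8 KB pages */
    #define KPTE_DIR0_INDEX(va)    (((va) >> (3*PAGE_SHIFT-8)) & ((1<<(PAGE_SHIFT-3))-1))
    #define KPTE_DIR1_INDEX(va)    (((va) >> (2*PAGE_SHIFT-5)) & ((1<<(PAGE_SHIFT-3))-1))
    #define KPTE_PTE_INDEX(va)     (((va) >> PAGE_SHIFT) & ((1<<(PAGE_SHIFT-5))-1))

    int
    main(void)
    {
            /* An arbitrary region-5 kernel VA (top three bits = 5). */
            uint64_t va = (5UL << 61) | 0x12345678UL;

            printf("dir0=%lu dir1=%lu pte=%lu\n",
                (unsigned long)KPTE_DIR0_INDEX(va),
                (unsigned long)KPTE_DIR1_INDEX(va),
                (unsigned long)KPTE_PTE_INDEX(va));
            return (0);
    }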
*/ static uint64_t pmap_ptc_e_base = 0x100000000; static uint64_t pmap_ptc_e_count1 = 3; static uint64_t pmap_ptc_e_count2 = 2; static uint64_t pmap_ptc_e_stride1 = 0x2000; static uint64_t pmap_ptc_e_stride2 = 0x100000000; -volatile u_long pmap_ptc_g_sem; +extern volatile u_long pmap_ptc_g_sem; /* * Data for the RID allocator */ static int pmap_ridcount; static int pmap_rididx; static int pmap_ridmapsz; static int pmap_ridmax; static uint64_t *pmap_ridmap; struct mtx pmap_ridmutex; /* * Data for the pv entry allocation mechanism */ static uma_zone_t pvzone; static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; /* * Data for allocating PTEs for user processes. */ static uma_zone_t ptezone; /* * Virtual Hash Page Table (VHPT) data. */ /* SYSCTL_DECL(_machdep); */ SYSCTL_NODE(_machdep, OID_AUTO, vhpt, CTLFLAG_RD, 0, ""); struct ia64_bucket *pmap_vhpt_bucket; int pmap_vhpt_nbuckets; SYSCTL_INT(_machdep_vhpt, OID_AUTO, nbuckets, CTLFLAG_RD, &pmap_vhpt_nbuckets, 0, ""); int pmap_vhpt_log2size = 0; TUNABLE_INT("machdep.vhpt.log2size", &pmap_vhpt_log2size); SYSCTL_INT(_machdep_vhpt, OID_AUTO, log2size, CTLFLAG_RD, &pmap_vhpt_log2size, 0, ""); static int pmap_vhpt_inserts; SYSCTL_INT(_machdep_vhpt, OID_AUTO, inserts, CTLFLAG_RD, &pmap_vhpt_inserts, 0, ""); static int pmap_vhpt_population(SYSCTL_HANDLER_ARGS); SYSCTL_PROC(_machdep_vhpt, OID_AUTO, population, CTLTYPE_INT | CTLFLAG_RD, NULL, 0, pmap_vhpt_population, "I", ""); static struct ia64_lpte *pmap_find_vhpt(vm_offset_t va); static PMAP_INLINE void free_pv_entry(pv_entry_t pv); static pv_entry_t get_pv_entry(pmap_t locked_pmap); static void pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot); static void pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va); static void pmap_invalidate_all(void); static int pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va, pv_entry_t pv, int freepte); static int pmap_remove_vhpt(vm_offset_t va); static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); vm_offset_t pmap_steal_memory(vm_size_t size) { vm_size_t bank_size; vm_offset_t pa, va; size = round_page(size); bank_size = phys_avail[1] - phys_avail[0]; while (size > bank_size) { int i; for (i = 0; phys_avail[i+2]; i+= 2) { phys_avail[i] = phys_avail[i+2]; phys_avail[i+1] = phys_avail[i+3]; } phys_avail[i] = 0; phys_avail[i+1] = 0; if (!phys_avail[0]) panic("pmap_steal_memory: out of memory"); bank_size = phys_avail[1] - phys_avail[0]; } pa = phys_avail[0]; phys_avail[0] += size; va = IA64_PHYS_TO_RR7(pa); bzero((caddr_t) va, size); return va; } static void pmap_initialize_vhpt(vm_offset_t vhpt) { struct ia64_lpte *pte; u_int i; pte = (struct ia64_lpte *)vhpt; for (i = 0; i < pmap_vhpt_nbuckets; i++) { pte[i].pte = 0; pte[i].itir = 0; pte[i].tag = 1UL << 63; /* Invalid tag */ pte[i].chain = (uintptr_t)(pmap_vhpt_bucket + i); } } #ifdef SMP MALLOC_DECLARE(M_SMP); vm_offset_t pmap_alloc_vhpt(void) { vm_offset_t vhpt; vm_size_t size; size = 1UL << pmap_vhpt_log2size; vhpt = (uintptr_t)contigmalloc(size, M_SMP, 0, 0UL, ~0UL, size, 0UL); if (vhpt != 0) { vhpt = IA64_PHYS_TO_RR7(ia64_tpa(vhpt)); pmap_initialize_vhpt(vhpt); } return (vhpt); } #endif /* * Bootstrap the system enough to run with virtual memory. */ void pmap_bootstrap() { struct ia64_pal_result res; vm_offset_t base; size_t size; int i, j, count, ridbits; /* * Query the PAL Code to find the loop parameters for the * ptc.e instruction. 
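PAL_PTCE_INFO returns a base address plus two counts and two strides, and pmap_invalidate_all_1() further down walks that two-dimensional pattern, issuing ptc.e at each step to flush the local translation cache. A userland sketch of the same traversal using the SKI defaults above, with a printf standing in for the ptc.e instruction (illustrative only):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* SKI defaults from above; real values come from PAL_PTCE_INFO. */
            uint64_t base = 0x100000000, count1 = 3, count2 = 2;
            uint64_t stride1 = 0x2000, stride2 = 0x100000000;
            uint64_t addr, i, j;

            addr = base;
            for (i = 0; i < count1; i++) {
                    for (j = 0; j < count2; j++) {
                            /* The kernel issues ia64_ptc_e(addr) here. */
                            printf("ptc.e %#" PRIx64 "\n", addr);
                            addr += stride2;
                    }
                    addr += stride1;
            }
            return (0);
    }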
*/ res = ia64_call_pal_static(PAL_PTCE_INFO, 0, 0, 0); if (res.pal_status != 0) panic("Can't configure ptc.e parameters"); pmap_ptc_e_base = res.pal_result[0]; pmap_ptc_e_count1 = res.pal_result[1] >> 32; pmap_ptc_e_count2 = res.pal_result[1] & ((1L<<32) - 1); pmap_ptc_e_stride1 = res.pal_result[2] >> 32; pmap_ptc_e_stride2 = res.pal_result[2] & ((1L<<32) - 1); if (bootverbose) printf("ptc.e base=0x%lx, count1=%ld, count2=%ld, " "stride1=0x%lx, stride2=0x%lx\n", pmap_ptc_e_base, pmap_ptc_e_count1, pmap_ptc_e_count2, pmap_ptc_e_stride1, pmap_ptc_e_stride2); /* * Setup RIDs. RIDs 0..7 are reserved for the kernel. * * We currently need at least 19 bits in the RID because PID_MAX * can only be encoded in 17 bits and we need RIDs for 4 regions * per process. With PID_MAX equalling 99999 this means that we * need to be able to encode 399996 (=4*PID_MAX). * The Itanium processor only has 18 bits and the architected * minimum is exactly that. So, we cannot use a PID based scheme * in those cases. Enter pmap_ridmap... * We should avoid the map when running on a processor that has * implemented enough bits. This means that we should pass the * process/thread ID to pmap. This we currently don't do, so we * use the map anyway. However, we don't want to allocate a map * that is large enough to cover the range dictated by the number * of bits in the RID, because that may result in a RID map of * 2MB in size for a 24-bit RID. A 64KB map is enough. * The bottomline: we create a 32KB map when the processor only * implements 18 bits (or when we can't figure it out). Otherwise * we create a 64KB map. */ res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0); if (res.pal_status != 0) { if (bootverbose) printf("Can't read VM Summary - assuming 18 Region ID bits\n"); ridbits = 18; /* guaranteed minimum */ } else { ridbits = (res.pal_result[1] >> 8) & 0xff; if (bootverbose) printf("Processor supports %d Region ID bits\n", ridbits); } if (ridbits > 19) ridbits = 19; pmap_ridmax = (1 << ridbits); pmap_ridmapsz = pmap_ridmax / 64; pmap_ridmap = (uint64_t *)pmap_steal_memory(pmap_ridmax / 8); pmap_ridmap[0] |= 0xff; pmap_rididx = 0; pmap_ridcount = 8; mtx_init(&pmap_ridmutex, "RID allocator lock", NULL, MTX_DEF); /* * Allocate some memory for initial kernel 'page tables'. */ ia64_kptdir = (void *)pmap_steal_memory(PAGE_SIZE); nkpt = 0; kernel_vm_end = VM_MIN_KERNEL_ADDRESS; for (i = 0; phys_avail[i+2]; i+= 2) ; count = i+2; + /* + * Determine a valid (mappable) VHPT size. + */ TUNABLE_INT_FETCH("machdep.vhpt.log2size", &pmap_vhpt_log2size); if (pmap_vhpt_log2size == 0) pmap_vhpt_log2size = 20; - else if (pmap_vhpt_log2size < 15) - pmap_vhpt_log2size = 15; - else if (pmap_vhpt_log2size > 61) - pmap_vhpt_log2size = 61; + else if (pmap_vhpt_log2size < 16) + pmap_vhpt_log2size = 16; + else if (pmap_vhpt_log2size > 28) + pmap_vhpt_log2size = 28; + if (pmap_vhpt_log2size & 1) + pmap_vhpt_log2size--; base = 0; size = 1UL << pmap_vhpt_log2size; for (i = 0; i < count; i += 2) { base = (phys_avail[i] + size - 1) & ~(size - 1); if (base + size <= phys_avail[i+1]) break; } if (!phys_avail[i]) panic("Unable to allocate VHPT"); if (base != phys_avail[i]) { /* Split this region. 
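The new bounds above keep the VHPT between 64 KB (2^16) and 256 MB (2^28) and force the size to an even power of two, and the loop that follows scans phys_avail[] for a naturally aligned slice large enough to hold it, splitting the region if the aligned base is not at the region start. A standalone sketch of that alignment search over an invented phys_avail[] array (the addresses are illustrative only):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
            /* Fake phys_avail[]: start/end pairs, terminated by zeroes. */
            uint64_t phys_avail[] = {
                    0x00100000, 0x00300000,         /* too small for a 4 MB VHPT */
                    0x04200000, 0x10000000,         /* large enough */
                    0, 0
            };
            int log2size = 22;                      /* 4 MB: even and within [16,28] */
            uint64_t size = 1UL << log2size;
            uint64_t base = 0;
            int i;

            for (i = 0; phys_avail[i + 1] != 0; i += 2) {
                    /* Round the region start up to a 'size'-aligned boundary. */
                    base = (phys_avail[i] + size - 1) & ~(size - 1);
                    if (base + size <= phys_avail[i + 1])
                            break;
            }
            if (phys_avail[i] == 0)
                    printf("no region can hold the VHPT\n");      /* the kernel panics */
            else
                    printf("VHPT at %#" PRIx64 ", size %#" PRIx64 "\n", base, size);
            return (0);
    }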
*/ for (j = count; j > i; j -= 2) { phys_avail[j] = phys_avail[j-2]; phys_avail[j+1] = phys_avail[j-2+1]; } phys_avail[i+1] = base; phys_avail[i+2] = base + size; } else phys_avail[i] = base + size; base = IA64_PHYS_TO_RR7(base); PCPU_SET(md.vhpt, base); if (bootverbose) printf("VHPT: address=%#lx, size=%#lx\n", base, size); pmap_vhpt_nbuckets = size / sizeof(struct ia64_lpte); pmap_vhpt_bucket = (void *)pmap_steal_memory(pmap_vhpt_nbuckets * sizeof(struct ia64_bucket)); for (i = 0; i < pmap_vhpt_nbuckets; i++) { /* Stolen memory is zeroed. */ mtx_init(&pmap_vhpt_bucket[i].mutex, "VHPT bucket lock", NULL, MTX_NOWITNESS | MTX_SPIN); } pmap_initialize_vhpt(base); map_vhpt(base); ia64_set_pta(base + (1 << 8) + (pmap_vhpt_log2size << 2) + 1); ia64_srlz_i(); virtual_avail = VM_MIN_KERNEL_ADDRESS; virtual_end = VM_MAX_KERNEL_ADDRESS; /* * Initialize the kernel pmap (which is statically allocated). */ PMAP_LOCK_INIT(kernel_pmap); for (i = 0; i < IA64_VM_MINKERN_REGION; i++) kernel_pmap->pm_rid[i] = 0; TAILQ_INIT(&kernel_pmap->pm_pvlist); PCPU_SET(md.current_pmap, kernel_pmap); /* Region 5 is mapped via the VHPT. */ ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1); /* - * Region 6 is direct mapped UC and region 7 is direct mapped - * WC. The details of this is controlled by the Alt {I,D}TLB - * handlers. Here we just make sure that they have the largest - * possible page size to minimise TLB usage. - */ - ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (IA64_ID_PAGE_SHIFT << 2)); - ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (IA64_ID_PAGE_SHIFT << 2)); - ia64_srlz_d(); - - /* * Clear out any random TLB entries left over from booting. */ pmap_invalidate_all(); map_gateway_page(); } static int pmap_vhpt_population(SYSCTL_HANDLER_ARGS) { int count, error, i; count = 0; for (i = 0; i < pmap_vhpt_nbuckets; i++) count += pmap_vhpt_bucket[i].length; error = SYSCTL_OUT(req, &count, sizeof(count)); return (error); } /* * Initialize a vm_page's machine-dependent fields. */ void pmap_page_init(vm_page_t m) { TAILQ_INIT(&m->md.pv_list); m->md.pv_list_count = 0; } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. */ void pmap_init(void) { int shpgperproc = PMAP_SHPGPERPROC; /* * Initialize the address space (zone) for the pv entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. */ pvzone = uma_zcreate("PV ENTRY", sizeof(struct pv_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); pv_entry_max = shpgperproc * maxproc + cnt.v_page_count; TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max); pv_entry_high_water = 9 * (pv_entry_max / 10); ptezone = uma_zcreate("PT ENTRY", sizeof (struct ia64_lpte), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE); } /*************************************************** * Manipulate TLBs for a pmap ***************************************************/ static void pmap_invalidate_page(vm_offset_t va) { struct ia64_lpte *pte; struct pcpu *pc; uint64_t tag, sem; register_t is; u_int vhpt_ofs; critical_enter(); vhpt_ofs = ia64_thash(va) - PCPU_GET(md.vhpt); tag = ia64_ttag(va); SLIST_FOREACH(pc, &cpuhead, pc_allcpu) { pte = (struct ia64_lpte *)(pc->pc_md.vhpt + vhpt_ofs); atomic_cmpset_64(&pte->tag, tag, 1UL << 63); } /* PTC.G enter exclusive */ is = intr_disable(); /* Atomically assert writer after all writers have gone. 
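The pmap_ptc_g_sem protocol used here treats bit 63 as a writer flag: a CPU about to issue a global purge spins until no other writer holds the bit, atomically sets it while preserving the count in the low bits, waits for that count to drain to zero, performs the ptc.ga, and then clears the semaphore. (The side that raises the low-bit count lives in the low-level interruption code, not in this file.) A compressed userland model of the writer side with C11 atomics; the kernel uses atomic_cmpset_rel_long() and friends instead:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Assumes a 64-bit long, as on ia64; mirrors the kernel's volatile u_long. */
    static _Atomic unsigned long ptc_g_sem;

    static void
    ptc_g_enter(void)
    {
            unsigned long sem, tag;

            do {
                    do {    /* Wait until no other writer holds bit 63. */
                            sem = atomic_load_explicit(&ptc_g_sem, memory_order_acquire);
                            tag = sem | (1UL << 63);
                    } while (sem == tag);
            } while (!atomic_compare_exchange_weak_explicit(&ptc_g_sem, &sem, tag,
                memory_order_release, memory_order_relaxed));

            /* Wait for the count in the low bits to drain to zero. */
            do {
                    sem = atomic_load_explicit(&ptc_g_sem, memory_order_acquire);
            } while (sem != (1UL << 63));
    }

    static void
    ptc_g_leave(void)
    {
            atomic_store_explicit(&ptc_g_sem, 0, memory_order_release);
    }

    int
    main(void)
    {
            ptc_g_enter();
            printf("would issue ptc.ga here\n");
            ptc_g_leave();
            return (0);
    }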
*/ do { /* Wait until there's no more writer. */ do { sem = atomic_load_acq_long(&pmap_ptc_g_sem); tag = sem | (1ul << 63); } while (sem == tag); } while (!atomic_cmpset_rel_long(&pmap_ptc_g_sem, sem, tag)); /* Wait until all readers are gone. */ tag = (1ul << 63); do { sem = atomic_load_acq_long(&pmap_ptc_g_sem); } while (sem != tag); ia64_ptc_ga(va, PAGE_SHIFT << 2); /* PTC.G leave exclusive */ atomic_store_rel_long(&pmap_ptc_g_sem, 0); intr_restore(is); critical_exit(); } static void pmap_invalidate_all_1(void *arg) { uint64_t addr; int i, j; critical_enter(); addr = pmap_ptc_e_base; for (i = 0; i < pmap_ptc_e_count1; i++) { for (j = 0; j < pmap_ptc_e_count2; j++) { ia64_ptc_e(addr); addr += pmap_ptc_e_stride2; } addr += pmap_ptc_e_stride1; } critical_exit(); } static void pmap_invalidate_all(void) { #ifdef SMP if (mp_ncpus > 1) { smp_rendezvous(NULL, pmap_invalidate_all_1, NULL, NULL); return; } #endif pmap_invalidate_all_1(NULL); } static uint32_t pmap_allocate_rid(void) { uint64_t bit, bits; int rid; mtx_lock(&pmap_ridmutex); if (pmap_ridcount == pmap_ridmax) panic("pmap_allocate_rid: All Region IDs used"); /* Find an index with a free bit. */ while ((bits = pmap_ridmap[pmap_rididx]) == ~0UL) { pmap_rididx++; if (pmap_rididx == pmap_ridmapsz) pmap_rididx = 0; } rid = pmap_rididx * 64; /* Find a free bit. */ bit = 1UL; while (bits & bit) { rid++; bit <<= 1; } pmap_ridmap[pmap_rididx] |= bit; pmap_ridcount++; mtx_unlock(&pmap_ridmutex); return rid; } static void pmap_free_rid(uint32_t rid) { uint64_t bit; int idx; idx = rid / 64; bit = ~(1UL << (rid & 63)); mtx_lock(&pmap_ridmutex); pmap_ridmap[idx] &= bit; pmap_ridcount--; mtx_unlock(&pmap_ridmutex); } /*************************************************** * Page table page management routines..... ***************************************************/ void pmap_pinit0(struct pmap *pmap) { /* kernel_pmap is the same as any other pmap. */ pmap_pinit(pmap); } /* * Initialize a preallocated and zeroed pmap structure, * such as one in a vmspace structure. */ int pmap_pinit(struct pmap *pmap) { int i; PMAP_LOCK_INIT(pmap); for (i = 0; i < IA64_VM_MINKERN_REGION; i++) pmap->pm_rid[i] = pmap_allocate_rid(); TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); return (1); } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pmap) { int i; for (i = 0; i < IA64_VM_MINKERN_REGION; i++) if (pmap->pm_rid[i]) pmap_free_rid(pmap->pm_rid[i]); PMAP_LOCK_DESTROY(pmap); } /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { struct ia64_lpte **dir1; struct ia64_lpte *leaf; vm_page_t nkpg; while (kernel_vm_end <= addr) { if (nkpt == PAGE_SIZE/8 + PAGE_SIZE*PAGE_SIZE/64) panic("%s: out of kernel address space", __func__); dir1 = ia64_kptdir[KPTE_DIR0_INDEX(kernel_vm_end)]; if (dir1 == NULL) { nkpg = vm_page_alloc(NULL, nkpt++, VM_ALLOC_NOOBJ|VM_ALLOC_INTERRUPT|VM_ALLOC_WIRED); if (!nkpg) panic("%s: cannot add dir. 
page", __func__); dir1 = (struct ia64_lpte **) IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(nkpg)); bzero(dir1, PAGE_SIZE); ia64_kptdir[KPTE_DIR0_INDEX(kernel_vm_end)] = dir1; } nkpg = vm_page_alloc(NULL, nkpt++, VM_ALLOC_NOOBJ|VM_ALLOC_INTERRUPT|VM_ALLOC_WIRED); if (!nkpg) panic("%s: cannot add PTE page", __func__); leaf = (struct ia64_lpte *) IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(nkpg)); bzero(leaf, PAGE_SIZE); dir1[KPTE_DIR1_INDEX(kernel_vm_end)] = leaf; kernel_vm_end += PAGE_SIZE * NKPTEPG; } } /*************************************************** * page management routines. ***************************************************/ /* * free the pv_entry back to the free list */ static PMAP_INLINE void free_pv_entry(pv_entry_t pv) { pv_entry_count--; uma_zfree(pvzone, pv); } /* * get a new pv_entry, allocating a block from the system * when needed. */ static pv_entry_t get_pv_entry(pmap_t locked_pmap) { static const struct timeval printinterval = { 60, 0 }; static struct timeval lastprint; struct vpgqueues *vpq; struct ia64_lpte *pte; pmap_t oldpmap, pmap; pv_entry_t allocated_pv, next_pv, pv; vm_offset_t va; vm_page_t m; PMAP_LOCK_ASSERT(locked_pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); allocated_pv = uma_zalloc(pvzone, M_NOWAIT); if (allocated_pv != NULL) { pv_entry_count++; if (pv_entry_count > pv_entry_high_water) pagedaemon_wakeup(); else return (allocated_pv); } /* * Reclaim pv entries: At first, destroy mappings to inactive * pages. After that, if a pv entry is still needed, destroy * mappings to active pages. */ if (ratecheck(&lastprint, &printinterval)) printf("Approaching the limit on PV entries, " "increase the vm.pmap.shpgperproc tunable.\n"); vpq = &vm_page_queues[PQ_INACTIVE]; retry: TAILQ_FOREACH(m, &vpq->pl, pageq) { if (m->hold_count || m->busy) continue; TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) { va = pv->pv_va; pmap = pv->pv_pmap; /* Avoid deadlock and lock recursion. */ if (pmap > locked_pmap) PMAP_LOCK(pmap); else if (pmap != locked_pmap && !PMAP_TRYLOCK(pmap)) continue; pmap->pm_stats.resident_count--; oldpmap = pmap_switch(pmap); pte = pmap_find_vhpt(va); KASSERT(pte != NULL, ("pte")); pmap_remove_vhpt(va); pmap_invalidate_page(va); pmap_switch(oldpmap); if (pmap_accessed(pte)) vm_page_flag_set(m, PG_REFERENCED); if (pmap_dirty(pte)) vm_page_dirty(m); pmap_free_pte(pte, va); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); m->md.pv_list_count--; TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); if (pmap != locked_pmap) PMAP_UNLOCK(pmap); if (allocated_pv == NULL) allocated_pv = pv; else free_pv_entry(pv); } if (TAILQ_EMPTY(&m->md.pv_list)) vm_page_flag_clear(m, PG_WRITEABLE); } if (allocated_pv == NULL) { if (vpq == &vm_page_queues[PQ_INACTIVE]) { vpq = &vm_page_queues[PQ_ACTIVE]; goto retry; } panic("get_pv_entry: increase the vm.pmap.shpgperproc tunable"); } return (allocated_pv); } /* * Conditionally create a pv entry. */ static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); if (pv_entry_count < pv_entry_high_water && (pv = uma_zalloc(pvzone, M_NOWAIT)) != NULL) { pv_entry_count++; pv->pv_va = va; pv->pv_pmap = pmap; TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); m->md.pv_list_count++; return (TRUE); } else return (FALSE); } /* * Add an ia64_lpte to the VHPT. 
*/ static void pmap_enter_vhpt(struct ia64_lpte *pte, vm_offset_t va) { struct ia64_bucket *bckt; struct ia64_lpte *vhpte; uint64_t pte_pa; /* Can fault, so get it out of the way. */ pte_pa = ia64_tpa((vm_offset_t)pte); vhpte = (struct ia64_lpte *)ia64_thash(va); bckt = (struct ia64_bucket *)vhpte->chain; mtx_lock_spin(&bckt->mutex); pte->chain = bckt->chain; ia64_mf(); bckt->chain = pte_pa; pmap_vhpt_inserts++; bckt->length++; mtx_unlock_spin(&bckt->mutex); } /* * Remove the ia64_lpte matching va from the VHPT. Return zero if it * worked or an appropriate error code otherwise. */ static int pmap_remove_vhpt(vm_offset_t va) { struct ia64_bucket *bckt; struct ia64_lpte *pte; struct ia64_lpte *lpte; struct ia64_lpte *vhpte; uint64_t chain, tag; tag = ia64_ttag(va); vhpte = (struct ia64_lpte *)ia64_thash(va); bckt = (struct ia64_bucket *)vhpte->chain; lpte = NULL; mtx_lock_spin(&bckt->mutex); chain = bckt->chain; pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain); while (chain != 0 && pte->tag != tag) { lpte = pte; chain = pte->chain; pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain); } if (chain == 0) { mtx_unlock_spin(&bckt->mutex); return (ENOENT); } /* Snip this pv_entry out of the collision chain. */ if (lpte == NULL) bckt->chain = pte->chain; else lpte->chain = pte->chain; ia64_mf(); bckt->length--; mtx_unlock_spin(&bckt->mutex); return (0); } /* * Find the ia64_lpte for the given va, if any. */ static struct ia64_lpte * pmap_find_vhpt(vm_offset_t va) { struct ia64_bucket *bckt; struct ia64_lpte *pte; uint64_t chain, tag; tag = ia64_ttag(va); pte = (struct ia64_lpte *)ia64_thash(va); bckt = (struct ia64_bucket *)pte->chain; mtx_lock_spin(&bckt->mutex); chain = bckt->chain; pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain); while (chain != 0 && pte->tag != tag) { chain = pte->chain; pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(chain); } mtx_unlock_spin(&bckt->mutex); return ((chain != 0) ? pte : NULL); } /* * Remove an entry from the list of managed mappings. */ static int pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va, pv_entry_t pv) { if (!pv) { if (m->md.pv_list_count < pmap->pm_stats.resident_count) { TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pmap == pv->pv_pmap && va == pv->pv_va) break; } } else { TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) { if (va == pv->pv_va) break; } } } if (pv) { TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); m->md.pv_list_count--; if (TAILQ_FIRST(&m->md.pv_list) == NULL) vm_page_flag_clear(m, PG_WRITEABLE); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); free_pv_entry(pv); return 0; } else { return ENOENT; } } /* * Create a pv entry for page at pa for * (pmap, va). */ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; pv = get_pv_entry(pmap); pv->pv_pmap = pmap; pv->pv_va = va; PMAP_LOCK_ASSERT(pmap, MA_OWNED); mtx_assert(&vm_page_queue_mtx, MA_OWNED); TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); m->md.pv_list_count++; } /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. 
*/ vm_paddr_t pmap_extract(pmap_t pmap, vm_offset_t va) { struct ia64_lpte *pte; pmap_t oldpmap; vm_paddr_t pa; pa = 0; PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); pte = pmap_find_vhpt(va); if (pte != NULL && pmap_present(pte)) pa = pmap_ppn(pte); pmap_switch(oldpmap); PMAP_UNLOCK(pmap); return (pa); } /* * Routine: pmap_extract_and_hold * Function: * Atomically extract and hold the physical page * with the given pmap and virtual address pair * if that mapping permits the given protection. */ vm_page_t pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) { struct ia64_lpte *pte; pmap_t oldpmap; vm_page_t m; vm_paddr_t pa; pa = 0; m = NULL; PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); retry: pte = pmap_find_vhpt(va); if (pte != NULL && pmap_present(pte) && (pmap_prot(pte) & prot) == prot) { m = PHYS_TO_VM_PAGE(pmap_ppn(pte)); if (vm_page_pa_tryrelock(pmap, pmap_ppn(pte), &pa)) goto retry; vm_page_hold(m); } PA_UNLOCK_COND(pa); pmap_switch(oldpmap); PMAP_UNLOCK(pmap); return (m); } /*************************************************** * Low level mapping routines..... ***************************************************/ /* * Find the kernel lpte for mapping the given virtual address, which * must be in the part of region 5 which we can cover with our kernel * 'page tables'. */ static struct ia64_lpte * pmap_find_kpte(vm_offset_t va) { struct ia64_lpte **dir1; struct ia64_lpte *leaf; KASSERT((va >> 61) == 5, ("kernel mapping 0x%lx not in region 5", va)); KASSERT(va < kernel_vm_end, ("kernel mapping 0x%lx out of range", va)); dir1 = ia64_kptdir[KPTE_DIR0_INDEX(va)]; leaf = dir1[KPTE_DIR1_INDEX(va)]; return (&leaf[KPTE_PTE_INDEX(va)]); } /* * Find a pte suitable for mapping a user-space address. If one exists * in the VHPT, that one will be returned, otherwise a new pte is * allocated. */ static struct ia64_lpte * pmap_find_pte(vm_offset_t va) { struct ia64_lpte *pte; if (va >= VM_MAXUSER_ADDRESS) return pmap_find_kpte(va); pte = pmap_find_vhpt(va); if (pte == NULL) { pte = uma_zalloc(ptezone, M_NOWAIT | M_ZERO); pte->tag = 1UL << 63; } return (pte); } /* * Free a pte which is now unused. This simply returns it to the zone * allocator if it is a user mapping. For kernel mappings, clear the * valid bit to make it clear that the mapping is not currently used. */ static void pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va) { if (va < VM_MAXUSER_ADDRESS) uma_zfree(ptezone, pte); else pmap_clear_present(pte); } static PMAP_INLINE void pmap_pte_prot(pmap_t pm, struct ia64_lpte *pte, vm_prot_t prot) { static long prot2ar[4] = { PTE_AR_R, /* VM_PROT_NONE */ PTE_AR_RW, /* VM_PROT_WRITE */ PTE_AR_RX|PTE_ED, /* VM_PROT_EXECUTE */ PTE_AR_RWX|PTE_ED /* VM_PROT_WRITE|VM_PROT_EXECUTE */ }; pte->pte &= ~(PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK | PTE_ED); pte->pte |= (uint64_t)(prot & VM_PROT_ALL) << 56; pte->pte |= (prot == VM_PROT_NONE || pm == kernel_pmap) ? PTE_PL_KERN : PTE_PL_USER; pte->pte |= prot2ar[(prot & VM_PROT_ALL) >> 1]; } /* * Set a pte to contain a valid mapping and enter it in the VHPT. If * the pte was orginally valid, then its assumed to already be in the * VHPT. * This functions does not set the protection bits. It's expected * that those have been set correctly prior to calling this function. */ static void pmap_set_pte(struct ia64_lpte *pte, vm_offset_t va, vm_offset_t pa, boolean_t wired, boolean_t managed) { pte->pte &= PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK | PTE_ED; pte->pte |= PTE_PRESENT | PTE_MA_WB; pte->pte |= (managed) ? 
PTE_MANAGED : (PTE_DIRTY | PTE_ACCESSED); pte->pte |= (wired) ? PTE_WIRED : 0; pte->pte |= pa & PTE_PPN_MASK; pte->itir = PAGE_SHIFT << 2; pte->tag = ia64_ttag(va); } /* * Remove the (possibly managed) mapping represented by pte from the * given pmap. */ static int pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va, pv_entry_t pv, int freepte) { int error; vm_page_t m; /* * First remove from the VHPT. */ error = pmap_remove_vhpt(va); if (error) return (error); pmap_invalidate_page(va); if (pmap_wired(pte)) pmap->pm_stats.wired_count -= 1; pmap->pm_stats.resident_count -= 1; if (pmap_managed(pte)) { m = PHYS_TO_VM_PAGE(pmap_ppn(pte)); if (pmap_dirty(pte)) vm_page_dirty(m); if (pmap_accessed(pte)) vm_page_flag_set(m, PG_REFERENCED); error = pmap_remove_entry(pmap, m, va, pv); } if (freepte) pmap_free_pte(pte, va); return (error); } /* * Extract the physical page address associated with a kernel * virtual address. */ vm_paddr_t pmap_kextract(vm_offset_t va) { struct ia64_lpte *pte; - vm_offset_t gwpage; KASSERT(va >= VM_MAXUSER_ADDRESS, ("Must be kernel VA")); /* Regions 6 and 7 are direct mapped. */ if (va >= IA64_RR_BASE(6)) return (IA64_RR_MASK(va)); - /* EPC gateway page? */ - gwpage = (vm_offset_t)ia64_get_k5(); - if (va >= gwpage && va < gwpage + PAGE_SIZE) - return (IA64_RR_MASK((vm_offset_t)ia64_gateway_page)); - /* Bail out if the virtual address is beyond our limits. */ if (va >= kernel_vm_end) return (0); - pte = pmap_find_kpte(va); - if (!pmap_present(pte)) - return (0); - return (pmap_ppn(pte) | (va & PAGE_MASK)); + if (va >= VM_MIN_KERNEL_ADDRESS) { + pte = pmap_find_kpte(va); + return (pmap_present(pte) ? pmap_ppn(pte)|(va&PAGE_MASK) : 0); + } + + return (0); } /* * Add a list of wired pages to the kva this routine is only used for * temporary kernel mappings that do not need to have page modification * or references recorded. Note that old mappings are simply written * over. The page is effectively wired, but it's customary to not have * the PTE reflect that, nor update statistics. */ void pmap_qenter(vm_offset_t va, vm_page_t *m, int count) { struct ia64_lpte *pte; int i; for (i = 0; i < count; i++) { pte = pmap_find_kpte(va); if (pmap_present(pte)) pmap_invalidate_page(va); else pmap_enter_vhpt(pte, va); pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL); pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE); va += PAGE_SIZE; } } /* * this routine jerks page mappings from the * kernel -- it is meant only for temporary mappings. */ void pmap_qremove(vm_offset_t va, int count) { struct ia64_lpte *pte; int i; for (i = 0; i < count; i++) { pte = pmap_find_kpte(va); if (pmap_present(pte)) { pmap_remove_vhpt(va); pmap_invalidate_page(va); pmap_clear_present(pte); } va += PAGE_SIZE; } } /* * Add a wired page to the kva. As for pmap_qenter(), it's customary * to not have the PTE reflect that, nor update statistics. */ void pmap_kenter(vm_offset_t va, vm_offset_t pa) { struct ia64_lpte *pte; pte = pmap_find_kpte(va); if (pmap_present(pte)) pmap_invalidate_page(va); else pmap_enter_vhpt(pte, va); pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL); pmap_set_pte(pte, va, pa, FALSE, FALSE); } /* * Remove a page from the kva */ void pmap_kremove(vm_offset_t va) { struct ia64_lpte *pte; pte = pmap_find_kpte(va); if (pmap_present(pte)) { pmap_remove_vhpt(va); pmap_invalidate_page(va); pmap_clear_present(pte); } } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. 
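pmap_kextract() above distinguishes kernel addresses purely by region number, the top three bits of the VA: regions 6 and 7 are identity-mapped, so stripping the region bits yields the physical address, while region-5 addresses below kernel_vm_end are resolved through the kernel page tables. A small sketch of the direct-mapped half, assuming the conventional IA64_RR_BASE(n) = n << 61 and "clear the top three bits" IA64_RR_MASK() definitions, which are not shown in this diff:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define RR_BASE(n)      ((uint64_t)(n) << 61)           /* assumed IA64_RR_BASE */
    #define RR_MASK(va)     ((va) & ((1UL << 61) - 1))      /* assumed IA64_RR_MASK */

    int
    main(void)
    {
            uint64_t pa = 0x4000000;                /* made-up physical address */
            uint64_t va6 = pa | RR_BASE(6);         /* uncacheable direct map */
            uint64_t va7 = pa | RR_BASE(7);         /* cacheable direct map */

            printf("region(va7)=%" PRIu64 "\n", va7 >> 61);
            printf("kextract(va6)=%#" PRIx64 " kextract(va7)=%#" PRIx64 "\n",
                RR_MASK(va6), RR_MASK(va7));        /* both give back pa */
            return (0);
    }

The same arithmetic is behind pmap_mapdev() (pa | IA64_RR_BASE(6)) and the IA64_PHYS_TO_RR7() conversions used by pmap_zero_page() and pmap_copy_page() further down.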
Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) { return IA64_PHYS_TO_RR7(start); } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. */ void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { pmap_t oldpmap; vm_offset_t va; pv_entry_t npv, pv; struct ia64_lpte *pte; if (pmap->pm_stats.resident_count == 0) return; vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); /* * special handling of removing one page. a very * common operation and easy to short circuit some * code. */ if (sva + PAGE_SIZE == eva) { pte = pmap_find_vhpt(sva); if (pte != NULL) pmap_remove_pte(pmap, pte, sva, 0, 1); goto out; } if (pmap->pm_stats.resident_count < ((eva - sva) >> PAGE_SHIFT)) { TAILQ_FOREACH_SAFE(pv, &pmap->pm_pvlist, pv_plist, npv) { va = pv->pv_va; if (va >= sva && va < eva) { pte = pmap_find_vhpt(va); KASSERT(pte != NULL, ("pte")); pmap_remove_pte(pmap, pte, va, pv, 1); } } } else { for (va = sva; va < eva; va += PAGE_SIZE) { pte = pmap_find_vhpt(va); if (pte != NULL) pmap_remove_pte(pmap, pte, va, 0, 1); } } out: vm_page_unlock_queues(); pmap_switch(oldpmap); PMAP_UNLOCK(pmap); } /* * Routine: pmap_remove_all * Function: * Removes this physical page from * all physical maps in which it resides. * Reflects back modify bits to the pager. * * Notes: * Original versions of this routine were very * inefficient because they iteratively called * pmap_remove (slow...) */ void pmap_remove_all(vm_page_t m) { pmap_t oldpmap; pv_entry_t pv; KASSERT((m->flags & PG_FICTITIOUS) == 0, ("pmap_remove_all: page %p is fictitious", m)); vm_page_lock_queues(); while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { struct ia64_lpte *pte; pmap_t pmap = pv->pv_pmap; vm_offset_t va = pv->pv_va; PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); pte = pmap_find_vhpt(va); KASSERT(pte != NULL, ("pte")); if (pmap_ppn(pte) != VM_PAGE_TO_PHYS(m)) panic("pmap_remove_all: pv_table for %lx is inconsistent", VM_PAGE_TO_PHYS(m)); pmap_remove_pte(pmap, pte, va, pv, 1); pmap_switch(oldpmap); PMAP_UNLOCK(pmap); } vm_page_flag_clear(m, PG_WRITEABLE); vm_page_unlock_queues(); } /* * Set the physical protection on the * specified range of this map as requested. 
*/ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { pmap_t oldpmap; struct ia64_lpte *pte; if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); return; } if ((prot & (VM_PROT_WRITE|VM_PROT_EXECUTE)) == (VM_PROT_WRITE|VM_PROT_EXECUTE)) return; if ((sva & PAGE_MASK) || (eva & PAGE_MASK)) panic("pmap_protect: unaligned addresses"); vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); for ( ; sva < eva; sva += PAGE_SIZE) { /* If page is invalid, skip this page */ pte = pmap_find_vhpt(sva); if (pte == NULL) continue; /* If there's no change, skip it too */ if (pmap_prot(pte) == prot) continue; if ((prot & VM_PROT_WRITE) == 0 && pmap_managed(pte) && pmap_dirty(pte)) { vm_paddr_t pa = pmap_ppn(pte); vm_page_t m = PHYS_TO_VM_PAGE(pa); vm_page_dirty(m); pmap_clear_dirty(pte); } if (prot & VM_PROT_EXECUTE) ia64_sync_icache(sva, PAGE_SIZE); pmap_pte_prot(pmap, pte, prot); pmap_invalidate_page(sva); } vm_page_unlock_queues(); pmap_switch(oldpmap); PMAP_UNLOCK(pmap); } /* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. * * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. */ void pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m, vm_prot_t prot, boolean_t wired) { pmap_t oldpmap; vm_offset_t pa; vm_offset_t opa; struct ia64_lpte origpte; struct ia64_lpte *pte; boolean_t icache_inval, managed; vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); va &= ~PAGE_MASK; KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig")); KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 || (m->oflags & VPO_BUSY) != 0, ("pmap_enter: page %p is not busy", m)); /* * Find (or create) a pte for the given mapping. */ while ((pte = pmap_find_pte(va)) == NULL) { pmap_switch(oldpmap); PMAP_UNLOCK(pmap); vm_page_unlock_queues(); VM_WAIT; vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); } origpte = *pte; if (!pmap_present(pte)) { opa = ~0UL; pmap_enter_vhpt(pte, va); } else opa = pmap_ppn(pte); managed = FALSE; pa = VM_PAGE_TO_PHYS(m); icache_inval = (prot & VM_PROT_EXECUTE) ? TRUE : FALSE; /* * Mapping has not changed, must be protection or wiring change. */ if (opa == pa) { /* * Wiring change, just update stats. We don't worry about * wiring PT pages as they remain resident as long as there * are valid mappings in them. Hence, if a user page is wired, * the PT page will be also. */ if (wired && !pmap_wired(&origpte)) pmap->pm_stats.wired_count++; else if (!wired && pmap_wired(&origpte)) pmap->pm_stats.wired_count--; managed = (pmap_managed(&origpte)) ? TRUE : FALSE; /* * We might be turning off write access to the page, * so we go ahead and sense modify status. Otherwise, * we can avoid I-cache invalidation if the page * already allowed execution. */ if (managed && pmap_dirty(&origpte)) vm_page_dirty(m); else if (pmap_exec(&origpte)) icache_inval = FALSE; pmap_invalidate_page(va); goto validate; } /* * Mapping has changed, invalidate old range and fall * through to handle validating new mapping. */ if (opa != ~0UL) { pmap_remove_pte(pmap, pte, va, 0, 0); pmap_enter_vhpt(pte, va); } /* * Enter on the PV list if part of our managed memory. 
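The "enter on the PV list" step that follows in pmap_enter() records each managed mapping twice: on the page's md.pv_list (every (pmap, va) pair that maps this page) and on the pmap's pm_pvlist (every managed mapping in that address space). That double listing is what lets pmap_remove_all() and pmap_remove_pages() walk only the relevant mappings. A minimal userland model with sys/queue.h TAILQs and toy types standing in for vm_page and pmap:

    #include <sys/queue.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pv_entry {
            uintptr_t pv_va;
            struct toy_pmap *pv_pmap;
            TAILQ_ENTRY(pv_entry) pv_list;          /* linkage on the page */
            TAILQ_ENTRY(pv_entry) pv_plist;         /* linkage on the pmap */
    };

    struct toy_page { TAILQ_HEAD(, pv_entry) pv_list; int pv_list_count; };
    struct toy_pmap { TAILQ_HEAD(, pv_entry) pm_pvlist; };

    static void
    pv_insert(struct toy_pmap *pm, struct toy_page *m, uintptr_t va)
    {
            struct pv_entry *pv = malloc(sizeof(*pv));

            pv->pv_va = va;
            pv->pv_pmap = pm;
            TAILQ_INSERT_TAIL(&pm->pm_pvlist, pv, pv_plist);
            TAILQ_INSERT_TAIL(&m->pv_list, pv, pv_list);
            m->pv_list_count++;
    }

    int
    main(void)
    {
            struct toy_page m;
            struct toy_pmap pm;
            struct pv_entry *pv;

            TAILQ_INIT(&m.pv_list);
            m.pv_list_count = 0;
            TAILQ_INIT(&pm.pm_pvlist);
            pv_insert(&pm, &m, 0x2000);
            pv_insert(&pm, &m, 0x4000);

            /* Remove every mapping of the page, as pmap_remove_all() does. */
            while ((pv = TAILQ_FIRST(&m.pv_list)) != NULL) {
                    TAILQ_REMOVE(&m.pv_list, pv, pv_list);
                    TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
                    m.pv_list_count--;
                    free(pv);
            }
            printf("remaining mappings: %d\n", m.pv_list_count);
            return (0);
    }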
*/ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva, ("pmap_enter: managed mapping within the clean submap")); pmap_insert_entry(pmap, va, m); managed = TRUE; } /* * Increment counters */ pmap->pm_stats.resident_count++; if (wired) pmap->pm_stats.wired_count++; validate: /* * Now validate mapping with desired protection/wiring. This * adds the pte to the VHPT if necessary. */ pmap_pte_prot(pmap, pte, prot); pmap_set_pte(pte, va, pa, wired, managed); /* Invalidate the I-cache when needed. */ if (icache_inval) ia64_sync_icache(va, PAGE_SIZE); if ((prot & VM_PROT_WRITE) != 0 && managed) vm_page_flag_set(m, PG_WRITEABLE); vm_page_unlock_queues(); pmap_switch(oldpmap); PMAP_UNLOCK(pmap); } /* * Maps a sequence of resident pages belonging to the same object. * The sequence begins with the given page m_start. This page is * mapped at the given virtual address start. Each subsequent page is * mapped at a virtual address that is offset from start by the same * amount as the page is offset from m_start within the object. The * last page in the sequence is the page with the largest offset from * m_start that can be mapped at a virtual address less than the given * virtual address end. Not every virtual page between start and end * is mapped; only those for which a resident page exists with the * corresponding offset from m_start are mapped. */ void pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start, vm_prot_t prot) { pmap_t oldpmap; vm_page_t m; vm_pindex_t diff, psize; VM_OBJECT_LOCK_ASSERT(m_start->object, MA_OWNED); psize = atop(end - start); m = m_start; vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { pmap_enter_quick_locked(pmap, start + ptoa(diff), m, prot); m = TAILQ_NEXT(m, listq); } vm_page_unlock_queues(); pmap_switch(oldpmap); PMAP_UNLOCK(pmap); } /* * this code makes some *MAJOR* assumptions: * 1. Current pmap & pmap exists. * 2. Not wired. * 3. Read access. * 4. No page table pages. * but is *MUCH* faster than pmap_enter... */ void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { pmap_t oldpmap; vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); pmap_enter_quick_locked(pmap, va, m, prot); vm_page_unlock_queues(); pmap_switch(oldpmap); PMAP_UNLOCK(pmap); } static void pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) { struct ia64_lpte *pte; boolean_t managed; KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva || (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0, ("pmap_enter_quick_locked: managed mapping within the clean submap")); mtx_assert(&vm_page_queue_mtx, MA_OWNED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); if ((pte = pmap_find_pte(va)) == NULL) return; if (!pmap_present(pte)) { /* Enter on the PV list if the page is managed. */ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) { if (!pmap_try_insert_pv_entry(pmap, va, m)) { pmap_free_pte(pte, va); return; } managed = TRUE; } else managed = FALSE; /* Increment counters. */ pmap->pm_stats.resident_count++; /* Initialise with R/O protection and enter into VHPT. */ pmap_enter_vhpt(pte, va); pmap_pte_prot(pmap, pte, prot & (VM_PROT_READ | VM_PROT_EXECUTE)); pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed); if (prot & VM_PROT_EXECUTE) ia64_sync_icache(va, PAGE_SIZE); } } /* * pmap_object_init_pt preloads the ptes for a given object * into the specified pmap. 
This eliminates the blast of soft * faults on process startup and immediately after an mmap. */ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size) { VM_OBJECT_LOCK_ASSERT(object, MA_OWNED); KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, ("pmap_object_init_pt: non-device object")); } /* * Routine: pmap_change_wiring * Function: Change the wiring attribute for a map/virtual-address * pair. * In/out conditions: * The mapping must already exist in the pmap. */ void pmap_change_wiring(pmap, va, wired) register pmap_t pmap; vm_offset_t va; boolean_t wired; { pmap_t oldpmap; struct ia64_lpte *pte; PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); pte = pmap_find_vhpt(va); KASSERT(pte != NULL, ("pte")); if (wired && !pmap_wired(pte)) { pmap->pm_stats.wired_count++; pmap_set_wired(pte); } else if (!wired && pmap_wired(pte)) { pmap->pm_stats.wired_count--; pmap_clear_wired(pte); } pmap_switch(oldpmap); PMAP_UNLOCK(pmap); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. */ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { } /* * pmap_zero_page zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. */ void pmap_zero_page(vm_page_t m) { vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m)); bzero((caddr_t) va, PAGE_SIZE); } /* * pmap_zero_page_area zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. * * off and size must reside within a single page. */ void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m)); bzero((char *)(caddr_t)va + off, size); } /* * pmap_zero_page_idle zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. This is for the vm_idlezero process. */ void pmap_zero_page_idle(vm_page_t m) { vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m)); bzero((caddr_t) va, PAGE_SIZE); } /* * pmap_copy_page copies the specified (machine independent) * page by mapping the page into virtual memory and using * bcopy to copy the page, one machine dependent page at a * time. */ void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t src = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(msrc)); vm_offset_t dst = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(mdst)); bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE); } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { pv_entry_t pv; int loops = 0; boolean_t rv; KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, ("pmap_page_exists_quick: page %p is not managed", m)); rv = FALSE; vm_page_lock_queues(); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pv->pv_pmap == pmap) { rv = TRUE; break; } loops++; if (loops >= 16) break; } vm_page_unlock_queues(); return (rv); } /* * pmap_page_wired_mappings: * * Return the number of managed mappings to the given physical page * that are wired. 
*/ int pmap_page_wired_mappings(vm_page_t m) { struct ia64_lpte *pte; pmap_t oldpmap, pmap; pv_entry_t pv; int count; count = 0; if ((m->flags & PG_FICTITIOUS) != 0) return (count); vm_page_lock_queues(); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { pmap = pv->pv_pmap; PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); pte = pmap_find_vhpt(pv->pv_va); KASSERT(pte != NULL, ("pte")); if (pmap_wired(pte)) count++; pmap_switch(oldpmap); PMAP_UNLOCK(pmap); } vm_page_unlock_queues(); return (count); } /* * Remove all pages from specified address space * this aids process exit speeds. Also, this code * is special cased for current process only, but * can have the more generic (and slightly slower) * mode enabled. This is much faster than pmap_remove * in the case of running down an entire address space. */ void pmap_remove_pages(pmap_t pmap) { pmap_t oldpmap; pv_entry_t pv, npv; if (pmap != vmspace_pmap(curthread->td_proc->p_vmspace)) { printf("warning: %s called with non-current pmap\n", __func__); return; } vm_page_lock_queues(); PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { struct ia64_lpte *pte; npv = TAILQ_NEXT(pv, pv_plist); pte = pmap_find_vhpt(pv->pv_va); KASSERT(pte != NULL, ("pte")); if (!pmap_wired(pte)) pmap_remove_pte(pmap, pte, pv->pv_va, pv, 1); } pmap_switch(oldpmap); PMAP_UNLOCK(pmap); vm_page_unlock_queues(); } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. */ int pmap_ts_referenced(vm_page_t m) { struct ia64_lpte *pte; pmap_t oldpmap; pv_entry_t pv; int count = 0; KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, ("pmap_ts_referenced: page %p is not managed", m)); vm_page_lock_queues(); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); oldpmap = pmap_switch(pv->pv_pmap); pte = pmap_find_vhpt(pv->pv_va); KASSERT(pte != NULL, ("pte")); if (pmap_accessed(pte)) { count++; pmap_clear_accessed(pte); pmap_invalidate_page(pv->pv_va); } pmap_switch(oldpmap); PMAP_UNLOCK(pv->pv_pmap); } vm_page_unlock_queues(); return (count); } /* * pmap_is_modified: * * Return whether or not the specified physical page was modified * in any physical maps. */ boolean_t pmap_is_modified(vm_page_t m) { struct ia64_lpte *pte; pmap_t oldpmap; pv_entry_t pv; boolean_t rv; KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, ("pmap_is_modified: page %p is not managed", m)); rv = FALSE; /* * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be * concurrently set while the object is locked. Thus, if PG_WRITEABLE * is clear, no PTEs can be dirty. */ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if ((m->oflags & VPO_BUSY) == 0 && (m->flags & PG_WRITEABLE) == 0) return (rv); vm_page_lock_queues(); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); oldpmap = pmap_switch(pv->pv_pmap); pte = pmap_find_vhpt(pv->pv_va); pmap_switch(oldpmap); KASSERT(pte != NULL, ("pte")); rv = pmap_dirty(pte) ? TRUE : FALSE; PMAP_UNLOCK(pv->pv_pmap); if (rv) break; } vm_page_unlock_queues(); return (rv); } /* * pmap_is_prefaultable: * * Return whether or not the specified virtual address is elgible * for prefault. 
*/ boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { struct ia64_lpte *pte; pte = pmap_find_vhpt(addr); if (pte != NULL && pmap_present(pte)) return (FALSE); return (TRUE); } /* * pmap_is_referenced: * * Return whether or not the specified physical page was referenced * in any physical maps. */ boolean_t pmap_is_referenced(vm_page_t m) { struct ia64_lpte *pte; pmap_t oldpmap; pv_entry_t pv; boolean_t rv; KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, ("pmap_is_referenced: page %p is not managed", m)); rv = FALSE; vm_page_lock_queues(); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); oldpmap = pmap_switch(pv->pv_pmap); pte = pmap_find_vhpt(pv->pv_va); pmap_switch(oldpmap); KASSERT(pte != NULL, ("pte")); rv = pmap_accessed(pte) ? TRUE : FALSE; PMAP_UNLOCK(pv->pv_pmap); if (rv) break; } vm_page_unlock_queues(); return (rv); } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { struct ia64_lpte *pte; pmap_t oldpmap; pv_entry_t pv; KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, ("pmap_clear_modify: page %p is not managed", m)); VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); KASSERT((m->oflags & VPO_BUSY) == 0, ("pmap_clear_modify: page %p is busy", m)); /* * If the page is not PG_WRITEABLE, then no PTEs can be modified. * If the object containing the page is locked and the page is not * VPO_BUSY, then PG_WRITEABLE cannot be concurrently set. */ if ((m->flags & PG_WRITEABLE) == 0) return; vm_page_lock_queues(); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); oldpmap = pmap_switch(pv->pv_pmap); pte = pmap_find_vhpt(pv->pv_va); KASSERT(pte != NULL, ("pte")); if (pmap_dirty(pte)) { pmap_clear_dirty(pte); pmap_invalidate_page(pv->pv_va); } pmap_switch(oldpmap); PMAP_UNLOCK(pv->pv_pmap); } vm_page_unlock_queues(); } /* * pmap_clear_reference: * * Clear the reference bit on the specified physical page. */ void pmap_clear_reference(vm_page_t m) { struct ia64_lpte *pte; pmap_t oldpmap; pv_entry_t pv; KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, ("pmap_clear_reference: page %p is not managed", m)); vm_page_lock_queues(); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { PMAP_LOCK(pv->pv_pmap); oldpmap = pmap_switch(pv->pv_pmap); pte = pmap_find_vhpt(pv->pv_va); KASSERT(pte != NULL, ("pte")); if (pmap_accessed(pte)) { pmap_clear_accessed(pte); pmap_invalidate_page(pv->pv_va); } pmap_switch(oldpmap); PMAP_UNLOCK(pv->pv_pmap); } vm_page_unlock_queues(); } /* * Clear the write and modified bits in each of the given page's mappings. */ void pmap_remove_write(vm_page_t m) { struct ia64_lpte *pte; pmap_t oldpmap, pmap; pv_entry_t pv; vm_prot_t prot; KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0, ("pmap_remove_write: page %p is not managed", m)); /* * If the page is not VPO_BUSY, then PG_WRITEABLE cannot be set by * another thread while the object is locked. Thus, if PG_WRITEABLE * is clear, no page table entries need updating. 
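The reference- and modify-bit routines in this stretch (pmap_ts_referenced(), pmap_clear_modify(), pmap_clear_reference() and pmap_remove_write()) all share one shape: walk the page's pv list, switch to each mapping's pmap, test or clear a bit in the PTE found through the VHPT, and invalidate the page whenever the PTE changed. A condensed sketch of the test-and-clear step over an array of fake PTEs; the mask value is invented, since the real PTE_* constants live in the ia64 pte.h header rather than in this diff:

    #include <stdint.h>
    #include <stdio.h>

    #define PTE_ACCESSED    0x020UL         /* invented value, illustration only */

    /* Count and clear the accessed bit over every mapping of a page. */
    static int
    ts_referenced(uint64_t *ptes, int nmappings)
    {
            int count = 0, i;

            for (i = 0; i < nmappings; i++) {
                    if (ptes[i] & PTE_ACCESSED) {
                            count++;
                            ptes[i] &= ~PTE_ACCESSED;
                            /* pmap_invalidate_page(va) would follow here. */
                    }
            }
            return (count);
    }

    int
    main(void)
    {
            uint64_t ptes[3] = { PTE_ACCESSED, 0, PTE_ACCESSED };

            printf("referenced: %d\n", ts_referenced(ptes, 3));
            printf("referenced again: %d\n", ts_referenced(ptes, 3));
            return (0);
    }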
*/ VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED); if ((m->oflags & VPO_BUSY) == 0 && (m->flags & PG_WRITEABLE) == 0) return; vm_page_lock_queues(); TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { pmap = pv->pv_pmap; PMAP_LOCK(pmap); oldpmap = pmap_switch(pmap); pte = pmap_find_vhpt(pv->pv_va); KASSERT(pte != NULL, ("pte")); prot = pmap_prot(pte); if ((prot & VM_PROT_WRITE) != 0) { if (pmap_dirty(pte)) { vm_page_dirty(m); pmap_clear_dirty(pte); } prot &= ~VM_PROT_WRITE; pmap_pte_prot(pmap, pte, prot); pmap_invalidate_page(pv->pv_va); } pmap_switch(oldpmap); PMAP_UNLOCK(pmap); } vm_page_flag_clear(m, PG_WRITEABLE); vm_page_unlock_queues(); } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. */ void * pmap_mapdev(vm_paddr_t pa, vm_size_t size) { vm_offset_t va; va = pa | IA64_RR_BASE(6); return ((void *)va); } /* * 'Unmap' a range mapped by pmap_mapdev(). */ void pmap_unmapdev(vm_offset_t va, vm_size_t size) { } /* * perform the pmap work for mincore */ int pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) { pmap_t oldpmap; struct ia64_lpte *pte, tpte; vm_paddr_t pa; int val; PMAP_LOCK(pmap); retry: oldpmap = pmap_switch(pmap); pte = pmap_find_vhpt(addr); if (pte != NULL) { tpte = *pte; pte = &tpte; } pmap_switch(oldpmap); if (pte == NULL || !pmap_present(pte)) { val = 0; goto out; } val = MINCORE_INCORE; if (pmap_dirty(pte)) val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; if (pmap_accessed(pte)) val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && pmap_managed(pte)) { pa = pmap_ppn(pte); /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */ if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) goto retry; } else out: PA_UNLOCK_COND(*locked_pa); PMAP_UNLOCK(pmap); return (val); } void pmap_activate(struct thread *td) { pmap_switch(vmspace_pmap(td->td_proc->p_vmspace)); } pmap_t pmap_switch(pmap_t pm) { pmap_t prevpm; int i; critical_enter(); prevpm = PCPU_GET(md.current_pmap); if (prevpm == pm) goto out; if (pm == NULL) { for (i = 0; i < IA64_VM_MINKERN_REGION; i++) { ia64_set_rr(IA64_RR_BASE(i), (i << 8)|(PAGE_SHIFT << 2)|1); } } else { for (i = 0; i < IA64_VM_MINKERN_REGION; i++) { ia64_set_rr(IA64_RR_BASE(i), (pm->pm_rid[i] << 8)|(PAGE_SHIFT << 2)|1); } } PCPU_SET(md.current_pmap, pm); ia64_srlz_d(); out: critical_exit(); return (prevpm); } void pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) { pmap_t oldpm; struct ia64_lpte *pte; vm_offset_t lim; vm_size_t len; sz += va & 31; va &= ~31; sz = (sz + 31) & ~31; PMAP_LOCK(pm); oldpm = pmap_switch(pm); while (sz > 0) { lim = round_page(va); len = MIN(lim - va, sz); pte = pmap_find_vhpt(va); if (pte != NULL && pmap_present(pte)) ia64_sync_icache(va, len); va += len; sz -= len; } pmap_switch(oldpm); PMAP_UNLOCK(pm); } /* * Increase the starting virtual address of the given mapping if a * different alignment might result in more superpage mappings. 
*/ void pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, vm_offset_t *addr, vm_size_t size) { } #include "opt_ddb.h" #ifdef DDB #include static const char* psnames[] = { "1B", "2B", "4B", "8B", "16B", "32B", "64B", "128B", "256B", "512B", "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", "1G", "2G" }; static void print_trs(int type) { struct ia64_pal_result res; int i, maxtr; struct { pt_entry_t pte; uint64_t itir; uint64_t ifa; struct ia64_rr rr; } buf; static const char *manames[] = { "WB", "bad", "bad", "bad", "UC", "UCE", "WC", "NaT", }; res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0); if (res.pal_status != 0) { db_printf("Can't get VM summary\n"); return; } if (type == 0) maxtr = (res.pal_result[0] >> 40) & 0xff; else maxtr = (res.pal_result[0] >> 32) & 0xff; db_printf("V RID Virtual Page Physical Page PgSz ED AR PL D A MA P KEY\n"); for (i = 0; i <= maxtr; i++) { bzero(&buf, sizeof(buf)); res = ia64_pal_physical(PAL_VM_TR_READ, i, type, ia64_tpa((uint64_t)&buf)); if (!(res.pal_result[0] & 1)) buf.pte &= ~PTE_AR_MASK; if (!(res.pal_result[0] & 2)) buf.pte &= ~PTE_PL_MASK; if (!(res.pal_result[0] & 4)) pmap_clear_dirty(&buf); if (!(res.pal_result[0] & 8)) buf.pte &= ~PTE_MA_MASK; db_printf("%d %06x %013lx %013lx %4s %d %d %d %d %d %-3s " "%d %06x\n", (int)buf.ifa & 1, buf.rr.rr_rid, buf.ifa >> 12, (buf.pte & PTE_PPN_MASK) >> 12, psnames[(buf.itir & ITIR_PS_MASK) >> 2], (buf.pte & PTE_ED) ? 1 : 0, (int)(buf.pte & PTE_AR_MASK) >> 9, (int)(buf.pte & PTE_PL_MASK) >> 7, (pmap_dirty(&buf)) ? 1 : 0, (pmap_accessed(&buf)) ? 1 : 0, manames[(buf.pte & PTE_MA_MASK) >> 2], (pmap_present(&buf)) ? 1 : 0, (int)((buf.itir & ITIR_KEY_MASK) >> 8)); } } DB_COMMAND(itr, db_itr) { print_trs(0); } DB_COMMAND(dtr, db_dtr) { print_trs(1); } DB_COMMAND(rr, db_rr) { int i; uint64_t t; struct ia64_rr rr; printf("RR RID PgSz VE\n"); for (i = 0; i < 8; i++) { __asm __volatile ("mov %0=rr[%1]" : "=r"(t) : "r"(IA64_RR_BASE(i))); *(uint64_t *) &rr = t; printf("%d %06x %4s %d\n", i, rr.rr_rid, psnames[rr.rr_ps], rr.rr_ve); } } DB_COMMAND(thash, db_thash) { if (!have_addr) return; db_printf("%p\n", (void *) ia64_thash(addr)); } DB_COMMAND(ttag, db_ttag) { if (!have_addr) return; db_printf("0x%lx\n", ia64_ttag(addr)); } DB_COMMAND(kpte, db_kpte) { struct ia64_lpte *pte; if (!have_addr) { db_printf("usage: kpte \n"); return; } if (addr < VM_MIN_KERNEL_ADDRESS) { db_printf("kpte: error: invalid \n"); return; } pte = pmap_find_kpte(addr); db_printf("kpte at %p:\n", pte); db_printf(" pte =%016lx\n", pte->pte); db_printf(" itir =%016lx\n", pte->itir); db_printf(" tag =%016lx\n", pte->tag); db_printf(" chain=%016lx\n", pte->chain); } #endif Index: head/sys/ia64/ia64/sal.c =================================================================== --- head/sys/ia64/ia64/sal.c (revision 221270) +++ head/sys/ia64/ia64/sal.c (revision 221271) @@ -1,142 +1,130 @@ /*- * Copyright (c) 2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include int ia64_ipi_wakeup; static struct ia64_fdesc sal_fdesc; static sal_entry_t fake_sal; extern u_int64_t ia64_pal_entry; sal_entry_t *ia64_sal_entry = fake_sal; static struct uuid sal_table = EFI_TABLE_SAL; static struct sal_system_table *sal_systbl; static struct ia64_sal_result fake_sal(u_int64_t a1, u_int64_t a2, u_int64_t a3, u_int64_t a4, u_int64_t a5, u_int64_t a6, u_int64_t a7, u_int64_t a8) { struct ia64_sal_result res; res.sal_status = -3; res.sal_result[0] = 0; res.sal_result[1] = 0; res.sal_result[2] = 0; return res; } void ia64_sal_init(void) { static int sizes[6] = { 48, 32, 16, 32, 16, 16 }; u_int8_t *p; int error, i; sal_systbl = efi_get_table(&sal_table); if (sal_systbl == NULL) return; if (bcmp(sal_systbl->sal_signature, SAL_SIGNATURE, 4)) { printf("Bad signature for SAL System Table\n"); return; } p = (u_int8_t *) (sal_systbl + 1); for (i = 0; i < sal_systbl->sal_entry_count; i++) { switch (*p) { case 0: { struct sal_entrypoint_descriptor *dp; dp = (struct sal_entrypoint_descriptor*)p; ia64_pal_entry = IA64_PHYS_TO_RR7(dp->sale_pal_proc); if (bootverbose) printf("PAL Proc at 0x%lx\n", ia64_pal_entry); sal_fdesc.func = IA64_PHYS_TO_RR7(dp->sale_sal_proc); sal_fdesc.gp = IA64_PHYS_TO_RR7(dp->sale_sal_gp); if (bootverbose) printf("SAL Proc at 0x%lx, GP at 0x%lx\n", sal_fdesc.func, sal_fdesc.gp); ia64_sal_entry = (sal_entry_t *) &sal_fdesc; break; } case 5: { struct sal_ap_wakeup_descriptor *dp; -#ifdef SMP - struct ia64_sal_result result; - struct ia64_fdesc *fd; -#endif dp = (struct sal_ap_wakeup_descriptor*)p; if (dp->sale_mechanism != 0) { printf("SAL: unsupported AP wake-up mechanism " "(%d)\n", dp->sale_mechanism); break; } /* Reserve the XIV so that we won't use it. */ error = ia64_xiv_reserve(dp->sale_vector, IA64_XIV_PLAT, NULL); if (error) { printf("SAL: invalid AP wake-up XIV (%#lx)\n", dp->sale_vector); break; } ia64_ipi_wakeup = dp->sale_vector; if (bootverbose) printf("SAL: AP wake-up XIV: %#x\n", ia64_ipi_wakeup); - -#ifdef SMP - fd = (struct ia64_fdesc *) os_boot_rendez; - result = ia64_sal_entry(SAL_SET_VECTORS, - SAL_OS_BOOT_RENDEZ, ia64_tpa(fd->func), - ia64_tpa(fd->gp), 0, 0, 0, 0); -#endif - break; } } p += sizes[*p]; } } Index: head/sys/ia64/include/ia64_cpu.h =================================================================== --- head/sys/ia64/include/ia64_cpu.h (revision 221270) +++ head/sys/ia64/include/ia64_cpu.h (revision 221271) @@ -1,520 +1,526 @@ /*- * Copyright (c) 2007 Marcel Moolenaar * Copyright (c) 2000 Doug Rabson * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_IA64_CPU_H_ #define _MACHINE_IA64_CPU_H_ /* + * Local Interrupt ID. + */ +#define IA64_LID_GET_SAPIC_ID(x) ((u_int)((x) >> 16) & 0xffff) +#define IA64_LID_SET_SAPIC_ID(x) ((u_int)((x) & 0xffff) << 16) + +/* * Definition of DCR bits. */ #define IA64_DCR_PP 0x0000000000000001 #define IA64_DCR_BE 0x0000000000000002 #define IA64_DCR_LC 0x0000000000000004 #define IA64_DCR_DM 0x0000000000000100 #define IA64_DCR_DP 0x0000000000000200 #define IA64_DCR_DK 0x0000000000000400 #define IA64_DCR_DX 0x0000000000000800 #define IA64_DCR_DR 0x0000000000001000 #define IA64_DCR_DA 0x0000000000002000 #define IA64_DCR_DD 0x0000000000004000 #define IA64_DCR_DEFAULT \ (IA64_DCR_DM | IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | \ IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD) /* * Definition of PSR and IPSR bits. 
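 *
 * As an illustration (this snippet is not part of the original header),
 * code that inspects a saved cr.ipsr image uses these masks directly;
 * for instance, the privilege level at the time of an interruption is
 * tested with
 *
 *	if ((ipsr & IA64_PSR_CPL) == IA64_PSR_CPL_USER)
 *		... the interruption came from user mode ...
 *	else if ((ipsr & IA64_PSR_CPL) == IA64_PSR_CPL_KERN)
 *		... it came from the kernel ...
 *
 * and the restart-instruction slot is the IA64_PSR_RI field.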
*/ #define IA64_PSR_BE 0x0000000000000002 #define IA64_PSR_UP 0x0000000000000004 #define IA64_PSR_AC 0x0000000000000008 #define IA64_PSR_MFL 0x0000000000000010 #define IA64_PSR_MFH 0x0000000000000020 #define IA64_PSR_IC 0x0000000000002000 #define IA64_PSR_I 0x0000000000004000 #define IA64_PSR_PK 0x0000000000008000 #define IA64_PSR_DT 0x0000000000020000 #define IA64_PSR_DFL 0x0000000000040000 #define IA64_PSR_DFH 0x0000000000080000 #define IA64_PSR_SP 0x0000000000100000 #define IA64_PSR_PP 0x0000000000200000 #define IA64_PSR_DI 0x0000000000400000 #define IA64_PSR_SI 0x0000000000800000 #define IA64_PSR_DB 0x0000000001000000 #define IA64_PSR_LP 0x0000000002000000 #define IA64_PSR_TB 0x0000000004000000 #define IA64_PSR_RT 0x0000000008000000 #define IA64_PSR_CPL 0x0000000300000000 #define IA64_PSR_CPL_KERN 0x0000000000000000 #define IA64_PSR_CPL_1 0x0000000100000000 #define IA64_PSR_CPL_2 0x0000000200000000 #define IA64_PSR_CPL_USER 0x0000000300000000 #define IA64_PSR_IS 0x0000000400000000 #define IA64_PSR_MC 0x0000000800000000 #define IA64_PSR_IT 0x0000001000000000 #define IA64_PSR_ID 0x0000002000000000 #define IA64_PSR_DA 0x0000004000000000 #define IA64_PSR_DD 0x0000008000000000 #define IA64_PSR_SS 0x0000010000000000 #define IA64_PSR_RI 0x0000060000000000 #define IA64_PSR_RI_0 0x0000000000000000 #define IA64_PSR_RI_1 0x0000020000000000 #define IA64_PSR_RI_2 0x0000040000000000 #define IA64_PSR_ED 0x0000080000000000 #define IA64_PSR_BN 0x0000100000000000 #define IA64_PSR_IA 0x0000200000000000 /* * Definition of ISR bits. */ #define IA64_ISR_CODE 0x000000000000ffff #define IA64_ISR_VECTOR 0x0000000000ff0000 #define IA64_ISR_X 0x0000000100000000 #define IA64_ISR_W 0x0000000200000000 #define IA64_ISR_R 0x0000000400000000 #define IA64_ISR_NA 0x0000000800000000 #define IA64_ISR_SP 0x0000001000000000 #define IA64_ISR_RS 0x0000002000000000 #define IA64_ISR_IR 0x0000004000000000 #define IA64_ISR_NI 0x0000008000000000 #define IA64_ISR_SO 0x0000010000000000 #define IA64_ISR_EI 0x0000060000000000 #define IA64_ISR_EI_0 0x0000000000000000 #define IA64_ISR_EI_1 0x0000020000000000 #define IA64_ISR_EI_2 0x0000040000000000 #define IA64_ISR_ED 0x0000080000000000 /* * Vector numbers for various ia64 interrupts. */ #define IA64_VEC_VHPT 0 #define IA64_VEC_ITLB 1 #define IA64_VEC_DTLB 2 #define IA64_VEC_ALT_ITLB 3 #define IA64_VEC_ALT_DTLB 4 #define IA64_VEC_NESTED_DTLB 5 #define IA64_VEC_IKEY_MISS 6 #define IA64_VEC_DKEY_MISS 7 #define IA64_VEC_DIRTY_BIT 8 #define IA64_VEC_INST_ACCESS 9 #define IA64_VEC_DATA_ACCESS 10 #define IA64_VEC_BREAK 11 #define IA64_VEC_EXT_INTR 12 #define IA64_VEC_PAGE_NOT_PRESENT 20 #define IA64_VEC_KEY_PERMISSION 21 #define IA64_VEC_INST_ACCESS_RIGHTS 22 #define IA64_VEC_DATA_ACCESS_RIGHTS 23 #define IA64_VEC_GENERAL_EXCEPTION 24 #define IA64_VEC_DISABLED_FP 25 #define IA64_VEC_NAT_CONSUMPTION 26 #define IA64_VEC_SPECULATION 27 #define IA64_VEC_DEBUG 29 #define IA64_VEC_UNALIGNED_REFERENCE 30 #define IA64_VEC_UNSUPP_DATA_REFERENCE 31 #define IA64_VEC_FLOATING_POINT_FAULT 32 #define IA64_VEC_FLOATING_POINT_TRAP 33 #define IA64_VEC_LOWER_PRIVILEGE_TRANSFER 34 #define IA64_VEC_TAKEN_BRANCH_TRAP 35 #define IA64_VEC_SINGLE_STEP_TRAP 36 #define IA64_VEC_IA32_EXCEPTION 45 #define IA64_VEC_IA32_INTERCEPT 46 #define IA64_VEC_IA32_INTERRUPT 47 /* * IA-32 exceptions. 
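 *
 * The constants below are the standard x86 vector numbers (for example,
 * IA32_EXCEPTION_GPFAULT is vector 13, the general protection fault).
 * Illustratively -- this snippet is not part of the original header --
 * the vector and code fields of cr.isr can be picked apart with the
 * IA64_ISR_* masks defined above:
 *
 *	vec  = (isr & IA64_ISR_VECTOR) >> 16;
 *	code = isr & IA64_ISR_CODE;
 *
 * with the shift following directly from the mask value.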
*/ #define IA32_EXCEPTION_DIVIDE 0 #define IA32_EXCEPTION_DEBUG 1 #define IA32_EXCEPTION_BREAK 3 #define IA32_EXCEPTION_OVERFLOW 4 #define IA32_EXCEPTION_BOUND 5 #define IA32_EXCEPTION_DNA 7 #define IA32_EXCEPTION_NOT_PRESENT 11 #define IA32_EXCEPTION_STACK_FAULT 12 #define IA32_EXCEPTION_GPFAULT 13 #define IA32_EXCEPTION_FPERROR 16 #define IA32_EXCEPTION_ALIGNMENT_CHECK 17 #define IA32_EXCEPTION_STREAMING_SIMD 19 #define IA32_INTERCEPT_INSTRUCTION 0 #define IA32_INTERCEPT_GATE 1 #define IA32_INTERCEPT_SYSTEM_FLAG 2 #define IA32_INTERCEPT_LOCK 4 #ifndef LOCORE /* * Various special ia64 instructions. */ /* * Memory Fence. */ static __inline void ia64_mf(void) { __asm __volatile("mf"); } static __inline void ia64_mf_a(void) { __asm __volatile("mf.a"); } /* * Flush Cache. */ static __inline void ia64_fc(uint64_t va) { __asm __volatile("fc %0" :: "r"(va)); } static __inline void ia64_fc_i(uint64_t va) { __asm __volatile("fc.i %0" :: "r"(va)); } /* * Sync instruction stream. */ static __inline void ia64_sync_i(void) { __asm __volatile("sync.i"); } /* * Calculate address in VHPT for va. */ static __inline uint64_t ia64_thash(uint64_t va) { uint64_t result; __asm __volatile("thash %0=%1" : "=r" (result) : "r" (va)); return result; } /* * Calculate VHPT tag for va. */ static __inline uint64_t ia64_ttag(uint64_t va) { uint64_t result; __asm __volatile("ttag %0=%1" : "=r" (result) : "r" (va)); return result; } /* * Convert virtual address to physical. */ static __inline uint64_t ia64_tpa(uint64_t va) { uint64_t result; __asm __volatile("tpa %0=%1" : "=r" (result) : "r" (va)); return result; } /* * Generate a ptc.e instruction. */ static __inline void ia64_ptc_e(uint64_t v) { __asm __volatile("ptc.e %0;; srlz.i;;" :: "r"(v)); } /* * Generate a ptc.g instruction. */ static __inline void ia64_ptc_g(uint64_t va, uint64_t log2size) { __asm __volatile("ptc.g %0,%1;; srlz.i;;" :: "r"(va), "r"(log2size)); } /* * Generate a ptc.ga instruction. */ static __inline void ia64_ptc_ga(uint64_t va, uint64_t log2size) { __asm __volatile("ptc.ga %0,%1;; srlz.i;;" :: "r"(va), "r"(log2size)); } /* * Generate a ptc.l instruction. */ static __inline void ia64_ptc_l(uint64_t va, uint64_t log2size) { __asm __volatile("ptc.l %0,%1;; srlz.i;;" :: "r"(va), "r"(log2size)); } /* * Unordered memory load. */ static __inline uint8_t ia64_ld1(uint8_t *p) { uint8_t v; __asm __volatile("ld1 %0=[%1];;" : "=r"(v) : "r"(p)); return (v); } static __inline uint16_t ia64_ld2(uint16_t *p) { uint16_t v; __asm __volatile("ld2 %0=[%1];;" : "=r"(v) : "r"(p)); return (v); } static __inline uint32_t ia64_ld4(uint32_t *p) { uint32_t v; __asm __volatile("ld4 %0=[%1];;" : "=r"(v) : "r"(p)); return (v); } static __inline uint64_t ia64_ld8(uint64_t *p) { uint64_t v; __asm __volatile("ld8 %0=[%1];;" : "=r"(v) : "r"(p)); return (v); } /* * Unordered memory store. */ static __inline void ia64_st1(uint8_t *p, uint8_t v) { __asm __volatile("st1 [%0]=%1;;" :: "r"(p), "r"(v)); } static __inline void ia64_st2(uint16_t *p, uint16_t v) { __asm __volatile("st2 [%0]=%1;;" :: "r"(p), "r"(v)); } static __inline void ia64_st4(uint32_t *p, uint32_t v) { __asm __volatile("st4 [%0]=%1;;" :: "r"(p), "r"(v)); } static __inline void ia64_st8(uint64_t *p, uint64_t v) { __asm __volatile("st8 [%0]=%1;;" :: "r"(p), "r"(v)); } /* * Read the value of psr. */ static __inline uint64_t ia64_get_psr(void) { uint64_t result; __asm __volatile("mov %0=psr;;" : "=r" (result)); return result; } /* * Define accessors for application registers. 
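 *
 * For example (an illustrative sketch, not part of the original header),
 * once the IA64_AR()/IA64_CR() accessors below have been expanded, the
 * interval timer can be read and reprogrammed with
 *
 *	uint64_t now = ia64_get_itc();		read ar.itc
 *	ia64_set_itm(now + cycles_per_tick);	arm cr.itm
 *	ia64_set_itv(clock_xiv);		route the timer interrupt
 *
 * where cycles_per_tick and clock_xiv stand in for values that the
 * platform clock code would supply.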
*/ #define IA64_AR(name) \ \ static __inline uint64_t \ ia64_get_##name(void) \ { \ uint64_t result; \ __asm __volatile("mov %0=ar." #name : "=r" (result)); \ return result; \ } \ \ static __inline void \ ia64_set_##name(uint64_t v) \ { \ __asm __volatile("mov ar." #name "=%0;;" :: "r" (v)); \ } IA64_AR(k0) IA64_AR(k1) IA64_AR(k2) IA64_AR(k3) IA64_AR(k4) IA64_AR(k5) IA64_AR(k6) IA64_AR(k7) IA64_AR(rsc) IA64_AR(bsp) IA64_AR(bspstore) IA64_AR(rnat) IA64_AR(fcr) IA64_AR(eflag) IA64_AR(csd) IA64_AR(ssd) IA64_AR(cflg) IA64_AR(fsr) IA64_AR(fir) IA64_AR(fdr) IA64_AR(ccv) IA64_AR(unat) IA64_AR(fpsr) IA64_AR(itc) IA64_AR(pfs) IA64_AR(lc) IA64_AR(ec) /* * Define accessors for control registers. */ #define IA64_CR(name) \ \ static __inline uint64_t \ ia64_get_##name(void) \ { \ uint64_t result; \ __asm __volatile("mov %0=cr." #name : "=r" (result)); \ return result; \ } \ \ static __inline void \ ia64_set_##name(uint64_t v) \ { \ __asm __volatile("mov cr." #name "=%0;;" :: "r" (v)); \ } IA64_CR(dcr) IA64_CR(itm) IA64_CR(iva) IA64_CR(pta) IA64_CR(ipsr) IA64_CR(isr) IA64_CR(iip) IA64_CR(ifa) IA64_CR(itir) IA64_CR(iipa) IA64_CR(ifs) IA64_CR(iim) IA64_CR(iha) IA64_CR(lid) IA64_CR(ivr) IA64_CR(tpr) IA64_CR(eoi) IA64_CR(irr0) IA64_CR(irr1) IA64_CR(irr2) IA64_CR(irr3) IA64_CR(itv) IA64_CR(pmv) IA64_CR(cmcv) IA64_CR(lrr0) IA64_CR(lrr1) /* * Write a region register. */ static __inline void ia64_set_rr(uint64_t rrbase, uint64_t v) { __asm __volatile("mov rr[%0]=%1" :: "r"(rrbase), "r"(v) : "memory"); } /* * Read a CPUID register. */ static __inline uint64_t ia64_get_cpuid(int i) { uint64_t result; __asm __volatile("mov %0=cpuid[%1]" : "=r" (result) : "r"(i)); return result; } static __inline void ia64_disable_highfp(void) { __asm __volatile("ssm psr.dfh;; srlz.d"); } static __inline void ia64_enable_highfp(void) { __asm __volatile("rsm psr.dfh;; srlz.d"); } static __inline void ia64_srlz_d(void) { __asm __volatile("srlz.d"); } static __inline void ia64_srlz_i(void) { __asm __volatile("srlz.i;;"); } #endif /* !LOCORE */ #endif /* _MACHINE_IA64_CPU_H_ */ Index: head/sys/ia64/include/smp.h =================================================================== --- head/sys/ia64/include/smp.h (revision 221270) +++ head/sys/ia64/include/smp.h (revision 221271) @@ -1,34 +1,52 @@ /* * $FreeBSD$ */ #ifndef _MACHINE_SMP_H_ #define _MACHINE_SMP_H_ #ifdef _KERNEL #define IPI_AST ia64_ipi_ast #define IPI_PREEMPT ia64_ipi_preempt #define IPI_RENDEZVOUS ia64_ipi_rndzvs #define IPI_STOP ia64_ipi_stop #define IPI_STOP_HARD ia64_ipi_nmi #ifndef LOCORE struct pcpu; +struct ia64_ap_state { + uint64_t as_trace; + uint64_t as_pgtbl_pte; + uint64_t as_pgtbl_itir; + uint64_t as_text_va; + uint64_t as_text_pte; + uint64_t as_text_itir; + uint64_t as_data_va; + uint64_t as_data_pte; + uint64_t as_data_itir; + void *as_kstack; + void *as_kstack_top; + struct pcpu *as_pcpu; + volatile int as_delay; + volatile u_int as_awake; + volatile u_int as_spin; +}; + extern int ia64_ipi_ast; extern int ia64_ipi_highfp; extern int ia64_ipi_nmi; extern int ia64_ipi_preempt; extern int ia64_ipi_rndzvs; extern int ia64_ipi_stop; extern int ia64_ipi_wakeup; void ipi_all_but_self(int ipi); void ipi_cpu(int cpu, u_int ipi); void ipi_selected(cpumask_t cpus, int ipi); void ipi_send(struct pcpu *, int ipi); #endif /* !LOCORE */ #endif /* _KERNEL */ #endif /* !_MACHINE_SMP_H */ Index: head/sys/ia64/include/vmparam.h =================================================================== --- head/sys/ia64/include/vmparam.h (revision 221270) +++ 
head/sys/ia64/include/vmparam.h (revision 221271) @@ -1,229 +1,217 @@ /*- * Copyright (c) 1988 University of Utah. * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and Ralph Campbell. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: Utah $Hdr: vmparam.h 1.16 91/01/18$ * * @(#)vmparam.h 8.2 (Berkeley) 4/22/94 * * $FreeBSD$ */ #ifndef _MACHINE_VMPARAM_H_ #define _MACHINE_VMPARAM_H_ /* * Virtual memory related constants, all in bytes */ #ifndef MAXTSIZ #define MAXTSIZ (1<<30) /* max text size (1G) */ #endif #ifndef DFLDSIZ #define DFLDSIZ (1<<27) /* initial data size (128M) */ #endif #ifndef MAXDSIZ #define MAXDSIZ (1<<30) /* max data size (1G) */ #endif #ifndef DFLSSIZ #define DFLSSIZ (1<<21) /* initial stack size (2M) */ #endif #ifndef MAXSSIZ #define MAXSSIZ (1<<28) /* max stack size (256M) */ #endif #ifndef SGROWSIZ #define SGROWSIZ (128UL*1024) /* amount to grow stack */ #endif /* * We need region 7 virtual addresses for pagetables. */ #define UMA_MD_SMALL_ALLOC /* * The physical address space is sparsely populated. */ #define VM_PHYSSEG_SPARSE /* * The number of PHYSSEG entries is equal to the number of phys_avail * entries. */ #define VM_PHYSSEG_MAX 49 /* * Create three free page pools: VM_FREEPOOL_DEFAULT is the default pool * from which physical pages are allocated and VM_FREEPOOL_DIRECT is * the pool from which physical pages for small UMA objects are * allocated. */ #define VM_NFREEPOOL 3 #define VM_FREEPOOL_CACHE 2 #define VM_FREEPOOL_DEFAULT 0 #define VM_FREEPOOL_DIRECT 1 /* * Create one free page list. */ #define VM_NFREELIST 1 #define VM_FREELIST_DEFAULT 0 /* * An allocation size of 256MB is supported in order to optimize the * use of the identity mappings in region 7 by UMA. */ #define VM_NFREEORDER 16 /* * Only one memory domain. */ #ifndef VM_NDOMAIN #define VM_NDOMAIN 1 #endif /* * Disable superpage reservations. 
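 *
 * Setting VM_NRESERVLEVEL to 0 below turns the reservation system off
 * entirely, so the pmap never promotes mappings to superpages.  As a
 * worked example of the free-list sizing above (an editorial note, not
 * from the original header): with the default 8KB page size, a
 * VM_NFREEORDER of 16 permits buddy allocations of up to
 * 8KB << 15 = 256MB, which matches the 256MB identity mappings in
 * region 7 that UMA takes advantage of, as noted above.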
*/ #ifndef VM_NRESERVLEVEL #define VM_NRESERVLEVEL 0 #endif #define IA64_VM_MINKERN_REGION 4 /* * Manipulating region bits of an address. */ #define IA64_RR_BASE(n) (((uint64_t) (n)) << 61) #define IA64_RR_MASK(x) ((x) & ((1L << 61) - 1)) -#define IA64_PHYS_TO_RR6(x) ((x) | IA64_RR_BASE(6)) -#define IA64_PHYS_TO_RR7(x) ((x) | IA64_RR_BASE(7)) +#define IA64_PHYS_TO_RR6(x) ((x) | IA64_RR_BASE(6)) +#define IA64_PHYS_TO_RR7(x) ((x) | IA64_RR_BASE(7)) /* * The Itanium architecture defines that all implementations support at * least 51 virtual address bits (i.e. IMPL_VA_MSB=50). The unimplemented * bits are sign-extended from VA{IMPL_VA_MSB}. As such, there's a gap in * the virtual address range, which extends at most from 0x0004000000000000 * to 0x1ffbffffffffffff. We define the top half of a region in terms of * this worst-case gap. */ #define IA64_REGION_GAP_START 0x0004000000000000 #define IA64_REGION_GAP_EXTEND 0x1ffc000000000000 - -/* - * Page size of the identity mappings in region 7. - */ -#ifndef LOG2_ID_PAGE_SIZE -#define LOG2_ID_PAGE_SIZE 28 /* 256M */ -#endif - -#define IA64_ID_PAGE_SHIFT (LOG2_ID_PAGE_SIZE) -#define IA64_ID_PAGE_SIZE (1<<(LOG2_ID_PAGE_SIZE)) -#define IA64_ID_PAGE_MASK (IA64_ID_PAGE_SIZE-1) - /* * Parameters for Pre-Boot Virtual Memory (PBVM). * The kernel, its modules and metadata are loaded in the PBVM by the loader. * The PBVM consists of pages for which the mapping is maintained in a page * table. The page table is at least 1 EFI page large (i.e. 4KB), but can be * larger to accommodate more PBVM. The maximum page table size is 1MB. With * 8 bytes per page table entry, this means that the PBVM has at least 512 * pages and at most 128K pages. * The GNU toolchain (in particular GNU ld) does not support an alignment * larger than 64K. This means that we cannot guarantee page alignment for * a page size that's larger than 64K. We do want to have text and data in * different pages, which means that the maximum usable page size is 64KB. * Consequently: * The maximum total PBVM size is 8GB -- enough for a DVD image. A page table * of a single EFI page (4KB) allows for 32MB of PBVM. * * The kernel is given the PA and size of the page table that provides the * mapping of the PBVM. The page table itself is assumed to be mapped at a * known virtual address and using a single translation wired into the CPU. * As such, the page table is assumed to be a power of 2 and naturally aligned. * The kernel also assumes that a good portion of the kernel text is mapped * and wired into the CPU, but does not assume that the mapping covers the * whole of PBVM. */ #define IA64_PBVM_RR IA64_VM_MINKERN_REGION #define IA64_PBVM_BASE \ (IA64_RR_BASE(IA64_PBVM_RR) + IA64_REGION_GAP_EXTEND) #define IA64_PBVM_PGTBL_MAXSZ 1048576 #define IA64_PBVM_PGTBL \ (IA64_RR_BASE(IA64_PBVM_RR + 1) - IA64_PBVM_PGTBL_MAXSZ) #define IA64_PBVM_PAGE_SHIFT 16 /* 64KB */ #define IA64_PBVM_PAGE_SIZE (1 << IA64_PBVM_PAGE_SHIFT) #define IA64_PBVM_PAGE_MASK (IA64_PBVM_PAGE_SIZE - 1) /* * Mach derived constants */ /* user/kernel map constants */ #define VM_MIN_ADDRESS 0 #define VM_MAXUSER_ADDRESS IA64_RR_BASE(IA64_VM_MINKERN_REGION) #define VM_MIN_KERNEL_ADDRESS IA64_RR_BASE(IA64_VM_MINKERN_REGION + 1) #define VM_MAX_KERNEL_ADDRESS (IA64_RR_BASE(IA64_VM_MINKERN_REGION + 2) - 1) #define VM_MAX_ADDRESS ~0UL #define KERNBASE VM_MAXUSER_ADDRESS /* * USRSTACK is the top (end) of the user stack. Immediately above the user * stack resides the syscall gateway page. 
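 *
 * As a worked example (an editorial note, not part of the original
 * header): with the default MAXSSIZ of 256MB and 8KB pages,
 * IA64_BACKINGSTORE below lands 2 * 256MB + 8KB beneath USRSTACK.  The
 * memory stack grows down from USRSTACK while the RSE backing store
 * grows up from IA64_BACKINGSTORE, so a maximum-sized stack and a
 * maximum-sized backing store, separated by a guard page, cannot
 * overlap.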
 */
#define	USRSTACK		VM_MAXUSER_ADDRESS
#define	IA64_BACKINGSTORE	(USRSTACK - (2 * MAXSSIZ) - PAGE_SIZE)

/* virtual sizes (bytes) for various kernel submaps */
#ifndef VM_KMEM_SIZE
#define	VM_KMEM_SIZE		(12 * 1024 * 1024)
#endif

/*
 * How many physical pages per KVA page allocated.
 * min(max(max(VM_KMEM_SIZE, Physical memory/VM_KMEM_SIZE_SCALE),
 *     VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
 * is the total KVA space allocated for kmem_map.
 */
#ifndef VM_KMEM_SIZE_SCALE
#define	VM_KMEM_SIZE_SCALE	(4) /* XXX 8192 byte pages */
#endif

/* initial pagein size of beginning of executable file */
#ifndef	VM_INITIAL_PAGEIN
#define	VM_INITIAL_PAGEIN	16
#endif

#endif	/* !_MACHINE_VMPARAM_H_ */
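
An illustrative restatement of the kmem sizing expression documented in the
vmparam.h comment above (an editorial sketch, not code from the tree;
VM_KMEM_SIZE_MIN and VM_KMEM_SIZE_MAX are optional overrides that this file
does not define, and physmem_bytes stands in for the detected amount of
physical memory):

static size_t
kmem_map_target(size_t physmem_bytes)
{
	size_t sz;

	/* Scale physical memory, but never go below VM_KMEM_SIZE. */
	sz = physmem_bytes / VM_KMEM_SIZE_SCALE;
	if (sz < VM_KMEM_SIZE)
		sz = VM_KMEM_SIZE;
#ifdef VM_KMEM_SIZE_MIN
	if (sz < VM_KMEM_SIZE_MIN)	/* clamp to the optional floor */
		sz = VM_KMEM_SIZE_MIN;
#endif
#ifdef VM_KMEM_SIZE_MAX
	if (sz > VM_KMEM_SIZE_MAX)	/* clamp to the optional ceiling */
		sz = VM_KMEM_SIZE_MAX;
#endif
	return (sz);
}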