Index: head/sys/boot/ia64/Makefile.inc =================================================================== --- head/sys/boot/ia64/Makefile.inc (revision 110210) +++ head/sys/boot/ia64/Makefile.inc (revision 110211) @@ -1,4 +1,4 @@ # $FreeBSD$ # Options used when building standalone components -CFLAGS+= -ffreestanding +CFLAGS+= -ffreestanding -fshort-wchar -Wformat Index: head/sys/boot/ia64/libski/Makefile =================================================================== --- head/sys/boot/ia64/libski/Makefile (revision 110210) +++ head/sys/boot/ia64/libski/Makefile (revision 110211) @@ -1,35 +1,36 @@ # $FreeBSD$ LIB= ski INTERNALLIB= true SRCS= skiconsole.c time.c copy.c devicename.c module.c exit.c SRCS+= delay.c skifs.c elf_freebsd.c bootinfo.c ssc.c +SRCS+= acpi_stub.c efi_stub.c pal_stub.s sal_stub.c CFLAGS+= -ffreestanding -fpic -g CFLAGS+= -I${.CURDIR}/../include CFLAGS+= -I${.CURDIR}/../include/${MACHINE_ARCH} CFLAGS+= -I${.CURDIR}/../../../../lib/libstand/ CFLAGS+= -I${.CURDIR}/../../efi/include CFLAGS+= -I${.CURDIR}/../../efi/include/${MACHINE_ARCH} # Pick up the bootstrap header for some interface items CFLAGS+= -I${.CURDIR}/../../common -I${.CURDIR}/../../.. -I. .if ${MACHINE_ARCH} == "powerpc" CFLAGS+= -msoft-float .endif .ifdef(BOOT_DISK_DEBUG) # Make the disk code more talkative CFLAGS+= -DDISK_DEBUG .endif machine: ln -sf ${.CURDIR}/../../../${MACHINE_ARCH}/include machine CLEANFILES+= machine .include beforedepend ${OBJS}: machine Index: head/sys/boot/ia64/libski/acpi_stub.c =================================================================== --- head/sys/boot/ia64/libski/acpi_stub.c (nonexistent) +++ head/sys/boot/ia64/libski/acpi_stub.c (revision 110211) @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2003 Marcel Moolenaar + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include + +#define APIC_IO_SAPIC 6 +#define APIC_LOCAL_SAPIC 7 + +#pragma pack(1) + +typedef struct /* LOCAL SAPIC */ +{ + APIC_HEADER Header; + UINT8 ProcessorId; /* ACPI processor id */ + UINT8 LocalSapicId; /* Processor local SAPIC id */ + UINT8 LocalSapicEid; /* Processor local SAPIC eid */ + UINT8 Reserved[3]; + UINT32 ProcessorEnabled: 1; + UINT32 FlagsReserved: 31; +} LOCAL_SAPIC; + +typedef struct /* IO SAPIC */ +{ + APIC_HEADER Header; + UINT8 IoSapicId; /* I/O SAPIC ID */ + UINT8 Reserved; /* reserved - must be zero */ + UINT32 Vector; /* interrupt base */ + UINT64 IoSapicAddress; /* SAPIC's physical address */ +} IO_SAPIC; + +/* + */ + +struct { + APIC_TABLE Header; + LOCAL_SAPIC cpu0; + LOCAL_SAPIC cpu1; + LOCAL_SAPIC cpu2; + LOCAL_SAPIC cpu3; + IO_SAPIC sapic; +} apic = { + /* Header. */ + { + { + APIC_SIG, /* Signature. 
*/ + sizeof(apic), /* Length of table. */ + 0, /* ACPI minor revision. */ + 0, /* XXX checksum. */ + "FBSD", /* OEM Id. */ + "SKI", /* OEM table Id. */ + 0, /* OEM revision. */ + "FBSD", /* ASL compiler Id. */ + 0 /* ASL revision. */ + }, + 0xfee00000, + }, + /* cpu0. */ + { + { + APIC_LOCAL_SAPIC, /* Type. */ + sizeof(apic.cpu0) /* Length. */ + }, + 0, /* ACPI processor id */ + 0, /* Processor local SAPIC id */ + 0, /* Processor local SAPIC eid */ + { 0, 0, 0 }, + 1, /* FL: Enabled. */ + }, + /* cpu1. */ + { + { + APIC_LOCAL_SAPIC, /* Type. */ + sizeof(apic.cpu1) /* Length. */ + }, + 1, /* ACPI processor id */ + 0, /* Processor local SAPIC id */ + 1, /* Processor local SAPIC eid */ + { 0, 0, 0 }, + 1, /* FL: Enabled. */ + }, + /* cpu2. */ + { + { + APIC_LOCAL_SAPIC, /* Type. */ + sizeof(apic.cpu2) /* Length. */ + }, + 2, /* ACPI processor id */ + 1, /* Processor local SAPIC id */ + 0, /* Processor local SAPIC eid */ + { 0, 0, 0 }, + 0, /* FL: Enabled. */ + }, + /* cpu3. */ + { + { + APIC_LOCAL_SAPIC, /* Type. */ + sizeof(apic.cpu3) /* Length. */ + }, + 3, /* ACPI processor id */ + 1, /* Processor local SAPIC id */ + 1, /* Processor local SAPIC eid */ + { 0, 0, 0 }, + 0, /* FL: Enabled. */ + }, + /* sapic. */ + { + { + APIC_IO_SAPIC, /* Type. */ + sizeof(apic.sapic) /* Length. */ + }, + 4, /* IO SAPIC id. */ + 0, + 16, /* Interrupt base. */ + 0xfec00000 /* IO SAPIC address. */ + } +}; + +struct { + ACPI_TABLE_HEADER Header; + UINT64 apic_tbl; +} xsdt = { + { + XSDT_SIG, /* Signature. */ + sizeof(xsdt), /* Length of table. */ + 0, /* ACPI minor revision. */ + 0, /* XXX checksum. */ + "FBSD", /* OEM Id. */ + "SKI", /* OEM table Id. */ + 0, /* OEM revision. */ + "FBSD", /* ASL compiler Id. */ + 0 /* ASL revision. */ + }, + NULL /* XXX APIC table address. */ +}; + +RSDP_DESCRIPTOR acpi_root = { + RSDP_SIG, + 0, /* XXX checksum. */ + "FBSD", + 2, /* ACPI Rev 2.0. */ + NULL, + sizeof(xsdt), /* XSDT length. */ + NULL, /* XXX PA of XSDT. */ + 0, /* XXX Extended checksum. 
*/ +}; + +static void +cksum(void *addr, int sz, UINT8 *sum) +{ + UINT8 *p, s; + + p = addr; + s = 0; + while (sz--) + s += *p++; + *sum = -s; +} + +void +acpi_stub_init(void) +{ + acpi_root.XsdtPhysicalAddress = (UINT64)&xsdt; + cksum(&acpi_root, 20, &acpi_root.Checksum); + cksum(&acpi_root, sizeof(acpi_root), &acpi_root.ExtendedChecksum); + + xsdt.apic_tbl = (UINT32)&apic; + cksum(&xsdt, sizeof(xsdt), &xsdt.Header.Checksum); +} Property changes on: head/sys/boot/ia64/libski/acpi_stub.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/boot/ia64/libski/bootinfo.c =================================================================== --- head/sys/boot/ia64/libski/bootinfo.c (revision 110210) +++ head/sys/boot/ia64/libski/bootinfo.c (revision 110211) @@ -1,344 +1,322 @@ /*- * Copyright (c) 1998 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include -#include - #include "bootstrap.h" /* * Return a 'boothowto' value corresponding to the kernel arguments in * (kargs) and any relevant environment variables. */ static struct { const char *ev; int mask; } howto_names[] = { {"boot_askname", RB_ASKNAME}, {"boot_cdrom", RB_CDROM}, {"boot_userconfig", RB_CONFIG}, {"boot_ddb", RB_KDB}, {"boot_gdb", RB_GDB}, {"boot_single", RB_SINGLE}, {"boot_verbose", RB_VERBOSE}, {"boot_multicons", RB_MULTIPLE}, {"boot_serial", RB_SERIAL}, {NULL, 0} }; extern char *ski_fmtdev(void *vdev); +extern int ski_init_stubs(struct bootinfo *); int bi_getboothowto(char *kargs) { char *cp; int howto; int active; int i; /* Parse kargs */ howto = 0; if (kargs != NULL) { cp = kargs; active = 0; while (*cp != 0) { if (!active && (*cp == '-')) { active = 1; } else if (active) switch (*cp) { case 'a': howto |= RB_ASKNAME; break; case 'c': howto |= RB_CONFIG; break; case 'C': howto |= RB_CDROM; break; case 'd': howto |= RB_KDB; break; case 'm': howto |= RB_MUTE; break; case 'g': howto |= RB_GDB; break; case 'h': howto |= RB_SERIAL; break; case 'r': howto |= RB_DFLTROOT; break; case 's': howto |= RB_SINGLE; break; case 'v': howto |= RB_VERBOSE; break; default: active = 0; break; } cp++; } } /* get equivalents from the environment */ for (i = 0; howto_names[i].ev != NULL; i++) if (getenv(howto_names[i].ev) != NULL) howto |= howto_names[i].mask; if 
(!strcmp(getenv("console"), "comconsole")) howto |= RB_SERIAL; if (!strcmp(getenv("console"), "nullconsole")) howto |= RB_MUTE; return(howto); } /* * Copy the environment into the load area starting at (addr). * Each variable is formatted as =, with a single nul * separating each variable, and a double nul terminating the environment. */ vm_offset_t bi_copyenv(vm_offset_t addr) { struct env_var *ep; /* traverse the environment */ for (ep = environ; ep != NULL; ep = ep->ev_next) { ski_copyin(ep->ev_name, addr, strlen(ep->ev_name)); addr += strlen(ep->ev_name); ski_copyin("=", addr, 1); addr++; if (ep->ev_value != NULL) { ski_copyin(ep->ev_value, addr, strlen(ep->ev_value)); addr += strlen(ep->ev_value); } ski_copyin("", addr, 1); addr++; } ski_copyin("", addr, 1); addr++; return(addr); } /* * Copy module-related data into the load area, where it can be * used as a directory for loaded modules. * * Module data is presented in a self-describing format. Each datum * is preceded by a 32-bit identifier and a 32-bit size field. 
* * Currently, the following data are saved: * * MOD_NAME (variable) module name (string) * MOD_TYPE (variable) module type (string) * MOD_ARGS (variable) module parameters (string) * MOD_ADDR sizeof(vm_offset_t) module load address * MOD_SIZE sizeof(size_t) module size * MOD_METADATA (variable) type-specific metadata */ #define COPY32(v, a) { \ u_int32_t x = (v); \ ski_copyin(&x, a, sizeof(x)); \ a += sizeof(x); \ } #define MOD_STR(t, a, s) { \ COPY32(t, a); \ COPY32(strlen(s) + 1, a); \ ski_copyin(s, a, strlen(s) + 1); \ a += roundup(strlen(s) + 1, sizeof(u_int64_t));\ } #define MOD_NAME(a, s) MOD_STR(MODINFO_NAME, a, s) #define MOD_TYPE(a, s) MOD_STR(MODINFO_TYPE, a, s) #define MOD_ARGS(a, s) MOD_STR(MODINFO_ARGS, a, s) #define MOD_VAR(t, a, s) { \ COPY32(t, a); \ COPY32(sizeof(s), a); \ ski_copyin(&s, a, sizeof(s)); \ a += roundup(sizeof(s), sizeof(u_int64_t)); \ } #define MOD_ADDR(a, s) MOD_VAR(MODINFO_ADDR, a, s) #define MOD_SIZE(a, s) MOD_VAR(MODINFO_SIZE, a, s) #define MOD_METADATA(a, mm) { \ COPY32(MODINFO_METADATA | mm->md_type, a); \ COPY32(mm->md_size, a); \ ski_copyin(mm->md_data, a, mm->md_size); \ a += roundup(mm->md_size, sizeof(u_int64_t));\ } #define MOD_END(a) { \ COPY32(MODINFO_END, a); \ COPY32(0, a); \ } vm_offset_t bi_copymodules(vm_offset_t addr) { struct preloaded_file *fp; struct file_metadata *md; /* start with the first module on the list, should be the kernel */ for (fp = file_findfile(NULL, NULL); fp != NULL; fp = fp->f_next) { MOD_NAME(addr, fp->f_name); /* this field must come first */ MOD_TYPE(addr, fp->f_type); if (fp->f_args) MOD_ARGS(addr, fp->f_args); MOD_ADDR(addr, fp->f_addr); MOD_SIZE(addr, fp->f_size); for (md = fp->f_metadata; md != NULL; md = md->md_next) if (!(md->md_type & MODINFOMD_NOCOPY)) MOD_METADATA(addr, md); } MOD_END(addr); return(addr); } /* * Load the information expected by an alpha kernel. * * - The kernel environment is copied into kernel space. * - Module metadata are formatted and placed in kernel space. 
*/ int bi_load(struct bootinfo *bi, struct preloaded_file *fp, char *args) { char *rootdevname; struct ski_devdesc *rootdev; struct preloaded_file *xp; vm_offset_t addr, bootinfo_addr; u_int pad; char *kernelname; vm_offset_t ssym, esym; struct file_metadata *md; - EFI_MEMORY_DESCRIPTOR *memp; /* * Version 1 bootinfo. */ bi->bi_magic = BOOTINFO_MAGIC; bi->bi_version = 1; /* * Calculate boothowto. */ bi->bi_boothowto = bi_getboothowto(fp->f_args); /* * Allow the environment variable 'rootdev' to override the supplied device * This should perhaps go to MI code and/or have $rootdev tested/set by * MI code before launching the kernel. */ rootdevname = getenv("rootdev"); ski_getdev((void **)(&rootdev), rootdevname, NULL); if (rootdev == NULL) { /* bad $rootdev/$currdev */ printf("can't determine root device\n"); return(EINVAL); } /* Try reading the /etc/fstab file to select the root device */ getrootmount(ski_fmtdev((void *)rootdev)); free(rootdev); ssym = esym = 0; if ((md = file_findmetadata(fp, MODINFOMD_SSYM)) != NULL) ssym = *((vm_offset_t *)&(md->md_data)); if ((md = file_findmetadata(fp, MODINFOMD_ESYM)) != NULL) esym = *((vm_offset_t *)&(md->md_data)); if (ssym == 0 || esym == 0) ssym = esym = 0; /* sanity */ bi->bi_symtab = ssym; bi->bi_esymtab = esym; /* find the last module in the chain */ addr = 0; for (xp = file_findfile(NULL, NULL); xp != NULL; xp = xp->f_next) { if (addr < (xp->f_addr + xp->f_size)) addr = xp->f_addr + xp->f_size; } /* pad to a page boundary */ pad = (u_int)addr & PAGE_MASK; if (pad != 0) { pad = PAGE_SIZE - pad; addr += pad; } /* copy our environment */ bi->bi_envp = addr; addr = bi_copyenv(addr); /* pad to a page boundary */ pad = (u_int)addr & PAGE_MASK; if (pad != 0) { pad = PAGE_SIZE - pad; addr += pad; } /* copy module list and metadata */ bi->bi_modulep = addr; addr = bi_copymodules(addr); /* all done copying stuff in, save end of loaded object space */ bi->bi_kernend = addr; - /* Describe the SKI memory map. 
*/ - bi->bi_memmap = (u_int64_t)(bi + 1); - bi->bi_memmap_size = 2 * sizeof(EFI_MEMORY_DESCRIPTOR); - bi->bi_memdesc_size = sizeof(EFI_MEMORY_DESCRIPTOR); - bi->bi_memdesc_version = 1; - - memp = (EFI_MEMORY_DESCRIPTOR *) bi->bi_memmap; - - memp[0].Type = EfiConventionalMemory; - memp[0].PhysicalStart = 2L*1024*1024; - memp[0].VirtualStart = 0; - memp[0].NumberOfPages = (64L*1024*1024)>>12; - memp[0].Attribute = EFI_MEMORY_WB; - - memp[1].Type = EfiMemoryMappedIOPortSpace; - memp[1].PhysicalStart = 0xffffc000000; - memp[1].VirtualStart = 0; - memp[1].NumberOfPages = (64L*1024*1024)>>12; - memp[1].Attribute = EFI_MEMORY_UC; - - return(0); + return (ski_init_stubs(bi)); } Index: head/sys/boot/ia64/libski/efi_stub.c =================================================================== --- head/sys/boot/ia64/libski/efi_stub.c (nonexistent) +++ head/sys/boot/ia64/libski/efi_stub.c (revision 110211) @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2003 Marcel Moolenaar + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include +#include +#include "libski.h" + +extern void acpi_root; +extern void sal_systab; + +extern void acpi_stub_init(void); +extern void sal_stub_init(void); + +EFI_CONFIGURATION_TABLE efi_cfgtab[] = { + { ACPI_20_TABLE_GUID, &acpi_root }, + { SAL_SYSTEM_TABLE_GUID, &sal_systab } +}; + + +static EFI_STATUS GetTime(EFI_TIME *, EFI_TIME_CAPABILITIES *); +static EFI_STATUS SetTime(EFI_TIME *); +static EFI_STATUS GetWakeupTime(BOOLEAN *, BOOLEAN *, EFI_TIME *); +static EFI_STATUS SetWakeupTime(BOOLEAN, EFI_TIME *); + +static EFI_STATUS SetVirtualAddressMap(UINTN, UINTN, UINT32, + EFI_MEMORY_DESCRIPTOR*); +static EFI_STATUS ConvertPointer(UINTN, VOID **); + +static EFI_STATUS GetVariable(CHAR16 *, EFI_GUID *, UINT32 *, UINTN *, VOID *); +static EFI_STATUS GetNextVariableName(UINTN *, CHAR16 *, EFI_GUID *); +static EFI_STATUS SetVariable(CHAR16 *, EFI_GUID *, UINT32, UINTN, VOID *); + +static EFI_STATUS GetNextHighMonotonicCount(UINT32 *); +static EFI_STATUS ResetSystem(EFI_RESET_TYPE, EFI_STATUS, UINTN, CHAR16 *); + +EFI_RUNTIME_SERVICES efi_rttab = { + /* Header. 
*/ + { EFI_RUNTIME_SERVICES_SIGNATURE, + EFI_RUNTIME_SERVICES_REVISION, + 0, /* XXX HeaderSize */ + 0, /* XXX CRC32 */ + }, + + /* Time services */ + GetTime, + SetTime, + GetWakeupTime, + SetWakeupTime, + + /* Virtual memory services */ + SetVirtualAddressMap, + ConvertPointer, + + /* Variable services */ + GetVariable, + GetNextVariableName, + SetVariable, + + /* Misc */ + GetNextHighMonotonicCount, + ResetSystem +}; + +EFI_SYSTEM_TABLE efi_systab = { + /* Header. */ + { EFI_SYSTEM_TABLE_SIGNATURE, + EFI_SYSTEM_TABLE_REVISION, + 0, /* XXX HeaderSize */ + 0, /* XXX CRC32 */ + }, + + /* Firmware info. */ + L"FreeBSD", 0, + + /* Console stuff. */ + NULL, NULL, + NULL, NULL, + NULL, NULL, + + /* Services (runtime first). */ + &efi_rttab, + NULL, + + /* Configuration tables. */ + sizeof(efi_cfgtab)/sizeof(EFI_CONFIGURATION_TABLE), + efi_cfgtab +}; + +static EFI_STATUS +unsupported(const char *func) +{ + printf("EFI: %s not supported\n", func); + return (EFI_UNSUPPORTED); +} + +static EFI_STATUS +GetTime(EFI_TIME *time, EFI_TIME_CAPABILITIES *caps) +{ + UINT32 comps[8]; + + ssc((UINT64)comps, 0, 0, 0, SSC_GET_RTC); + time->Year = comps[0] + 1900; + time->Month = comps[1] + 1; + time->Day = comps[2]; + time->Hour = comps[3]; + time->Minute = comps[4]; + time->Second = comps[5]; + time->Pad1 = time->Pad2 = 0; + time->Nanosecond = 0; + time->TimeZone = 0; + time->Daylight = 0; + return (EFI_SUCCESS); +} + +static EFI_STATUS +SetTime(EFI_TIME *time) +{ + return (EFI_SUCCESS); +} + +static EFI_STATUS +GetWakeupTime(BOOLEAN *enabled, BOOLEAN *pending, EFI_TIME *time) +{ + return (unsupported(__func__)); +} + +static EFI_STATUS +SetWakeupTime(BOOLEAN enable, EFI_TIME *time) +{ + return (unsupported(__func__)); +} + +static void +Reloc(void *addr, UINT64 delta) +{ + UINT64 **fpp = addr; + + *fpp[0] += delta; + *fpp[1] += delta; + *fpp += delta >> 3; +} + +static EFI_STATUS +SetVirtualAddressMap(UINTN mapsz, UINTN descsz, UINT32 version, + EFI_MEMORY_DESCRIPTOR *memmap) +{ + 
UINT64 delta; + + delta = memmap->VirtualStart - memmap->PhysicalStart; + Reloc(&efi_rttab.GetTime, delta); + Reloc(&efi_rttab.SetTime, delta); + return (EFI_SUCCESS); /* Hah... */ +} + +static EFI_STATUS +ConvertPointer(UINTN debug, VOID **addr) +{ + return (unsupported(__func__)); +} + +static EFI_STATUS +GetVariable(CHAR16 *name, EFI_GUID *vendor, UINT32 *attrs, UINTN *datasz, + VOID *data) +{ + return (unsupported(__func__)); +} + +static EFI_STATUS +GetNextVariableName(UINTN *namesz, CHAR16 *name, EFI_GUID *vendor) +{ + return (unsupported(__func__)); +} + +static EFI_STATUS +SetVariable(CHAR16 *name, EFI_GUID *vendor, UINT32 attrs, UINTN datasz, + VOID *data) +{ + return (unsupported(__func__)); +} + +static EFI_STATUS +GetNextHighMonotonicCount(UINT32 *high) +{ + static UINT32 counter = 0; + + *high = counter++; + return (EFI_SUCCESS); +} + +static EFI_STATUS +ResetSystem(EFI_RESET_TYPE type, EFI_STATUS status, UINTN datasz, + CHAR16 *data) +{ + return (unsupported(__func__)); +} + +int +ski_init_stubs(struct bootinfo *bi) +{ + EFI_MEMORY_DESCRIPTOR *memp; + + /* Describe the SKI memory map. 
*/ + bi->bi_memmap = (u_int64_t)(bi + 1); + bi->bi_memmap_size = 4 * sizeof(EFI_MEMORY_DESCRIPTOR); + bi->bi_memdesc_size = sizeof(EFI_MEMORY_DESCRIPTOR); + bi->bi_memdesc_version = 1; + + memp = (EFI_MEMORY_DESCRIPTOR *)bi->bi_memmap; + + memp[0].Type = EfiPalCode; + memp[0].PhysicalStart = 0x100000; + memp[0].VirtualStart = 0; + memp[0].NumberOfPages = (4L*1024*1024)>>12; + memp[0].Attribute = EFI_MEMORY_WB | EFI_MEMORY_RUNTIME; + + memp[1].Type = EfiConventionalMemory; + memp[1].PhysicalStart = 5L*1024*1024; + memp[1].VirtualStart = 0; + memp[1].NumberOfPages = (128L*1024*1024)>>12; + memp[1].Attribute = EFI_MEMORY_WB; + + memp[2].Type = EfiConventionalMemory; + memp[2].PhysicalStart = 4L*1024*1024*1024; + memp[2].VirtualStart = 0; + memp[2].NumberOfPages = (64L*1024*1024)>>12; + memp[2].Attribute = EFI_MEMORY_WB; + + memp[3].Type = EfiMemoryMappedIOPortSpace; + memp[3].PhysicalStart = 0xffffc000000; + memp[3].VirtualStart = 0; + memp[3].NumberOfPages = (64L*1024*1024)>>12; + memp[3].Attribute = EFI_MEMORY_UC; + + bi->bi_systab = (u_int64_t)&efi_systab; + + sal_stub_init(); + acpi_stub_init(); + + return (0); +} Property changes on: head/sys/boot/ia64/libski/efi_stub.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/boot/ia64/libski/elf_freebsd.c =================================================================== --- head/sys/boot/ia64/libski/elf_freebsd.c (revision 110210) +++ head/sys/boot/ia64/libski/elf_freebsd.c (revision 110211) @@ -1,203 +1,205 @@ /* $FreeBSD$ */ /* $NetBSD: loadfile.c,v 1.10 1998/06/25 06:45:46 ross Exp $ */ /*- * Copyright (c) 1997 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, * NASA Ames Research Center. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Ralph Campbell. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)boot.c 8.1 (Berkeley) 6/10/93 */ #include #include #include #include #include #include #include #include #include "bootstrap.h" #include "libski.h" #define _KERNEL static int elf_exec(struct preloaded_file *amp); struct file_format ia64_elf = { elf_loadfile, elf_exec }; #define PTE_MA_WB 0 #define PTE_MA_UC 4 #define PTE_MA_UCE 5 #define PTE_MA_WC 6 #define PTE_MA_NATPAGE 7 #define PTE_PL_KERN 0 #define PTE_PL_USER 3 #define PTE_AR_R 0 #define PTE_AR_RX 1 #define PTE_AR_RW 2 #define PTE_AR_RWX 3 #define PTE_AR_R_RW 4 #define PTE_AR_RX_RWX 5 #define PTE_AR_RWX_RW 6 #define PTE_AR_X_RX 7 /* * A short-format VHPT entry. Also matches the TLB insertion format. */ struct ia64_pte { u_int64_t pte_p :1; /* bits 0..0 */ u_int64_t pte_rv1 :1; /* bits 1..1 */ u_int64_t pte_ma :3; /* bits 2..4 */ u_int64_t pte_a :1; /* bits 5..5 */ u_int64_t pte_d :1; /* bits 6..6 */ u_int64_t pte_pl :2; /* bits 7..8 */ u_int64_t pte_ar :3; /* bits 9..11 */ u_int64_t pte_ppn :38; /* bits 12..49 */ u_int64_t pte_rv2 :2; /* bits 50..51 */ u_int64_t pte_ed :1; /* bits 52..52 */ u_int64_t pte_ig :11; /* bits 53..63 */ }; +static struct bootinfo bootinfo; + void enter_kernel(const char* filename, u_int64_t start, struct bootinfo *bi) { printf("Entering %s at 0x%lx...\n", filename, start); while (*filename == '/') filename++; ssc(0, (u_int64_t) filename, 0, 0, SSC_LOAD_SYMBOLS); __asm __volatile("mov cr.ipsr=%0" :: "r"(IA64_PSR_IC | IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT | IA64_PSR_BN)); __asm __volatile("mov cr.iip=%0" :: "r"(start)); __asm __volatile("mov cr.ifs=r0;;"); __asm __volatile("mov r8=%0" :: "r" (bi)); __asm __volatile("rfi;;"); } static int elf_exec(struct preloaded_file *fp) { struct file_metadata *md; Elf_Ehdr *hdr; struct ia64_pte pte; struct bootinfo *bi; if ((md = file_findmetadata(fp, MODINFOMD_ELFHDR)) == NULL) return(EFTYPE); /* XXX actually EFUCKUP */ hdr = (Elf_Ehdr *)&(md->md_data); /* * Ugly hack, similar to linux. 
Dump the bootinfo into a * special page reserved in the link map. */ - bi = (struct bootinfo *) 0x508000; + bi = &bootinfo; bzero(bi, sizeof(struct bootinfo)); bi_load(bi, fp); /* * Region 6 is direct mapped UC and region 7 is direct mapped * WC. The details of this is controlled by the Alt {I,D}TLB * handlers. Here we just make sure that they have the largest * possible page size to minimise TLB usage. */ ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2)); ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2)); bzero(&pte, sizeof(pte)); pte.pte_p = 1; pte.pte_ma = PTE_MA_WB; pte.pte_a = 1; pte.pte_d = 1; pte.pte_pl = PTE_PL_KERN; pte.pte_ar = PTE_AR_RWX; pte.pte_ppn = 0; __asm __volatile("mov cr.ifa=%0" :: "r"(IA64_RR_BASE(7))); __asm __volatile("mov cr.itir=%0" :: "r"(28 << 2)); __asm __volatile("srlz.i;;"); __asm __volatile("itr.i itr[%0]=%1;;" :: "r"(0), "r"(*(u_int64_t*)&pte)); __asm __volatile("srlz.i;;"); __asm __volatile("itr.d dtr[%0]=%1;;" :: "r"(0), "r"(*(u_int64_t*)&pte)); __asm __volatile("srlz.i;;"); enter_kernel(fp->f_name, hdr->e_entry, bi); } Index: head/sys/boot/ia64/libski/libski.h =================================================================== --- head/sys/boot/ia64/libski/libski.h (revision 110210) +++ head/sys/boot/ia64/libski/libski.h (revision 110211) @@ -1,95 +1,96 @@ /*- * Copyright (c) 2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * SKI fully-qualified device descriptor */ struct ski_devdesc { struct devsw *d_dev; int d_type; #define DEVT_NONE 0 #define DEVT_DISK 1 #define DEVT_NET 2 union { struct { int unit; int slice; int partition; } skidisk; struct { int unit; /* XXX net layer lives over these? */ } netif; } d_kind; }; extern int ski_getdev(void **vdev, const char *devspec, const char **path); extern char *ski_fmtdev(void *vdev); extern int ski_setcurrdev(struct env_var *ev, int flags, void *value); #define MAXDEV 31 /* maximum number of distinct devices */ typedef unsigned long physaddr_t; /* exported devices XXX rename? */ extern struct devsw skifs_dev; extern struct devsw ski_disk; extern struct netif_driver ski_net; /* Wrapper over SKI filesystems. 
*/ extern struct fs_ops ski_fsops; /* this is in startup code */ extern void delay(int); extern void reboot(void); extern ssize_t ski_copyin(const void *src, vm_offset_t dest, size_t len); extern ssize_t ski_copyout(const vm_offset_t src, void *dest, size_t len); extern ssize_t ski_readin(int fd, vm_offset_t dest, size_t len); extern int ski_boot(void); extern int ski_autoload(void); struct bootinfo; struct preloaded_file; extern int bi_load(struct bootinfo *, struct preloaded_file *); #define SSC_CONSOLE_INIT 20 #define SSC_GETCHAR 21 #define SSC_PUTCHAR 31 #define SSC_OPEN 50 #define SSC_CLOSE 51 #define SSC_READ 52 #define SSC_WRITE 53 #define SSC_GET_COMPLETION 54 #define SSC_WAIT_COMPLETION 55 #define SSC_GET_RTC 65 #define SSC_EXIT 66 #define SSC_LOAD_SYMBOLS 69 +#define SSC_SAL_SET_VECTORS 120 u_int64_t ssc(u_int64_t in0, u_int64_t in1, u_int64_t in2, u_int64_t in3, int which); Index: head/sys/boot/ia64/libski/pal_stub.S =================================================================== --- head/sys/boot/ia64/libski/pal_stub.S (nonexistent) +++ head/sys/boot/ia64/libski/pal_stub.S (revision 110211) @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2003 Marcel Moolenaar + * Copyright (c) 2001 Doug Rabson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include + + .text +ENTRY(PalProc, 0) + cmp.eq p6,p0=6,r28 // PAL_PTCE_INFO +(p6) br.cond.dptk pal_ptce_info + ;; + cmp.eq p6,p0=8,r28 // PAL_VM_SUMMARY +(p6) br.cond.dptk pal_vm_summary + ;; + cmp.eq p6,p0=14,r28 // PAL_FREQ_RATIOS +(p6) br.cond.dptk pal_freq_ratios + ;; + mov r15=66 // EXIT + break.i 0x80000 // SSC + ;; +pal_ptce_info: + mov r8=0 + mov r9=0 // base + movl r10=0x0000000100000001 // loop counts (outer|inner) + mov r11=0x0000000000000000 // loop strides (outer|inner) + br.sptk b0 +pal_vm_summary: + mov r8=0 + movl r9=(8<<40)|(8<<32) // VM info 1 + mov r10=(18<<8)|(41<<0) // VM info 2 + mov r11=0 + br.sptk b0 +pal_freq_ratios: + mov r8=0 + movl r9=0x0000000B00000002 // processor ratio 11/2 + movl r10=0x0000000100000001 // bus ratio 1/1 + movl r11=0x0000000B00000002 // ITC ratio 11/2 + br.sptk b0 +END(PalProc) Property changes on: head/sys/boot/ia64/libski/pal_stub.S ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/boot/ia64/libski/pal_stub.s =================================================================== --- 
head/sys/boot/ia64/libski/pal_stub.s (nonexistent) +++ head/sys/boot/ia64/libski/pal_stub.s (revision 110211) @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2003 Marcel Moolenaar + * Copyright (c) 2001 Doug Rabson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#include + + .text +ENTRY(PalProc, 0) + cmp.eq p6,p0=6,r28 // PAL_PTCE_INFO +(p6) br.cond.dptk pal_ptce_info + ;; + cmp.eq p6,p0=8,r28 // PAL_VM_SUMMARY +(p6) br.cond.dptk pal_vm_summary + ;; + cmp.eq p6,p0=14,r28 // PAL_FREQ_RATIOS +(p6) br.cond.dptk pal_freq_ratios + ;; + mov r15=66 // EXIT + break.i 0x80000 // SSC + ;; +pal_ptce_info: + mov r8=0 + mov r9=0 // base + movl r10=0x0000000100000001 // loop counts (outer|inner) + mov r11=0x0000000000000000 // loop strides (outer|inner) + br.sptk b0 +pal_vm_summary: + mov r8=0 + movl r9=(8<<40)|(8<<32) // VM info 1 + mov r10=(18<<8)|(41<<0) // VM info 2 + mov r11=0 + br.sptk b0 +pal_freq_ratios: + mov r8=0 + movl r9=0x0000000B00000002 // processor ratio 11/2 + movl r10=0x0000000100000001 // bus ratio 1/1 + movl r11=0x0000000B00000002 // ITC ratio 11/2 + br.sptk b0 +END(PalProc) Property changes on: head/sys/boot/ia64/libski/pal_stub.s ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/boot/ia64/libski/sal_stub.c =================================================================== --- head/sys/boot/ia64/libski/sal_stub.c (nonexistent) +++ head/sys/boot/ia64/libski/sal_stub.c (revision 110211) @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2003 Marcel Moolenaar + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include +#include +#include "libski.h" + +extern void PalProc(void); +static sal_entry_t SalProc; + +struct { + struct sal_system_table header; + struct sal_entrypoint_descriptor entry; + struct sal_ap_wakeup_descriptor wakeup; +} sal_systab = { + /* Header. */ + { + SAL_SIGNATURE, + sizeof(sal_systab), + { 00, 03 }, /* Revision 3.0. */ + 2, /* Number of decsriptors. */ + 0, /* XXX checksum. */ + { 0 }, + { 00, 00 }, /* XXX SAL_A version. */ + { 00, 00 }, /* XXX SAL_B version. */ + "FreeBSD", + "Ski loader", + { 0 } + }, + /* Entrypoint. */ + { + 0, /* Type=entrypoint descr. */ + { 0 }, + 0, /* XXX PalProc. */ + 0, /* XXX SalProc. */ + 0, /* XXX SalProc GP. */ + { 0 } + }, + /* AP wakeup. */ + { + 5, /* Type=AP wakeup descr. */ + 0, /* External interrupt. */ + { 0 }, + 255 /* Wakeup vector. 
*/ + } +}; + +static inline void +puts(const char *s) +{ + s = (const char *)((7UL << 61) | (u_long)s); + while (*s) + ski_cons_putchar(*s++); +} + +static struct ia64_sal_result +SalProc(u_int64_t a1, u_int64_t a2, u_int64_t a3, u_int64_t a4, u_int64_t a5, + u_int64_t a6, u_int64_t a7, u_int64_t a8) +{ + struct ia64_sal_result res; + + res.sal_status = -3; + res.sal_result[0] = 0; + res.sal_result[1] = 0; + res.sal_result[2] = 0; + + if (a1 == SAL_FREQ_BASE) { + res.sal_status = 0; + res.sal_result[0] = 133338184; + } else if (a1 == SAL_SET_VECTORS) { + /* XXX unofficial SSC function. */ + ssc(a2, a3, a4, a5, SSC_SAL_SET_VECTORS); + } else if (a1 != SAL_GET_STATE_INFO_SIZE) { + puts("SAL: unimplemented function called\n"); + } + + return (res); +} + +void +sal_stub_init(void) +{ + struct ia64_fdesc *fd; + + fd = (void*)PalProc; + sal_systab.entry.sale_pal_proc = fd->func; + fd = (void*)SalProc; + sal_systab.entry.sale_sal_proc = fd->func; + sal_systab.entry.sale_sal_gp = fd->gp; +} Property changes on: head/sys/boot/ia64/libski/sal_stub.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/boot/ia64/libski/ssc.c =================================================================== --- head/sys/boot/ia64/libski/ssc.c (revision 110210) +++ head/sys/boot/ia64/libski/ssc.c (revision 110211) @@ -1,42 +1,52 @@ /*- * Copyright (c) 2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include "libski.h" +/* + * Ugh... Work around a bug in the Linux version of ski for SSC_GET_RTC. The + * PSR.dt register is not preserved properly and causes further memory + * references to be done without translation. All we need to do is preserve + * PSR.dt across the SSC call. We do this by saving and restoring psr.l + * completely. + */ u_int64_t ssc(u_int64_t in0, u_int64_t in1, u_int64_t in2, u_int64_t in3, int which) { + register u_int64_t psr; register u_int64_t ret0 __asm("r8"); + __asm __volatile("mov %0=psr;;" : "=r"(psr)); __asm __volatile("mov r15=%1\n\t" - "break 0x80000" + "break 0x80000;;" : "=r"(ret0) : "r"(which), "r"(in0), "r"(in1), "r"(in2), "r"(in3)); + __asm __volatile("mov psr.l=%0;; srlz.d" :: "r"(psr)); return ret0; } Index: head/sys/boot/ia64/ski/acpi_stub.c =================================================================== --- head/sys/boot/ia64/ski/acpi_stub.c (nonexistent) +++ head/sys/boot/ia64/ski/acpi_stub.c (revision 110211) @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2003 Marcel Moolenaar + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * $FreeBSD$ + */ + +#include + +#define APIC_IO_SAPIC 6 +#define APIC_LOCAL_SAPIC 7 + +#pragma pack(1) + +typedef struct /* LOCAL SAPIC */ +{ + APIC_HEADER Header; + UINT8 ProcessorId; /* ACPI processor id */ + UINT8 LocalSapicId; /* Processor local SAPIC id */ + UINT8 LocalSapicEid; /* Processor local SAPIC eid */ + UINT8 Reserved[3]; + UINT32 ProcessorEnabled: 1; + UINT32 FlagsReserved: 31; +} LOCAL_SAPIC; + +typedef struct /* IO SAPIC */ +{ + APIC_HEADER Header; + UINT8 IoSapicId; /* I/O SAPIC ID */ + UINT8 Reserved; /* reserved - must be zero */ + UINT32 Vector; /* interrupt base */ + UINT64 IoSapicAddress; /* SAPIC's physical address */ +} IO_SAPIC; + +/* + */ + +struct { + APIC_TABLE Header; + LOCAL_SAPIC cpu0; + LOCAL_SAPIC cpu1; + LOCAL_SAPIC cpu2; + LOCAL_SAPIC cpu3; + IO_SAPIC sapic; +} apic = { + /* Header. */ + { + { + APIC_SIG, /* Signature. */ + sizeof(apic), /* Length of table. */ + 0, /* ACPI minor revision. */ + 0, /* XXX checksum. */ + "FBSD", /* OEM Id. */ + "SKI", /* OEM table Id. */ + 0, /* OEM revision. */ + "FBSD", /* ASL compiler Id. */ + 0 /* ASL revision. */ + }, + 0xfee00000, + }, + /* cpu0. */ + { + { + APIC_LOCAL_SAPIC, /* Type. */ + sizeof(apic.cpu0) /* Length. */ + }, + 0, /* ACPI processor id */ + 0, /* Processor local SAPIC id */ + 0, /* Processor local SAPIC eid */ + { 0, 0, 0 }, + 1, /* FL: Enabled. */ + }, + /* cpu1. */ + { + { + APIC_LOCAL_SAPIC, /* Type. */ + sizeof(apic.cpu1) /* Length. */ + }, + 1, /* ACPI processor id */ + 0, /* Processor local SAPIC id */ + 1, /* Processor local SAPIC eid */ + { 0, 0, 0 }, + 1, /* FL: Enabled. */ + }, + /* cpu2. */ + { + { + APIC_LOCAL_SAPIC, /* Type. */ + sizeof(apic.cpu2) /* Length. */ + }, + 2, /* ACPI processor id */ + 1, /* Processor local SAPIC id */ + 0, /* Processor local SAPIC eid */ + { 0, 0, 0 }, + 0, /* FL: Enabled. */ + }, + /* cpu3. */ + { + { + APIC_LOCAL_SAPIC, /* Type. */ + sizeof(apic.cpu3) /* Length. 
*/ + }, + 3, /* ACPI processor id */ + 1, /* Processor local SAPIC id */ + 1, /* Processor local SAPIC eid */ + { 0, 0, 0 }, + 0, /* FL: Enabled. */ + }, + /* sapic. */ + { + { + APIC_IO_SAPIC, /* Type. */ + sizeof(apic.sapic) /* Length. */ + }, + 4, /* IO SAPIC id. */ + 0, + 16, /* Interrupt base. */ + 0xfec00000 /* IO SAPIC address. */ + } +}; + +struct { + ACPI_TABLE_HEADER Header; + UINT64 apic_tbl; +} xsdt = { + { + XSDT_SIG, /* Signature. */ + sizeof(xsdt), /* Length of table. */ + 0, /* ACPI minor revision. */ + 0, /* XXX checksum. */ + "FBSD", /* OEM Id. */ + "SKI", /* OEM table Id. */ + 0, /* OEM revision. */ + "FBSD", /* ASL compiler Id. */ + 0 /* ASL revision. */ + }, + NULL /* XXX APIC table address. */ +}; + +RSDP_DESCRIPTOR acpi_root = { + RSDP_SIG, + 0, /* XXX checksum. */ + "FBSD", + 2, /* ACPI Rev 2.0. */ + NULL, + sizeof(xsdt), /* XSDT length. */ + NULL, /* XXX PA of XSDT. */ + 0, /* XXX Extended checksum. */ +}; + +static void +cksum(void *addr, int sz, UINT8 *sum) +{ + UINT8 *p, s; + + p = addr; + s = 0; + while (sz--) + s += *p++; + *sum = -s; +} + +void +acpi_stub_init(void) +{ + acpi_root.XsdtPhysicalAddress = (UINT64)&xsdt; + cksum(&acpi_root, 20, &acpi_root.Checksum); + cksum(&acpi_root, sizeof(acpi_root), &acpi_root.ExtendedChecksum); + + xsdt.apic_tbl = (UINT32)&apic; + cksum(&xsdt, sizeof(xsdt), &xsdt.Header.Checksum); +} Property changes on: head/sys/boot/ia64/ski/acpi_stub.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/boot/ia64/ski/bootinfo.c =================================================================== --- head/sys/boot/ia64/ski/bootinfo.c (revision 110210) +++ head/sys/boot/ia64/ski/bootinfo.c (revision 110211) @@ -1,344 +1,322 @@ /*- * Copyright (c) 1998 Michael Smith * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include -#include - #include "bootstrap.h" /* * Return a 'boothowto' value corresponding to the kernel arguments in * (kargs) and any relevant environment variables. 
*/ static struct { const char *ev; int mask; } howto_names[] = { {"boot_askname", RB_ASKNAME}, {"boot_cdrom", RB_CDROM}, {"boot_userconfig", RB_CONFIG}, {"boot_ddb", RB_KDB}, {"boot_gdb", RB_GDB}, {"boot_single", RB_SINGLE}, {"boot_verbose", RB_VERBOSE}, {"boot_multicons", RB_MULTIPLE}, {"boot_serial", RB_SERIAL}, {NULL, 0} }; extern char *ski_fmtdev(void *vdev); +extern int ski_init_stubs(struct bootinfo *); int bi_getboothowto(char *kargs) { char *cp; int howto; int active; int i; /* Parse kargs */ howto = 0; if (kargs != NULL) { cp = kargs; active = 0; while (*cp != 0) { if (!active && (*cp == '-')) { active = 1; } else if (active) switch (*cp) { case 'a': howto |= RB_ASKNAME; break; case 'c': howto |= RB_CONFIG; break; case 'C': howto |= RB_CDROM; break; case 'd': howto |= RB_KDB; break; case 'm': howto |= RB_MUTE; break; case 'g': howto |= RB_GDB; break; case 'h': howto |= RB_SERIAL; break; case 'r': howto |= RB_DFLTROOT; break; case 's': howto |= RB_SINGLE; break; case 'v': howto |= RB_VERBOSE; break; default: active = 0; break; } cp++; } } /* get equivalents from the environment */ for (i = 0; howto_names[i].ev != NULL; i++) if (getenv(howto_names[i].ev) != NULL) howto |= howto_names[i].mask; if (!strcmp(getenv("console"), "comconsole")) howto |= RB_SERIAL; if (!strcmp(getenv("console"), "nullconsole")) howto |= RB_MUTE; return(howto); } /* * Copy the environment into the load area starting at (addr). * Each variable is formatted as =, with a single nul * separating each variable, and a double nul terminating the environment. 
*/ vm_offset_t bi_copyenv(vm_offset_t addr) { struct env_var *ep; /* traverse the environment */ for (ep = environ; ep != NULL; ep = ep->ev_next) { ski_copyin(ep->ev_name, addr, strlen(ep->ev_name)); addr += strlen(ep->ev_name); ski_copyin("=", addr, 1); addr++; if (ep->ev_value != NULL) { ski_copyin(ep->ev_value, addr, strlen(ep->ev_value)); addr += strlen(ep->ev_value); } ski_copyin("", addr, 1); addr++; } ski_copyin("", addr, 1); addr++; return(addr); } /* * Copy module-related data into the load area, where it can be * used as a directory for loaded modules. * * Module data is presented in a self-describing format. Each datum * is preceded by a 32-bit identifier and a 32-bit size field. * * Currently, the following data are saved: * * MOD_NAME (variable) module name (string) * MOD_TYPE (variable) module type (string) * MOD_ARGS (variable) module parameters (string) * MOD_ADDR sizeof(vm_offset_t) module load address * MOD_SIZE sizeof(size_t) module size * MOD_METADATA (variable) type-specific metadata */ #define COPY32(v, a) { \ u_int32_t x = (v); \ ski_copyin(&x, a, sizeof(x)); \ a += sizeof(x); \ } #define MOD_STR(t, a, s) { \ COPY32(t, a); \ COPY32(strlen(s) + 1, a); \ ski_copyin(s, a, strlen(s) + 1); \ a += roundup(strlen(s) + 1, sizeof(u_int64_t));\ } #define MOD_NAME(a, s) MOD_STR(MODINFO_NAME, a, s) #define MOD_TYPE(a, s) MOD_STR(MODINFO_TYPE, a, s) #define MOD_ARGS(a, s) MOD_STR(MODINFO_ARGS, a, s) #define MOD_VAR(t, a, s) { \ COPY32(t, a); \ COPY32(sizeof(s), a); \ ski_copyin(&s, a, sizeof(s)); \ a += roundup(sizeof(s), sizeof(u_int64_t)); \ } #define MOD_ADDR(a, s) MOD_VAR(MODINFO_ADDR, a, s) #define MOD_SIZE(a, s) MOD_VAR(MODINFO_SIZE, a, s) #define MOD_METADATA(a, mm) { \ COPY32(MODINFO_METADATA | mm->md_type, a); \ COPY32(mm->md_size, a); \ ski_copyin(mm->md_data, a, mm->md_size); \ a += roundup(mm->md_size, sizeof(u_int64_t));\ } #define MOD_END(a) { \ COPY32(MODINFO_END, a); \ COPY32(0, a); \ } vm_offset_t bi_copymodules(vm_offset_t addr) { struct 
preloaded_file *fp; struct file_metadata *md; /* start with the first module on the list, should be the kernel */ for (fp = file_findfile(NULL, NULL); fp != NULL; fp = fp->f_next) { MOD_NAME(addr, fp->f_name); /* this field must come first */ MOD_TYPE(addr, fp->f_type); if (fp->f_args) MOD_ARGS(addr, fp->f_args); MOD_ADDR(addr, fp->f_addr); MOD_SIZE(addr, fp->f_size); for (md = fp->f_metadata; md != NULL; md = md->md_next) if (!(md->md_type & MODINFOMD_NOCOPY)) MOD_METADATA(addr, md); } MOD_END(addr); return(addr); } /* * Load the information expected by an alpha kernel. * * - The kernel environment is copied into kernel space. * - Module metadata are formatted and placed in kernel space. */ int bi_load(struct bootinfo *bi, struct preloaded_file *fp, char *args) { char *rootdevname; struct ski_devdesc *rootdev; struct preloaded_file *xp; vm_offset_t addr, bootinfo_addr; u_int pad; char *kernelname; vm_offset_t ssym, esym; struct file_metadata *md; - EFI_MEMORY_DESCRIPTOR *memp; /* * Version 1 bootinfo. */ bi->bi_magic = BOOTINFO_MAGIC; bi->bi_version = 1; /* * Calculate boothowto. */ bi->bi_boothowto = bi_getboothowto(fp->f_args); /* * Allow the environment variable 'rootdev' to override the supplied device * This should perhaps go to MI code and/or have $rootdev tested/set by * MI code before launching the kernel. 
*/ rootdevname = getenv("rootdev"); ski_getdev((void **)(&rootdev), rootdevname, NULL); if (rootdev == NULL) { /* bad $rootdev/$currdev */ printf("can't determine root device\n"); return(EINVAL); } /* Try reading the /etc/fstab file to select the root device */ getrootmount(ski_fmtdev((void *)rootdev)); free(rootdev); ssym = esym = 0; if ((md = file_findmetadata(fp, MODINFOMD_SSYM)) != NULL) ssym = *((vm_offset_t *)&(md->md_data)); if ((md = file_findmetadata(fp, MODINFOMD_ESYM)) != NULL) esym = *((vm_offset_t *)&(md->md_data)); if (ssym == 0 || esym == 0) ssym = esym = 0; /* sanity */ bi->bi_symtab = ssym; bi->bi_esymtab = esym; /* find the last module in the chain */ addr = 0; for (xp = file_findfile(NULL, NULL); xp != NULL; xp = xp->f_next) { if (addr < (xp->f_addr + xp->f_size)) addr = xp->f_addr + xp->f_size; } /* pad to a page boundary */ pad = (u_int)addr & PAGE_MASK; if (pad != 0) { pad = PAGE_SIZE - pad; addr += pad; } /* copy our environment */ bi->bi_envp = addr; addr = bi_copyenv(addr); /* pad to a page boundary */ pad = (u_int)addr & PAGE_MASK; if (pad != 0) { pad = PAGE_SIZE - pad; addr += pad; } /* copy module list and metadata */ bi->bi_modulep = addr; addr = bi_copymodules(addr); /* all done copying stuff in, save end of loaded object space */ bi->bi_kernend = addr; - /* Describe the SKI memory map. 
*/ - bi->bi_memmap = (u_int64_t)(bi + 1); - bi->bi_memmap_size = 2 * sizeof(EFI_MEMORY_DESCRIPTOR); - bi->bi_memdesc_size = sizeof(EFI_MEMORY_DESCRIPTOR); - bi->bi_memdesc_version = 1; - - memp = (EFI_MEMORY_DESCRIPTOR *) bi->bi_memmap; - - memp[0].Type = EfiConventionalMemory; - memp[0].PhysicalStart = 2L*1024*1024; - memp[0].VirtualStart = 0; - memp[0].NumberOfPages = (64L*1024*1024)>>12; - memp[0].Attribute = EFI_MEMORY_WB; - - memp[1].Type = EfiMemoryMappedIOPortSpace; - memp[1].PhysicalStart = 0xffffc000000; - memp[1].VirtualStart = 0; - memp[1].NumberOfPages = (64L*1024*1024)>>12; - memp[1].Attribute = EFI_MEMORY_UC; - - return(0); + return (ski_init_stubs(bi)); } Index: head/sys/boot/ia64/ski/efi_stub.c =================================================================== --- head/sys/boot/ia64/ski/efi_stub.c (nonexistent) +++ head/sys/boot/ia64/ski/efi_stub.c (revision 110211) @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2003 Marcel Moolenaar + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include +#include +#include "libski.h" + +extern void acpi_root; +extern void sal_systab; + +extern void acpi_stub_init(void); +extern void sal_stub_init(void); + +EFI_CONFIGURATION_TABLE efi_cfgtab[] = { + { ACPI_20_TABLE_GUID, &acpi_root }, + { SAL_SYSTEM_TABLE_GUID, &sal_systab } +}; + + +static EFI_STATUS GetTime(EFI_TIME *, EFI_TIME_CAPABILITIES *); +static EFI_STATUS SetTime(EFI_TIME *); +static EFI_STATUS GetWakeupTime(BOOLEAN *, BOOLEAN *, EFI_TIME *); +static EFI_STATUS SetWakeupTime(BOOLEAN, EFI_TIME *); + +static EFI_STATUS SetVirtualAddressMap(UINTN, UINTN, UINT32, + EFI_MEMORY_DESCRIPTOR*); +static EFI_STATUS ConvertPointer(UINTN, VOID **); + +static EFI_STATUS GetVariable(CHAR16 *, EFI_GUID *, UINT32 *, UINTN *, VOID *); +static EFI_STATUS GetNextVariableName(UINTN *, CHAR16 *, EFI_GUID *); +static EFI_STATUS SetVariable(CHAR16 *, EFI_GUID *, UINT32, UINTN, VOID *); + +static EFI_STATUS GetNextHighMonotonicCount(UINT32 *); +static EFI_STATUS ResetSystem(EFI_RESET_TYPE, EFI_STATUS, UINTN, CHAR16 *); + +EFI_RUNTIME_SERVICES efi_rttab = { + /* Header. 
*/ + { EFI_RUNTIME_SERVICES_SIGNATURE, + EFI_RUNTIME_SERVICES_REVISION, + 0, /* XXX HeaderSize */ + 0, /* XXX CRC32 */ + }, + + /* Time services */ + GetTime, + SetTime, + GetWakeupTime, + SetWakeupTime, + + /* Virtual memory services */ + SetVirtualAddressMap, + ConvertPointer, + + /* Variable services */ + GetVariable, + GetNextVariableName, + SetVariable, + + /* Misc */ + GetNextHighMonotonicCount, + ResetSystem +}; + +EFI_SYSTEM_TABLE efi_systab = { + /* Header. */ + { EFI_SYSTEM_TABLE_SIGNATURE, + EFI_SYSTEM_TABLE_REVISION, + 0, /* XXX HeaderSize */ + 0, /* XXX CRC32 */ + }, + + /* Firmware info. */ + L"FreeBSD", 0, + + /* Console stuff. */ + NULL, NULL, + NULL, NULL, + NULL, NULL, + + /* Services (runtime first). */ + &efi_rttab, + NULL, + + /* Configuration tables. */ + sizeof(efi_cfgtab)/sizeof(EFI_CONFIGURATION_TABLE), + efi_cfgtab +}; + +static EFI_STATUS +unsupported(const char *func) +{ + printf("EFI: %s not supported\n", func); + return (EFI_UNSUPPORTED); +} + +static EFI_STATUS +GetTime(EFI_TIME *time, EFI_TIME_CAPABILITIES *caps) +{ + UINT32 comps[8]; + + ssc((UINT64)comps, 0, 0, 0, SSC_GET_RTC); + time->Year = comps[0] + 1900; + time->Month = comps[1] + 1; + time->Day = comps[2]; + time->Hour = comps[3]; + time->Minute = comps[4]; + time->Second = comps[5]; + time->Pad1 = time->Pad2 = 0; + time->Nanosecond = 0; + time->TimeZone = 0; + time->Daylight = 0; + return (EFI_SUCCESS); +} + +static EFI_STATUS +SetTime(EFI_TIME *time) +{ + return (EFI_SUCCESS); +} + +static EFI_STATUS +GetWakeupTime(BOOLEAN *enabled, BOOLEAN *pending, EFI_TIME *time) +{ + return (unsupported(__func__)); +} + +static EFI_STATUS +SetWakeupTime(BOOLEAN enable, EFI_TIME *time) +{ + return (unsupported(__func__)); +} + +static void +Reloc(void *addr, UINT64 delta) +{ + UINT64 **fpp = addr; + + *fpp[0] += delta; + *fpp[1] += delta; + *fpp += delta >> 3; +} + +static EFI_STATUS +SetVirtualAddressMap(UINTN mapsz, UINTN descsz, UINT32 version, + EFI_MEMORY_DESCRIPTOR *memmap) +{ + 
UINT64 delta; + + delta = memmap->VirtualStart - memmap->PhysicalStart; + Reloc(&efi_rttab.GetTime, delta); + Reloc(&efi_rttab.SetTime, delta); + return (EFI_SUCCESS); /* Hah... */ +} + +static EFI_STATUS +ConvertPointer(UINTN debug, VOID **addr) +{ + return (unsupported(__func__)); +} + +static EFI_STATUS +GetVariable(CHAR16 *name, EFI_GUID *vendor, UINT32 *attrs, UINTN *datasz, + VOID *data) +{ + return (unsupported(__func__)); +} + +static EFI_STATUS +GetNextVariableName(UINTN *namesz, CHAR16 *name, EFI_GUID *vendor) +{ + return (unsupported(__func__)); +} + +static EFI_STATUS +SetVariable(CHAR16 *name, EFI_GUID *vendor, UINT32 attrs, UINTN datasz, + VOID *data) +{ + return (unsupported(__func__)); +} + +static EFI_STATUS +GetNextHighMonotonicCount(UINT32 *high) +{ + static UINT32 counter = 0; + + *high = counter++; + return (EFI_SUCCESS); +} + +static EFI_STATUS +ResetSystem(EFI_RESET_TYPE type, EFI_STATUS status, UINTN datasz, + CHAR16 *data) +{ + return (unsupported(__func__)); +} + +int +ski_init_stubs(struct bootinfo *bi) +{ + EFI_MEMORY_DESCRIPTOR *memp; + + /* Describe the SKI memory map. 
*/ + bi->bi_memmap = (u_int64_t)(bi + 1); + bi->bi_memmap_size = 4 * sizeof(EFI_MEMORY_DESCRIPTOR); + bi->bi_memdesc_size = sizeof(EFI_MEMORY_DESCRIPTOR); + bi->bi_memdesc_version = 1; + + memp = (EFI_MEMORY_DESCRIPTOR *)bi->bi_memmap; + + memp[0].Type = EfiPalCode; + memp[0].PhysicalStart = 0x100000; + memp[0].VirtualStart = 0; + memp[0].NumberOfPages = (4L*1024*1024)>>12; + memp[0].Attribute = EFI_MEMORY_WB | EFI_MEMORY_RUNTIME; + + memp[1].Type = EfiConventionalMemory; + memp[1].PhysicalStart = 5L*1024*1024; + memp[1].VirtualStart = 0; + memp[1].NumberOfPages = (128L*1024*1024)>>12; + memp[1].Attribute = EFI_MEMORY_WB; + + memp[2].Type = EfiConventionalMemory; + memp[2].PhysicalStart = 4L*1024*1024*1024; + memp[2].VirtualStart = 0; + memp[2].NumberOfPages = (64L*1024*1024)>>12; + memp[2].Attribute = EFI_MEMORY_WB; + + memp[3].Type = EfiMemoryMappedIOPortSpace; + memp[3].PhysicalStart = 0xffffc000000; + memp[3].VirtualStart = 0; + memp[3].NumberOfPages = (64L*1024*1024)>>12; + memp[3].Attribute = EFI_MEMORY_UC; + + bi->bi_systab = (u_int64_t)&efi_systab; + + sal_stub_init(); + acpi_stub_init(); + + return (0); +} Property changes on: head/sys/boot/ia64/ski/efi_stub.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/boot/ia64/ski/elf_freebsd.c =================================================================== --- head/sys/boot/ia64/ski/elf_freebsd.c (revision 110210) +++ head/sys/boot/ia64/ski/elf_freebsd.c (revision 110211) @@ -1,203 +1,205 @@ /* $FreeBSD$ */ /* $NetBSD: loadfile.c,v 1.10 1998/06/25 06:45:46 ross Exp $ */ /*- * Copyright (c) 1997 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, * NASA Ames Research Center. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the NetBSD * Foundation, Inc. and its contributors. * 4. Neither the name of The NetBSD Foundation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Copyright (c) 1992, 1993 * The Regents of the University of California. All rights reserved. * * This code is derived from software contributed to Berkeley by * Ralph Campbell. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * @(#)boot.c 8.1 (Berkeley) 6/10/93 */ #include #include #include #include #include #include #include #include #include "bootstrap.h" #include "libski.h" #define _KERNEL static int elf_exec(struct preloaded_file *amp); struct file_format ia64_elf = { elf_loadfile, elf_exec }; #define PTE_MA_WB 0 #define PTE_MA_UC 4 #define PTE_MA_UCE 5 #define PTE_MA_WC 6 #define PTE_MA_NATPAGE 7 #define PTE_PL_KERN 0 #define PTE_PL_USER 3 #define PTE_AR_R 0 #define PTE_AR_RX 1 #define PTE_AR_RW 2 #define PTE_AR_RWX 3 #define PTE_AR_R_RW 4 #define PTE_AR_RX_RWX 5 #define PTE_AR_RWX_RW 6 #define PTE_AR_X_RX 7 /* * A short-format VHPT entry. Also matches the TLB insertion format. */ struct ia64_pte { u_int64_t pte_p :1; /* bits 0..0 */ u_int64_t pte_rv1 :1; /* bits 1..1 */ u_int64_t pte_ma :3; /* bits 2..4 */ u_int64_t pte_a :1; /* bits 5..5 */ u_int64_t pte_d :1; /* bits 6..6 */ u_int64_t pte_pl :2; /* bits 7..8 */ u_int64_t pte_ar :3; /* bits 9..11 */ u_int64_t pte_ppn :38; /* bits 12..49 */ u_int64_t pte_rv2 :2; /* bits 50..51 */ u_int64_t pte_ed :1; /* bits 52..52 */ u_int64_t pte_ig :11; /* bits 53..63 */ }; +static struct bootinfo bootinfo; + void enter_kernel(const char* filename, u_int64_t start, struct bootinfo *bi) { printf("Entering %s at 0x%lx...\n", filename, start); while (*filename == '/') filename++; ssc(0, (u_int64_t) filename, 0, 0, SSC_LOAD_SYMBOLS); __asm __volatile("mov cr.ipsr=%0" :: "r"(IA64_PSR_IC | IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_IT | IA64_PSR_BN)); __asm __volatile("mov cr.iip=%0" :: "r"(start)); __asm __volatile("mov cr.ifs=r0;;"); __asm __volatile("mov r8=%0" :: "r" (bi)); __asm __volatile("rfi;;"); } static int elf_exec(struct preloaded_file *fp) { struct file_metadata *md; Elf_Ehdr *hdr; struct ia64_pte pte; struct bootinfo *bi; if ((md = file_findmetadata(fp, MODINFOMD_ELFHDR)) == NULL) return(EFTYPE); /* XXX actually EFUCKUP */ hdr = (Elf_Ehdr *)&(md->md_data); /* * Ugly hack, similar to linux. 
Dump the bootinfo into a * special page reserved in the link map. */ - bi = (struct bootinfo *) 0x508000; + bi = &bootinfo; bzero(bi, sizeof(struct bootinfo)); bi_load(bi, fp); /* * Region 6 is direct mapped UC and region 7 is direct mapped * WC. The details of this is controlled by the Alt {I,D}TLB * handlers. Here we just make sure that they have the largest * possible page size to minimise TLB usage. */ ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2)); ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2)); bzero(&pte, sizeof(pte)); pte.pte_p = 1; pte.pte_ma = PTE_MA_WB; pte.pte_a = 1; pte.pte_d = 1; pte.pte_pl = PTE_PL_KERN; pte.pte_ar = PTE_AR_RWX; pte.pte_ppn = 0; __asm __volatile("mov cr.ifa=%0" :: "r"(IA64_RR_BASE(7))); __asm __volatile("mov cr.itir=%0" :: "r"(28 << 2)); __asm __volatile("srlz.i;;"); __asm __volatile("itr.i itr[%0]=%1;;" :: "r"(0), "r"(*(u_int64_t*)&pte)); __asm __volatile("srlz.i;;"); __asm __volatile("itr.d dtr[%0]=%1;;" :: "r"(0), "r"(*(u_int64_t*)&pte)); __asm __volatile("srlz.i;;"); enter_kernel(fp->f_name, hdr->e_entry, bi); } Index: head/sys/boot/ia64/ski/libski.h =================================================================== --- head/sys/boot/ia64/ski/libski.h (revision 110210) +++ head/sys/boot/ia64/ski/libski.h (revision 110211) @@ -1,95 +1,96 @@ /*- * Copyright (c) 2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * SKI fully-qualified device descriptor */ struct ski_devdesc { struct devsw *d_dev; int d_type; #define DEVT_NONE 0 #define DEVT_DISK 1 #define DEVT_NET 2 union { struct { int unit; int slice; int partition; } skidisk; struct { int unit; /* XXX net layer lives over these? */ } netif; } d_kind; }; extern int ski_getdev(void **vdev, const char *devspec, const char **path); extern char *ski_fmtdev(void *vdev); extern int ski_setcurrdev(struct env_var *ev, int flags, void *value); #define MAXDEV 31 /* maximum number of distinct devices */ typedef unsigned long physaddr_t; /* exported devices XXX rename? */ extern struct devsw skifs_dev; extern struct devsw ski_disk; extern struct netif_driver ski_net; /* Wrapper over SKI filesystems. 
*/ extern struct fs_ops ski_fsops; /* this is in startup code */ extern void delay(int); extern void reboot(void); extern ssize_t ski_copyin(const void *src, vm_offset_t dest, size_t len); extern ssize_t ski_copyout(const vm_offset_t src, void *dest, size_t len); extern ssize_t ski_readin(int fd, vm_offset_t dest, size_t len); extern int ski_boot(void); extern int ski_autoload(void); struct bootinfo; struct preloaded_file; extern int bi_load(struct bootinfo *, struct preloaded_file *); #define SSC_CONSOLE_INIT 20 #define SSC_GETCHAR 21 #define SSC_PUTCHAR 31 #define SSC_OPEN 50 #define SSC_CLOSE 51 #define SSC_READ 52 #define SSC_WRITE 53 #define SSC_GET_COMPLETION 54 #define SSC_WAIT_COMPLETION 55 #define SSC_GET_RTC 65 #define SSC_EXIT 66 #define SSC_LOAD_SYMBOLS 69 +#define SSC_SAL_SET_VECTORS 120 u_int64_t ssc(u_int64_t in0, u_int64_t in1, u_int64_t in2, u_int64_t in3, int which); Index: head/sys/boot/ia64/ski/pal_stub.S =================================================================== --- head/sys/boot/ia64/ski/pal_stub.S (nonexistent) +++ head/sys/boot/ia64/ski/pal_stub.S (revision 110211) @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2003 Marcel Moolenaar + * Copyright (c) 2001 Doug Rabson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include + + .text +ENTRY(PalProc, 0) + cmp.eq p6,p0=6,r28 // PAL_PTCE_INFO +(p6) br.cond.dptk pal_ptce_info + ;; + cmp.eq p6,p0=8,r28 // PAL_VM_SUMMARY +(p6) br.cond.dptk pal_vm_summary + ;; + cmp.eq p6,p0=14,r28 // PAL_FREQ_RATIOS +(p6) br.cond.dptk pal_freq_ratios + ;; + mov r15=66 // EXIT + break.i 0x80000 // SSC + ;; +pal_ptce_info: + mov r8=0 + mov r9=0 // base + movl r10=0x0000000100000001 // loop counts (outer|inner) + mov r11=0x0000000000000000 // loop strides (outer|inner) + br.sptk b0 +pal_vm_summary: + mov r8=0 + movl r9=(8<<40)|(8<<32) // VM info 1 + mov r10=(18<<8)|(41<<0) // VM info 2 + mov r11=0 + br.sptk b0 +pal_freq_ratios: + mov r8=0 + movl r9=0x0000000B00000002 // processor ratio 11/2 + movl r10=0x0000000100000001 // bus ratio 1/1 + movl r11=0x0000000B00000002 // ITC ratio 11/2 + br.sptk b0 +END(PalProc) Property changes on: head/sys/boot/ia64/ski/pal_stub.S ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/boot/ia64/ski/sal_stub.c =================================================================== --- head/sys/boot/ia64/ski/sal_stub.c 
(nonexistent) +++ head/sys/boot/ia64/ski/sal_stub.c (revision 110211) @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2003 Marcel Moolenaar + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#include +#include +#include +#include +#include "libski.h" + +extern void PalProc(void); +static sal_entry_t SalProc; + +struct { + struct sal_system_table header; + struct sal_entrypoint_descriptor entry; + struct sal_ap_wakeup_descriptor wakeup; +} sal_systab = { + /* Header. */ + { + SAL_SIGNATURE, + sizeof(sal_systab), + { 00, 03 }, /* Revision 3.0. */ + 2, /* Number of descriptors. */ + 0, /* XXX checksum. */ + { 0 }, + { 00, 00 }, /* XXX SAL_A version. */ + { 00, 00 }, /* XXX SAL_B version. 
*/ + "FreeBSD", + "Ski loader", + { 0 } + }, + /* Entrypoint. */ + { + 0, /* Type=entrypoint descr. */ + { 0 }, + 0, /* XXX PalProc. */ + 0, /* XXX SalProc. */ + 0, /* XXX SalProc GP. */ + { 0 } + }, + /* AP wakeup. */ + { + 5, /* Type=AP wakeup descr. */ + 0, /* External interrupt. */ + { 0 }, + 255 /* Wakeup vector. */ + } +}; + +static inline void +puts(const char *s) +{ + s = (const char *)((7UL << 61) | (u_long)s); + while (*s) + ski_cons_putchar(*s++); +} + +static struct ia64_sal_result +SalProc(u_int64_t a1, u_int64_t a2, u_int64_t a3, u_int64_t a4, u_int64_t a5, + u_int64_t a6, u_int64_t a7, u_int64_t a8) +{ + struct ia64_sal_result res; + + res.sal_status = -3; + res.sal_result[0] = 0; + res.sal_result[1] = 0; + res.sal_result[2] = 0; + + if (a1 == SAL_FREQ_BASE) { + res.sal_status = 0; + res.sal_result[0] = 133338184; + } else if (a1 == SAL_SET_VECTORS) { + /* XXX unofficial SSC function. */ + ssc(a2, a3, a4, a5, SSC_SAL_SET_VECTORS); + } else if (a1 != SAL_GET_STATE_INFO_SIZE) { + puts("SAL: unimplemented function called\n"); + } + + return (res); +} + +void +sal_stub_init(void) +{ + struct ia64_fdesc *fd; + + fd = (void*)PalProc; + sal_systab.entry.sale_pal_proc = fd->func; + fd = (void*)SalProc; + sal_systab.entry.sale_sal_proc = fd->func; + sal_systab.entry.sale_sal_gp = fd->gp; +} Property changes on: head/sys/boot/ia64/ski/sal_stub.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/sys/boot/ia64/ski/ssc.c =================================================================== --- head/sys/boot/ia64/ski/ssc.c (revision 110210) +++ head/sys/boot/ia64/ski/ssc.c (revision 110211) @@ -1,42 +1,52 @@ /*- * Copyright (c) 2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include "libski.h" +/* + * Ugh... Work around a bug in the Linux version of ski for SSC_GET_RTC. The + * PSR.dt register is not preserved properly and causes further memory + * references to be done without translation. All we need to do is preserve + * PSR.dt across the SSC call. We do this by saving and restoring psr.l + * completely. 
+ */ u_int64_t ssc(u_int64_t in0, u_int64_t in1, u_int64_t in2, u_int64_t in3, int which) { + register u_int64_t psr; register u_int64_t ret0 __asm("r8"); + __asm __volatile("mov %0=psr;;" : "=r"(psr)); __asm __volatile("mov r15=%1\n\t" - "break 0x80000" + "break 0x80000;;" : "=r"(ret0) : "r"(which), "r"(in0), "r"(in1), "r"(in2), "r"(in3)); + __asm __volatile("mov psr.l=%0;; srlz.d" :: "r"(psr)); return ret0; } Index: head/sys/conf/files.ia64 =================================================================== --- head/sys/conf/files.ia64 (revision 110210) +++ head/sys/conf/files.ia64 (revision 110211) @@ -1,121 +1,119 @@ # This file tells config what files go into building a kernel, # files marked standard are always included. # # $FreeBSD$ # # The long compile-with and dependency lines are required because of # limitations in config: backslash-newline doesn't work in strings, and # dependency lines other than the first are silently ignored. # # font8x16.o optional std8x16font \ compile-with "uudecode < /usr/share/syscons/fonts/${STD8X16FONT}-8x16.fnt && file2c 'unsigned char font_16[16*256] = {' '};' < ${STD8X16FONT}-8x16 > font8x16.c && ${CC} -c ${CFLAGS} font8x16.c" \ no-implicit-rule before-depend \ clean "${STD8X16FONT}-8x16 font8x16.c" # atkbdmap.h optional atkbd_dflt_keymap \ compile-with "/usr/sbin/kbdcontrol -L ${ATKBD_DFLT_KEYMAP} | sed -e 's/^static keymap_t.* = /static keymap_t key_map = /' -e 's/^static accentmap_t.* = /static accentmap_t accent_map = /' > atkbdmap.h" \ no-obj no-implicit-rule before-depend \ clean "atkbdmap.h" # ia64/acpica/acpi_machdep.c optional acpi ia64/acpica/acpi_wakeup.c optional acpi ia64/acpica/OsdEnvironment.c optional acpi ia64/acpica/madt.c optional acpi ia64/ia32/ia32_misc.c optional ia32 ia64/ia32/ia32_sysent.c optional ia32 ia64/ia32/ia32_sysvec.c optional ia32 ia64/ia64/ia64-gdbstub.c optional ddb ia64/ia64/autoconf.c standard ia64/ia64/busdma_machdep.c standard ia64/ia64/clock.c standard ia64/ia64/clock_if.m standard 
ia64/ia64/critical.c standard ia64/ia64/db_disasm.c optional ddb ia64/ia64/db_interface.c optional ddb ia64/ia64/db_trace.c optional ddb ia64/ia64/dump_machdep.c standard ia64/ia64/efi.c standard ia64/ia64/eficlock.c standard ia64/ia64/elf_machdep.c standard ia64/ia64/exception.s standard ia64/ia64/in_cksum.c optional inet ia64/ia64/interrupt.c standard # locore.s needs to be handled in Makefile to put it first. Otherwise it's # now normal. # ia64/ia64/locore.s standard ia64/ia64/machdep.c standard ia64/ia64/mca.c standard ia64/ia64/mem.c standard ia64/ia64/mp_machdep.c optional smp ia64/ia64/nexus.c standard ia64/ia64/pal.s standard -ia64/ia64/pal_stub.s optional ski ia64/ia64/pmap.c standard ia64/ia64/sal.c standard ia64/ia64/sapic.c standard ia64/ia64/setjmp.s standard -ia64/ia64/ski.c optional ski ia64/ia64/support.s standard ia64/ia64/ssc.c optional ski ia64/ia64/sscdisk.c optional ski ia64/ia64/swtch.s standard ia64/ia64/sys_machdep.c standard ia64/ia64/trap.c standard ia64/ia64/unaligned.c standard ia64/ia64/unwind.c standard ia64/ia64/vm_machdep.c standard ia64/isa/isa.c optional isa ia64/isa/isa_dma.c optional isa ia64/pci/pci_cfgreg.c optional pci crypto/blowfish/bf_enc.c optional ipsec ipsec_esp crypto/des/des_enc.c optional ipsec ipsec_esp crypto/blowfish/bf_enc.c optional crypto crypto/des/des_enc.c optional crypto dev/advansys/adv_isa.c optional adv isa dev/aic/aic_isa.c optional aic isa dev/fb/fb.c optional fb dev/fb/fb.c optional vga dev/fb/splash.c optional splash dev/fb/vga.c optional vga dev/kbd/atkbd.c optional atkbd dev/kbd/atkbdc.c optional atkbdc dev/kbd/kbd.c optional atkbd dev/kbd/kbd.c optional kbd dev/kbd/kbd.c optional sc dev/kbd/kbd.c optional ukbd dev/sio/sio.c optional sio dev/sio/sio_isa.c optional sio isa dev/syscons/schistory.c optional sc dev/syscons/scmouse.c optional sc dev/syscons/scterm.c optional sc dev/syscons/scterm-dumb.c optional sc dev/syscons/scterm-sc.c optional sc dev/syscons/scvgarndr.c optional sc vga 
dev/syscons/scvidctl.c optional sc dev/syscons/scvtb.c optional sc dev/syscons/syscons.c optional sc dev/syscons/sysmouse.c optional sc geom/geom_bsd.c standard geom/geom_gpt.c standard geom/geom_mbr.c standard isa/atkbd_isa.c optional atkbd isa/atkbdc_isa.c optional atkbdc isa/fd.c optional fdc isa/ppc.c optional ppc isa/psm.c optional psm isa/syscons_isa.c optional sc isa/vga_isa.c optional vga kern/imgact_elf32.c optional ia32 libkern/ia64/bswap16.S standard libkern/ia64/bswap32.S standard libkern/ia64/__divsi3.S standard libkern/ia64/__modsi3.S standard libkern/ia64/__udivsi3.S standard libkern/ia64/__umodsi3.S standard libkern/ia64/__divdi3.S standard libkern/ia64/__moddi3.S standard libkern/ia64/__udivdi3.S standard libkern/ia64/__umoddi3.S standard libkern/bcmp.c standard libkern/ffs.c standard Index: head/sys/ia64/conf/SKI =================================================================== --- head/sys/ia64/conf/SKI (revision 110210) +++ head/sys/ia64/conf/SKI (revision 110211) @@ -1,83 +1,69 @@ # # SKI -- Kernel configuration file for FreeBSD/ia64 running in the HP # SKI simulator # # For more information on this file, please read the handbook section on # Kernel Configuration Files: # # http://www.FreeBSD.org/handbook/kernelconfig-config.html # # The handbook is also available locally in /usr/share/doc/handbook # if you've installed the doc distribution, otherwise always see the # FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the # latest information. # # An exhaustive list of options and more detailed explanations of the # device lines is also present in the ../../i386/conf/NOTES file. # If you are in doubt as to the purpose or necessity of a line, check first # in NOTES. Please note that this is the i386 NOTES, but it still contains # valuable info for ia64 too. 
# # For hardware specific information check HARDWARE.TXT # # $FreeBSD$ machine ia64 cpu ITANIUM ident SKI -maxusers 32 +maxusers 0 -#To statically compile in device wiring instead of /boot/device.hints -#hints "GENERIC.hints" - makeoptions DEBUG=-g #Build kernel with gdb(1) debug symbols -makeoptions NO_CPU_COPTFLAGS=true #Ignore any x86 CPUTYPE +makeoptions NO_MODULES=yes #Ignore any x86 CPUTYPE -options SKI #Support for HP simulator -options SCHED_4BSD #4BSD scheduler -options INET #InterNETworking -#options INET6 #IPv6 communications protocols -options FFS #Berkeley Fast Filesystem -options SOFTUPDATES #Enable FFS soft updates support -options MD_ROOT #MD is a potential root device -options PROCFS #Process filesystem (requires PSEUDOFS) -options PSEUDOFS #Pseudo-filesystem framework options COMPAT_43 #Compatible with BSD 4.3 [KEEP THIS!] options COMPAT_FREEBSD4 -options SCSI_DELAY=2000 #Delay (in ms) before probing SCSI -options KTRACE #ktrace(1) syscall trace support -options SYSVSHM #SYSV-style shared memory -options SYSVMSG #SYSV-style message queues -options SYSVSEM #SYSV-style semaphores -options _KPOSIX_PRIORITY_SCHEDULING #Posix P1003_1B real-time extensions -options CONSPEED=115200 -options BREAK_TO_DEBUGGER #a BREAK on a comconsole goes to - -# Debugging for use in -current options DDB +options FFS #Berkeley Fast Filesystem +options INET #InterNETworking +options INET6 #IPv6 communications protocols options INVARIANTS options INVARIANT_SUPPORT -options WITNESS - options KTR -options KTR_ENTRIES=1024 +options KTRACE #ktrace(1) syscall trace support options KTR_COMPILE="(KTR_INTR|KTR_PROC)" -options KTR_MASK=0 options KTR_CPUMASK=0x3 -#options KTR_VERBOSE +options KTR_ENTRIES=1024 +options KTR_MASK=0 +options KTR_VERBOSE +options MD_ROOT #MD is a potential root device +options PROCFS #Process filesystem (requires PSEUDOFS) +options PSEUDOFS #Pseudo-filesystem framework +options SCHED_4BSD #4BSD scheduler +options SCSI_DELAY=500 #Delay (in ms) before 
probing SCSI +options SKI +options SOFTUPDATES #Enable FFS soft updates support +options SYSVMSG #SYSV-style message queues +options SYSVSEM #SYSV-style semaphores +options SYSVSHM #SYSV-style shared memory +options WITNESS +options _KPOSIX_PRIORITY_SCHEDULING #Posix P1003_1B real-time extensions -# Pseudo devices - the number indicates how many units to allocated. -device random # Entropy device -device loop # Network loopback +device acpi +device bpf # Berkeley packet filter device ether # Ethernet support -device sl # Kernel SLIP -device ppp # Kernel PPP -device tun # Packet tunnel. -device pty # Pseudo-ttys (telnet etc) +device loop # Network loopback device md # Memory "disks" -device gif # IPv6 and IPv4 tunneling -device faith # IPv6-to-IPv4 relaying/(translation) - -# The `bpf' device enables the Berkeley Packet Filter. -# Be aware of the administrative consequences of enabling this! -device bpf #Berkeley packet filter +device pci +device pty # Pseudo-ttys (telnet etc) +device random # Entropy device +device tun # Packet tunnel. Index: head/sys/ia64/ia64/ski.c =================================================================== --- head/sys/ia64/ia64/ski.c (revision 110210) +++ head/sys/ia64/ia64/ski.c (nonexistent) @@ -1,163 +0,0 @@ -/*- - * Copyright (c) 2001 Doug Rabson - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD$ - */ - -/* - * Fake out bits of EFI and SAL when running under SKI. - */ - -#include -#include -#include -#include -#include - -struct ssc_time { - int year; - int month; - int day; - int hour; - int minute; - int second; - int msec; - int wday; -}; - -#define SSC_GET_RTC 65 - -static u_int64_t -ssc(u_int64_t in0, u_int64_t in1, u_int64_t in2, u_int64_t in3, int which) -{ - register u_int64_t ret0 __asm("r8"); - - __asm __volatile("mov r15=%1\n\t" - "break 0x80001" - : "=r"(ret0) - : "r"(which), "r"(in0), "r"(in1), "r"(in2), "r"(in3)); - - /* - * Ugh... Work around a bug in the Linux version of ski for - * SSC_GET_RTC. The PSR.dt register is not preserved properly - * and causes further memory references to be done without - * translation. All we need to do is set PSR.dt again. Note - * that dependency violations do not exist in ski, so we - * don't have to serialize. 
- */ - __asm __volatile("ssm psr.dt"); - - return ret0; -} - -extern u_int64_t ski_fake_pal[]; /* *not* a function decl */ -extern void ia64_ski_init(void); -extern u_int64_t ia64_pal_entry; - -static EFI_STATUS ski_fake_efi_proc(void); -static EFI_STATUS ski_fake_efi_get_time(EFI_TIME *time, - EFI_TIME_CAPABILITIES *caps); - -static EFI_RUNTIME_SERVICES ski_fake_efi = { - { EFI_RUNTIME_SERVICES_SIGNATURE, - EFI_RUNTIME_SERVICES_REVISION, - 0, 0, 0 }, - - (EFI_GET_TIME) ski_fake_efi_get_time, - (EFI_SET_TIME) ski_fake_efi_proc, - (EFI_GET_WAKEUP_TIME) ski_fake_efi_proc, - (EFI_SET_WAKEUP_TIME) ski_fake_efi_proc, - - (EFI_SET_VIRTUAL_ADDRESS_MAP) ski_fake_efi_proc, - (EFI_CONVERT_POINTER) ski_fake_efi_proc, - - (EFI_GET_VARIABLE) ski_fake_efi_proc, - (EFI_GET_NEXT_VARIABLE_NAME) ski_fake_efi_proc, - (EFI_SET_VARIABLE) ski_fake_efi_proc, - - (EFI_GET_NEXT_HIGH_MONO_COUNT) ski_fake_efi_proc, - (EFI_RESET_SYSTEM) ski_fake_efi_proc -}; - -static EFI_STATUS -ski_fake_efi_get_time(EFI_TIME *time, EFI_TIME_CAPABILITIES *caps) -{ - struct ssc_time ssctime; - - ssc(ia64_tpa((vm_offset_t) &ssctime), 0, 0, 0, SSC_GET_RTC); - - time->Second = ssctime.second; - time->Minute = ssctime.minute; - time->Hour = ssctime.hour; - time->Day = ssctime.day; - time->Month = ssctime.month + 1; - time->Year = ssctime.year + 1900; - - return EFI_SUCCESS; -} - -static EFI_STATUS -ski_fake_efi_proc(void) -{ - return EFI_UNSUPPORTED; -} - -static struct ia64_sal_result -ski_fake_sal(u_int64_t a1, u_int64_t a2, u_int64_t a3, u_int64_t a4, - u_int64_t a5, u_int64_t a6, u_int64_t a7, u_int64_t a8) -{ - struct ia64_sal_result res; - - if (a1 == SAL_FREQ_BASE) { - /* - * Fake the values from my SDV. - */ - res.sal_status = 0; - res.sal_result[0] = 133347096; - res.sal_result[1] = 0; - res.sal_result[2] = 0; - return res; - } - - /* - * Return an error for anything we don't care about. 
- */ - res.sal_status = -3; - res.sal_result[0] = 0; - res.sal_result[1] = 0; - res.sal_result[2] = 0; - return res; -} - -void -ia64_ski_init(void) -{ - if (!ia64_running_in_simulator()) - return; - - ia64_efi_runtime = &ski_fake_efi; - ia64_pal_entry = (u_int64_t) ski_fake_pal; - ia64_sal_entry = ski_fake_sal; -} Property changes on: head/sys/ia64/ia64/ski.c ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/ia64/ia64/pal_stub.s =================================================================== --- head/sys/ia64/ia64/pal_stub.s (revision 110210) +++ head/sys/ia64/ia64/pal_stub.s (nonexistent) @@ -1,66 +0,0 @@ -/*- - * Copyright (c) 2001 Doug Rabson - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD$ - */ - -#include -#include - -/* - * Stub for running in simulation. Fakes the values from an SDV. - */ -ENTRY(ski_fake_pal, 0) - - mov r8=-3 // default to return error - - cmp.eq p6,p0=PAL_PTCE_INFO,r28 - ;; -(p6) mov r8=0 -(p6) mov r9=0 -(p6) movl r10=0x100000001 -(p6) mov r11=0 - ;; - cmp.eq p6,p0=PAL_FREQ_RATIOS,r28 - ;; -(p6) mov r8=0 -(p6) movl r9=0xb00000002 // proc 11/1 -(p6) movl r10=0x100000001 // bus 1/1 -(p6) movl r11=0xb00000002 // itc 11/1 - mov r14=PAL_VM_SUMMARY - ;; - cmp.eq p6,p0=r14,r28 - ;; -(p6) mov r8=0 -(p6) movl r9=(8<<40)|(8<<32) -(p6) movl r10=(18<<8)|(41<<0) -(p6) mov r11=0 - ;; - tbit.nz p6,p7=r28,8 // static or stacked? - ;; -(p6) br.ret.sptk.few rp -(p7) br.cond.sptk.few rp - -END(ski_fake_pal) Property changes on: head/sys/ia64/ia64/pal_stub.s ___________________________________________________________________ Deleted: svn:keywords ## -1 +0,0 ## -FreeBSD=%H \ No newline at end of property Index: head/sys/ia64/ia64/autoconf.c =================================================================== --- head/sys/ia64/ia64/autoconf.c (revision 110210) +++ head/sys/ia64/ia64/autoconf.c (revision 110211) @@ -1,118 +1,117 @@ /*- * Copyright (c) 1998 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_bootp.h" #include "opt_isa.h" #include "opt_nfs.h" #include "opt_nfsroot.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void configure(void *); SYSINIT(configure, SI_SUB_CONFIGURE, SI_ORDER_THIRD, configure, NULL) #ifdef BOOTP void bootpc_init(void); #endif #ifdef DEV_ISA #include device_t isa_bus_device = 0; #endif extern int nfs_diskless_valid; /* XXX use include file */ /* * Determine i/o configuration for a machine. */ static void configure(void *dummy) { device_add_child(root_bus, "nexus", 0); root_bus_configure(); /* * Probe ISA devices after everything. */ #ifdef DEV_ISA if (isa_bus_device) isa_probe_children(isa_bus_device); #endif /* * Now we're ready to handle (pending) interrupts. * XXX this is slightly misplaced. 
*/ enable_intr(); cold = 0; } /* * Do legacy root filesystem discovery. This isn't really * needed on the Alpha, which has always used the loader. */ void cpu_rootconf() { #if defined(NFSCLIENT) && defined(NFS_ROOT) int order = 0; #endif #ifdef BOOTP - if (!ia64_running_in_simulator()) - bootpc_init(); + bootpc_init(); #endif #if defined(NFSCLIENT) && defined(NFS_ROOT) #if !defined(BOOTP_NFSROOT) if (nfs_diskless_valid) #endif rootdevnames[order++] = "nfs:"; #endif } SYSINIT(cpu_rootconf, SI_SUB_ROOT_CONF, SI_ORDER_FIRST, cpu_rootconf, NULL) Index: head/sys/ia64/ia64/efi.c =================================================================== --- head/sys/ia64/ia64/efi.c (revision 110210) +++ head/sys/ia64/ia64/efi.c (revision 110211) @@ -1,148 +1,118 @@ /*- * Copyright (c) 2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include EFI_SYSTEM_TABLE *ia64_efi_systab; EFI_RUNTIME_SERVICES *ia64_efi_runtime; u_int64_t ia64_efi_acpi_table; u_int64_t ia64_efi_acpi20_table; extern u_int64_t ia64_call_efi_physical(u_int64_t, u_int64_t, u_int64_t, u_int64_t, u_int64_t, u_int64_t); -static EFI_STATUS fake_efi_proc(void); - -static EFI_RUNTIME_SERVICES fake_efi = { - { EFI_RUNTIME_SERVICES_SIGNATURE, - EFI_RUNTIME_SERVICES_REVISION, - 0, 0, 0 }, - - (EFI_GET_TIME) fake_efi_proc, - (EFI_SET_TIME) fake_efi_proc, - (EFI_GET_WAKEUP_TIME) fake_efi_proc, - (EFI_SET_WAKEUP_TIME) fake_efi_proc, - - (EFI_SET_VIRTUAL_ADDRESS_MAP) fake_efi_proc, - (EFI_CONVERT_POINTER) fake_efi_proc, - - (EFI_GET_VARIABLE) fake_efi_proc, - (EFI_GET_NEXT_VARIABLE_NAME) fake_efi_proc, - (EFI_SET_VARIABLE) fake_efi_proc, - - (EFI_GET_NEXT_HIGH_MONO_COUNT) fake_efi_proc, - (EFI_RESET_SYSTEM) fake_efi_proc -}; - -static EFI_STATUS -fake_efi_proc(void) -{ - return EFI_UNSUPPORTED; -} - void ia64_efi_init(void) { EFI_CONFIGURATION_TABLE *conf; struct sal_system_table *saltab = 0; EFI_RUNTIME_SERVICES *rs; EFI_MEMORY_DESCRIPTOR *md, *mdp; int mdcount, i; EFI_STATUS status; - - ia64_efi_runtime = &fake_efi; if (!bootinfo.bi_systab) { printf("No system table!\n"); return; } + ia64_efi_systab = (EFI_SYSTEM_TABLE *) + IA64_PHYS_TO_RR7(bootinfo.bi_systab); + rs = (EFI_RUNTIME_SERVICES *) + IA64_PHYS_TO_RR7((u_int64_t)ia64_efi_systab->RuntimeServices); + if (!rs) + 
panic("No runtime services!"); + + ia64_efi_runtime = rs; + conf = (EFI_CONFIGURATION_TABLE *) + IA64_PHYS_TO_RR7((u_int64_t)ia64_efi_systab->ConfigurationTable); + if (!conf) + panic("No configuration tables!"); + mdcount = bootinfo.bi_memmap_size / bootinfo.bi_memdesc_size; md = (EFI_MEMORY_DESCRIPTOR *) IA64_PHYS_TO_RR7(bootinfo.bi_memmap); for (i = 0, mdp = md; i < mdcount; i++, mdp = NextMemoryDescriptor(mdp, bootinfo.bi_memdesc_size)) { /* * Relocate runtime memory segments for firmware. */ if (mdp->Attribute & EFI_MEMORY_RUNTIME) { if (mdp->Attribute & EFI_MEMORY_WB) mdp->VirtualStart = IA64_PHYS_TO_RR7(mdp->PhysicalStart); else if (mdp->Attribute & EFI_MEMORY_UC) mdp->VirtualStart = IA64_PHYS_TO_RR6(mdp->PhysicalStart); } } - ia64_efi_systab = (EFI_SYSTEM_TABLE *) - IA64_PHYS_TO_RR7(bootinfo.bi_systab); + status = ia64_call_efi_physical((u_int64_t)rs->SetVirtualAddressMap, + bootinfo.bi_memmap_size, bootinfo.bi_memdesc_size, + bootinfo.bi_memdesc_version, bootinfo.bi_memmap, 0); - rs = (EFI_RUNTIME_SERVICES *) - IA64_PHYS_TO_RR7((u_int64_t) ia64_efi_systab->RuntimeServices); - ia64_efi_runtime = rs; - - status = ia64_call_efi_physical - ((u_int64_t) rs->SetVirtualAddressMap, - bootinfo.bi_memmap_size, - bootinfo.bi_memdesc_size, - bootinfo.bi_memdesc_version, - bootinfo.bi_memmap, 0); - if (EFI_ERROR(status)) { /* * We could wrap EFI in a virtual->physical shim here. 
*/ printf("SetVirtualAddressMap returned 0x%lx\n", status); panic("Can't set firmware into virtual mode"); } - conf = (EFI_CONFIGURATION_TABLE *) - IA64_PHYS_TO_RR7((u_int64_t) ia64_efi_systab->ConfigurationTable); for (i = 0; i < ia64_efi_systab->NumberOfTableEntries; i++) { static EFI_GUID sal = SAL_SYSTEM_TABLE_GUID; static EFI_GUID acpi = ACPI_TABLE_GUID; static EFI_GUID acpi20 = ACPI_20_TABLE_GUID; if (!memcmp(&conf[i].VendorGuid, &sal, sizeof(EFI_GUID))) saltab = (struct sal_system_table *) IA64_PHYS_TO_RR7((u_int64_t) conf[i].VendorTable); if (!memcmp(&conf[i].VendorGuid, &acpi, sizeof(EFI_GUID))) ia64_efi_acpi_table = (u_int64_t) conf[i].VendorTable; if (!memcmp(&conf[i].VendorGuid, &acpi20, sizeof(EFI_GUID))) ia64_efi_acpi20_table = (u_int64_t) conf[i].VendorTable; } if (saltab) ia64_sal_init(saltab); } Index: head/sys/ia64/ia64/machdep.c =================================================================== --- head/sys/ia64/ia64/machdep.c (revision 110210) +++ head/sys/ia64/ia64/machdep.c (revision 110211) @@ -1,1501 +1,1448 @@ /*- * Copyright (c) 2000,2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include "opt_compat.h" #include "opt_ddb.h" -#include "opt_ski.h" #include "opt_msgbuf.h" #include "opt_acpi.h" -#if !defined(SKI) && !defined(DEV_ACPI) -#error "You need the SKI option and/or the acpi device" -#endif - #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include -#ifdef SKI -extern void ia64_ski_init(void); -#endif - u_int64_t processor_frequency; u_int64_t bus_frequency; u_int64_t itc_frequency; int cold = 1; u_int64_t pa_bootinfo; struct bootinfo bootinfo; +struct pcpu early_pcpu; extern char kstack[]; struct user *proc0uarea; vm_offset_t proc0kstack; extern u_int64_t kernel_text[], _end[]; FPSWA_INTERFACE *fpswa_interface; u_int64_t ia64_pal_base; u_int64_t ia64_port_base; char machine[] = MACHINE; SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0, ""); static char cpu_model[128]; SYSCTL_STRING(_hw, HW_MODEL, model, CTLFLAG_RD, cpu_model, 0, ""); #ifdef DDB /* start and end of kernel symbol table */ void *ksym_start, *ksym_end; #endif int ia64_unaligned_print = 1; /* warn about 
unaligned accesses */ int ia64_unaligned_fix = 1; /* fix up unaligned accesses */ int ia64_unaligned_sigbus = 0; /* don't SIGBUS on fixed-up accesses */ SYSCTL_INT(_machdep, CPU_UNALIGNED_PRINT, unaligned_print, CTLFLAG_RW, &ia64_unaligned_print, 0, ""); SYSCTL_INT(_machdep, CPU_UNALIGNED_FIX, unaligned_fix, CTLFLAG_RW, &ia64_unaligned_fix, 0, ""); SYSCTL_INT(_machdep, CPU_UNALIGNED_SIGBUS, unaligned_sigbus, CTLFLAG_RW, &ia64_unaligned_sigbus, 0, ""); static void cpu_startup(void *); SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) struct msgbuf *msgbufp=0; long Maxmem = 0; vm_offset_t phys_avail[100]; /* must be 2 less so 0 0 can signal end of chunks */ #define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2) static void identifycpu(void); struct kva_md_info kmi; static void cpu_startup(dummy) void *dummy; { /* * Good {morning,afternoon,evening,night}. */ identifycpu(); /* startrtclock(); */ #ifdef PERFMON perfmon_init(); #endif printf("real memory = %ld (%ld MB)\n", ia64_ptob(Maxmem), ia64_ptob(Maxmem) / 1048576); /* * Display any holes after the first chunk of extended memory. */ if (bootverbose) { int indx; printf("Physical memory chunk(s):\n"); for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { int size1 = phys_avail[indx + 1] - phys_avail[indx]; printf("0x%08lx - 0x%08lx, %d bytes (%d pages)\n", phys_avail[indx], phys_avail[indx + 1] - 1, size1, size1 / PAGE_SIZE); } } vm_ksubmap_init(&kmi); printf("avail memory = %ld (%ld MB)\n", ptoa(cnt.v_free_count), ptoa(cnt.v_free_count) / 1048576); if (fpswa_interface == NULL) printf("Warning: no FPSWA package supplied\n"); else printf("FPSWA Revision = 0x%lx, Entry = %p\n", (long)fpswa_interface->Revision, (void *)fpswa_interface->Fpswa); /* * Set up buffers, so they can be used to read disk labels. */ bufinit(); vm_pager_bufferinit(); - if (!ia64_running_in_simulator()) { -#ifdef DEV_ACPI - /* - * Traverse the MADT to discover IOSAPIC and Local SAPIC - * information. 
- */ - ia64_probe_sapics(); - ia64_mca_init(); -#else - /* - * It is an error to boot a SKI-only kernel on hardware. - */ - panic("Mandatory 'device acpi' is missing"); -#endif - } + /* + * Traverse the MADT to discover IOSAPIC and Local SAPIC + * information. + */ + ia64_probe_sapics(); + ia64_mca_init(); } void cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) { KASSERT(size >= sizeof(struct pcpu) + sizeof(struct pcb), ("%s: too small an allocation for pcpu", __func__)); pcpu->pc_pcb = (void*)(pcpu+1); } static void identifycpu(void) { char vendor[17]; u_int64_t t; int number, revision, model, family, archrev; u_int64_t features; /* * Assumes little-endian. */ *(u_int64_t *) &vendor[0] = ia64_get_cpuid(0); *(u_int64_t *) &vendor[8] = ia64_get_cpuid(1); vendor[16] = '\0'; t = ia64_get_cpuid(3); number = (t >> 0) & 0xff; revision = (t >> 8) & 0xff; model = (t >> 16) & 0xff; family = (t >> 24) & 0xff; archrev = (t >> 32) & 0xff; if (family == 0x7) strcpy(cpu_model, "Itanium"); else if (family == 0x1f) strcpy(cpu_model, "Itanium 2"); /* McKinley */ else snprintf(cpu_model, sizeof(cpu_model), "Family=%d", family); features = ia64_get_cpuid(4); printf("CPU: %s", cpu_model); if (processor_frequency) printf(" (%ld.%02ld-Mhz)\n", (processor_frequency + 4999) / 1000000, ((processor_frequency + 4999) / 10000) % 100); else printf("\n"); printf(" Origin = \"%s\" Model = %d Revision = %d\n", vendor, model, revision); printf(" Features = 0x%b\n", (u_int32_t) features, "\020" "\001LB"); } void map_pal_code(void) { struct ia64_pte pte; u_int64_t psr; if (ia64_pal_base == 0) return; bzero(&pte, sizeof(pte)); pte.pte_p = 1; pte.pte_ma = PTE_MA_WB; pte.pte_a = 1; pte.pte_d = 1; pte.pte_pl = PTE_PL_KERN; pte.pte_ar = PTE_AR_RWX; pte.pte_ppn = ia64_pal_base >> 12; __asm __volatile("mov %0=psr;;" : "=r" (psr)); __asm __volatile("rsm psr.ic|psr.i;; srlz.i;;"); __asm __volatile("mov cr.ifa=%0" :: "r"(IA64_PHYS_TO_RR7(ia64_pal_base))); __asm __volatile("mov cr.itir=%0" :: "r"(28 << 
2)); __asm __volatile("srlz.i;;"); __asm __volatile("itr.i itr[%0]=%1;;" :: "r"(2), "r"(*(u_int64_t*)&pte)); __asm __volatile("srlz.i;;"); __asm __volatile("mov psr.l=%0;; srlz.i;;" :: "r" (psr)); } void map_port_space(void) { struct ia64_pte pte; u_int64_t psr; /* XXX we should fail hard if there's no I/O port space. */ if (ia64_port_base == 0) return; bzero(&pte, sizeof(pte)); pte.pte_p = 1; pte.pte_ma = PTE_MA_UC; pte.pte_a = 1; pte.pte_d = 1; pte.pte_pl = PTE_PL_KERN; pte.pte_ar = PTE_AR_RWX; pte.pte_ppn = ia64_port_base >> 12; __asm __volatile("mov %0=psr;;" : "=r" (psr)); __asm __volatile("rsm psr.ic|psr.i;; srlz.i;;"); __asm __volatile("mov cr.ifa=%0" :: "r"(IA64_PHYS_TO_RR6(ia64_port_base))); /* XXX We should use the size from the memory descriptor. */ __asm __volatile("mov cr.itir=%0" :: "r"(24 << 2)); __asm __volatile("srlz.i;;"); __asm __volatile("itr.i itr[%0]=%1;;" :: "r"(1), "r"(*(u_int64_t*)&pte)); __asm __volatile("srlz.i;;"); __asm __volatile("mov psr.l=%0;; srlz.i;;" :: "r" (psr)); } static void calculate_frequencies(void) { struct ia64_sal_result sal; struct ia64_pal_result pal; sal = ia64_sal_entry(SAL_FREQ_BASE, 0, 0, 0, 0, 0, 0, 0); pal = ia64_call_pal_static(PAL_FREQ_RATIOS, 0, 0, 0); if (sal.sal_status == 0 && pal.pal_status == 0) { if (bootverbose) { printf("Platform clock frequency %ld Hz\n", sal.sal_result[0]); printf("Processor ratio %ld/%ld, Bus ratio %ld/%ld, " "ITC ratio %ld/%ld\n", pal.pal_result[0] >> 32, pal.pal_result[0] & ((1L << 32) - 1), pal.pal_result[1] >> 32, pal.pal_result[1] & ((1L << 32) - 1), pal.pal_result[2] >> 32, pal.pal_result[2] & ((1L << 32) - 1)); } processor_frequency = sal.sal_result[0] * (pal.pal_result[0] >> 32) / (pal.pal_result[0] & ((1L << 32) - 1)); bus_frequency = sal.sal_result[0] * (pal.pal_result[1] >> 32) / (pal.pal_result[1] & ((1L << 32) - 1)); itc_frequency = sal.sal_result[0] * (pal.pal_result[2] >> 32) / (pal.pal_result[2] & ((1L << 32) - 1)); } } void ia64_init(u_int64_t arg1, u_int64_t arg2) { 
int phys_avail_cnt; vm_offset_t kernstart, kernend; vm_offset_t kernstartpfn, kernendpfn, pfn0, pfn1; char *p; EFI_MEMORY_DESCRIPTOR *md, *mdp; int mdcount, i, metadata_missing; /* NO OUTPUT ALLOWED UNTIL FURTHER NOTICE */ /* * TODO: Disable interrupts, floating point etc. * Maybe flush cache and tlb */ ia64_set_fpsr(IA64_FPSR_DEFAULT); /* * TODO: Get critical system information (if possible, from the * information provided by the boot program). */ /* * pa_bootinfo is the physical address of the bootinfo block as * passed to us by the loader and set in locore.s. */ bootinfo = *(struct bootinfo *)(IA64_PHYS_TO_RR7(pa_bootinfo)); if (bootinfo.bi_magic != BOOTINFO_MAGIC || bootinfo.bi_version != 1) { bzero(&bootinfo, sizeof(bootinfo)); bootinfo.bi_kernend = (vm_offset_t) round_page(_end); } /* * Look for the I/O ports first - we need them for console * probing. */ mdcount = bootinfo.bi_memmap_size / bootinfo.bi_memdesc_size; md = (EFI_MEMORY_DESCRIPTOR *) IA64_PHYS_TO_RR7(bootinfo.bi_memmap); - if (md == NULL || mdcount == 0) { -#ifdef SKI - static EFI_MEMORY_DESCRIPTOR ski_md[2]; - /* - * XXX hack for ski. In reality, the loader will probably ask - * EFI and pass the results to us. Possibly, we will call EFI - * directly. 
- */ - ski_md[0].Type = EfiConventionalMemory; - ski_md[0].PhysicalStart = 2L*1024*1024; - ski_md[0].VirtualStart = 0; - ski_md[0].NumberOfPages = (64L*1024*1024)>>12; - ski_md[0].Attribute = EFI_MEMORY_WB; - ski_md[1].Type = EfiMemoryMappedIOPortSpace; - ski_md[1].PhysicalStart = 0xffffc000000; - ski_md[1].VirtualStart = 0; - ski_md[1].NumberOfPages = (64L*1024*1024)>>12; - ski_md[1].Attribute = EFI_MEMORY_UC; - - md = ski_md; - mdcount = 2; -#endif - } - for (i = 0, mdp = md; i < mdcount; i++, mdp = NextMemoryDescriptor(mdp, bootinfo.bi_memdesc_size)) { if (mdp->Type == EfiMemoryMappedIOPortSpace) ia64_port_base = IA64_PHYS_TO_RR6(mdp->PhysicalStart); else if (mdp->Type == EfiPalCode) ia64_pal_base = mdp->PhysicalStart; } - /* Map the memory mapped I/O Port space */ - KASSERT(ia64_port_base != 0, - ("%s: no I/O port memory region", __func__)); map_port_space(); metadata_missing = 0; if (bootinfo.bi_modulep) preload_metadata = (caddr_t)bootinfo.bi_modulep; else metadata_missing = 1; if (envmode == 1) kern_envp = static_env; else kern_envp = (caddr_t)bootinfo.bi_envp; /* * Look at arguments passed to us and compute boothowto. */ boothowto = bootinfo.bi_boothowto; #ifdef KADB boothowto |= RB_KDB; #endif /* * Catch case of boot_verbose set in environment. */ if ((p = getenv("boot_verbose")) != NULL) { if (strcmp(p, "yes") == 0 || strcmp(p, "YES") == 0) { boothowto |= RB_VERBOSE; } freeenv(p); } if (boothowto & RB_VERBOSE) bootverbose = 1; /* * Initialize the console before we print anything out. */ cninit(); /* OUTPUT NOW ALLOWED */ if (ia64_pal_base != 0) { ia64_pal_base &= ~((1 << 28) - 1); /* * We use a TR to map the first 256M of memory - this might * cover the palcode too. */ if (ia64_pal_base == 0) printf("PAL code mapped by the kernel's TR\n"); } else printf("PAL code not found\n"); /* * Wire things up so we can call the firmware. 
*/ map_pal_code(); ia64_efi_init(); -#ifdef SKI - ia64_ski_init(); -#endif calculate_frequencies(); /* * Find the beginning and end of the kernel. */ kernstart = trunc_page(kernel_text); #ifdef DDB ksym_start = (void *)bootinfo.bi_symtab; ksym_end = (void *)bootinfo.bi_esymtab; kernend = (vm_offset_t)round_page(ksym_end); #else kernend = (vm_offset_t)round_page(_end); #endif /* But if the bootstrap tells us otherwise, believe it! */ if (bootinfo.bi_kernend) kernend = round_page(bootinfo.bi_kernend); if (metadata_missing) printf("WARNING: loader(8) metadata is missing!\n"); /* Get FPSWA interface */ fpswa_interface = (FPSWA_INTERFACE*)IA64_PHYS_TO_RR7(bootinfo.bi_fpswa); /* Init basic tunables, including hz */ init_param1(); p = getenv("kernelname"); if (p) { strncpy(kernelname, p, sizeof(kernelname) - 1); freeenv(p); } kernstartpfn = atop(IA64_RR_MASK(kernstart)); kernendpfn = atop(IA64_RR_MASK(kernend)); /* * Size the memory regions and load phys_avail[] with the results. */ /* * Find out how much memory is available, by looking at * the memory descriptors. */ #ifdef DEBUG_MD printf("Memory descriptor count: %d\n", mdcount); #endif phys_avail_cnt = 0; for (i = 0, mdp = md; i < mdcount; i++, mdp = NextMemoryDescriptor(mdp, bootinfo.bi_memdesc_size)) { #ifdef DEBUG_MD printf("MD %d: type %d pa 0x%lx cnt 0x%lx\n", i, mdp->Type, mdp->PhysicalStart, mdp->NumberOfPages); #endif pfn0 = ia64_btop(round_page(mdp->PhysicalStart)); pfn1 = ia64_btop(trunc_page(mdp->PhysicalStart + mdp->NumberOfPages * 4096)); if (pfn1 <= pfn0) continue; if (mdp->Type != EfiConventionalMemory) continue; /* * Wimp out for now since we do not DTRT here with * pci bus mastering (no bounce buffering, for example). 
*/ if (pfn0 >= ia64_btop(0x100000000UL)) { printf("Skipping memory chunk start 0x%lx\n", mdp->PhysicalStart); continue; } if (pfn1 >= ia64_btop(0x100000000UL)) { printf("Skipping memory chunk end 0x%lx\n", mdp->PhysicalStart + mdp->NumberOfPages * 4096); continue; } /* * We have a memory descriptor that describes conventional * memory that is for general use. We must determine if the * loader has put the kernel in this region. */ physmem += (pfn1 - pfn0); if (pfn0 <= kernendpfn && kernstartpfn <= pfn1) { /* * Must compute the location of the kernel * within the segment. */ #ifdef DEBUG_MD printf("Descriptor %d contains kernel\n", i); #endif if (pfn0 < kernstartpfn) { /* * There is a chunk before the kernel. */ #ifdef DEBUG_MD printf("Loading chunk before kernel: " "0x%lx / 0x%lx\n", pfn0, kernstartpfn); #endif phys_avail[phys_avail_cnt] = ia64_ptob(pfn0); phys_avail[phys_avail_cnt+1] = ia64_ptob(kernstartpfn); phys_avail_cnt += 2; } if (kernendpfn < pfn1) { /* * There is a chunk after the kernel. */ #ifdef DEBUG_MD printf("Loading chunk after kernel: " "0x%lx / 0x%lx\n", kernendpfn, pfn1); #endif phys_avail[phys_avail_cnt] = ia64_ptob(kernendpfn); phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1); phys_avail_cnt += 2; } } else { /* * Just load this cluster as one chunk. */ #ifdef DEBUG_MD printf("Loading descriptor %d: 0x%lx / 0x%lx\n", i, pfn0, pfn1); #endif phys_avail[phys_avail_cnt] = ia64_ptob(pfn0); phys_avail[phys_avail_cnt+1] = ia64_ptob(pfn1); phys_avail_cnt += 2; } } phys_avail[phys_avail_cnt] = 0; Maxmem = physmem; init_param2(physmem); /* * Initialize error message buffer (at end of core). */ { size_t sz = round_page(MSGBUF_SIZE); int i = phys_avail_cnt - 2; /* shrink so that it'll fit in the last segment */ if (phys_avail[i+1] - phys_avail[i] < sz) sz = phys_avail[i+1] - phys_avail[i]; phys_avail[i+1] -= sz; msgbufp = (struct msgbuf*) IA64_PHYS_TO_RR7(phys_avail[i+1]); msgbufinit(msgbufp, sz); /* Remove the last segment if it now has no pages. 
*/ if (phys_avail[i] == phys_avail[i+1]) { phys_avail[i] = 0; phys_avail[i+1] = 0; } /* warn if the message buffer had to be shrunk */ if (sz != round_page(MSGBUF_SIZE)) printf("WARNING: %ld bytes not available for msgbuf in last cluster (%ld used)\n", round_page(MSGBUF_SIZE), sz); } proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); /* * Init mapping for u page(s) for proc 0 */ proc0uarea = (struct user *)pmap_steal_memory(UAREA_PAGES * PAGE_SIZE); proc0kstack = (vm_offset_t)kstack; proc0.p_uarea = proc0uarea; thread0.td_kstack = proc0kstack; thread0.td_pcb = (struct pcb *) (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; /* * Setup the global data for the bootstrap cpu. */ pcpup = (struct pcpu *) pmap_steal_memory(PAGE_SIZE); pcpu_init(pcpup, 0, PAGE_SIZE); ia64_set_k4((u_int64_t) pcpup); PCPU_SET(curthread, &thread0); /* * Set ia32 control registers. */ ia64_set_cflg((CR0_PE | CR0_PG) | ((long)(CR4_XMM | CR4_FXSR) << 32)); /* We pretend to own FP state so that ia64_fpstate_check() works */ PCPU_SET(fpcurthread, &thread0); /* * Initialize the rest of proc 0's PCB. * * Set the kernel sp, reserving space for an (empty) trapframe, * and make proc0's trapframe pointer point to it for sanity. * Initialise proc0's backing store to start after u area. * * XXX what is all this +/- 16 stuff? */ thread0.td_frame = (struct trapframe *)thread0.td_pcb - 1; thread0.td_pcb->pcb_sp = (u_int64_t)thread0.td_frame - 16; thread0.td_pcb->pcb_ar_bsp = (u_int64_t)proc0kstack; mutex_init(); /* * Initialize the virtual memory system. */ pmap_bootstrap(); /* * Initialize debuggers, and break into them if appropriate. 
*/ #ifdef DDB kdb_init(); if (boothowto & RB_KDB) { printf("Boot flags requested debugger\n"); breakpoint(); } #endif ia64_set_tpr(0); -} - -int -ia64_running_in_simulator() -{ - return bootinfo.bi_systab == 0; } void bzero(void *buf, size_t len) { caddr_t p = buf; while (((vm_offset_t) p & (sizeof(u_long) - 1)) && len) { *p++ = 0; len--; } while (len >= sizeof(u_long) * 8) { *(u_long*) p = 0; *((u_long*) p + 1) = 0; *((u_long*) p + 2) = 0; *((u_long*) p + 3) = 0; len -= sizeof(u_long) * 8; *((u_long*) p + 4) = 0; *((u_long*) p + 5) = 0; *((u_long*) p + 6) = 0; *((u_long*) p + 7) = 0; p += sizeof(u_long) * 8; } while (len >= sizeof(u_long)) { *(u_long*) p = 0; len -= sizeof(u_long); p += sizeof(u_long); } while (len) { *p++ = 0; len--; } } void DELAY(int n) { u_int64_t start, end, now; start = ia64_get_itc(); end = start + (itc_frequency * n) / 1000000; /* printf("DELAY from 0x%lx to 0x%lx\n", start, end); */ do { now = ia64_get_itc(); } while (now < end || (now > start && end < start)); } /* * Send an interrupt to process. * * Stack is set up to allow sigcode stored * at top to call routine, followed by kcall * to sigreturn routine below. After sigreturn * resets the signal mask, the stack, and the * frame pointer, it returns to the user * specified pc, psl. */ void sendsig(sig_t catcher, int sig, sigset_t *mask, u_long code) { struct proc *p; struct thread *td; struct trapframe *frame; struct sigacts *psp; struct sigframe sf, *sfp; u_int64_t sbs = 0; int oonstack, rndfsize; td = curthread; p = td->td_proc; PROC_LOCK_ASSERT(p, MA_OWNED); psp = p->p_sigacts; frame = td->td_frame; oonstack = sigonstack(frame->tf_r[FRAME_SP]); rndfsize = ((sizeof(sf) + 15) / 16) * 16; /* * Make sure that we restore the entire trapframe after a * signal. */ frame->tf_flags &= ~FRAME_SYSCALL; /* save user context */ bzero(&sf, sizeof(struct sigframe)); sf.sf_uc.uc_sigmask = *mask; sf.sf_uc.uc_stack = p->p_sigstk; sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK) ? ((oonstack) ? 
SS_ONSTACK : 0) : SS_DISABLE; sf.sf_uc.uc_mcontext.mc_flags = IA64_MC_FLAG_ONSTACK; sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; sf.sf_uc.uc_mcontext.mc_nat = 0; /* XXX */ sf.sf_uc.uc_mcontext.mc_sp = frame->tf_r[FRAME_SP]; sf.sf_uc.uc_mcontext.mc_ip = (frame->tf_cr_iip | ((frame->tf_cr_ipsr >> 41) & 3)); sf.sf_uc.uc_mcontext.mc_cfm = frame->tf_cr_ifs & ~(1<<31); sf.sf_uc.uc_mcontext.mc_um = frame->tf_cr_ipsr & 0x1fff; sf.sf_uc.uc_mcontext.mc_ar_rsc = frame->tf_ar_rsc; sf.sf_uc.uc_mcontext.mc_ar_bsp = frame->tf_ar_bspstore; sf.sf_uc.uc_mcontext.mc_ar_rnat = frame->tf_ar_rnat; sf.sf_uc.uc_mcontext.mc_ar_ccv = frame->tf_ar_ccv; sf.sf_uc.uc_mcontext.mc_ar_unat = frame->tf_ar_unat; sf.sf_uc.uc_mcontext.mc_ar_fpsr = frame->tf_ar_fpsr; sf.sf_uc.uc_mcontext.mc_ar_pfs = frame->tf_ar_pfs; sf.sf_uc.uc_mcontext.mc_pr = frame->tf_pr; bcopy(&frame->tf_b[0], &sf.sf_uc.uc_mcontext.mc_br[0], 8 * sizeof(unsigned long)); sf.sf_uc.uc_mcontext.mc_gr[0] = 0; bcopy(&frame->tf_r[0], &sf.sf_uc.uc_mcontext.mc_gr[1], 31 * sizeof(unsigned long)); /* XXX mc_fr[] */ /* * Allocate and validate space for the signal handler * context. Note that if the stack is in P0 space, the * call to grow() is a nop, and the useracc() check * will fail if the process has not already allocated * the space with a `brk'. */ if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack && SIGISMEMBER(psp->ps_sigonstack, sig)) { sbs = (u_int64_t) p->p_sigstk.ss_sp; sfp = (struct sigframe *)((caddr_t)p->p_sigstk.ss_sp + p->p_sigstk.ss_size - rndfsize); /* * Align sp and bsp. 
*/ sbs = (sbs + 15) & ~15; sfp = (struct sigframe *)((u_int64_t)sfp & ~15); #if defined(COMPAT_43) || defined(COMPAT_SUNOS) p->p_sigstk.ss_flags |= SS_ONSTACK; #endif } else sfp = (struct sigframe *)(frame->tf_r[FRAME_SP] - rndfsize); PROC_UNLOCK(p); #ifdef DEBUG if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) printf("sendsig(%d): sig %d ssp %p usp %p\n", p->p_pid, sig, &sf, sfp); #endif #if 0 /* save the floating-point state, if necessary, then copy it. */ ia64_fpstate_save(td, 1); sf.sf_uc.uc_mcontext.mc_ownedfp = td->td_md.md_flags & MDP_FPUSED; bcopy(&td->td_pcb->pcb_fp, (struct fpreg *)sf.sf_uc.uc_mcontext.mc_fpregs, sizeof(struct fpreg)); sf.sf_uc.uc_mcontext.mc_fp_control = td->td_pcb.pcb_fp_control; #endif /* * copy the frame out to userland. */ if (copyout((caddr_t)&sf, (caddr_t)sfp, sizeof(sf)) != 0) { #ifdef DEBUG if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) printf("sendsig(%d): copyout failed on sig %d\n", p->p_pid, sig); #endif /* * Process has trashed its stack; give it an illegal * instruction to halt it in its tracks. */ PROC_LOCK(p); SIGACTION(p, SIGILL) = SIG_DFL; SIGDELSET(p->p_sigignore, SIGILL); SIGDELSET(p->p_sigcatch, SIGILL); SIGDELSET(p->p_sigmask, SIGILL); psignal(p, SIGILL); return; } #ifdef DEBUG if (sigdebug & SDB_FOLLOW) printf("sendsig(%d): sig %d sfp %p code %lx\n", p->p_pid, sig, sfp, code); #endif /* * Set up the registers to return to sigcode. 
*/ frame->tf_cr_ipsr &= ~IA64_PSR_RI; frame->tf_cr_iip = PS_STRINGS - (esigcode - sigcode); frame->tf_r[FRAME_R1] = sig; PROC_LOCK(p); if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { frame->tf_r[FRAME_R15] = (u_int64_t)&(sfp->sf_si); /* Fill in POSIX parts */ sf.sf_si.si_signo = sig; sf.sf_si.si_code = code; sf.sf_si.si_addr = (void*)frame->tf_cr_ifa; } else frame->tf_r[FRAME_R15] = code; frame->tf_r[FRAME_SP] = (u_int64_t)sfp - 16; frame->tf_r[FRAME_R14] = sig; frame->tf_r[FRAME_R15] = (u_int64_t) &sfp->sf_si; frame->tf_r[FRAME_R16] = (u_int64_t) &sfp->sf_uc; frame->tf_r[FRAME_R17] = (u_int64_t)catcher; frame->tf_r[FRAME_R18] = sbs; #ifdef DEBUG if (sigdebug & SDB_FOLLOW) printf("sendsig(%d): pc %lx, catcher %lx\n", p->p_pid, frame->tf_cr_iip, frame->tf_regs[FRAME_R4]); if ((sigdebug & SDB_KSTACK) && p->p_pid == sigpid) printf("sendsig(%d): sig %d returns\n", p->p_pid, sig); #endif } /* * System call to cleanup state after a signal * has been taken. Reset signal mask and * stack state from context left by sendsig (above). * Return to previous pc and psl as specified by * context left by sendsig. Check carefully to * make sure that the user has not modified the * state to gain improper privileges. * * MPSAFE */ int sigreturn(struct thread *td, struct sigreturn_args /* { ucontext_t *sigcntxp; } */ *uap) { ucontext_t uc; const ucontext_t *ucp; struct pcb *pcb; struct trapframe *frame = td->td_frame; struct __mcontext *mcp; struct proc *p; ucp = uap->sigcntxp; pcb = td->td_pcb; p = td->td_proc; #ifdef DEBUG if (sigdebug & SDB_FOLLOW) printf("sigreturn: pid %d, scp %p\n", p->p_pid, ucp); #endif /* * Fetch the entire context structure at once for speed. * We don't use a normal argument to simplify RSE handling. 
*/ if (copyin((caddr_t)frame->tf_r[FRAME_R4], (caddr_t)&uc, sizeof(ucontext_t))) return (EFAULT); if (frame->tf_ndirty != 0) { printf("sigreturn: dirty user stacked registers\n"); } /* * Restore the user-supplied information */ mcp = &uc.uc_mcontext; bcopy(&mcp->mc_br[0], &frame->tf_b[0], 8*sizeof(u_int64_t)); bcopy(&mcp->mc_gr[1], &frame->tf_r[0], 31*sizeof(u_int64_t)); /* XXX mc_fr */ frame->tf_flags &= ~FRAME_SYSCALL; frame->tf_cr_iip = mcp->mc_ip & ~15; frame->tf_cr_ipsr &= ~IA64_PSR_RI; switch (mcp->mc_ip & 15) { case 1: frame->tf_cr_ipsr |= IA64_PSR_RI_1; break; case 2: frame->tf_cr_ipsr |= IA64_PSR_RI_2; break; } frame->tf_cr_ipsr = ((frame->tf_cr_ipsr & ~0x1fff) | (mcp->mc_um & 0x1fff)); frame->tf_pr = mcp->mc_pr; frame->tf_ar_rsc = (mcp->mc_ar_rsc & 3) | 12; /* user, loadrs=0 */ frame->tf_ar_pfs = mcp->mc_ar_pfs; frame->tf_cr_ifs = mcp->mc_cfm | (1UL<<63); frame->tf_ar_bspstore = mcp->mc_ar_bsp; frame->tf_ar_rnat = mcp->mc_ar_rnat; frame->tf_ndirty = 0; /* assumes flushrs in sigcode */ frame->tf_ar_unat = mcp->mc_ar_unat; frame->tf_ar_ccv = mcp->mc_ar_ccv; frame->tf_ar_fpsr = mcp->mc_ar_fpsr; frame->tf_r[FRAME_SP] = mcp->mc_sp; PROC_LOCK(p); #if defined(COMPAT_43) || defined(COMPAT_SUNOS) if (uc.uc_mcontext.mc_onstack & 1) p->p_sigstk.ss_flags |= SS_ONSTACK; else p->p_sigstk.ss_flags &= ~SS_ONSTACK; #endif p->p_sigmask = uc.uc_sigmask; SIG_CANTMASK(p->p_sigmask); signotify(p); PROC_UNLOCK(p); /* XXX ksc.sc_ownedfp ? 
*/ ia64_fpstate_drop(td); #if 0 bcopy((struct fpreg *)uc.uc_mcontext.mc_fpregs, &td->td_pcb->pcb_fp, sizeof(struct fpreg)); td->td_pcb->pcb_fp_control = uc.uc_mcontext.mc_fp_control; #endif #ifdef DEBUG if (sigdebug & SDB_FOLLOW) printf("sigreturn(%d): returns\n", p->p_pid); #endif return (EJUSTRETURN); } #ifdef COMPAT_FREEBSD4 int freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap) { return sigreturn(td, (struct sigreturn_args *)uap); } #endif int get_mcontext(struct thread *td, mcontext_t *mcp) { return (ENOSYS); } int set_mcontext(struct thread *td, const mcontext_t *mcp) { return (ENOSYS); } /* * Machine dependent boot() routine */ void cpu_boot(int howto) { ia64_efi_runtime->ResetSystem(EfiResetWarm, EFI_SUCCESS, 0, 0); } /* * Shutdown the CPU as much as possible */ void cpu_halt(void) { ia64_efi_runtime->ResetSystem(EfiResetWarm, EFI_SUCCESS, 0, 0); } /* * Clear registers on exec */ void exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings) { struct trapframe *frame; frame = td->td_frame; /* * Make sure that we restore the entire trapframe after an * execve. */ frame->tf_flags &= ~FRAME_SYSCALL; bzero(frame->tf_r, sizeof(frame->tf_r)); bzero(frame->tf_f, sizeof(frame->tf_f)); frame->tf_cr_iip = entry; frame->tf_cr_ipsr = (IA64_PSR_IC | IA64_PSR_I | IA64_PSR_IT | IA64_PSR_DT | IA64_PSR_RT | IA64_PSR_DFH | IA64_PSR_BN | IA64_PSR_CPL_USER); /* * Make sure that sp is aligned to a 16 byte boundary and * reserve 16 bytes of scratch space for _start. */ frame->tf_r[FRAME_SP] = (stack & ~15) - 16; /* * Write values for out0, out1 and out2 to the user's backing * store and arrange for them to be restored into the user's * initial register frame. Assumes that (bspstore & 0x1f8) < * 0x1e0. 
*/ frame->tf_ar_bspstore = td->td_md.md_bspstore + 24; suword((caddr_t) frame->tf_ar_bspstore - 24, stack); suword((caddr_t) frame->tf_ar_bspstore - 16, ps_strings); suword((caddr_t) frame->tf_ar_bspstore - 8, 0); frame->tf_ndirty = 0; frame->tf_cr_ifs = (1L<<63) | 3; /* sof=3, v=1 */ frame->tf_ar_rsc = 0xf; /* user mode rsc */ frame->tf_ar_fpsr = IA64_FPSR_DEFAULT; td->td_md.md_flags &= ~MDP_FPUSED; ia64_fpstate_drop(td); } int ptrace_set_pc(struct thread *td, unsigned long addr) { uint64_t slot; switch (addr & 0xFUL) { case 0: slot = IA64_PSR_RI_0; break; case 1: /* XXX we need to deal with MLX bundles here */ slot = IA64_PSR_RI_1; break; case 2: slot = IA64_PSR_RI_2; break; default: return (EINVAL); } td->td_frame->tf_cr_iip = addr & ~0x0FULL; td->td_frame->tf_cr_ipsr = (td->td_frame->tf_cr_ipsr & ~IA64_PSR_RI) | slot; return (0); } int ptrace_single_step(struct thread *td) { td->td_frame->tf_cr_ipsr |= IA64_PSR_SS; return (0); } int ia64_pa_access(vm_offset_t pa) { return VM_PROT_READ|VM_PROT_WRITE; } int fill_regs(td, regs) struct thread *td; struct reg *regs; { bcopy(td->td_frame->tf_b, regs->r_br, sizeof(regs->r_br)); bcopy(td->td_frame->tf_r, regs->r_gr+1, sizeof(td->td_frame->tf_r)); /* TODO copy registers from the register stack. 
*/ regs->r_cfm = td->td_frame->tf_cr_ifs; regs->r_ip = td->td_frame->tf_cr_iip; regs->r_ip |= (td->td_frame->tf_cr_ipsr & IA64_PSR_RI) >> 41; regs->r_pr = td->td_frame->tf_pr; regs->r_psr = td->td_frame->tf_cr_ipsr; regs->r_ar_rsc = td->td_frame->tf_ar_rsc; regs->r_ar_bsp = 0; /* XXX */ regs->r_ar_bspstore = td->td_frame->tf_ar_bspstore; regs->r_ar_rnat = td->td_frame->tf_ar_rnat; regs->r_ar_ccv = td->td_frame->tf_ar_ccv; regs->r_ar_unat = td->td_frame->tf_ar_unat; regs->r_ar_fpsr = td->td_frame->tf_ar_fpsr; regs->r_ar_pfs = td->td_frame->tf_ar_pfs; regs->r_ar_lc = td->td_frame->tf_ar_lc; regs->r_ar_ec = td->td_frame->tf_ar_ec; return (0); } int set_regs(td, regs) struct thread *td; struct reg *regs; { int error; error = ptrace_set_pc(td, regs->r_ip); if (error) return (error); td->td_frame->tf_cr_ipsr &= ~0x1FUL; /* clear user mask */ td->td_frame->tf_cr_ipsr |= regs->r_psr & 0x1FUL; td->td_frame->tf_pr = regs->r_pr; /* XXX r_ar_bsp */ td->td_frame->tf_ar_rsc = regs->r_ar_rsc; td->td_frame->tf_ar_pfs = regs->r_ar_pfs; td->td_frame->tf_cr_ifs = regs->r_cfm; td->td_frame->tf_ar_bspstore = regs->r_ar_bspstore; td->td_frame->tf_ar_rnat = regs->r_ar_rnat; td->td_frame->tf_ar_unat = regs->r_ar_unat; td->td_frame->tf_ar_ccv = regs->r_ar_ccv; td->td_frame->tf_ar_fpsr = regs->r_ar_fpsr; td->td_frame->tf_ar_lc = regs->r_ar_lc; td->td_frame->tf_ar_ec = regs->r_ar_ec; bcopy(regs->r_br, td->td_frame->tf_b, sizeof(td->td_frame->tf_b)); bcopy(regs->r_gr+1, td->td_frame->tf_r, sizeof(td->td_frame->tf_r)); /* TODO copy registers to the register stack. */ return (0); } int fill_dbregs(struct thread *td, struct dbreg *dbregs) { return (ENOSYS); } int set_dbregs(struct thread *td, struct dbreg *dbregs) { return (ENOSYS); } int fill_fpregs(td, fpregs) struct thread *td; struct fpreg *fpregs; { struct pcb *pcb = td->td_pcb; /* * XXX - The PCB pointer should not point to the actual PCB, * because it will not contain the preserved registers of * the program being debugged. 
Instead, it should point to * a PCB constructed by unwinding all the way up to the * IVT handler. */ bcopy(pcb->pcb_f + PCB_F2, fpregs->fpr_regs + 2, sizeof(pcb->pcb_f[0]) * 4); bcopy(td->td_frame->tf_f, fpregs->fpr_regs + 6, sizeof(td->td_frame->tf_f)); bcopy(pcb->pcb_f + PCB_F16, fpregs->fpr_regs + 16, sizeof(pcb->pcb_f[0]) * 16); ia64_fpstate_save(td, 0); bcopy(pcb->pcb_highfp, fpregs->fpr_regs + 32, sizeof(td->td_pcb->pcb_highfp)); return (0); } int set_fpregs(td, fpregs) struct thread *td; struct fpreg *fpregs; { struct pcb *pcb = td->td_pcb; /* * XXX - The PCB pointer should not point to the actual PCB, * because it will not contain the preserved registers of * the program being debugged. Instead, it should point to * a PCB constructed by unwinding all the way up to the * IVT handler. * XXX - An additional complication here is that we need to * have the actual location of where the values should be * stored as well. Some values may still reside in registers, * while other may have been saved somewhere. 
*/ bcopy(fpregs->fpr_regs + 2, pcb->pcb_f + PCB_F2, sizeof(pcb->pcb_f[0]) * 4); bcopy(fpregs->fpr_regs + 6, td->td_frame->tf_f, sizeof(td->td_frame->tf_f)); bcopy(fpregs->fpr_regs + 16, pcb->pcb_f + PCB_F16, sizeof(pcb->pcb_f[0]) * 16); ia64_fpstate_drop(td); bcopy(fpregs->fpr_regs + 32, pcb->pcb_highfp, sizeof(td->td_pcb->pcb_highfp)); return (0); } #ifndef DDB void Debugger(const char *msg) { printf("Debugger(\"%s\") called.\n", msg); } #endif /* no DDB */ static int sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS) { int error; error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req); if (!error && req->newptr) resettodr(); return (error); } SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW, &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", ""); SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set, CTLFLAG_RW, &disable_rtc_set, 0, ""); SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock, CTLFLAG_RW, &wall_cmos_clock, 0, ""); void ia64_fpstate_check(struct thread *td) { if ((td->td_frame->tf_cr_ipsr & IA64_PSR_DFH) == 0) if (td != PCPU_GET(fpcurthread)) panic("ia64_fpstate_check: bogus"); } /* * Save the high floating point state in the pcb. Use this to get * read-only access to the floating point state. If write is true, the * current fp process is cleared so that fp state can safely be * modified. The process will automatically reload the changed state * by generating a disabled fp trap. */ void ia64_fpstate_save(struct thread *td, int write) { if (td == PCPU_GET(fpcurthread)) { /* * Save the state in the pcb. */ savehighfp(td->td_pcb->pcb_highfp); if (write) { td->td_frame->tf_cr_ipsr |= IA64_PSR_DFH; PCPU_SET(fpcurthread, NULL); } } } /* * Relinquish ownership of the FP state. This is called instead of * ia64_save_fpstate() if the entire FP state is being changed * (e.g. on sigreturn). 
*/ void ia64_fpstate_drop(struct thread *td) { if (td == PCPU_GET(fpcurthread)) { td->td_frame->tf_cr_ipsr |= IA64_PSR_DFH; PCPU_SET(fpcurthread, NULL); } } /* * Switch the current owner of the fp state to p, reloading the state * from the pcb. */ void ia64_fpstate_switch(struct thread *td) { if (PCPU_GET(fpcurthread)) { /* * Dump the old fp state if its valid. */ savehighfp(PCPU_GET(fpcurthread)->td_pcb->pcb_highfp); PCPU_GET(fpcurthread)->td_frame->tf_cr_ipsr |= IA64_PSR_DFH; } /* * Remember the new FP owner and reload its state. */ PCPU_SET(fpcurthread, td); restorehighfp(td->td_pcb->pcb_highfp); td->td_frame->tf_cr_ipsr &= ~IA64_PSR_DFH; td->td_md.md_flags |= MDP_FPUSED; } /* * Utility functions for manipulating instruction bundles. */ void ia64_unpack_bundle(u_int64_t low, u_int64_t high, struct ia64_bundle *bp) { bp->template = low & 0x1f; bp->slot[0] = (low >> 5) & ((1L<<41) - 1); bp->slot[1] = (low >> 46) | ((high & ((1L<<23) - 1)) << 18); bp->slot[2] = (high >> 23); } void ia64_pack_bundle(u_int64_t *lowp, u_int64_t *highp, const struct ia64_bundle *bp) { u_int64_t low, high; low = bp->template | (bp->slot[0] << 5) | (bp->slot[1] << 46); high = (bp->slot[1] >> 18) | (bp->slot[2] << 23); *lowp = low; *highp = high; } static int rse_slot(u_int64_t *bsp) { return ((u_int64_t) bsp >> 3) & 0x3f; } /* * Return the address of register regno (regno >= 32) given that bsp * points at the base of the register stack frame. */ u_int64_t * ia64_rse_register_address(u_int64_t *bsp, int regno) { int off = regno - 32; u_int64_t rnats = (rse_slot(bsp) + off) / 63; return bsp + off + rnats; } /* * Calculate the base address of the previous frame given that the * current frame's locals area is 'size'. 
*/ u_int64_t * ia64_rse_previous_frame(u_int64_t *bsp, int size) { int slot = rse_slot(bsp); int rnats = 0; int count = size; while (count > slot) { count -= 63; rnats++; slot = 63; } return bsp - size - rnats; } Index: head/sys/ia64/ia64/mca.c =================================================================== --- head/sys/ia64/ia64/mca.c (revision 110210) +++ head/sys/ia64/ia64/mca.c (revision 110211) @@ -1,227 +1,228 @@ /* * Copyright (c) 2002 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE(M_MCA, "MCA", "Machine Check Architecture"); int64_t mca_info_size[SAL_INFO_TYPES]; vm_offset_t mca_info_block; struct mtx mca_info_block_lock; SYSCTL_NODE(_hw, OID_AUTO, mca, CTLFLAG_RW, 0, "MCA container"); static int mca_count; /* Number of records stored. */ static int mca_first; /* First (lowest) record ID. */ static int mca_last; /* Last (highest) record ID. */ SYSCTL_INT(_hw_mca, OID_AUTO, count, CTLFLAG_RD, &mca_count, 0, "Record count"); SYSCTL_INT(_hw_mca, OID_AUTO, first, CTLFLAG_RD, &mca_first, 0, "First record id"); SYSCTL_INT(_hw_mca, OID_AUTO, last, CTLFLAG_RD, &mca_last, 0, "Last record id"); static int mca_sysctl_handler(SYSCTL_HANDLER_ARGS) { int error = 0; if (!arg1) return (EINVAL); error = SYSCTL_OUT(req, arg1, arg2); if (error || !req->newptr) return (error); error = SYSCTL_IN(req, arg1, arg2); return (error); } void ia64_mca_save_state(int type) { struct ia64_sal_result result; struct mca_record_header *hdr; struct sysctl_oid *oidp; char *name, *state; uint64_t seqnr; size_t recsz, totsz; /* * Don't try to get the state if we couldn't get the size of * the state information previously. */ if (mca_info_size[type] == -1) return; while (1) { mtx_lock_spin(&mca_info_block_lock); result = ia64_sal_entry(SAL_GET_STATE_INFO, type, 0, mca_info_block, 0, 0, 0, 0); if (result.sal_status < 0) { mtx_unlock_spin(&mca_info_block_lock); return; } hdr = (struct mca_record_header *)mca_info_block; recsz = hdr->rh_length; seqnr = hdr->rh_seqnr; mtx_unlock_spin(&mca_info_block_lock); totsz = sizeof(struct sysctl_oid) + recsz + 32; oidp = malloc(totsz, M_MCA, M_ZERO); state = (char*)(oidp + 1); name = state + recsz; sprintf(name, "%lld", (long long)seqnr); mtx_lock_spin(&mca_info_block_lock); /* * If the info block doesn't have our record anymore because * we temporarily unlocked it, get it again from SAL. 
I assume * that it's possible that we could get a different record. * I expect this to happen in a SMP configuration where the * record has been cleared by a different processor. So, if * we get a different record we simply abort with this record * and start over. */ if (seqnr != hdr->rh_seqnr) { result = ia64_sal_entry(SAL_GET_STATE_INFO, type, 0, mca_info_block, 0, 0, 0, 0); if (seqnr != hdr->rh_seqnr) { mtx_unlock_spin(&mca_info_block_lock); free(oidp, M_MCA); continue; } } bcopy((char*)mca_info_block, state, recsz); oidp->oid_parent = &sysctl__hw_mca_children; oidp->oid_number = OID_AUTO; oidp->oid_kind = CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_DYN; oidp->oid_arg1 = state; oidp->oid_arg2 = recsz; oidp->oid_name = name; oidp->oid_handler = mca_sysctl_handler; oidp->oid_fmt = "S,MCA"; oidp->descr = "Error record"; sysctl_register_oid(oidp); if (mca_count > 0) { if (seqnr < mca_first) mca_first = seqnr; else if (seqnr > mca_last) mca_last = seqnr; } else mca_first = mca_last = seqnr; mca_count++; /* * Clear the state so that we get any other records when * they exist. */ result = ia64_sal_entry(SAL_CLEAR_STATE_INFO, type, 0, 0, 0, 0, 0, 0); mtx_unlock_spin(&mca_info_block_lock); } } void ia64_mca_init(void) { struct ia64_sal_result result; uint64_t max_size; char *p; int i; /* * Get the sizes of the state information we can get from SAL and * allocate a common block (forgive me my Fortran :-) for use by * support functions. We create a region 7 address to make it * easy on the OS_MCA or OS_INIT handlers to get the state info * under unreliable conditions. 
*/ max_size = 0; for (i = 0; i < SAL_INFO_TYPES; i++) { result = ia64_sal_entry(SAL_GET_STATE_INFO_SIZE, i, 0, 0, 0, 0, 0, 0); if (result.sal_status == 0) { mca_info_size[i] = result.sal_result[0]; if (mca_info_size[i] > max_size) max_size = mca_info_size[i]; } else mca_info_size[i] = -1; } max_size = round_page(max_size); - p = contigmalloc(max_size, M_TEMP, 0, 0ul, 256*1024*1024 - 1, - PAGE_SIZE, 256*1024*1024); + if (max_size) { + p = contigmalloc(max_size, M_TEMP, 0, 0ul, 256*1024*1024 - 1, + PAGE_SIZE, 256*1024*1024); + mca_info_block = IA64_PHYS_TO_RR7(ia64_tpa((u_int64_t)p)); - mca_info_block = IA64_PHYS_TO_RR7(ia64_tpa((u_int64_t)p)); - - if (bootverbose) - printf("MCA: allocated %ld bytes for state information\n", - max_size); + if (bootverbose) + printf("MCA: allocated %ld bytes for state info.\n", + max_size); + } /* * Initialize the spin lock used to protect the info block. When APs * get launched, there's a short moment of contention, but in all other * cases it's not a hot spot. I think it's possible to have the MCA * handler be called on multiple processors at the same time, but that * should be rare. On top of that, performance is not an issue when * dealing with machine checks... */ mtx_init(&mca_info_block_lock, "MCA spin lock", NULL, MTX_SPIN); /* * Get and save any processor and platfom error records. Note that in * a SMP configuration the processor records are for the BSP only. We * let the APs get and save their own records when we wake them up. */ for (i = 0; i < SAL_INFO_TYPES; i++) ia64_mca_save_state(i); } Index: head/sys/ia64/ia64/pal.S =================================================================== --- head/sys/ia64/ia64/pal.S (revision 110210) +++ head/sys/ia64/ia64/pal.S (revision 110211) @@ -1,236 +1,223 @@ /*- * Copyright (c) 2000-2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include .data .global ia64_pal_entry -ia64_pal_entry: .quad ia64_call_pal_stub +ia64_pal_entry: .quad 0 .text - -/* - * Stub for running in simulation. - */ -ENTRY(ia64_call_pal_stub, 0) - - mov r8=-3 - tbit.nz p6,p7=r28,8 // static or stacked? 
- ;; -(p6) br.ret.sptk.few rp -(p7) br.cond.sptk.few rp - -END(ia64_call_pal_stub) /* * struct ia64_pal_result ia64_call_pal_static(u_int64_t proc, * u_int64_t arg1, u_int64_t arg2, u_int64_t arg3) */ ENTRY(ia64_call_pal_static, 4) .regstk 4,5,0,0 palret = loc0 entry = loc1 rpsave = loc2 pfssave = loc3 psrsave = loc4 alloc pfssave=ar.pfs,4,5,0,0 ;; mov rpsave=rp movl entry=@gprel(ia64_pal_entry) 1: mov palret=ip // for return address ;; add entry=entry,gp mov psrsave=psr mov r28=in0 // procedure number ;; ld8 entry=[entry] // read entry point mov r29=in1 // copy arguments mov r30=in2 mov r31=in3 ;; mov b6=entry add palret=2f-1b,palret // calculate return address ;; mov b0=palret rsm psr.i // disable interrupts ;; br.cond.sptk b6 // call into firmware 2: mov psr.l=psrsave mov rp=rpsave mov ar.pfs=pfssave ;; srlz.d br.ret.sptk rp END(ia64_call_pal_static) #ifdef _KERNEL /* * struct ia64_pal_result ia64_call_pal_static_physical(u_int64_t proc, * u_int64_t arg1, u_int64_t arg2, u_int64_t arg3) */ ENTRY(ia64_call_pal_static_physical, 4) .regstk 4,5,0,0 palret = loc0 entry = loc1 rpsave = loc2 pfssave = loc3 psrsave = loc4 alloc pfssave=ar.pfs,4,5,0,0 ;; mov rpsave=rp movl entry=@gprel(ia64_pal_entry) 1: mov palret=ip // for return address ;; add entry=entry,gp mov r28=in0 // procedure number ;; ld8 entry=[entry] // read entry point mov r29=in1 // copy arguments mov r30=in2 mov r31=in3 ;; dep entry=0,entry,61,3 // physical address dep palret=0,palret,61,3 // physical address br.call.sptk.many rp=ia64_physical_mode mov psrsave=ret0 ;; mov b6=entry add palret=2f-1b,palret // calculate return address ;; mov b0=palret br.cond.sptk b6 // call into firmware ;; 2: mov r14=psrsave ;; br.call.sptk.many rp=ia64_change_mode ;; mov rp=rpsave mov ar.pfs=pfssave ;; br.ret.sptk rp END(ia64_call_pal_static_physical) #endif /* * struct ia64_pal_result ia64_call_pal_stacked(u_int64_t proc, * u_int64_t arg1, u_int64_t arg2, u_int64_t arg3) */ ENTRY(ia64_call_pal_stacked, 4) .regstk 4,4,4,0 
entry = loc0 rpsave = loc1 pfssave = loc2 psrsave = loc3 alloc pfssave=ar.pfs,4,4,4,0 ;; mov rpsave=rp movl entry=@gprel(ia64_pal_entry) ;; add entry=entry,gp mov psrsave=psr mov r28=in0 // procedure number mov out0=in0 ;; ld8 entry=[entry] // read entry point mov out1=in1 // copy arguments mov out2=in2 mov out3=in3 ;; mov b6=entry ;; rsm psr.i // disable interrupts ;; br.call.sptk.many rp=b6 // call into firmware mov psr.l=psrsave mov rp=rpsave mov ar.pfs=pfssave ;; srlz.d br.ret.sptk rp END(ia64_call_pal_stacked) #ifdef _KERNEL /* * struct ia64_pal_result ia64_call_pal_stacked_physical(u_int64_t proc, * u_int64_t arg1, u_int64_t arg2, u_int64_t arg3) */ ENTRY(ia64_call_pal_stacked_physical, 4) .regstk 4,4,4,0 entry = loc0 rpsave = loc1 pfssave = loc2 psrsave = loc3 alloc pfssave=ar.pfs,4,4,4,0 ;; mov rpsave=rp movl entry=@gprel(ia64_pal_entry) ;; add entry=entry,gp mov r28=in0 // procedure number mov out0=in0 ;; ld8 entry=[entry] // read entry point mov out1=in1 // copy arguments mov out2=in2 mov out3=in3 ;; dep entry=0,entry,61,3 // physical address br.call.sptk.many rp=ia64_physical_mode mov psrsave=ret0 ;; mov b6=entry ;; br.call.sptk.many rp=b6 // call into firmware ;; mov r14=psrsave ;; br.call.sptk.many rp=ia64_change_mode ;; mov rp=rpsave mov ar.pfs=pfssave ;; br.ret.sptk rp END(ia64_call_pal_stacked_physical) #endif Index: head/sys/ia64/ia64/pal.s =================================================================== --- head/sys/ia64/ia64/pal.s (revision 110210) +++ head/sys/ia64/ia64/pal.s (revision 110211) @@ -1,236 +1,223 @@ /*- * Copyright (c) 2000-2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include .data .global ia64_pal_entry -ia64_pal_entry: .quad ia64_call_pal_stub +ia64_pal_entry: .quad 0 .text - -/* - * Stub for running in simulation. - */ -ENTRY(ia64_call_pal_stub, 0) - - mov r8=-3 - tbit.nz p6,p7=r28,8 // static or stacked? 
- ;; -(p6) br.ret.sptk.few rp -(p7) br.cond.sptk.few rp - -END(ia64_call_pal_stub) /* * struct ia64_pal_result ia64_call_pal_static(u_int64_t proc, * u_int64_t arg1, u_int64_t arg2, u_int64_t arg3) */ ENTRY(ia64_call_pal_static, 4) .regstk 4,5,0,0 palret = loc0 entry = loc1 rpsave = loc2 pfssave = loc3 psrsave = loc4 alloc pfssave=ar.pfs,4,5,0,0 ;; mov rpsave=rp movl entry=@gprel(ia64_pal_entry) 1: mov palret=ip // for return address ;; add entry=entry,gp mov psrsave=psr mov r28=in0 // procedure number ;; ld8 entry=[entry] // read entry point mov r29=in1 // copy arguments mov r30=in2 mov r31=in3 ;; mov b6=entry add palret=2f-1b,palret // calculate return address ;; mov b0=palret rsm psr.i // disable interrupts ;; br.cond.sptk b6 // call into firmware 2: mov psr.l=psrsave mov rp=rpsave mov ar.pfs=pfssave ;; srlz.d br.ret.sptk rp END(ia64_call_pal_static) #ifdef _KERNEL /* * struct ia64_pal_result ia64_call_pal_static_physical(u_int64_t proc, * u_int64_t arg1, u_int64_t arg2, u_int64_t arg3) */ ENTRY(ia64_call_pal_static_physical, 4) .regstk 4,5,0,0 palret = loc0 entry = loc1 rpsave = loc2 pfssave = loc3 psrsave = loc4 alloc pfssave=ar.pfs,4,5,0,0 ;; mov rpsave=rp movl entry=@gprel(ia64_pal_entry) 1: mov palret=ip // for return address ;; add entry=entry,gp mov r28=in0 // procedure number ;; ld8 entry=[entry] // read entry point mov r29=in1 // copy arguments mov r30=in2 mov r31=in3 ;; dep entry=0,entry,61,3 // physical address dep palret=0,palret,61,3 // physical address br.call.sptk.many rp=ia64_physical_mode mov psrsave=ret0 ;; mov b6=entry add palret=2f-1b,palret // calculate return address ;; mov b0=palret br.cond.sptk b6 // call into firmware ;; 2: mov r14=psrsave ;; br.call.sptk.many rp=ia64_change_mode ;; mov rp=rpsave mov ar.pfs=pfssave ;; br.ret.sptk rp END(ia64_call_pal_static_physical) #endif /* * struct ia64_pal_result ia64_call_pal_stacked(u_int64_t proc, * u_int64_t arg1, u_int64_t arg2, u_int64_t arg3) */ ENTRY(ia64_call_pal_stacked, 4) .regstk 4,4,4,0 
entry = loc0 rpsave = loc1 pfssave = loc2 psrsave = loc3 alloc pfssave=ar.pfs,4,4,4,0 ;; mov rpsave=rp movl entry=@gprel(ia64_pal_entry) ;; add entry=entry,gp mov psrsave=psr mov r28=in0 // procedure number mov out0=in0 ;; ld8 entry=[entry] // read entry point mov out1=in1 // copy arguments mov out2=in2 mov out3=in3 ;; mov b6=entry ;; rsm psr.i // disable interrupts ;; br.call.sptk.many rp=b6 // call into firmware mov psr.l=psrsave mov rp=rpsave mov ar.pfs=pfssave ;; srlz.d br.ret.sptk rp END(ia64_call_pal_stacked) #ifdef _KERNEL /* * struct ia64_pal_result ia64_call_pal_stacked_physical(u_int64_t proc, * u_int64_t arg1, u_int64_t arg2, u_int64_t arg3) */ ENTRY(ia64_call_pal_stacked_physical, 4) .regstk 4,4,4,0 entry = loc0 rpsave = loc1 pfssave = loc2 psrsave = loc3 alloc pfssave=ar.pfs,4,4,4,0 ;; mov rpsave=rp movl entry=@gprel(ia64_pal_entry) ;; add entry=entry,gp mov r28=in0 // procedure number mov out0=in0 ;; ld8 entry=[entry] // read entry point mov out1=in1 // copy arguments mov out2=in2 mov out3=in3 ;; dep entry=0,entry,61,3 // physical address br.call.sptk.many rp=ia64_physical_mode mov psrsave=ret0 ;; mov b6=entry ;; br.call.sptk.many rp=b6 // call into firmware ;; mov r14=psrsave ;; br.call.sptk.many rp=ia64_change_mode ;; mov rp=rpsave mov ar.pfs=pfssave ;; br.ret.sptk rp END(ia64_call_pal_stacked_physical) #endif Index: head/sys/ia64/ia64/pmap.c =================================================================== --- head/sys/ia64/ia64/pmap.c (revision 110210) +++ head/sys/ia64/ia64/pmap.c (revision 110211) @@ -1,2700 +1,2698 @@ /* * Copyright (c) 1991 Regents of the University of California. * All rights reserved. * Copyright (c) 1994 John S. Dyson * All rights reserved. * Copyright (c) 1994 David Greenman * All rights reserved. * Copyright (c) 1998,2000 Doug Rabson * All rights reserved. 
* * This code is derived from software contributed to Berkeley by * the Systems Programming Group of the University of Utah Computer * Science Department and William Jolitz of UUNET Technologies Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by the University of * California, Berkeley and its contributors. * 4. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91 * from: i386 Id: pmap.c,v 1.193 1998/04/19 15:22:48 bde Exp * with some ideas from NetBSD's alpha pmap * $FreeBSD$ */ /* * Manages physical address maps. * * In addition to hardware address maps, this * module is called upon to provide software-use-only * maps which may or may not be stored in the same * form as hardware maps. These pseudo-maps are * used to store intermediate results from copy * operations to and from address spaces. * * Since the information managed by this module is * also stored by the logical address mapping module, * this module may throw away valid virtual-to-physical * mappings at almost any time. However, invalidations * of virtual-to-physical mappings must be done as * requested. * * In order to cope with hardware architectures which * make virtual-to-physical map invalidates expensive, * this module may delay invalidate or reduced protection * operations until such time as they are actually * necessary. This module is given full information as * to which processors are currently using which maps, * and to when physical maps must be made correct. */ /* * Following the Linux model, region IDs are allocated in groups of * eight so that a single region ID can be used for as many RRs as we * want by encoding the RR number into the low bits of the ID. * * We reserve region ID 0 for the kernel and allocate the remaining * IDs for user pmaps. 
* * Region 0..4 * User virtually mapped * * Region 5 * Kernel virtually mapped * * Region 6 * Kernel physically mapped uncacheable * * Region 7 * Kernel physically mapped cacheable */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MALLOC_DEFINE(M_PMAP, "PMAP", "PMAP Structures"); #ifndef PMAP_SHPGPERPROC #define PMAP_SHPGPERPROC 200 #endif #if defined(DIAGNOSTIC) #define PMAP_DIAGNOSTIC #endif #define MINPV 2048 /* Preallocate at least this many */ #define MAXPV 20480 /* But no more than this */ #if 0 #define PMAP_DIAGNOSTIC #define PMAP_DEBUG #endif #if !defined(PMAP_DIAGNOSTIC) #define PMAP_INLINE __inline #else #define PMAP_INLINE #endif /* * Get PDEs and PTEs for user/kernel address space */ #define pmap_pte_w(pte) ((pte)->pte_ig & PTE_IG_WIRED) #define pmap_pte_managed(pte) ((pte)->pte_ig & PTE_IG_MANAGED) #define pmap_pte_v(pte) ((pte)->pte_p) #define pmap_pte_pa(pte) (((pte)->pte_ppn) << 12) #define pmap_pte_prot(pte) (((pte)->pte_ar << 2) | (pte)->pte_pl) #define pmap_pte_set_w(pte, v) ((v)?((pte)->pte_ig |= PTE_IG_WIRED) \ :((pte)->pte_ig &= ~PTE_IG_WIRED)) #define pmap_pte_set_prot(pte, v) do { \ (pte)->pte_ar = v >> 2; \ (pte)->pte_pl = v & 3; \ } while (0) /* * Given a map and a machine independent protection code, * convert to an ia64 protection code. */ #define pte_prot(m, p) (protection_codes[m == kernel_pmap ? 
0 : 1][p]) #define pte_prot_pl(m, p) (pte_prot(m, p) & 3) #define pte_prot_ar(m, p) (pte_prot(m, p) >> 2) int protection_codes[2][8]; /* * Return non-zero if this pmap is currently active */ #define pmap_isactive(pmap) (pmap->pm_active) /* * Statically allocated kernel pmap */ struct pmap kernel_pmap_store; vm_offset_t avail_start; /* PA of first available physical page */ vm_offset_t avail_end; /* PA of last available physical page */ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */ vm_offset_t vhpt_base, vhpt_size; /* * We use an object to own the kernel's 'page tables'. For simplicity, * we use one page directory to index a set of pages containing * ia64_lptes. This gives us up to 2Gb of kernel virtual space. */ static vm_object_t kptobj; static int nkpt; static struct ia64_lpte **kptdir; #define KPTE_DIR_INDEX(va) \ ((va >> (2*PAGE_SHIFT-5)) & ((1<<(PAGE_SHIFT-3))-1)) #define KPTE_PTE_INDEX(va) \ ((va >> PAGE_SHIFT) & ((1<<(PAGE_SHIFT-5))-1)) #define NKPTEPG (PAGE_SIZE / sizeof(struct ia64_lpte)) vm_offset_t kernel_vm_end; -/* - * Values for ptc.e. XXX values for SKI. - */ +/* Values for ptc.e. XXX values for SKI. */ static u_int64_t pmap_ptc_e_base = 0x100000000; static u_int64_t pmap_ptc_e_count1 = 3; static u_int64_t pmap_ptc_e_count2 = 2; static u_int64_t pmap_ptc_e_stride1 = 0x2000; static u_int64_t pmap_ptc_e_stride2 = 0x100000000; /* * Data for the RID allocator */ static u_int64_t *pmap_ridbusy; static int pmap_ridmax, pmap_ridcount; struct mtx pmap_ridmutex; /* * Data for the pv entry allocation mechanism */ static uma_zone_t pvzone; static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0; int pmap_pagedaemon_waken; static struct pv_entry *pvbootentries; static int pvbootnext, pvbootmax; /* * Data for allocating PTEs for user processes. 
*/ static uma_zone_t ptezone; /* * VHPT instrumentation. */ static int pmap_vhpt_inserts; static int pmap_vhpt_collisions; static int pmap_vhpt_resident; SYSCTL_DECL(_vm_stats); SYSCTL_NODE(_vm_stats, OID_AUTO, vhpt, CTLFLAG_RD, 0, ""); SYSCTL_INT(_vm_stats_vhpt, OID_AUTO, inserts, CTLFLAG_RD, &pmap_vhpt_inserts, 0, ""); SYSCTL_INT(_vm_stats_vhpt, OID_AUTO, collisions, CTLFLAG_RD, &pmap_vhpt_collisions, 0, ""); SYSCTL_INT(_vm_stats_vhpt, OID_AUTO, resident, CTLFLAG_RD, &pmap_vhpt_resident, 0, ""); static PMAP_INLINE void free_pv_entry(pv_entry_t pv); static pv_entry_t get_pv_entry(void); static void ia64_protection_init(void); static void pmap_invalidate_all(pmap_t pmap); static void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m); vm_offset_t pmap_steal_memory(vm_size_t size) { vm_size_t bank_size; vm_offset_t pa, va; size = round_page(size); bank_size = phys_avail[1] - phys_avail[0]; while (size > bank_size) { int i; for (i = 0; phys_avail[i+2]; i+= 2) { phys_avail[i] = phys_avail[i+2]; phys_avail[i+1] = phys_avail[i+3]; } phys_avail[i] = 0; phys_avail[i+1] = 0; if (!phys_avail[0]) panic("pmap_steal_memory: out of memory"); bank_size = phys_avail[1] - phys_avail[0]; } pa = phys_avail[0]; phys_avail[0] += size; va = IA64_PHYS_TO_RR7(pa); bzero((caddr_t) va, size); return va; } /* * Bootstrap the system enough to run with virtual memory. */ void pmap_bootstrap() { int i, j, count, ridbits; struct ia64_pal_result res; /* * Query the PAL Code to find the loop parameters for the * ptc.e instruction. 
*/ res = ia64_call_pal_static(PAL_PTCE_INFO, 0, 0, 0); if (res.pal_status != 0) panic("Can't configure ptc.e parameters"); pmap_ptc_e_base = res.pal_result[0]; pmap_ptc_e_count1 = res.pal_result[1] >> 32; pmap_ptc_e_count2 = res.pal_result[1] & ((1L<<32) - 1); pmap_ptc_e_stride1 = res.pal_result[2] >> 32; pmap_ptc_e_stride2 = res.pal_result[2] & ((1L<<32) - 1); if (bootverbose) printf("ptc.e base=0x%lx, count1=%ld, count2=%ld, " "stride1=0x%lx, stride2=0x%lx\n", pmap_ptc_e_base, pmap_ptc_e_count1, pmap_ptc_e_count2, pmap_ptc_e_stride1, pmap_ptc_e_stride2); /* * Setup RIDs. RIDs 0..7 are reserved for the kernel. */ res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0); if (res.pal_status != 0) { if (bootverbose) printf("Can't read VM Summary - assuming 18 Region ID bits\n"); ridbits = 18; /* guaranteed minimum */ } else { ridbits = (res.pal_result[1] >> 8) & 0xff; if (bootverbose) printf("Processor supports %d Region ID bits\n", ridbits); } pmap_ridmax = (1 << ridbits); pmap_ridcount = 8; pmap_ridbusy = (u_int64_t *) pmap_steal_memory(pmap_ridmax / 8); bzero(pmap_ridbusy, pmap_ridmax / 8); pmap_ridbusy[0] |= 0xff; mtx_init(&pmap_ridmutex, "RID allocator lock", NULL, MTX_DEF); /* * Allocate some memory for initial kernel 'page tables'. */ kptdir = (struct ia64_lpte **) pmap_steal_memory(PAGE_SIZE); for (i = 0; i < NKPT; i++) { kptdir[i] = (struct ia64_lpte *) pmap_steal_memory(PAGE_SIZE); } nkpt = NKPT; avail_start = phys_avail[0]; for (i = 0; phys_avail[i+2]; i+= 2) ; avail_end = phys_avail[i+1]; count = i+2; /* * Figure out a useful size for the VHPT, based on the size of * physical memory and try to locate a region which is large * enough to contain the VHPT (which must be a power of two in * size and aligned to a natural boundary). * Don't use the difference between avail_start and avail_end * as a measure for memory size. The address space is often * enough sparse, causing us to (try to) create a huge VHPT. 
*/ vhpt_size = 15; while ((1< i; j -= 2) { phys_avail[j] = phys_avail[j-2]; phys_avail[j+1] = phys_avail[j-2+1]; } phys_avail[count+2] = 0; phys_avail[count+3] = 0; phys_avail[i+1] = vhpt_base; phys_avail[i+2] = vhpt_base + (1L << vhpt_size); } else { phys_avail[i] = vhpt_base + (1L << vhpt_size); } vhpt_base = IA64_PHYS_TO_RR7(vhpt_base); bzero((void *) vhpt_base, (1L << vhpt_size)); __asm __volatile("mov cr.pta=%0;; srlz.i;;" :: "r" (vhpt_base + (1<<8) + (vhpt_size<<2) + 1)); virtual_avail = IA64_RR_BASE(5); virtual_end = IA64_RR_BASE(6)-1; /* * Initialize protection array. */ ia64_protection_init(); /* * Initialize the kernel pmap (which is statically allocated). */ for (i = 0; i < 5; i++) kernel_pmap->pm_rid[i] = 0; kernel_pmap->pm_active = 1; TAILQ_INIT(&kernel_pmap->pm_pvlist); PCPU_SET(current_pmap, kernel_pmap); /* * Region 5 is mapped via the vhpt. */ ia64_set_rr(IA64_RR_BASE(5), (5 << 8) | (PAGE_SHIFT << 2) | 1); /* * Region 6 is direct mapped UC and region 7 is direct mapped * WC. The details of this is controlled by the Alt {I,D}TLB * handlers. Here we just make sure that they have the largest * possible page size to minimise TLB usage. */ ia64_set_rr(IA64_RR_BASE(6), (6 << 8) | (28 << 2)); ia64_set_rr(IA64_RR_BASE(7), (7 << 8) | (28 << 2)); /* * Reserve some memory for allocating pvs while bootstrapping * the pv allocator. We need to have enough to cover mapping * the kmem_alloc region used to allocate the initial_pvs in * pmap_init. In general, the size of this region is * approximately (# physical pages) * (size of pv entry). */ pvbootmax = ((physmem * sizeof(struct pv_entry)) >> PAGE_SHIFT) + 128; pvbootentries = (struct pv_entry *) pmap_steal_memory(pvbootmax * sizeof(struct pv_entry)); pvbootnext = 0; /* * Clear out any random TLB entries left over from booting. 
*/ pmap_invalidate_all(kernel_pmap); } void * uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait) { static vm_pindex_t color; vm_page_t m; int pflags; void *va; *flags = UMA_SLAB_PRIV; if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT) pflags = VM_ALLOC_INTERRUPT; else pflags = VM_ALLOC_SYSTEM; if (wait & M_ZERO) pflags |= VM_ALLOC_ZERO; for (;;) { m = vm_page_alloc(NULL, color++, pflags | VM_ALLOC_NOOBJ); if (m == NULL) { if (wait & M_NOWAIT) return (NULL); else VM_WAIT; } else break; } va = (void *)IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m)); if ((m->flags & PG_ZERO) == 0) bzero(va, PAGE_SIZE); return (va); } void uma_small_free(void *mem, int size, u_int8_t flags) { vm_page_t m; m = PHYS_TO_VM_PAGE(IA64_RR_MASK((u_int64_t)mem)); vm_page_lock_queues(); vm_page_free(m); vm_page_unlock_queues(); } /* * Initialize the pmap module. * Called by vm_init, to initialize any structures that the pmap * system needs to map virtual memory. * pmap_init has been enhanced to support in a fairly consistant * way, discontiguous physical memory. */ void pmap_init(vm_offset_t phys_start, vm_offset_t phys_end) { int i; int initial_pvs; /* * Allocate memory for random pmap data structures. Includes the * pv_head_table. */ for(i = 0; i < vm_page_array_size; i++) { vm_page_t m; m = &vm_page_array[i]; TAILQ_INIT(&m->md.pv_list); m->md.pv_list_count = 0; } /* * Init the pv free list and the PTE free list. */ initial_pvs = vm_page_array_size; if (initial_pvs < MINPV) initial_pvs = MINPV; if (initial_pvs > MAXPV) initial_pvs = MAXPV; pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); uma_prealloc(pvzone, initial_pvs); ptezone = uma_zcreate("PT ENTRY", sizeof (struct ia64_lpte), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM); uma_prealloc(ptezone, initial_pvs); /* * Create the object for the kernel's page tables. 
*/ kptobj = vm_object_allocate(OBJT_DEFAULT, MAXKPT); /* * Now it is safe to enable pv_table recording. */ pmap_initialized = TRUE; } /* * Initialize the address space (zone) for the pv_entries. Set a * high water mark so that the system can recover from excessive * numbers of pv entries. */ void pmap_init2() { int shpgperproc = PMAP_SHPGPERPROC; TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); pv_entry_max = shpgperproc * maxproc + vm_page_array_size; pv_entry_high_water = 9 * (pv_entry_max / 10); } /*************************************************** * Manipulate TLBs for a pmap ***************************************************/ static void pmap_invalidate_page(pmap_t pmap, vm_offset_t va) { KASSERT((pmap == kernel_pmap || pmap == PCPU_GET(current_pmap)), ("invalidating TLB for non-current pmap")); ia64_ptc_g(va, PAGE_SHIFT << 2); } static void pmap_invalidate_all_1(void *arg) { u_int64_t addr; int i, j; register_t psr; psr = intr_disable(); addr = pmap_ptc_e_base; for (i = 0; i < pmap_ptc_e_count1; i++) { for (j = 0; j < pmap_ptc_e_count2; j++) { ia64_ptc_e(addr); addr += pmap_ptc_e_stride2; } addr += pmap_ptc_e_stride1; } intr_restore(psr); } static void pmap_invalidate_all(pmap_t pmap) { KASSERT((pmap == kernel_pmap || pmap == PCPU_GET(current_pmap)), ("invalidating TLB for non-current pmap")); #ifdef SMP smp_rendezvous(0, pmap_invalidate_all_1, 0, 0); #else pmap_invalidate_all_1(0); #endif } static u_int32_t pmap_allocate_rid(void) { int rid; if (pmap_ridcount == pmap_ridmax) panic("pmap_allocate_rid: All Region IDs used"); do { rid = arc4random() & (pmap_ridmax - 1); } while (pmap_ridbusy[rid / 64] & (1L << (rid & 63))); pmap_ridbusy[rid / 64] |= (1L << (rid & 63)); pmap_ridcount++; return rid; } static void pmap_free_rid(u_int32_t rid) { mtx_lock(&pmap_ridmutex); pmap_ridbusy[rid / 64] &= ~(1L << (rid & 63)); pmap_ridcount--; mtx_unlock(&pmap_ridmutex); } static void pmap_ensure_rid(pmap_t pmap, vm_offset_t va) { int rr; rr = va >> 61; /* * We get 
called for virtual addresses that may just as well be * kernel addresses (ie region 5, 6 or 7). Since the pm_rid field * only holds region IDs for user regions, we have to make sure * the region is within bounds. */ if (rr >= 5) return; if (pmap->pm_rid[rr]) return; mtx_lock(&pmap_ridmutex); pmap->pm_rid[rr] = pmap_allocate_rid(); if (pmap == PCPU_GET(current_pmap)) ia64_set_rr(IA64_RR_BASE(rr), (pmap->pm_rid[rr] << 8)|(PAGE_SHIFT << 2)|1); mtx_unlock(&pmap_ridmutex); } /*************************************************** * Low level helper routines..... ***************************************************/ /* * Install a pte into the VHPT */ static PMAP_INLINE void pmap_install_pte(struct ia64_lpte *vhpte, struct ia64_lpte *pte) { u_int64_t *vhp, *p; /* invalidate the pte */ atomic_set_64(&vhpte->pte_tag, 1L << 63); ia64_mf(); /* make sure everyone sees */ vhp = (u_int64_t *) vhpte; p = (u_int64_t *) pte; vhp[0] = p[0]; vhp[1] = p[1]; vhp[2] = p[2]; /* sets ti to one */ ia64_mf(); } /* * Compare essential parts of pte. */ static PMAP_INLINE int pmap_equal_pte(struct ia64_lpte *pte1, struct ia64_lpte *pte2) { return *(u_int64_t *) pte1 == *(u_int64_t *) pte2; } /* * this routine defines the region(s) of memory that should * not be tested for the modified bit. */ static PMAP_INLINE int pmap_track_modified(vm_offset_t va) { if ((va < kmi.clean_sva) || (va >= kmi.clean_eva)) return 1; else return 0; } #ifndef KSTACK_MAX_PAGES #define KSTACK_MAX_PAGES 32 #endif /* * Create the KSTACK for a new thread. * This routine directly affects the fork perf for a process/thread. */ void pmap_new_thread(struct thread *td, int pages) { vm_offset_t *ks; /* Bounds check */ if (pages <= 1) pages = KSTACK_PAGES; else if (pages > KSTACK_MAX_PAGES) pages = KSTACK_MAX_PAGES; /* * Use contigmalloc for user area so that we can use a region * 7 address for it which makes it impossible to accidentally * lose when recording a trapframe. 
*/ ks = contigmalloc(pages * PAGE_SIZE, M_PMAP, 0, 0ul, 256*1024*1024 - 1, PAGE_SIZE, 256*1024*1024); if (ks == NULL) panic("pmap_new_thread: could not contigmalloc %d pages\n", pages); td->td_md.md_kstackvirt = ks; td->td_kstack = IA64_PHYS_TO_RR7(ia64_tpa((u_int64_t)ks)); td->td_kstack_pages = pages; } /* * Dispose the KSTACK for a thread that has exited. * This routine directly impacts the exit perf of a process/thread. */ void pmap_dispose_thread(struct thread *td) { int pages; pages = td->td_kstack_pages; contigfree(td->td_md.md_kstackvirt, pages * PAGE_SIZE, M_PMAP); td->td_md.md_kstackvirt = NULL; td->td_kstack = 0; } /* * Set up a variable sized alternate kstack. This appears to be MI. */ void pmap_new_altkstack(struct thread *td, int pages) { /* * Shuffle the original stack. Save the virtual kstack address * instead of the physical address because 1) we can derive the * physical address from the virtual address and 2) we need the * virtual address in pmap_dispose_thread. */ td->td_altkstack_obj = td->td_kstack_obj; td->td_altkstack = (vm_offset_t)td->td_md.md_kstackvirt; td->td_altkstack_pages = td->td_kstack_pages; pmap_new_thread(td, pages); } void pmap_dispose_altkstack(struct thread *td) { pmap_dispose_thread(td); /* * Restore the original kstack. Note that td_altkstack holds the * virtual kstack address of the previous kstack. */ td->td_md.md_kstackvirt = (void*)td->td_altkstack; td->td_kstack = IA64_PHYS_TO_RR7(ia64_tpa(td->td_altkstack)); td->td_kstack_obj = td->td_altkstack_obj; td->td_kstack_pages = td->td_altkstack_pages; td->td_altkstack = 0; td->td_altkstack_obj = NULL; td->td_altkstack_pages = 0; } /* * Allow the KSTACK for a thread to be prejudicially paged out. */ void pmap_swapout_thread(struct thread *td) { } /* * Bring the KSTACK for a specified thread back in. */ void pmap_swapin_thread(struct thread *td) { } /*************************************************** * Page table page management routines..... 
***************************************************/ void pmap_pinit0(struct pmap *pmap) { /* kernel_pmap is the same as any other pmap. */ pmap_pinit(pmap); } /* * Initialize a preallocated and zeroed pmap structure, * such as one in a vmspace structure. */ void pmap_pinit(struct pmap *pmap) { int i; pmap->pm_flags = 0; for (i = 0; i < 5; i++) pmap->pm_rid[i] = 0; pmap->pm_ptphint = NULL; pmap->pm_active = 0; TAILQ_INIT(&pmap->pm_pvlist); bzero(&pmap->pm_stats, sizeof pmap->pm_stats); } /* * Wire in kernel global address entries. To avoid a race condition * between pmap initialization and pmap_growkernel, this procedure * should be called after the vmspace is attached to the process * but before this pmap is activated. */ void pmap_pinit2(struct pmap *pmap) { } /*************************************************** * Pmap allocation/deallocation routines. ***************************************************/ /* * Release any resources held by the given physical map. * Called when a pmap initialized by pmap_pinit is being released. * Should only be called if the map contains no valid mappings. */ void pmap_release(pmap_t pmap) { int i; for (i = 0; i < 5; i++) if (pmap->pm_rid[i]) pmap_free_rid(pmap->pm_rid[i]); } /* * grow the number of kernel page table entries, if needed */ void pmap_growkernel(vm_offset_t addr) { struct ia64_lpte *ptepage; vm_page_t nkpg; if (kernel_vm_end == 0) { kernel_vm_end = nkpt * PAGE_SIZE * NKPTEPG + IA64_RR_BASE(5); } addr = (addr + PAGE_SIZE * NKPTEPG) & ~(PAGE_SIZE * NKPTEPG - 1); while (kernel_vm_end < addr) { if (kptdir[KPTE_DIR_INDEX(kernel_vm_end)]) { kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NKPTEPG) & ~(PAGE_SIZE * NKPTEPG - 1); continue; } /* * We could handle more by increasing the size of kptdir. 
*/ if (nkpt == MAXKPT) panic("pmap_growkernel: out of kernel address space"); /* * This index is bogus, but out of the way */ nkpg = vm_page_alloc(kptobj, nkpt, VM_ALLOC_SYSTEM | VM_ALLOC_WIRED); if (!nkpg) panic("pmap_growkernel: no memory to grow kernel"); nkpt++; ptepage = (struct ia64_lpte *) IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(nkpg)); bzero(ptepage, PAGE_SIZE); kptdir[KPTE_DIR_INDEX(kernel_vm_end)] = ptepage; kernel_vm_end = (kernel_vm_end + PAGE_SIZE * NKPTEPG) & ~(PAGE_SIZE * NKPTEPG - 1); } } /*************************************************** * page management routines. ***************************************************/ /* * free the pv_entry back to the free list */ static PMAP_INLINE void free_pv_entry(pv_entry_t pv) { pv_entry_count--; uma_zfree(pvzone, pv); } /* * get a new pv_entry, allocating a block from the system * when needed. * the memory allocation is performed bypassing the malloc code * because of the possibility of allocations at interrupt time. */ static pv_entry_t get_pv_entry(void) { pv_entry_count++; if (pv_entry_high_water && (pv_entry_count > pv_entry_high_water) && (pmap_pagedaemon_waken == 0)) { pmap_pagedaemon_waken = 1; wakeup (&vm_pages_needed); } return uma_zalloc(pvzone, M_NOWAIT); } /* * Add an ia64_lpte to the VHPT. */ static void pmap_enter_vhpt(struct ia64_lpte *pte, vm_offset_t va) { struct ia64_lpte *vhpte; pmap_vhpt_inserts++; pmap_vhpt_resident++; vhpte = (struct ia64_lpte *) ia64_thash(va); if (vhpte->pte_chain) pmap_vhpt_collisions++; pte->pte_chain = vhpte->pte_chain; vhpte->pte_chain = ia64_tpa((vm_offset_t) pte); if (!vhpte->pte_p && pte->pte_p) pmap_install_pte(vhpte, pte); else ia64_mf(); } /* * Update VHPT after a pte has changed. 
*/ static void pmap_update_vhpt(struct ia64_lpte *pte, vm_offset_t va) { struct ia64_lpte *vhpte; vhpte = (struct ia64_lpte *) ia64_thash(va); if ((!vhpte->pte_p || vhpte->pte_tag == pte->pte_tag) && pte->pte_p) pmap_install_pte(vhpte, pte); } /* * Remove the ia64_lpte matching va from the VHPT. Return zero if it * worked or an appropriate error code otherwise. */ static int pmap_remove_vhpt(vm_offset_t va) { struct ia64_lpte *pte; struct ia64_lpte *lpte; struct ia64_lpte *vhpte; u_int64_t tag; int error = ENOENT; vhpte = (struct ia64_lpte *) ia64_thash(va); /* * If the VHPTE is invalid, there can't be a collision chain. */ if (!vhpte->pte_p) { KASSERT(!vhpte->pte_chain, ("bad vhpte")); printf("can't remove vhpt entry for 0x%lx\n", va); goto done; } lpte = vhpte; pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(vhpte->pte_chain); tag = ia64_ttag(va); while (pte->pte_tag != tag) { lpte = pte; if (pte->pte_chain) pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain); else { printf("can't remove vhpt entry for 0x%lx\n", va); goto done; } } /* * Snip this pv_entry out of the collision chain. */ lpte->pte_chain = pte->pte_chain; /* * If the VHPTE matches as well, change it to map the first * element from the chain if there is one. */ if (vhpte->pte_tag == tag) { if (vhpte->pte_chain) { pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(vhpte->pte_chain); pmap_install_pte(vhpte, pte); } else { vhpte->pte_p = 0; ia64_mf(); } } pmap_vhpt_resident--; error = 0; done: return error; } /* * Find the ia64_lpte for the given va, if any. 
*/ static struct ia64_lpte * pmap_find_vhpt(vm_offset_t va) { struct ia64_lpte *pte; u_int64_t tag; pte = (struct ia64_lpte *) ia64_thash(va); if (!pte->pte_chain) { pte = 0; goto done; } tag = ia64_ttag(va); pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain); while (pte->pte_tag != tag) { if (pte->pte_chain) { pte = (struct ia64_lpte *) IA64_PHYS_TO_RR7(pte->pte_chain); } else { pte = 0; break; } } done: return pte; } /* * Remove an entry from the list of managed mappings. */ static int pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va, pv_entry_t pv) { if (!pv) { if (m->md.pv_list_count < pmap->pm_stats.resident_count) { TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pmap == pv->pv_pmap && va == pv->pv_va) break; } } else { TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) { if (va == pv->pv_va) break; } } } if (pv) { TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); m->md.pv_list_count--; if (TAILQ_FIRST(&m->md.pv_list) == NULL) vm_page_flag_clear(m, PG_WRITEABLE); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); free_pv_entry(pv); return 0; } else { return ENOENT; } } /* * Create a pv entry for page at pa for * (pmap, va). */ static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; pv = get_pv_entry(); pv->pv_pmap = pmap; pv->pv_va = va; TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); m->md.pv_list_count++; } /* * Routine: pmap_extract * Function: * Extract the physical page address associated * with the given map/virtual_address pair. */ vm_offset_t pmap_extract(pmap, va) register pmap_t pmap; vm_offset_t va; { struct ia64_lpte *pte; pmap_t oldpmap; if (!pmap) return 0; oldpmap = pmap_install(pmap); pte = pmap_find_vhpt(va); pmap_install(oldpmap); if (!pte) return 0; return pmap_pte_pa(pte); } /*************************************************** * Low level mapping routines..... 
***************************************************/ /* * Find the kernel lpte for mapping the given virtual address, which * must be in the part of region 5 which we can cover with our kernel * 'page tables'. */ static struct ia64_lpte * pmap_find_kpte(vm_offset_t va) { KASSERT((va >> 61) == 5, ("kernel mapping 0x%lx not in region 5", va)); KASSERT(IA64_RR_MASK(va) < (nkpt * PAGE_SIZE * NKPTEPG), ("kernel mapping 0x%lx out of range", va)); return &kptdir[KPTE_DIR_INDEX(va)][KPTE_PTE_INDEX(va)]; } /* * Find a pte suitable for mapping a user-space address. If one exists * in the VHPT, that one will be returned, otherwise a new pte is * allocated. */ static struct ia64_lpte * pmap_find_pte(vm_offset_t va) { struct ia64_lpte *pte; if (va >= VM_MAXUSER_ADDRESS) return pmap_find_kpte(va); pte = pmap_find_vhpt(va); if (!pte) { pte = uma_zalloc(ptezone, 0); pte->pte_p = 0; } return pte; } /* * Free a pte which is now unused. This simply returns it to the zone * allocator if it is a user mapping. For kernel mappings, clear the * valid bit to make it clear that the mapping is not currently used. */ static void pmap_free_pte(struct ia64_lpte *pte, vm_offset_t va) { if (va < VM_MAXUSER_ADDRESS) uma_zfree(ptezone, pte); else pte->pte_p = 0; } /* * Set a pte to contain a valid mapping and enter it in the VHPT. If * the pte was orginally valid, then its assumed to already be in the * VHPT. */ static void pmap_set_pte(struct ia64_lpte *pte, vm_offset_t va, vm_offset_t pa, int ig, int pl, int ar) { int wasvalid = pte->pte_p; pte->pte_p = 1; pte->pte_ma = PTE_MA_WB; if (ig & PTE_IG_MANAGED) { pte->pte_a = 0; pte->pte_d = 0; } else { pte->pte_a = 1; pte->pte_d = 1; } pte->pte_pl = pl; pte->pte_ar = ar; pte->pte_ppn = pa >> 12; pte->pte_ed = 0; pte->pte_ig = ig; pte->pte_ps = PAGE_SHIFT; pte->pte_key = 0; pte->pte_tag = ia64_ttag(va); if (wasvalid) { pmap_update_vhpt(pte, va); } else { pmap_enter_vhpt(pte, va); } } /* * If a pte contains a valid mapping, clear it and update the VHPT. 
*/ static void pmap_clear_pte(struct ia64_lpte *pte, vm_offset_t va) { if (pte->pte_p) { pmap_remove_vhpt(va); ia64_ptc_g(va, PAGE_SHIFT << 2); pte->pte_p = 0; } } /* * Remove the (possibly managed) mapping represented by pte from the * given pmap. */ static int pmap_remove_pte(pmap_t pmap, struct ia64_lpte *pte, vm_offset_t va, pv_entry_t pv, int freepte) { int error; vm_page_t m; KASSERT((pmap == kernel_pmap || pmap == PCPU_GET(current_pmap)), ("removing pte for non-current pmap")); /* * First remove from the VHPT. */ error = pmap_remove_vhpt(va); if (error) return error; /* * Make sure pmap_set_pte() knows it isn't in the VHPT. */ pte->pte_p = 0; if (pte->pte_ig & PTE_IG_WIRED) pmap->pm_stats.wired_count -= 1; pmap->pm_stats.resident_count -= 1; if (pte->pte_ig & PTE_IG_MANAGED) { m = PHYS_TO_VM_PAGE(pmap_pte_pa(pte)); if (pte->pte_d) if (pmap_track_modified(va)) vm_page_dirty(m); if (pte->pte_a) vm_page_flag_set(m, PG_REFERENCED); if (freepte) pmap_free_pte(pte, va); return pmap_remove_entry(pmap, m, va, pv); } else { if (freepte) pmap_free_pte(pte, va); return 0; } } /* * Add a list of wired pages to the kva * this routine is only used for temporary * kernel mappings that do not need to have * page modification or references recorded. * Note that old mappings are simply written * over. The page *must* be wired. */ void pmap_qenter(vm_offset_t va, vm_page_t *m, int count) { int i; struct ia64_lpte *pte; for (i = 0; i < count; i++) { vm_offset_t tva = va + i * PAGE_SIZE; int wasvalid; pte = pmap_find_kpte(tva); wasvalid = pte->pte_p; pmap_set_pte(pte, tva, VM_PAGE_TO_PHYS(m[i]), 0, PTE_PL_KERN, PTE_AR_RWX); if (wasvalid) ia64_ptc_g(tva, PAGE_SHIFT << 2); } } /* * this routine jerks page mappings from the * kernel -- it is meant only for temporary mappings. 
*/ void pmap_qremove(vm_offset_t va, int count) { int i; struct ia64_lpte *pte; for (i = 0; i < count; i++) { pte = pmap_find_kpte(va); pmap_clear_pte(pte, va); va += PAGE_SIZE; } } /* * Add a wired page to the kva. */ void pmap_kenter(vm_offset_t va, vm_offset_t pa) { struct ia64_lpte *pte; int wasvalid; pte = pmap_find_kpte(va); wasvalid = pte->pte_p; pmap_set_pte(pte, va, pa, 0, PTE_PL_KERN, PTE_AR_RWX); if (wasvalid) ia64_ptc_g(va, PAGE_SHIFT << 2); } /* * Remove a page from the kva */ void pmap_kremove(vm_offset_t va) { struct ia64_lpte *pte; pte = pmap_find_kpte(va); pmap_clear_pte(pte, va); } /* * Used to map a range of physical addresses into kernel * virtual address space. * * The value passed in '*virt' is a suggested virtual address for * the mapping. Architectures which can support a direct-mapped * physical to virtual region can return the appropriate address * within that region, leaving '*virt' unchanged. Other * architectures should map the pages starting at '*virt' and * update '*virt' with the first usable address after the mapped * region. */ vm_offset_t pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) { return IA64_PHYS_TO_RR7(start); } /* * Remove a single page from a process address space */ static void pmap_remove_page(pmap_t pmap, vm_offset_t va) { struct ia64_lpte *pte; KASSERT((pmap == kernel_pmap || pmap == PCPU_GET(current_pmap)), ("removing page for non-current pmap")); pte = pmap_find_vhpt(va); if (pte) { pmap_remove_pte(pmap, pte, va, 0, 1); pmap_invalidate_page(pmap, va); } return; } /* * Remove the given range of addresses from the specified map. * * It is assumed that the start and end are properly * rounded to the page size. */ void pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { pmap_t oldpmap; vm_offset_t va; pv_entry_t pv; struct ia64_lpte *pte; if (pmap == NULL) return; if (pmap->pm_stats.resident_count == 0) return; oldpmap = pmap_install(pmap); /* * special handling of removing one page. 
a very * common operation and easy to short circuit some * code. */ if (sva + PAGE_SIZE == eva) { pmap_remove_page(pmap, sva); pmap_install(oldpmap); return; } if (pmap->pm_stats.resident_count < ((eva - sva) >> PAGE_SHIFT)) { TAILQ_FOREACH(pv, &pmap->pm_pvlist, pv_plist) { va = pv->pv_va; if (va >= sva && va < eva) { pte = pmap_find_vhpt(va); pmap_remove_pte(pmap, pte, va, pv, 1); pmap_invalidate_page(pmap, va); } } } else { for (va = sva; va < eva; va = va += PAGE_SIZE) { pte = pmap_find_vhpt(va); if (pte) { pmap_remove_pte(pmap, pte, va, 0, 1); pmap_invalidate_page(pmap, va); } } } pmap_install(oldpmap); } /* * Routine: pmap_remove_all * Function: * Removes this physical page from * all physical maps in which it resides. * Reflects back modify bits to the pager. * * Notes: * Original versions of this routine were very * inefficient because they iteratively called * pmap_remove (slow...) */ void pmap_remove_all(vm_page_t m) { pmap_t oldpmap; pv_entry_t pv; int s; #if defined(PMAP_DIAGNOSTIC) /* * XXX this makes pmap_page_protect(NONE) illegal for non-managed * pages! */ if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) { panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m)); } #endif s = splvm(); while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { struct ia64_lpte *pte; pmap_t pmap = pv->pv_pmap; vm_offset_t va = pv->pv_va; oldpmap = pmap_install(pmap); pte = pmap_find_vhpt(va); if (pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(m)) panic("pmap_remove_all: pv_table for %lx is inconsistent", VM_PAGE_TO_PHYS(m)); pmap_remove_pte(pmap, pte, va, pv, 1); pmap_invalidate_page(pmap, va); pmap_install(oldpmap); } vm_page_flag_clear(m, PG_WRITEABLE); splx(s); return; } /* * Set the physical protection on the * specified range of this map as requested. 
*/ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { pmap_t oldpmap; struct ia64_lpte *pte; int newprot; if (pmap == NULL) return; oldpmap = pmap_install(pmap); if ((prot & VM_PROT_READ) == VM_PROT_NONE) { pmap_remove(pmap, sva, eva); pmap_install(oldpmap); return; } if (prot & VM_PROT_WRITE) { pmap_install(oldpmap); return; } newprot = pte_prot(pmap, prot); if ((sva & PAGE_MASK) || (eva & PAGE_MASK)) panic("pmap_protect: unaligned addresses"); while (sva < eva) { /* * If page is invalid, skip this page */ pte = pmap_find_vhpt(sva); if (!pte) { sva += PAGE_SIZE; continue; } if (pmap_pte_prot(pte) != newprot) { if (pte->pte_ig & PTE_IG_MANAGED) { vm_offset_t pa = pmap_pte_pa(pte); vm_page_t m = PHYS_TO_VM_PAGE(pa); if (pte->pte_d) { if (pmap_track_modified(sva)) vm_page_dirty(m); pte->pte_d = 0; } if (pte->pte_a) { vm_page_flag_set(m, PG_REFERENCED); pte->pte_a = 0; } } pmap_pte_set_prot(pte, newprot); pmap_update_vhpt(pte, sva); pmap_invalidate_page(pmap, sva); } sva += PAGE_SIZE; } pmap_install(oldpmap); } /* * Insert the given physical page (p) at * the specified virtual address (v) in the * target physical map with the protection requested. * * If specified, the page will be wired down, meaning * that the related pte can not be reclaimed. * * NB: This is the only routine which MAY NOT lazy-evaluate * or lose information. That is, this routine must actually * insert this page into the given map NOW. */ void pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { pmap_t oldpmap; vm_offset_t pa; vm_offset_t opa; struct ia64_lpte origpte; struct ia64_lpte *pte; int managed; if (pmap == NULL) return; pmap_ensure_rid(pmap, va); oldpmap = pmap_install(pmap); va &= ~PAGE_MASK; #ifdef PMAP_DIAGNOSTIC if (va > VM_MAX_KERNEL_ADDRESS) panic("pmap_enter: toobig"); #endif /* * Find (or create) a pte for the given mapping. 
*/ pte = pmap_find_pte(va); origpte = *pte; if (origpte.pte_p) opa = pmap_pte_pa(&origpte); else opa = 0; managed = 0; pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK; /* * Mapping has not changed, must be protection or wiring change. */ if (origpte.pte_p && (opa == pa)) { /* * Wiring change, just update stats. We don't worry about * wiring PT pages as they remain resident as long as there * are valid mappings in them. Hence, if a user page is wired, * the PT page will be also. */ if (wired && ((origpte.pte_ig & PTE_IG_WIRED) == 0)) pmap->pm_stats.wired_count++; else if (!wired && (origpte.pte_ig & PTE_IG_WIRED)) pmap->pm_stats.wired_count--; /* * We might be turning off write access to the page, * so we go ahead and sense modify status. */ if (origpte.pte_ig & PTE_IG_MANAGED) { if (origpte.pte_d && pmap_track_modified(va)) { vm_page_t om; om = PHYS_TO_VM_PAGE(opa); vm_page_dirty(om); } } managed = origpte.pte_ig & PTE_IG_MANAGED; goto validate; } /* * Mapping has changed, invalidate old range and fall * through to handle validating new mapping. */ if (opa) { int error; vm_page_lock_queues(); error = pmap_remove_pte(pmap, pte, va, 0, 0); vm_page_unlock_queues(); if (error) panic("pmap_enter: pte vanished, va: 0x%lx", va); } /* * Enter on the PV list if part of our managed memory. */ if (pmap_initialized && (m->flags & PG_FICTITIOUS) == 0) { pmap_insert_entry(pmap, va, m); managed |= PTE_IG_MANAGED; } /* * Increment counters */ pmap->pm_stats.resident_count++; if (wired) pmap->pm_stats.wired_count++; validate: /* * Now validate mapping with desired protection/wiring. This * adds the pte to the VHPT if necessary. */ pmap_set_pte(pte, va, pa, managed | (wired ? PTE_IG_WIRED : 0), pte_prot_pl(pmap, prot), pte_prot_ar(pmap, prot)); /* * if the mapping or permission bits are different, we need * to invalidate the page. */ if (!pmap_equal_pte(&origpte, pte)) pmap_invalidate_page(pmap, va); pmap_install(oldpmap); } /* * this code makes some *MAJOR* assumptions: * 1. 
Current pmap & pmap exists. * 2. Not wired. * 3. Read access. * 4. No page table pages. * 5. Tlbflush is deferred to calling procedure. * 6. Page IS managed. * but is *MUCH* faster than pmap_enter... */ static void pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m) { struct ia64_lpte *pte; pmap_t oldpmap; pmap_ensure_rid(pmap, va); oldpmap = pmap_install(pmap); pte = pmap_find_pte(va); if (pte->pte_p) return; /* * Enter on the PV list since its part of our managed memory. */ pmap_insert_entry(pmap, va, m); /* * Increment counters */ pmap->pm_stats.resident_count++; /* * Initialise PTE with read-only protection and enter into VHPT. */ pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), PTE_IG_MANAGED, PTE_PL_USER, PTE_AR_R); pmap_install(oldpmap); } /* * Make temporary mapping for a physical address. This is called * during dump. */ void * pmap_kenter_temporary(vm_offset_t pa, int i) { return (void *) IA64_PHYS_TO_RR7(pa - (i * PAGE_SIZE)); } #define MAX_INIT_PT (96) /* * pmap_object_init_pt preloads the ptes for a given object * into the specified pmap. This eliminates the blast of soft * faults on process startup and immediately after an mmap. */ void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size, int limit) { pmap_t oldpmap; vm_offset_t tmpidx; int psize; vm_page_t p; int objpgs; if (pmap == NULL || object == NULL) return; oldpmap = pmap_install(pmap); psize = ia64_btop(size); if ((object->type != OBJT_VNODE) || ((limit & MAP_PREFAULT_PARTIAL) && (psize > MAX_INIT_PT) && (object->resident_page_count > MAX_INIT_PT))) { pmap_install(oldpmap); return; } if (psize + pindex > object->size) { if (object->size < pindex) return; psize = object->size - pindex; } /* * if we are processing a major portion of the object, then scan the * entire thing. 
*/ if (psize > (object->resident_page_count >> 2)) { objpgs = psize; for (p = TAILQ_FIRST(&object->memq); ((objpgs > 0) && (p != NULL)); p = TAILQ_NEXT(p, listq)) { tmpidx = p->pindex; if (tmpidx < pindex) { continue; } tmpidx -= pindex; if (tmpidx >= psize) { continue; } /* * don't allow an madvise to blow away our really * free pages allocating pv entries. */ if ((limit & MAP_PREFAULT_MADVISE) && cnt.v_free_count < cnt.v_free_reserved) { break; } vm_page_lock_queues(); if (((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && (p->busy == 0) && (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { if ((p->queue - p->pc) == PQ_CACHE) vm_page_deactivate(p); vm_page_busy(p); vm_page_unlock_queues(); pmap_enter_quick(pmap, addr + ia64_ptob(tmpidx), p); vm_page_lock_queues(); vm_page_wakeup(p); } vm_page_unlock_queues(); objpgs -= 1; } } else { /* * else lookup the pages one-by-one. */ for (tmpidx = 0; tmpidx < psize; tmpidx += 1) { /* * don't allow an madvise to blow away our really * free pages allocating pv entries. */ if ((limit & MAP_PREFAULT_MADVISE) && cnt.v_free_count < cnt.v_free_reserved) { break; } p = vm_page_lookup(object, tmpidx + pindex); if (p == NULL) continue; vm_page_lock_queues(); if ((p->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL && (p->busy == 0) && (p->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { if ((p->queue - p->pc) == PQ_CACHE) vm_page_deactivate(p); vm_page_busy(p); vm_page_unlock_queues(); pmap_enter_quick(pmap, addr + ia64_ptob(tmpidx), p); vm_page_lock_queues(); vm_page_wakeup(p); } vm_page_unlock_queues(); } } pmap_install(oldpmap); return; } /* * pmap_prefault provides a quick way of clustering * pagefaults into a processes address space. It is a "cousin" * of pmap_object_init_pt, except it runs at page fault time instead * of mmap time. 
*/ #define PFBAK 4 #define PFFOR 4 #define PAGEORDER_SIZE (PFBAK+PFFOR) static int pmap_prefault_pageorder[] = { -1 * PAGE_SIZE, 1 * PAGE_SIZE, -2 * PAGE_SIZE, 2 * PAGE_SIZE, -3 * PAGE_SIZE, 3 * PAGE_SIZE, -4 * PAGE_SIZE, 4 * PAGE_SIZE }; void pmap_prefault(pmap, addra, entry) pmap_t pmap; vm_offset_t addra; vm_map_entry_t entry; { int i; vm_offset_t starta; vm_offset_t addr; vm_pindex_t pindex; vm_page_t m, mpte; vm_object_t object; if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))) return; object = entry->object.vm_object; starta = addra - PFBAK * PAGE_SIZE; if (starta < entry->start) { starta = entry->start; } else if (starta > addra) { starta = 0; } mpte = NULL; for (i = 0; i < PAGEORDER_SIZE; i++) { vm_object_t lobject; struct ia64_lpte *pte; addr = addra + pmap_prefault_pageorder[i]; if (addr > addra + (PFFOR * PAGE_SIZE)) addr = 0; if (addr < starta || addr >= entry->end) continue; pte = pmap_find_vhpt(addr); if (pte && pte->pte_p) continue; pindex = ((addr - entry->start) + entry->offset) >> PAGE_SHIFT; lobject = object; for (m = vm_page_lookup(lobject, pindex); (!m && (lobject->type == OBJT_DEFAULT) && (lobject->backing_object)); lobject = lobject->backing_object) { if (lobject->backing_object_offset & PAGE_MASK) break; pindex += (lobject->backing_object_offset >> PAGE_SHIFT); m = vm_page_lookup(lobject->backing_object, pindex); } /* * give-up when a page is not in memory */ if (m == NULL) break; vm_page_lock_queues(); if (((m->valid & VM_PAGE_BITS_ALL) == VM_PAGE_BITS_ALL) && (m->busy == 0) && (m->flags & (PG_BUSY | PG_FICTITIOUS)) == 0) { if ((m->queue - m->pc) == PQ_CACHE) { vm_page_deactivate(m); } vm_page_busy(m); vm_page_unlock_queues(); pmap_enter_quick(pmap, addr, m); vm_page_lock_queues(); vm_page_wakeup(m); } vm_page_unlock_queues(); } } /* * Routine: pmap_change_wiring * Function: Change the wiring attribute for a map/virtual-address * pair. * In/out conditions: * The mapping must already exist in the pmap. 
*/ void pmap_change_wiring(pmap, va, wired) register pmap_t pmap; vm_offset_t va; boolean_t wired; { pmap_t oldpmap; struct ia64_lpte *pte; if (pmap == NULL) return; oldpmap = pmap_install(pmap); pte = pmap_find_vhpt(va); if (wired && !pmap_pte_w(pte)) pmap->pm_stats.wired_count++; else if (!wired && pmap_pte_w(pte)) pmap->pm_stats.wired_count--; /* * Wiring is not a hardware characteristic so there is no need to * invalidate TLB. */ pmap_pte_set_w(pte, wired); pmap_install(oldpmap); } /* * Copy the range specified by src_addr/len * from the source map to the range dst_addr/len * in the destination map. * * This routine is only advisory and need not do anything. */ void pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len, vm_offset_t src_addr) { } /* * pmap_zero_page zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. */ void pmap_zero_page(vm_page_t m) { vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m)); bzero((caddr_t) va, PAGE_SIZE); } /* * pmap_zero_page_area zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. * * off and size must reside within a single page. */ void pmap_zero_page_area(vm_page_t m, int off, int size) { vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m)); bzero((char *)(caddr_t)va + off, size); } /* * pmap_zero_page_idle zeros the specified hardware page by * mapping it into virtual memory and using bzero to clear * its contents. This is for the vm_idlezero process. */ void pmap_zero_page_idle(vm_page_t m) { vm_offset_t va = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(m)); bzero((caddr_t) va, PAGE_SIZE); } /* * pmap_copy_page copies the specified (machine independent) * page by mapping the page into virtual memory and using * bcopy to copy the page, one machine dependent page at a * time. 
*/ void pmap_copy_page(vm_page_t msrc, vm_page_t mdst) { vm_offset_t src = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(msrc)); vm_offset_t dst = IA64_PHYS_TO_RR7(VM_PAGE_TO_PHYS(mdst)); bcopy((caddr_t) src, (caddr_t) dst, PAGE_SIZE); } /* * Returns true if the pmap's pv is one of the first * 16 pvs linked to from this page. This count may * be changed upwards or downwards in the future; it * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { pv_entry_t pv; int loops = 0; int s; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return FALSE; s = splvm(); /* * Not found, check current mappings returning immediately if found. */ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { if (pv->pv_pmap == pmap) { splx(s); return TRUE; } loops++; if (loops >= 16) break; } splx(s); return (FALSE); } #define PMAP_REMOVE_PAGES_CURPROC_ONLY /* * Remove all pages from specified address space * this aids process exit speeds. Also, this code * is special cased for current process only, but * can have the more generic (and slightly slower) * mode enabled. This is much faster than pmap_remove * in the case of running down an entire address space. 
*/ void pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) { pv_entry_t pv, npv; int s; #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY if (!curthread || (pmap != vmspace_pmap(curthread->td_proc->p_vmspace))) { printf("warning: pmap_remove_pages called with non-current pmap\n"); return; } #endif s = splvm(); for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { struct ia64_lpte *pte; npv = TAILQ_NEXT(pv, pv_plist); if (pv->pv_va >= eva || pv->pv_va < sva) { continue; } pte = pmap_find_vhpt(pv->pv_va); if (!pte) panic("pmap_remove_pages: page on pm_pvlist has no pte\n"); /* * We cannot remove wired pages from a process' mapping at this time */ if (pte->pte_ig & PTE_IG_WIRED) { continue; } pmap_remove_pte(pmap, pte, pv->pv_va, pv, 1); } splx(s); pmap_invalidate_all(pmap); } /* * pmap_page_protect: * * Lower the permission for all mappings to a given page. */ void pmap_page_protect(vm_page_t m, vm_prot_t prot) { pv_entry_t pv; if ((prot & VM_PROT_WRITE) != 0) return; if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { if ((m->flags & PG_WRITEABLE) == 0) return; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { int newprot = pte_prot(pv->pv_pmap, prot); pmap_t oldpmap = pmap_install(pv->pv_pmap); struct ia64_lpte *pte; pte = pmap_find_vhpt(pv->pv_va); pmap_pte_set_prot(pte, newprot); pmap_update_vhpt(pte, pv->pv_va); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); pmap_install(oldpmap); } vm_page_flag_clear(m, PG_WRITEABLE); } else { pmap_remove_all(m); } } vm_offset_t pmap_phys_address(int ppn) { return (ia64_ptob(ppn)); } /* * pmap_ts_referenced: * * Return a count of reference bits for a page, clearing those bits. * It is not necessary for every reference bit to be cleared, but it * is necessary that 0 only be returned when there are truly no * reference bits set. * * XXX: The exact number of bits to check and clear is a matter that * should be tested and standardized at some point in the future for * optimal aging of shared pages. 
*/ int pmap_ts_referenced(vm_page_t m) { pv_entry_t pv; int count = 0; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return 0; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { pmap_t oldpmap = pmap_install(pv->pv_pmap); struct ia64_lpte *pte; pte = pmap_find_vhpt(pv->pv_va); if (pte->pte_a) { count++; pte->pte_a = 0; pmap_update_vhpt(pte, pv->pv_va); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } pmap_install(oldpmap); } return count; } #if 0 /* * pmap_is_referenced: * * Return whether or not the specified physical page was referenced * in any physical maps. */ static boolean_t pmap_is_referenced(vm_page_t m) { pv_entry_t pv; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return FALSE; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { pmap_t oldpmap = pmap_install(pv->pv_pmap); struct ia64_lpte *pte = pmap_find_vhpt(pv->pv_va); pmap_install(oldpmap); if (pte->pte_a) return 1; } return 0; } #endif /* * pmap_is_modified: * * Return whether or not the specified physical page was modified * in any physical maps. */ boolean_t pmap_is_modified(vm_page_t m) { pv_entry_t pv; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return FALSE; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { pmap_t oldpmap = pmap_install(pv->pv_pmap); struct ia64_lpte *pte = pmap_find_vhpt(pv->pv_va); pmap_install(oldpmap); if (pte->pte_d) return 1; } return 0; } /* * Clear the modify bits on the specified physical page. */ void pmap_clear_modify(vm_page_t m) { pv_entry_t pv; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { pmap_t oldpmap = pmap_install(pv->pv_pmap); struct ia64_lpte *pte = pmap_find_vhpt(pv->pv_va); if (pte->pte_d) { pte->pte_d = 0; pmap_update_vhpt(pte, pv->pv_va); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } pmap_install(oldpmap); } } /* * pmap_clear_reference: * * Clear the reference bit on the specified physical page. 
*/ void pmap_clear_reference(vm_page_t m) { pv_entry_t pv; if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return; TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { pmap_t oldpmap = pmap_install(pv->pv_pmap); struct ia64_lpte *pte = pmap_find_vhpt(pv->pv_va); if (pte->pte_a) { pte->pte_a = 0; pmap_update_vhpt(pte, pv->pv_va); pmap_invalidate_page(pv->pv_pmap, pv->pv_va); } pmap_install(oldpmap); } } /* * Miscellaneous support routines follow */ static void ia64_protection_init() { int prot, *kp, *up; kp = protection_codes[0]; up = protection_codes[1]; for (prot = 0; prot < 8; prot++) { switch (prot) { case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE: *kp++ = (PTE_AR_R << 2) | PTE_PL_KERN; *up++ = (PTE_AR_R << 2) | PTE_PL_KERN; break; case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE: *kp++ = (PTE_AR_X_RX << 2) | PTE_PL_KERN; *up++ = (PTE_AR_X_RX << 2) | PTE_PL_USER; break; case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE: *kp++ = (PTE_AR_RW << 2) | PTE_PL_KERN; *up++ = (PTE_AR_RW << 2) | PTE_PL_USER; break; case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE: *kp++ = (PTE_AR_RWX << 2) | PTE_PL_KERN; *up++ = (PTE_AR_RWX << 2) | PTE_PL_USER; break; case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE: *kp++ = (PTE_AR_R << 2) | PTE_PL_KERN; *up++ = (PTE_AR_R << 2) | PTE_PL_USER; break; case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE: *kp++ = (PTE_AR_RX << 2) | PTE_PL_KERN; *up++ = (PTE_AR_RX << 2) | PTE_PL_USER; break; case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE: *kp++ = (PTE_AR_RW << 2) | PTE_PL_KERN; *up++ = (PTE_AR_RW << 2) | PTE_PL_USER; break; case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE: *kp++ = (PTE_AR_RWX << 2) | PTE_PL_KERN; *up++ = (PTE_AR_RWX << 2) | PTE_PL_USER; break; } } } /* * Map a set of physical memory pages into the kernel virtual * address space. Return a pointer to where it is mapped. This * routine is intended to be used for mapping device memory, * NOT real memory. 
*/ void * pmap_mapdev(vm_offset_t pa, vm_size_t size) { return (void*) IA64_PHYS_TO_RR6(pa); } /* * 'Unmap' a range mapped by pmap_mapdev(). */ void pmap_unmapdev(vm_offset_t va, vm_size_t size) { return; } /* * perform the pmap work for mincore */ int pmap_mincore(pmap_t pmap, vm_offset_t addr) { pmap_t oldpmap; struct ia64_lpte *pte; int val = 0; oldpmap = pmap_install(pmap); pte = pmap_find_vhpt(addr); pmap_install(oldpmap); if (!pte) return 0; if (pmap_pte_v(pte)) { vm_page_t m; vm_offset_t pa; val = MINCORE_INCORE; if ((pte->pte_ig & PTE_IG_MANAGED) == 0) return val; pa = pmap_pte_pa(pte); m = PHYS_TO_VM_PAGE(pa); /* * Modified by us */ if (pte->pte_d) val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; /* * Modified by someone */ else if (pmap_is_modified(m)) val |= MINCORE_MODIFIED_OTHER; /* * Referenced by us */ if (pte->pte_a) val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; /* * Referenced by someone */ else if (pmap_ts_referenced(m)) { val |= MINCORE_REFERENCED_OTHER; vm_page_flag_set(m, PG_REFERENCED); } } return val; } void pmap_activate(struct thread *td) { pmap_install(vmspace_pmap(td->td_proc->p_vmspace)); } pmap_t pmap_install(pmap_t pmap) { pmap_t oldpmap; int i; critical_enter(); oldpmap = PCPU_GET(current_pmap); if (pmap == oldpmap || pmap == kernel_pmap) { critical_exit(); return pmap; } if (oldpmap) { atomic_clear_32(&pmap->pm_active, PCPU_GET(cpumask)); } PCPU_SET(current_pmap, pmap); if (!pmap) { /* * RIDs 0..4 have no mappings to make sure we generate * page faults on accesses. 
*/ ia64_set_rr(IA64_RR_BASE(0), (0 << 8)|(PAGE_SHIFT << 2)|1); ia64_set_rr(IA64_RR_BASE(1), (1 << 8)|(PAGE_SHIFT << 2)|1); ia64_set_rr(IA64_RR_BASE(2), (2 << 8)|(PAGE_SHIFT << 2)|1); ia64_set_rr(IA64_RR_BASE(3), (3 << 8)|(PAGE_SHIFT << 2)|1); ia64_set_rr(IA64_RR_BASE(4), (4 << 8)|(PAGE_SHIFT << 2)|1); critical_exit(); return oldpmap; } atomic_set_32(&pmap->pm_active, PCPU_GET(cpumask)); for (i = 0; i < 5; i++) ia64_set_rr(IA64_RR_BASE(i), (pmap->pm_rid[i] << 8)|(PAGE_SHIFT << 2)|1); critical_exit(); return oldpmap; } vm_offset_t pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) { return addr; } #include "opt_ddb.h" #ifdef DDB #include static const char* psnames[] = { "1B", "2B", "4B", "8B", "16B", "32B", "64B", "128B", "256B", "512B", "1K", "2K", "4K", "8K", "16K", "32K", "64K", "128K", "256K", "512K", "1M", "2M", "4M", "8M", "16M", "32M", "64M", "128M", "256M", "512M", "1G", "2G" }; static void print_trs(int type) { struct ia64_pal_result res; int i, maxtr; struct { struct ia64_pte pte; struct ia64_itir itir; struct ia64_ifa ifa; struct ia64_rr rr; } buf; static const char* manames[] = { "WB", "bad", "bad", "bad", "UC", "UCE", "WC", "NaT", }; res = ia64_call_pal_static(PAL_VM_SUMMARY, 0, 0, 0); if (res.pal_status != 0) { db_printf("Can't get VM summary\n"); return; } if (type == 0) maxtr = (res.pal_result[0] >> 40) & 0xff; else maxtr = (res.pal_result[0] >> 32) & 0xff; db_printf("V RID Virtual Page Physical Page PgSz ED AR PL D A MA P KEY\n"); for (i = 0; i <= maxtr; i++) { bzero(&buf, sizeof(buf)); res = ia64_call_pal_stacked_physical (PAL_VM_TR_READ, i, type, ia64_tpa((u_int64_t) &buf)); if (!(res.pal_result[0] & 1)) buf.pte.pte_ar = 0; if (!(res.pal_result[0] & 2)) buf.pte.pte_pl = 0; if (!(res.pal_result[0] & 4)) buf.pte.pte_d = 0; if (!(res.pal_result[0] & 8)) buf.pte.pte_ma = 0; db_printf( "%d %06x %013lx %013lx %4s %d %d %d %d %d %-3s %d %06x\n", buf.ifa.ifa_ig & 1, buf.rr.rr_rid, buf.ifa.ifa_vpn, buf.pte.pte_ppn, psnames[buf.itir.itir_ps], 
buf.pte.pte_ed, buf.pte.pte_ar, buf.pte.pte_pl, buf.pte.pte_d, buf.pte.pte_a, manames[buf.pte.pte_ma], buf.pte.pte_p, buf.itir.itir_key); } } DB_COMMAND(itr, db_itr) { print_trs(0); } DB_COMMAND(dtr, db_dtr) { print_trs(1); } DB_COMMAND(rr, db_rr) { int i; u_int64_t t; struct ia64_rr rr; printf("RR RID PgSz VE\n"); for (i = 0; i < 8; i++) { __asm __volatile ("mov %0=rr[%1]" : "=r"(t) : "r"(IA64_RR_BASE(i))); *(u_int64_t *) &rr = t; printf("%d %06x %4s %d\n", i, rr.rr_rid, psnames[rr.rr_ps], rr.rr_ve); } } DB_COMMAND(thash, db_thash) { if (!have_addr) return; db_printf("%p\n", (void *) ia64_thash(addr)); } DB_COMMAND(ttag, db_ttag) { if (!have_addr) return; db_printf("0x%lx\n", ia64_ttag(addr)); } #endif Index: head/sys/ia64/ia64/sal.c =================================================================== --- head/sys/ia64/ia64/sal.c (revision 110210) +++ head/sys/ia64/ia64/sal.c (revision 110211) @@ -1,171 +1,171 @@ /*- * Copyright (c) 2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include /* * IPIs are used more genericly than only * for inter-processor interrupts. Don't * make it a SMP specific thing... */ int ipi_vector[IPI_COUNT]; static struct ia64_fdesc sal_fdesc; static sal_entry_t fake_sal; extern u_int64_t ia64_pal_entry; sal_entry_t *ia64_sal_entry = fake_sal; static struct ia64_sal_result fake_sal(u_int64_t a1, u_int64_t a2, u_int64_t a3, u_int64_t a4, u_int64_t a5, u_int64_t a6, u_int64_t a7, u_int64_t a8) { struct ia64_sal_result res; res.sal_status = -3; res.sal_result[0] = 0; res.sal_result[1] = 0; res.sal_result[2] = 0; return res; } static void setup_ipi_vectors(int ceil) { int ipi; ipi_vector[IPI_MCA_RENDEZ] = ceil - 0x10; ipi_vector[IPI_MCA_CMCV] = ceil - 0x30; ipi_vector[IPI_TEST] = ceil - 0x30 + 1; ipi = IPI_AST; /* First generic IPI. */ ceil -= 0x20; /* First vector in group. 
*/ while (ipi < IPI_COUNT) ipi_vector[ipi++] = ceil++; } void ia64_sal_init(struct sal_system_table *saltab) { static int sizes[6] = { 48, 32, 16, 32, 16, 16 }; u_int8_t *p; int i; - if (memcmp(saltab->sal_signature, "SST_", 4)) { + if (memcmp(saltab->sal_signature, SAL_SIGNATURE, 4)) { printf("Bad signature for SAL System Table\n"); return; } p = (u_int8_t *) (saltab + 1); for (i = 0; i < saltab->sal_entry_count; i++) { switch (*p) { case 0: { struct sal_entrypoint_descriptor *dp; dp = (struct sal_entrypoint_descriptor*)p; ia64_pal_entry = IA64_PHYS_TO_RR7(dp->sale_pal_proc); if (bootverbose) printf("PAL Proc at 0x%lx\n", ia64_pal_entry); sal_fdesc.func = IA64_PHYS_TO_RR7(dp->sale_sal_proc); sal_fdesc.gp = IA64_PHYS_TO_RR7(dp->sale_sal_gp); if (bootverbose) printf("SAL Proc at 0x%lx, GP at 0x%lx\n", sal_fdesc.func, sal_fdesc.gp); ia64_sal_entry = (sal_entry_t *) &sal_fdesc; break; } case 5: { struct sal_ap_wakeup_descriptor *dp; #ifdef SMP struct ia64_sal_result result; #endif dp = (struct sal_ap_wakeup_descriptor*)p; if (dp->sale_mechanism != 0) { printf("SAL: unsupported AP wake-up mechanism " "(%d)\n", dp->sale_mechanism); break; } if (dp->sale_vector < 0x10 || dp->sale_vector > 0xff) { printf("SAL: invalid AP wake-up vector " "(0x%lx)\n", dp->sale_vector); break; } /* * SAL documents that the wake-up vector should be * high (close to 255). The MCA rendezvous vector * should be less than the wake-up vector, but still * "high". We use the following priority assignment: * Wake-up: priority of the sale_vector * Rendezvous: priority-1 * Generic IPIs: priority-2 * Special IPIs: priority-3 * Consequently, the wake-up priority should be at * least 4 (ie vector >= 0x40). 
*/ if (dp->sale_vector < 0x40) { printf("SAL: AP wake-up vector too low " "(0x%lx)\n", dp->sale_vector); break; } if (bootverbose) printf("SAL: AP wake-up vector: 0x%lx\n", dp->sale_vector); ipi_vector[IPI_AP_WAKEUP] = dp->sale_vector; setup_ipi_vectors(dp->sale_vector & 0xf0); #ifdef SMP result = ia64_sal_entry(SAL_SET_VECTORS, SAL_OS_BOOT_RENDEZ, ia64_tpa(FDESC_FUNC(os_boot_rendez)), ia64_tpa(FDESC_GP(os_boot_rendez)), 0, 0, 0, 0); #endif break; } } p += sizes[*p]; } if (ipi_vector[IPI_AP_WAKEUP] == 0) setup_ipi_vectors(0xf0); } Index: head/sys/ia64/ia64/ssc.c =================================================================== --- head/sys/ia64/ia64/ssc.c (revision 110210) +++ head/sys/ia64/ia64/ssc.c (revision 110211) @@ -1,285 +1,280 @@ /*- * Copyright (c) 2000 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define SSC_GETCHAR 21 #define SSC_PUTCHAR 31 #define SSC_POLL_HZ 50 static d_open_t sscopen; static d_close_t sscclose; static d_ioctl_t sscioctl; #define CDEV_MAJOR 97 static struct cdevsw ssc_cdevsw = { /* open */ sscopen, /* close */ sscclose, /* read */ ttyread, /* write */ ttywrite, /* ioctl */ sscioctl, /* poll */ ttypoll, /* mmap */ nommap, /* strategy */ nostrategy, /* name */ "ssc", /* maj */ CDEV_MAJOR, /* dump */ nodump, /* psize */ nopsize, /* flags */ 0, }; static struct tty *ssc_tp = NULL; static int polltime; static struct callout_handle ssctimeouthandle = CALLOUT_HANDLE_INITIALIZER(&ssctimeouthandle); static void sscstart(struct tty *); static void ssctimeout(void *); static int sscparam(struct tty *, struct termios *); static void sscstop(struct tty *, int); static u_int64_t ssc(u_int64_t in0, u_int64_t in1, u_int64_t in2, u_int64_t in3, int which) { register u_int64_t ret0 __asm("r8"); __asm __volatile("mov r15=%1\n\t" "break 0x80001" : "=r"(ret0) : "r"(which), "r"(in0), "r"(in1), "r"(in2), "r"(in3)); return ret0; } static void ssccnprobe(struct consdev *cp) { - if (!ia64_running_in_simulator()) - return; - cp->cn_dev = makedev(CDEV_MAJOR, 0); cp->cn_pri = CN_INTERNAL; } static void ssccninit(struct consdev *cp) { } static void 
ssccnattach(void *arg) { - if (!ia64_running_in_simulator()) - return; make_dev(&ssc_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, "ssccons"); } SYSINIT(ssccnattach, SI_SUB_DRIVERS, SI_ORDER_ANY, ssccnattach, 0); static void ssccnputc(dev_t dev, int c) { ssc(c, 0, 0, 0, SSC_PUTCHAR); } static int ssccngetc(dev_t dev) { int c; do { c = ssc(0, 0, 0, 0, SSC_GETCHAR); } while (c == 0); return c; } static int ssccncheckc(dev_t dev) { int c; c = ssc(0, 0, 0, 0, SSC_GETCHAR); if (!c) return -1; return c; } static int sscopen(dev_t dev, int flag, int mode, struct thread *td) { struct tty *tp; int s; int error = 0, setuptimeout = 0; tp = ssc_tp = dev->si_tty = ttymalloc(ssc_tp); s = spltty(); tp->t_oproc = sscstart; tp->t_param = sscparam; tp->t_stop = sscstop; tp->t_dev = dev; if ((tp->t_state & TS_ISOPEN) == 0) { tp->t_state |= TS_CARR_ON; ttychars(tp); tp->t_iflag = TTYDEF_IFLAG; tp->t_oflag = TTYDEF_OFLAG; tp->t_cflag = TTYDEF_CFLAG|CLOCAL; tp->t_lflag = TTYDEF_LFLAG; tp->t_ispeed = tp->t_ospeed = TTYDEF_SPEED; ttsetwater(tp); setuptimeout = 1; } else if ((tp->t_state & TS_XCLUDE) && suser(td)) { splx(s); return EBUSY; } splx(s); error = (*linesw[tp->t_line].l_open)(dev, tp); if (error == 0 && setuptimeout) { polltime = hz / SSC_POLL_HZ; if (polltime < 1) polltime = 1; ssctimeouthandle = timeout(ssctimeout, tp, polltime); } return error; } static int sscclose(dev_t dev, int flag, int mode, struct thread *td) { int unit = minor(dev); struct tty *tp = ssc_tp; if (unit != 0) return ENXIO; untimeout(ssctimeout, tp, ssctimeouthandle); (*linesw[tp->t_line].l_close)(tp, flag); ttyclose(tp); return 0; } static int sscioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct thread *td) { int unit = minor(dev); struct tty *tp = ssc_tp; int error; if (unit != 0) return ENXIO; error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, td); if (error != ENOIOCTL) return error; error = ttioctl(tp, cmd, data, flag); if (error != ENOIOCTL) return error; return ENOTTY; } static int 
sscparam(struct tty *tp, struct termios *t) { return 0; } static void sscstart(struct tty *tp) { int s; s = spltty(); if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) { ttwwakeup(tp); splx(s); return; } tp->t_state |= TS_BUSY; while (tp->t_outq.c_cc != 0) ssccnputc(tp->t_dev, getc(&tp->t_outq)); tp->t_state &= ~TS_BUSY; ttwwakeup(tp); splx(s); } /* * Stop output on a line. */ static void sscstop(struct tty *tp, int flag) { int s; s = spltty(); if (tp->t_state & TS_BUSY) if ((tp->t_state & TS_TTSTOP) == 0) tp->t_state |= TS_FLUSH; splx(s); } static void ssctimeout(void *v) { struct tty *tp = v; int c; while ((c = ssccncheckc(tp->t_dev)) != -1) { if (tp->t_state & TS_ISOPEN) (*linesw[tp->t_line].l_rint)(c, tp); } ssctimeouthandle = timeout(ssctimeout, tp, polltime); } CONS_DRIVER(ssc, ssccnprobe, ssccninit, NULL, ssccngetc, ssccncheckc, ssccnputc, NULL); Index: head/sys/ia64/ia64/sscdisk.c =================================================================== --- head/sys/ia64/ia64/sscdisk.c (revision 110210) +++ head/sys/ia64/ia64/sscdisk.c (revision 110211) @@ -1,307 +1,304 @@ /* * ---------------------------------------------------------------------------- * "THE BEER-WARE LICENSE" (Revision 42): * wrote this file. As long as you retain this notice you * can do whatever you want with this stuff. If we meet some day, and you think * this stuff is worth it, you can buy me a beer in return. 
Poul-Henning Kamp * ---------------------------------------------------------------------------- * * $FreeBSD$ * */ #include "opt_md.h" #include "opt_ski.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef SKI_ROOT_FILESYSTEM #define SKI_ROOT_FILESYSTEM "ia64-root.fs" #endif #define SSC_OPEN 50 #define SSC_CLOSE 51 #define SSC_READ 52 #define SSC_WRITE 53 #define SSC_GET_COMPLETION 54 #define SSC_WAIT_COMPLETION 55 struct disk_req { unsigned long addr; unsigned len; }; struct disk_stat { int fd; unsigned count; }; static u_int64_t ssc(u_int64_t in0, u_int64_t in1, u_int64_t in2, u_int64_t in3, int which) { register u_int64_t ret0 __asm("r8"); __asm __volatile("mov r15=%1\n\t" "break 0x80001" : "=r"(ret0) : "r"(which), "r"(in0), "r"(in1), "r"(in2), "r"(in3)); return ret0; } #ifndef SSC_NSECT #define SSC_NSECT 409600 #endif MALLOC_DEFINE(M_SSC, "SSC disk", "Memory Disk"); MALLOC_DEFINE(M_SSCSECT, "SSC sectors", "Memory Disk Sectors"); static int ssc_debug; SYSCTL_INT(_debug, OID_AUTO, sscdebug, CTLFLAG_RW, &ssc_debug, 0, ""); static int sscrootready; #define CDEV_MAJOR 157 static d_strategy_t sscstrategy; static d_open_t sscopen; static d_ioctl_t sscioctl; static struct cdevsw ssc_cdevsw = { /* open */ sscopen, /* close */ nullclose, /* read */ physread, /* write */ physwrite, /* ioctl */ sscioctl, /* poll */ nopoll, /* mmap */ nommap, /* strategy */ sscstrategy, /* name */ "sscdisk", /* maj */ CDEV_MAJOR, /* dump */ nodump, /* psize */ nopsize, /* flags */ D_DISK, }; static struct cdevsw sscdisk_cdevsw; static LIST_HEAD(, ssc_s) ssc_softc_list = LIST_HEAD_INITIALIZER(&ssc_softc_list); struct ssc_s { int unit; LIST_ENTRY(ssc_s) list; struct devstat stats; struct bio_queue_head bio_queue; struct disk disk; dev_t dev; int busy; unsigned nsect; int fd; }; static int sscunits; static int sscopen(dev_t dev, int flag, 
int fmt, struct thread *td) { struct ssc_s *sc; if (ssc_debug) printf("sscopen(%s %x %x %p)\n", devtoname(dev), flag, fmt, td); sc = dev->si_drv1; sc->disk.d_sectorsize = DEV_BSIZE; sc->disk.d_mediasize = (off_t)sc->nsect * DEV_BSIZE; sc->disk.d_fwsectors = 0; sc->disk.d_fwheads = 0; return (0); } static int sscioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, struct thread *td) { if (ssc_debug) printf("sscioctl(%s %lx %p %x %p)\n", devtoname(dev), cmd, addr, flags, td); return (ENOIOCTL); } static void sscstrategy(struct bio *bp) { struct ssc_s *sc; int s; devstat_trans_flags dop; unsigned sscop = 0; struct disk_req req; struct disk_stat stat; u_long len, va, off; if (ssc_debug > 1) printf("sscstrategy(%p) %s %x, %ld, %ld, %p)\n", bp, devtoname(bp->bio_dev), bp->bio_flags, bp->bio_blkno, bp->bio_bcount / DEV_BSIZE, bp->bio_data); sc = bp->bio_dev->si_drv1; s = splbio(); bioqdisksort(&sc->bio_queue, bp); if (sc->busy) { splx(s); return; } sc->busy++; while (1) { bp = bioq_first(&sc->bio_queue); if (bp) bioq_remove(&sc->bio_queue, bp); splx(s); if (!bp) break; devstat_start_transaction(&sc->stats); if (bp->bio_cmd == BIO_READ) { dop = DEVSTAT_READ; sscop = SSC_READ; } else { dop = DEVSTAT_WRITE; sscop = SSC_WRITE; } va = (u_long) bp->bio_data; len = bp->bio_bcount; off = bp->bio_pblkno << DEV_BSHIFT; while (len > 0) { u_int t; if ((va & PAGE_MASK) + len > PAGE_SIZE) t = PAGE_SIZE - (va & PAGE_MASK); else t = len; req.len = t; req.addr = ia64_tpa(va); if (ssc_debug > 1) printf("sscstrategy: reading %d bytes from 0x%ld into 0x%lx\n", req.len, off, req.addr); ssc(sc->fd, 1, ia64_tpa((long) &req), off, sscop); stat.fd = sc->fd; ssc(ia64_tpa((long)&stat), 0, 0, 0, SSC_WAIT_COMPLETION); va += t; len -= t; off += t; } bp->bio_resid = 0; biofinish(bp, &sc->stats, 0); s = splbio(); } sc->busy = 0; return; } static struct ssc_s * ssccreate(int unit) { struct ssc_s *sc; int fd; fd = ssc(ia64_tpa((u_int64_t) SKI_ROOT_FILESYSTEM), 1, 0, 0, SSC_OPEN); if (fd == -1) return 
(NULL); if (unit == -1) unit = sscunits++; /* Make sure this unit isn't already in action */ LIST_FOREACH(sc, &ssc_softc_list, list) { if (sc->unit == unit) return (NULL); } MALLOC(sc, struct ssc_s *,sizeof(*sc), M_SSC, M_ZERO); LIST_INSERT_HEAD(&ssc_softc_list, sc, list); sc->unit = unit; bioq_init(&sc->bio_queue); devstat_add_entry(&sc->stats, "sscdisk", sc->unit, DEV_BSIZE, DEVSTAT_NO_ORDERED_TAGS, DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_OTHER, DEVSTAT_PRIORITY_OTHER); sc->dev = disk_create(sc->unit, &sc->disk, 0, &ssc_cdevsw, &sscdisk_cdevsw); sc->dev->si_drv1 = sc; sc->nsect = SSC_NSECT; sc->fd = fd; if (sc->unit == 0) sscrootready = 1; return (sc); } #if 0 static void ssc_clone (void *arg, char *name, int namelen, dev_t *dev) { int i, u; if (*dev != NODEV) return; i = dev_stdclone(name, NULL, "ssc", &u); if (i == 0) return; /* XXX: should check that next char is [\0sa-h] */ /* * Now we cheat: We just create the disk, but don't match. * Since we run before it, subr_disk.c::disk_clone() will * find our disk and match the sought for device. */ ssccreate(u); return; } #endif static void ssc_drvinit(void *unused) { - if (!ia64_running_in_simulator()) - return; - ssccreate(-1); } SYSINIT(sscdev,SI_SUB_DRIVERS,SI_ORDER_MIDDLE+CDEV_MAJOR, ssc_drvinit,NULL) static void ssc_takeroot(void *junk) { if (sscrootready) rootdevnames[0] = "ufs:/dev/sscdisk0c"; } SYSINIT(ssc_root, SI_SUB_MOUNT_ROOT, SI_ORDER_FIRST, ssc_takeroot, NULL); Index: head/sys/ia64/include/md_var.h =================================================================== --- head/sys/ia64/include/md_var.h (revision 110210) +++ head/sys/ia64/include/md_var.h (revision 110211) @@ -1,62 +1,61 @@ /*- * Copyright (c) 1998 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_MD_VAR_H_ #define _MACHINE_MD_VAR_H_ /* * Miscellaneous machine-dependent declarations. 
*/ extern char sigcode[]; extern char esigcode[]; extern int szsigcode; extern long Maxmem; struct fpreg; struct thread; struct reg; struct ia64_fdesc { u_int64_t func; u_int64_t gp; }; #define FDESC_FUNC(fn) (((struct ia64_fdesc *) fn)->func) #define FDESC_GP(fn) (((struct ia64_fdesc *) fn)->gp) void busdma_swi(void); void cpu_halt(void); void cpu_reset(void); -int ia64_running_in_simulator(void); int is_physical_memory(vm_offset_t addr); void os_boot_rendez(void); void os_mca(void); void swi_vm(void *); #endif /* !_MACHINE_MD_VAR_H_ */ Index: head/sys/ia64/include/sal.h =================================================================== --- head/sys/ia64/include/sal.h (revision 110210) +++ head/sys/ia64/include/sal.h (revision 110211) @@ -1,141 +1,142 @@ /*- * Copyright (c) 2001 Doug Rabson * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef _MACHINE_SAL_H_ #define _MACHINE_SAL_H_ struct sal_system_table { char sal_signature[4]; +#define SAL_SIGNATURE "SST_" u_int32_t sal_length; u_int8_t sal_rev[2]; u_int16_t sal_entry_count; u_int8_t sal_checksum; u_int8_t sal_reserved1[7]; u_int8_t sal_a_version[2]; u_int8_t sal_b_version[2]; char sal_oem_id[32]; char sal_product_id[32]; u_int8_t sal_reserved2[8]; }; struct sal_entrypoint_descriptor { u_int8_t sale_type; /* == 0 */ u_int8_t sale_reserved1[7]; u_int64_t sale_pal_proc; u_int64_t sale_sal_proc; u_int64_t sale_sal_gp; u_int8_t sale_reserved2[16]; }; struct sal_memory_descriptor { u_int8_t sale_type; /* == 1 */ u_int8_t sale_need_virtual; u_int8_t sale_current_attribute; u_int8_t sale_access_rights; u_int8_t sale_supported_attributes; u_int8_t sale_reserved1; u_int8_t sale_memory_type[2]; u_int64_t sale_physical_address; u_int32_t sale_length; u_int8_t sale_reserved2[12]; }; struct sal_platform_descriptor { u_int8_t sale_type; /* == 2 */ u_int8_t sale_features; u_int8_t sale_reserved[14]; }; struct sal_tr_descriptor { u_int8_t sale_type; /* == 3 */ u_int8_t sale_register_type; u_int8_t sale_register_number; u_int8_t sale_reserved1[5]; u_int64_t sale_virtual_address; u_int64_t sale_page_size; u_int8_t sale_reserved2[8]; }; struct sal_ptc_cache_descriptor { u_int8_t sale_type; /* == 4 */ u_int8_t sale_reserved[3]; u_int32_t sale_domains; u_int64_t sale_address; }; struct sal_ap_wakeup_descriptor { u_int8_t sale_type; /* == 
5 */ u_int8_t sale_mechanism; u_int8_t sale_reserved[6]; u_int64_t sale_vector; }; /* * SAL Procedure numbers. */ #define SAL_SET_VECTORS 0x01000000 #define SAL_GET_STATE_INFO 0x01000001 #define SAL_GET_STATE_INFO_SIZE 0x01000002 #define SAL_CLEAR_STATE_INFO 0x01000003 #define SAL_MC_RENDEZ 0x01000004 #define SAL_MC_SET_PARAMS 0x01000005 #define SAL_REGISTER_PHYSICAL_ADDR 0x01000006 #define SAL_CACHE_FLUSH 0x01000008 #define SAL_CACHE_INIT 0x01000009 #define SAL_PCI_CONFIG_READ 0x01000010 #define SAL_PCI_CONFIG_WRITE 0x01000011 #define SAL_FREQ_BASE 0x01000012 #define SAL_UPDATE_PAL 0x01000020 /* SAL_SET_VECTORS event handler types */ #define SAL_OS_MCA 0 #define SAL_OS_INIT 1 #define SAL_OS_BOOT_RENDEZ 2 /* SAL_GET_STATE_INFO, SAL_GET_STATE_INFO_SIZE types */ #define SAL_INFO_MCA 0 #define SAL_INFO_INIT 1 #define SAL_INFO_CMC 2 #define SAL_INFO_CPE 3 #define SAL_INFO_TYPES 4 /* number of types we know about */ struct ia64_sal_result { int64_t sal_status; u_int64_t sal_result[3]; }; typedef struct ia64_sal_result sal_entry_t (u_int64_t, u_int64_t, u_int64_t, u_int64_t, u_int64_t, u_int64_t, u_int64_t, u_int64_t); extern sal_entry_t *ia64_sal_entry; extern void ia64_sal_init(struct sal_system_table *saltab); #endif /* _MACHINE_SAL_H_ */