D2199.id4656.diff
Index: sys/arm/arm/devmap.c
===================================================================
--- sys/arm/arm/devmap.c
+++ sys/arm/arm/devmap.c
@@ -40,10 +40,18 @@
#include <vm/pmap.h>
#include <machine/armreg.h>
#include <machine/devmap.h>
+#include <machine/vmparam.h>
static const struct arm_devmap_entry *devmap_table;
static boolean_t devmap_bootstrap_done = false;
+#if defined(__aarch64__)
+#define MAX_VADDR VM_MAX_KERNEL_ADDRESS
+#define PTE_DEVICE VM_MEMATTR_DEVICE
+#elif defined(__arm__)
+#define MAX_VADDR ARM_VECTORS_HIGH
+#endif
+
/*
* The allocated-kva (akva) devmap table and metadata. Platforms can call
* arm_devmap_add_entry() to add static device mappings to this table using
@@ -53,7 +61,11 @@
#define AKVA_DEVMAP_MAX_ENTRIES 32
static struct arm_devmap_entry akva_devmap_entries[AKVA_DEVMAP_MAX_ENTRIES];
static u_int akva_devmap_idx;
-static vm_offset_t akva_devmap_vaddr = ARM_VECTORS_HIGH;
+static vm_offset_t akva_devmap_vaddr = MAX_VADDR;
+
+#ifdef __aarch64__
+extern int early_boot;
+#endif
/*
* Print the contents of the static mapping table using the provided printf-like
@@ -99,7 +111,7 @@
if (akva_devmap_idx > 0)
return (akva_devmap_vaddr);
- lowaddr = ARM_VECTORS_HIGH;
+ lowaddr = MAX_VADDR;
for (pd = devmap_table; pd != NULL && pd->pd_size != 0; ++pd) {
if (lowaddr > pd->pd_va)
lowaddr = pd->pd_va;
@@ -136,9 +148,12 @@
* align the virtual address to the next-lower 1MB boundary so that we
* end up with a nice efficient section mapping.
*/
+#ifdef __arm__
if ((pa & 0x000fffff) == 0 && (sz & 0x000fffff) == 0) {
akva_devmap_vaddr = trunc_1mpage(akva_devmap_vaddr - sz);
- } else {
+ } else
+#endif
+ {
akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - sz);
}
m = &akva_devmap_entries[akva_devmap_idx++];
@@ -173,6 +188,10 @@
arm_devmap_bootstrap(vm_offset_t l1pt, const struct arm_devmap_entry *table)
{
const struct arm_devmap_entry *pd;
+#ifdef __aarch64__
+ vm_offset_t pa, va;
+ vm_size_t size;
+#endif
devmap_bootstrap_done = true;
@@ -186,8 +205,21 @@
return;
for (pd = devmap_table; pd->pd_size != 0; ++pd) {
+#if defined(__arm__)
pmap_map_chunk(l1pt, pd->pd_va, pd->pd_pa, pd->pd_size,
pd->pd_prot,pd->pd_cache);
+#elif defined(__aarch64__)
+ va = pd->pd_va;
+ pa = pd->pd_pa;
+ size = pd->pd_size;
+
+ while (size > 0) {
+ pmap_kenter_device(va, pa);
+ size -= PAGE_SIZE;
+ va += PAGE_SIZE;
+ pa += PAGE_SIZE;
+ }
+#endif
}
}
@@ -252,12 +284,20 @@
/* First look in the static mapping table. */
if ((rva = arm_devmap_ptov(pa, size)) != NULL)
return (rva);
-
+
offset = pa & PAGE_MASK;
pa = trunc_page(pa);
size = round_page(size + offset);
-
- va = kva_alloc(size);
+
+#ifdef __aarch64__
+ if (early_boot) {
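+		/*
+		 * kva_alloc() is not yet available this early in boot, so
+		 * carve the mapping out of the top of the devmap region.
+		 */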
+ akva_devmap_vaddr = trunc_page(akva_devmap_vaddr - size);
+ va = akva_devmap_vaddr;
+ KASSERT(va >= VM_MAX_KERNEL_ADDRESS - L2_SIZE,
+ ("Too many early devmap mappings"));
+ } else
+#endif
+ va = kva_alloc(size);
if (!va)
panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
Index: sys/arm64/arm64/autoconf.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/autoconf.c
@@ -0,0 +1,109 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)autoconf.c 7.1 (Berkeley) 5/9/91
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Set up the system to run on the current machine.
+ *
+ * Configure() is called at boot time and initializes the vba
+ * device tables and the memory controller monitoring. Available
+ * devices are determined (from possibilities mentioned in ioconf.c),
+ * and the drivers are initialized.
+ */
+#include "opt_bootp.h"
+#include "opt_bus.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/conf.h>
+#include <sys/reboot.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/mount.h>
+#include <sys/cons.h>
+
+#include <sys/socket.h>
+#include <net/if.h>
+#include <net/if_dl.h>
+#include <net/if_types.h>
+#include <net/if_var.h>
+#include <net/ethernet.h>
+#include <netinet/in.h>
+
+#include <machine/intr.h>
+#include <machine/md_var.h>
+
+static void configure_first(void *);
+static void configure(void *);
+static void configure_final(void *);
+
+SYSINIT(configure1, SI_SUB_CONFIGURE, SI_ORDER_FIRST, configure_first, NULL);
+/* SI_ORDER_SECOND is hookable */
+SYSINIT(configure2, SI_SUB_CONFIGURE, SI_ORDER_THIRD, configure, NULL);
+/* SI_ORDER_MIDDLE is hookable */
+SYSINIT(configure3, SI_SUB_CONFIGURE, SI_ORDER_ANY, configure_final, NULL);
+
+/*
+ * Determine i/o configuration for a machine.
+ */
+static void
+configure_first(void *dummy)
+{
+
+ /* nexus0 is the top of the device tree */
+ device_add_child(root_bus, "nexus", 0);
+}
+
+static void
+configure(void *dummy)
+{
+
+ /* initialize new bus architecture */
+ root_bus_configure();
+}
+
+static void
+configure_final(void *dummy)
+{
+ arm_enable_intr();
+ cninit_finish();
+
+ if (bootverbose)
+ printf("Device configuration finished.\n");
+
+ cold = 0;
+}
Index: sys/arm64/arm64/bcopy.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/bcopy.c
@@ -0,0 +1,139 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * From: sys/powerpc/powerpc/bcopy.c
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+/*
+ * sizeof(word) MUST BE A POWER OF TWO
+ * SO THAT wmask BELOW IS ALL ONES
+ */
+typedef long word; /* "word" used for optimal copy speed */
+
+#define wsize sizeof(word)
+#define wmask (wsize - 1)
+
+/*
+ * Copy a block of memory, handling overlap.
+ * This is the routine that actually implements
+ * (the portable versions of) bcopy, memcpy, and memmove.
+ */
+void *
+memcpy(void *dst0, const void *src0, size_t length)
+{
+ char *dst;
+ const char *src;
+ size_t t;
+
+ dst = dst0;
+ src = src0;
+
+ if (length == 0 || dst == src) { /* nothing to do */
+ goto done;
+ }
+
+ /*
+ * Macros: loop-t-times; and loop-t-times, t>0
+ */
+#define TLOOP(s) if (t) TLOOP1(s)
+#define TLOOP1(s) do { s; } while (--t)
+
+ if ((unsigned long)dst < (unsigned long)src) {
+ /*
+ * Copy forward.
+ */
+		t = (uintptr_t)src;	/* only need low bits */
+
+ if ((t | (uintptr_t)dst) & wmask) {
+ /*
+ * Try to align operands. This cannot be done
+ * unless the low bits match.
+ */
+ if ((t ^ (uintptr_t)dst) & wmask || length < wsize) {
+ t = length;
+ } else {
+ t = wsize - (t & wmask);
+ }
+
+ length -= t;
+ TLOOP1(*dst++ = *src++);
+ }
+ /*
+ * Copy whole words, then mop up any trailing bytes.
+ */
+ t = length / wsize;
+ TLOOP(*(word *)dst = *(const word *)src; src += wsize;
+ dst += wsize);
+ t = length & wmask;
+ TLOOP(*dst++ = *src++);
+ } else {
+ /*
+ * Copy backwards. Otherwise essentially the same.
+ * Alignment works as before, except that it takes
+ * (t&wmask) bytes to align, not wsize-(t&wmask).
+ */
+ src += length;
+ dst += length;
+ t = (uintptr_t)src;
+
+ if ((t | (uintptr_t)dst) & wmask) {
+ if ((t ^ (uintptr_t)dst) & wmask || length <= wsize) {
+ t = length;
+ } else {
+ t &= wmask;
+ }
+
+ length -= t;
+ TLOOP1(*--dst = *--src);
+ }
+ t = length / wsize;
+ TLOOP(src -= wsize; dst -= wsize;
+ *(word *)dst = *(const word *)src);
+ t = length & wmask;
+ TLOOP(*--dst = *--src);
+ }
+done:
+ return (dst0);
+}
+
+void
+bcopy(const void *src0, void *dst0, size_t length)
+{
+
+ memcpy(dst0, src0, length);
+}
+
Index: sys/arm64/arm64/bus_machdep.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/bus_machdep.c
@@ -0,0 +1,204 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "opt_platform.h"
+
+#include <sys/param.h>
+__FBSDID("$FreeBSD$");
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+
+uint8_t generic_bs_r_1(void *, bus_space_handle_t, bus_size_t);
+uint16_t generic_bs_r_2(void *, bus_space_handle_t, bus_size_t);
+uint32_t generic_bs_r_4(void *, bus_space_handle_t, bus_size_t);
+uint64_t generic_bs_r_8(void *, bus_space_handle_t, bus_size_t);
+
+void generic_bs_rm_1(void *, bus_space_handle_t, bus_size_t, uint8_t *,
+ bus_size_t);
+void generic_bs_rm_2(void *, bus_space_handle_t, bus_size_t, uint16_t *,
+ bus_size_t);
+void generic_bs_rm_4(void *, bus_space_handle_t, bus_size_t, uint32_t *,
+ bus_size_t);
+void generic_bs_rm_8(void *, bus_space_handle_t, bus_size_t, uint64_t *,
+ bus_size_t);
+
+void generic_bs_w_1(void *, bus_space_handle_t, bus_size_t, uint8_t);
+void generic_bs_w_2(void *, bus_space_handle_t, bus_size_t, uint16_t);
+void generic_bs_w_4(void *, bus_space_handle_t, bus_size_t, uint32_t);
+void generic_bs_w_8(void *, bus_space_handle_t, bus_size_t, uint64_t);
+
+void generic_bs_wm_1(void *, bus_space_handle_t, bus_size_t, const uint8_t *,
+ bus_size_t);
+void generic_bs_wm_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *,
+ bus_size_t);
+void generic_bs_wm_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *,
+ bus_size_t);
+void generic_bs_wm_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *,
+ bus_size_t);
+
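+/*
+ * Map a range of bus (physical) addresses into the kernel's virtual
+ * address space so it can be accessed through this bus space tag.
+ */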
+static int
+generic_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
+ bus_space_handle_t *bshp)
+{
+ void *va;
+
+ va = pmap_mapdev(bpa, size);
+ if (va == NULL)
+ return (ENOMEM);
+ *bshp = (bus_space_handle_t)va;
+ return (0);
+}
+
+static void
+generic_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
+{
+
+ pmap_unmapdev(bsh, size);
+}
+
+static void
+generic_bs_barrier(void *t, bus_space_handle_t bsh, bus_size_t offset,
+ bus_size_t size, int flags)
+{
+}
+
+static int
+generic_bs_subregion(void *t, bus_space_handle_t bsh, bus_size_t offset,
+ bus_size_t size, bus_space_handle_t *nbshp)
+{
+
+ *nbshp = bsh + offset;
+ return (0);
+}
+
+struct bus_space memmap_bus = {
+ /* cookie */
+ .bs_cookie = NULL,
+
+ /* mapping/unmapping */
+ .bs_map = generic_bs_map,
+ .bs_unmap = generic_bs_unmap,
+ .bs_subregion = generic_bs_subregion,
+
+ /* allocation/deallocation */
+ .bs_alloc = NULL,
+ .bs_free = NULL,
+
+ /* barrier */
+ .bs_barrier = generic_bs_barrier,
+
+ /* read single */
+ .bs_r_1 = generic_bs_r_1,
+ .bs_r_2 = generic_bs_r_2,
+ .bs_r_4 = generic_bs_r_4,
+ .bs_r_8 = generic_bs_r_8,
+
+ /* read multiple */
+ .bs_rm_1 = generic_bs_rm_1,
+ .bs_rm_2 = generic_bs_rm_2,
+ .bs_rm_4 = generic_bs_rm_4,
+ .bs_rm_8 = generic_bs_rm_8,
+
+ /* write single */
+ .bs_w_1 = generic_bs_w_1,
+ .bs_w_2 = generic_bs_w_2,
+ .bs_w_4 = generic_bs_w_4,
+ .bs_w_8 = generic_bs_w_8,
+
+ /* write multiple */
+ .bs_wm_1 = generic_bs_wm_1,
+ .bs_wm_2 = generic_bs_wm_2,
+ .bs_wm_4 = generic_bs_wm_4,
+ .bs_wm_8 = generic_bs_wm_8,
+
+ /* write region */
+ .bs_wr_1 = NULL,
+ .bs_wr_2 = NULL,
+ .bs_wr_4 = NULL,
+ .bs_wr_8 = NULL,
+
+ /* set multiple */
+ .bs_sm_1 = NULL,
+ .bs_sm_2 = NULL,
+ .bs_sm_4 = NULL,
+ .bs_sm_8 = NULL,
+
+ /* set region */
+ .bs_sr_1 = NULL,
+ .bs_sr_2 = NULL,
+ .bs_sr_4 = NULL,
+ .bs_sr_8 = NULL,
+
+ /* copy */
+ .bs_c_1 = NULL,
+ .bs_c_2 = NULL,
+ .bs_c_4 = NULL,
+ .bs_c_8 = NULL,
+
+ /* read single stream */
+ .bs_r_1_s = NULL,
+ .bs_r_2_s = NULL,
+ .bs_r_4_s = NULL,
+ .bs_r_8_s = NULL,
+
+ /* read multiple stream */
+ .bs_rm_1_s = NULL,
+ .bs_rm_2_s = NULL,
+ .bs_rm_4_s = NULL,
+ .bs_rm_8_s = NULL,
+
+ /* read region stream */
+ .bs_rr_1_s = NULL,
+ .bs_rr_2_s = NULL,
+ .bs_rr_4_s = NULL,
+ .bs_rr_8_s = NULL,
+
+ /* write single stream */
+ .bs_w_1_s = NULL,
+ .bs_w_2_s = NULL,
+ .bs_w_4_s = NULL,
+ .bs_w_8_s = NULL,
+
+ /* write multiple stream */
+ .bs_wm_1_s = NULL,
+ .bs_wm_2_s = NULL,
+ .bs_wm_4_s = NULL,
+ .bs_wm_8_s = NULL,
+
+ /* write region stream */
+ .bs_wr_1_s = NULL,
+ .bs_wr_2_s = NULL,
+ .bs_wr_4_s = NULL,
+ .bs_wr_8_s = NULL,
+};
+
+#ifdef FDT
+bus_space_tag_t fdtbus_bs_tag = &memmap_bus;
+#endif
Index: sys/arm64/arm64/bus_space_asm.S
===================================================================
--- /dev/null
+++ sys/arm64/arm64/bus_space_asm.S
@@ -0,0 +1,235 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+
+__FBSDID("$FreeBSD$");
+
+ENTRY(generic_bs_r_1)
+ ldrb w0, [x1, x2]
+ ret
+END(generic_bs_r_1)
+
+ENTRY(generic_bs_r_2)
+ ldrh w0, [x1, x2]
+ ret
+END(generic_bs_r_2)
+
+ENTRY(generic_bs_r_4)
+ ldr w0, [x1, x2]
+ ret
+END(generic_bs_r_4)
+
+ENTRY(generic_bs_r_8)
+ ldr x0, [x1, x2]
+ ret
+END(generic_bs_r_8)
+
+ENTRY(generic_bs_rm_1)
+ /* If there is anything to read. */
+ cbz x4, 2f
+
+ /* Calculate the device address. */
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Read the data. */
+1: ldrb w1, [x0]
+	strb	w1, [x3], #1
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_rm_1)
+
+ENTRY(generic_bs_rm_2)
+ /* If there is anything to read. */
+ cbz x4, 2f
+
+ /* Calculate the device address. */
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Read the data. */
+1: ldrh w1, [x0]
+ strh w1, [x3], #2
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_rm_2)
+
+ENTRY(generic_bs_rm_4)
+ /* If there is anything to read. */
+ cbz x4, 2f
+
+ /* Calculate the device address. */
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Read the data. */
+1: ldr w1, [x0]
+	str	w1, [x3], #4
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_rm_4)
+
+ENTRY(generic_bs_rm_8)
+ /* If there is anything to read. */
+ cbz x4, 2f
+
+ /* Calculate the device address. */
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Read the data. */
+1: ldr x1, [x0]
+	str	x1, [x3], #8
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_rm_8)
+
+
+ENTRY(generic_bs_w_1)
+ strb w3, [x1, x2]
+ ret
+END(generic_bs_w_1)
+
+ENTRY(generic_bs_w_2)
+ strh w3, [x1, x2]
+ ret
+END(generic_bs_w_2)
+
+ENTRY(generic_bs_w_4)
+ str w3, [x1, x2]
+ ret
+END(generic_bs_w_4)
+
+ENTRY(generic_bs_w_8)
+ str x3, [x1, x2]
+ ret
+END(generic_bs_w_8)
+
+ENTRY(generic_bs_wm_1)
+ /* If there is anything to write. */
+ cbz x4, 2f
+
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Write the data */
+1: ldrb w1, [x3], #1
+ strb w1, [x0]
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_wm_1)
+
+ENTRY(generic_bs_wm_2)
+ /* If there is anything to write. */
+ cbz x4, 2f
+
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Write the data */
+1: ldrh w1, [x3], #2
+ strh w1, [x0]
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_wm_2)
+
+ENTRY(generic_bs_wm_4)
+ /* If there is anything to write. */
+ cbz x4, 2f
+
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Write the data */
+1: ldr w1, [x3], #4
+ str w1, [x0]
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_wm_4)
+
+ENTRY(generic_bs_wm_8)
+ /* If there is anything to write. */
+ cbz x4, 2f
+
+ add x0, x1, x2
+ /*
+ * x0 = The device address.
+ * x3 = The kernel address.
+ * x4 = Count
+ */
+
+ /* Write the data */
+1: ldr x1, [x3], #8
+ str x1, [x0]
+ subs x4, x4, #1
+ b.ne 1b
+
+2: ret
+END(generic_bs_wm_8)
Index: sys/arm64/arm64/busdma_machdep.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/busdma_machdep.c
@@ -0,0 +1,68 @@
+/* $FreeBSD$ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/bus.h>
+
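+/*
+ * Stub busdma implementation: arm64 does not have a real busdma back
+ * end yet, so each of these routines panics if it is ever reached.
+ */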
+int
+_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf,
+ bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp)
+{
+
+ panic("_bus_dmamap_load_phys");
+}
+
+int
+_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, struct vm_page **ma,
+ bus_size_t tlen, int ma_offs, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+
+ panic("_bus_dmamap_load_ma");
+}
+
+int
+_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
+ bus_size_t buflen, pmap_t pmap, int flags, bus_dma_segment_t *segs,
+ int *segp)
+{
+
+ panic("_bus_dmamap_load_buffer");
+}
+
+void
+__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct memdesc *mem, bus_dmamap_callback_t *callback, void *callback_arg)
+{
+
+ panic("__bus_dmamap_waitok");
+}
+
+bus_dma_segment_t *
+_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map,
+ bus_dma_segment_t *segs, int nsegs, int error)
+{
+
+ panic("_bus_dmamap_complete");
+}
+
+void
+_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
+{
+
+ panic("_bus_dmamap_unload");
+}
+
+void
+_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
+{
+
+ panic("_bus_dmamap_sync");
+}
\ No newline at end of file
Index: sys/arm64/arm64/clock.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/clock.c
@@ -0,0 +1,39 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/types.h>
+#include <sys/systm.h>
+
+void
+cpu_initclocks(void)
+{
+
+ cpu_initclocks_bsp();
+}
Index: sys/arm64/arm64/copyinout.S
===================================================================
--- /dev/null
+++ sys/arm64/arm64/copyinout.S
@@ -0,0 +1,118 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/errno.h>
+
+#include "assym.s"
+
+/*
+ * Fault handler for the copy{in,out} functions below.
+ */
+ENTRY(copyio_fault)
+ SET_FAULT_HANDLER(xzr, x1) /* Clear the handler */
+ mov x0, #EFAULT
+ ret
+END(copyio_fault)
+
+/*
+ * Copies from a kernel to user address
+ *
+ * int copyout(const void *kaddr, void *udaddr, size_t len)
+ */
+ENTRY(copyout)
+ cbz x2, 2f /* If len == 0 then skip loop */
+
+ adr x6, copyio_fault /* Get the handler address */
+ SET_FAULT_HANDLER(x6, x7) /* Set the handler */
+
+1: ldrb w4, [x0], #1 /* Load from kaddr */
+ strb w4, [x1], #1 /* Store in uaddr */
+ sub x2, x2, #1 /* len-- */
+ cbnz x2, 1b
+
+ SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */
+
+2: mov x0, xzr /* return 0 */
+ ret
+END(copyout)
+
+/*
+ * Copies from a user to kernel address
+ *
+ * int copyin(const void *uaddr, void *kdaddr, size_t len)
+ */
+ENTRY(copyin)
+ cbz x2, 2f /* If len == 0 then skip loop */
+
+ adr x6, copyio_fault /* Get the handler address */
+ SET_FAULT_HANDLER(x6, x7) /* Set the handler */
+
+1: ldrb w4, [x0], #1 /* Load from uaddr */
+ strb w4, [x1], #1 /* Store in kaddr */
+ sub x2, x2, #1 /* len-- */
+ cbnz x2, 1b
+
+ SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */
+
+2: mov x0, xzr /* return 0 */
+ ret
+END(copyin)
+
+/*
+ * Copies a string from a user to kernel address
+ *
+ * int copyinstr(const void *udaddr, void *kaddr, size_t len, size_t *done)
+ */
+ENTRY(copyinstr)
+ mov x5, xzr /* count = 0 */
+ cbz x2, 3f /* If len == 0 then skip loop */
+
+ adr x6, copyio_fault /* Get the handler address */
+ SET_FAULT_HANDLER(x6, x7) /* Set the handler */
+
+1: ldrb w4, [x0], #1 /* Load from uaddr */
+ strb w4, [x1], #1 /* Store in kaddr */
+ cbz w4, 2f /* If == 0 then break */
+ sub x2, x2, #1 /* len-- */
+ add x5, x5, #1 /* count++ */
+ cbnz x2, 1b
+
+2: SET_FAULT_HANDLER(xzr, x7) /* Clear the handler */
+
+3: cbz x3, 4f /* Check if done != NULL */
+ add x5, x5, #1 /* count++ */
+ str x5, [x3] /* done = count */
+
+4: mov x0, xzr /* return 0 */
+ ret
+END(copyinstr)
Index: sys/arm64/arm64/copystr.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/copystr.c
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+int
+copystr(const void * __restrict kfaddr, void * __restrict kdaddr, size_t len,
+ size_t * __restrict lencopied)
+{
+ const char *src;
+ size_t pos;
+ char *dst;
+ int error;
+
+ error = ENAMETOOLONG;
+ src = kfaddr;
+ dst = kdaddr;
+ for (pos = 0; pos < len; pos++) {
+ dst[pos] = src[pos];
+ if (src[pos] == '\0') {
+ /* Increment pos to hold the number of bytes copied */
+ pos++;
+ error = 0;
+ break;
+ }
+ }
+
+ if (lencopied != NULL)
+ *lencopied = pos;
+
+	return (error);
+}
+
Index: sys/arm64/arm64/cpufunc_asm.S
===================================================================
--- /dev/null
+++ sys/arm64/arm64/cpufunc_asm.S
@@ -0,0 +1,154 @@
+/*-
+ * Copyright (c) 2014 Robin Randhawa
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+#include <machine/param.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * FIXME:
+ * Need big.LITTLE awareness at some point.
+ * Using arm64_p[id]cache_line_size may not be the best option.
+ * Need better SMP awareness.
+ */
+ .text
+ .align 2
+
+.Lpage_mask:
+ .word PAGE_MASK
+
+/*
+ * Macro to handle the cache. This takes the start address in x0, length
+ * in x1. It will corrupt x0, x1, x2, and x3.
+ */
+.macro cache_handle_range dcop = 0, ic = 0, icop = 0
+.if \ic == 0
+ ldr x3, =dcache_line_size /* Load the D cache line size */
+.else
+ ldr x3, =idcache_line_size /* Load the I & D cache line size */
+.endif
+ ldr x3, [x3]
+ sub x4, x3, #1 /* Get the address mask */
+ and x2, x0, x4 /* Get the low bits of the address */
+ add x1, x1, x2 /* Add these to the size */
+ bic x0, x0, x4 /* Clear the low bit of the address */
+1:
+.if \ic != 0
+ ic \icop, x0
+.endif
+ dc \dcop, x0
+ add x0, x0, x3 /* Move to the next line */
+ subs x1, x1, x3 /* Reduce the size */
+ b.hi 1b /* Check if we are done */
+.if \ic != 0
+ isb
+.endif
+ dsb ish
+ ret
+.endm
+
+ENTRY(arm64_nullop)
+ ret
+END(arm64_nullop)
+
+/*
+ * Generic functions to read/modify/write the system registers
+ */
+
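+/*
+ * Install a new user page-table base in ttbr0_el1, with barriers to
+ * ensure prior table updates are visible before the switch.
+ */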
+ENTRY(arm64_setttb)
+ dsb ish
+ msr ttbr0_el1, x0
+ dsb ish
+ isb
+ ret
+END(arm64_setttb)
+
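+/*
+ * Invalidate the entire TLB; the "is" variant broadcasts the operation
+ * to the inner-shareable domain when running SMP.
+ */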
+ENTRY(arm64_tlb_flushID)
+#ifdef SMP
+ tlbi vmalle1is
+#else
+ tlbi vmalle1
+#endif
+ dsb ish
+ isb
+ ret
+END(arm64_tlb_flushID)
+
+ENTRY(arm64_tlb_flushID_SE)
+ ldr x1, .Lpage_mask
+ bic x0, x0, x1
+#ifdef SMP
+ tlbi vae1is, x0
+#else
+ tlbi vae1, x0
+#endif
+ dsb ish
+ isb
+ ret
+END(arm64_tlb_flushID_SE)
+
+/*
+ * void arm64_dcache_wb_range(vm_offset_t, vm_size_t)
+ */
+ENTRY(arm64_dcache_wb_range)
+ cache_handle_range dcop = cvac
+END(arm64_dcache_wb_range)
+
+/*
+ * void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t)
+ */
+ENTRY(arm64_dcache_wbinv_range)
+ cache_handle_range dcop = civac
+END(arm64_dcache_wbinv_range)
+
+/*
+ * void arm64_dcache_inv_range(vm_offset_t, vm_size_t)
+ *
+ * Note, we must not invalidate everything. If the range is too big we
+ * must use wb-inv of the entire cache.
+ */
+ENTRY(arm64_dcache_inv_range)
+ cache_handle_range dcop = ivac
+END(arm64_dcache_inv_range)
+
+/*
+ * void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t)
+ */
+ENTRY(arm64_idcache_wbinv_range)
+ cache_handle_range dcop = civac, ic = 1, icop = ivau
+END(arm64_idcache_wbinv_range)
+
+/*
+ * void arm64_icache_sync_range(vm_offset_t, vm_size_t)
+ */
+ENTRY(arm64_icache_sync_range)
+ cache_handle_range dcop = cvac, ic = 1, icop = ivau
+END(arm64_icache_sync_range)
Index: sys/arm64/arm64/dump_machdep.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/dump_machdep.c
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (c) 2002 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/kerneldump.h>
+#include <sys/proc.h>
+#include <sys/sysctl.h>
+
+#include <machine/dump.h>
+
+int do_minidump = 1;
+TUNABLE_INT("debug.minidump", &do_minidump);
+SYSCTL_INT(_debug, OID_AUTO, minidump, CTLFLAG_RW, &do_minidump, 0,
+ "Enable mini crash dumps");
+
+void
+dumpsys_wbinv_all(void)
+{
+
+ printf("dumpsys_wbinv_all\n");
+}
+
+void
+dumpsys_map_chunk(vm_paddr_t pa, size_t chunk __unused, void **va)
+{
+
+ printf("dumpsys_map_chunk\n");
+	while (1);
+}
+
+/*
+ * Add a header to be used by libkvm to get the va to pa delta
+ */
+int
+dumpsys_write_aux_headers(struct dumperinfo *di)
+{
+
+	printf("dumpsys_write_aux_headers\n");
+ return (0);
+}
Index: sys/arm64/arm64/elf_machdep.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/elf_machdep.c
@@ -0,0 +1,159 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * Copyright (c) 2000 Eduardo Horvath.
+ * Copyright (c) 1999 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Paul Kranenburg.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * from: NetBSD: mdreloc.c,v 1.42 2008/04/28 20:23:04 martin Exp
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/systm.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/linker.h>
+#include <sys/proc.h>
+#include <sys/sysent.h>
+#include <sys/imgact_elf.h>
+#include <sys/syscall.h>
+#include <sys/signalvar.h>
+#include <sys/vnode.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+
+#include <machine/elf.h>
+#include <machine/md_var.h>
+
+#include "linker_if.h"
+
+static struct sysentvec elf64_freebsd_sysvec = {
+ .sv_size = SYS_MAXSYSCALL,
+ .sv_table = sysent,
+ .sv_mask = 0,
+ .sv_sigsize = 0,
+ .sv_sigtbl = NULL,
+ .sv_errsize = 0,
+ .sv_errtbl = NULL,
+ .sv_transtrap = NULL,
+ .sv_fixup = __elfN(freebsd_fixup),
+ .sv_sendsig = sendsig,
+ .sv_sigcode = sigcode,
+ .sv_szsigcode = &szsigcode,
+ .sv_prepsyscall = NULL,
+ .sv_name = "FreeBSD ELF64",
+ .sv_coredump = __elfN(coredump),
+ .sv_imgact_try = NULL,
+ .sv_minsigstksz = MINSIGSTKSZ,
+ .sv_pagesize = PAGE_SIZE,
+ .sv_minuser = VM_MIN_ADDRESS,
+ .sv_maxuser = VM_MAXUSER_ADDRESS,
+ .sv_usrstack = USRSTACK,
+ .sv_psstrings = PS_STRINGS,
+ .sv_stackprot = VM_PROT_READ | VM_PROT_WRITE,
+ .sv_copyout_strings = exec_copyout_strings,
+ .sv_setregs = exec_setregs,
+ .sv_fixlimit = NULL,
+ .sv_maxssiz = NULL,
+ .sv_flags = SV_ABI_FREEBSD | SV_LP64,
+ .sv_set_syscall_retval = cpu_set_syscall_retval,
+ .sv_fetch_syscall_args = cpu_fetch_syscall_args,
+ .sv_syscallnames = syscallnames,
+ .sv_schedtail = NULL,
+};
+
+static Elf64_Brandinfo freebsd_brand_info = {
+ .brand = ELFOSABI_FREEBSD,
+ .machine = EM_AARCH64,
+ .compat_3_brand = "FreeBSD",
+ .emul_path = NULL,
+ .interp_path = "/libexec/ld-elf.so.1",
+ .sysvec = &elf64_freebsd_sysvec,
+ .interp_newpath = NULL,
+ .brand_note = &elf64_freebsd_brandnote,
+ .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
+};
+
+SYSINIT(elf64, SI_SUB_EXEC, SI_ORDER_FIRST,
+ (sysinit_cfunc_t)elf64_insert_brand_entry, &freebsd_brand_info);
+
+static Elf64_Brandinfo freebsd_brand_oinfo = {
+ .brand = ELFOSABI_FREEBSD,
+ .machine = EM_AARCH64,
+ .compat_3_brand = "FreeBSD",
+ .emul_path = NULL,
+ .interp_path = "/usr/libexec/ld-elf.so.1",
+ .sysvec = &elf64_freebsd_sysvec,
+ .interp_newpath = NULL,
+ .brand_note = &elf64_freebsd_brandnote,
+ .flags = BI_CAN_EXEC_DYN | BI_BRAND_NOTE
+};
+
+SYSINIT(oelf64, SI_SUB_EXEC, SI_ORDER_ANY,
+ (sysinit_cfunc_t)elf64_insert_brand_entry, &freebsd_brand_oinfo);
+
+void
+elf64_dump_thread(struct thread *td __unused, void *dst __unused,
+ size_t *off __unused)
+{
+
+}
+
+int
+elf_reloc_local(linker_file_t lf, Elf_Addr relocbase, const void *data,
+ int type, elf_lookup_fn lookup __unused)
+{
+
+ panic("elf_reloc_local");
+}
+
+/* Process one elf relocation with addend. */
+int
+elf_reloc(linker_file_t lf, Elf_Addr relocbase, const void *data, int type,
+ elf_lookup_fn lookup)
+{
+
+ panic("elf_reloc");
+}
+
+int
+elf_cpu_load_file(linker_file_t lf __unused)
+{
+
+ return (0);
+}
+
+int
+elf_cpu_unload_file(linker_file_t lf __unused)
+{
+
+ return (0);
+}
Index: sys/arm64/arm64/exception.S
===================================================================
--- /dev/null
+++ sys/arm64/arm64/exception.S
@@ -0,0 +1,198 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#include "assym.s"
+
+ .text
+
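+/*
+ * Build a trapframe on the stack: x0-x29, lr, sp, elr_el1, and spsr_el1.
+ */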
+.macro save_registers el
+ stp x28, x29, [sp, #-16]!
+ stp x26, x27, [sp, #-16]!
+ stp x24, x25, [sp, #-16]!
+ stp x22, x23, [sp, #-16]!
+ stp x20, x21, [sp, #-16]!
+ stp x18, x19, [sp, #-16]!
+ stp x16, x17, [sp, #-16]!
+ stp x14, x15, [sp, #-16]!
+ stp x12, x13, [sp, #-16]!
+ stp x10, x11, [sp, #-16]!
+ stp x8, x9, [sp, #-16]!
+ stp x6, x7, [sp, #-16]!
+ stp x4, x5, [sp, #-16]!
+ stp x2, x3, [sp, #-16]!
+ stp x0, x1, [sp, #-16]!
+ mrs x10, elr_el1
+ mrs x11, spsr_el1
+.if \el == 0
+ mrs x12, sp_el0
+.else
+ mov x12, sp
+.endif
+ stp x10, x11, [sp, #-16]!
+ stp x12, lr, [sp, #-16]!
+ mrs x18, tpidr_el1
+.endm
+
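+/*
+ * Unwind a trapframe built by save_registers and restore the saved
+ * register state before returning from the exception.
+ */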
+.macro restore_registers el
+ ldp x12, lr, [sp], #16
+ ldp x10, x11, [sp], #16
+.if \el == 0
+ msr sp_el0, x12
+.else
+ mov sp, x12
+.endif
+ msr spsr_el1, x11
+ msr elr_el1, x10
+ ldp x0, x1, [sp], #16
+ ldp x2, x3, [sp], #16
+ ldp x4, x5, [sp], #16
+ ldp x6, x7, [sp], #16
+ ldp x8, x9, [sp], #16
+ ldp x10, x11, [sp], #16
+ ldp x12, x13, [sp], #16
+ ldp x14, x15, [sp], #16
+ ldp x16, x17, [sp], #16
+.if \el == 0
+ ldp x18, x19, [sp], #16
+.else
+ ldp xzr, x19, [sp], #16
+.endif
+ ldp x20, x21, [sp], #16
+ ldp x22, x23, [sp], #16
+ ldp x24, x25, [sp], #16
+ ldp x26, x27, [sp], #16
+ ldp x28, x29, [sp], #16
+.endm
+
+.macro do_ast
+ /* Disable interrupts */
+ mrs x19, daif
+ msr daifset, #2
+
+ /* Read the current thread flags */
+1: ldr x1, [x18, #PC_CURTHREAD] /* Load curthread */
+ ldr x2, [x1, #TD_FLAGS]! /* TODO: No need for the ! but clang fails without it */
+
+ /* Check if we have either bits set */
+ mov x3, #((TDF_ASTPENDING|TDF_NEEDRESCHED) >> 8)
+ lsl x3, x3, #8
+ and x2, x2, x3
+ cmp x2, #0
+ b.eq 2f
+
+ /* Restore interrupts */
+ msr daif, x19
+
+ /* handle the ast */
+ mov x0, sp
+ bl _C_LABEL(ast)
+
+ /* Disable interrupts */
+ mrs x19, daif
+ msr daifset, #2
+
+2:
+ /* Restore interrupts */
+ msr daif, x19
+.endm
+
+handle_el1h_sync:
+ save_registers 1
+ mov x0, sp
+ bl do_el1h_sync
+ restore_registers 1
+ eret
+
+handle_el1h_irq:
+ save_registers 1
+ mov x0, sp
+ bl arm_cpu_intr
+ restore_registers 1
+ eret
+
+handle_el1h_error:
+ brk 0xf13
+
+handle_el0_sync:
+ save_registers 0
+ mov x0, sp
+ bl do_el0_sync
+ do_ast
+ restore_registers 0
+ eret
+
+handle_el0_irq:
+ save_registers 0
+ mov x0, sp
+ bl arm_cpu_intr
+ restore_registers 0
+ eret
+
+handle_el0_error:
+ save_registers 0
+ mov x0, sp
+ bl do_el0_error
+ brk 0xf23
+ 1: b 1b
+
+.macro vempty
+ .align 7
+ brk 0xfff
+ 1: b 1b
+.endm
+
+.macro vector name
+ .align 7
+ b handle_\name
+.endm
+
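+/*
+ * The exception vector table; VBAR_EL1 requires 2KiB alignment, hence
+ * the .align 11 below.
+ */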
+ .align 11
+ .globl exception_vectors
+exception_vectors:
+ vempty /* Synchronous EL1t */
+ vempty /* IRQ EL1t */
+ vempty /* FIQ EL1t */
+ vempty /* Error EL1t */
+
+ vector el1h_sync /* Synchronous EL1h */
+ vector el1h_irq /* IRQ EL1h */
+ vempty /* FIQ EL1h */
+ vector el1h_error /* Error EL1h */
+
+ vector el0_sync /* Synchronous 64-bit EL0 */
+ vector el0_irq /* IRQ 64-bit EL0 */
+ vempty /* FIQ 64-bit EL0 */
+ vector el0_error /* Error 64-bit EL0 */
+
+ vempty /* Synchronous 32-bit EL0 */
+ vempty /* IRQ 32-bit EL0 */
+ vempty /* FIQ 32-bit EL0 */
+ vempty /* Error 32-bit EL0 */
+
Index: sys/arm64/arm64/genassym.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/genassym.c
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 2004 Olivier Houchard
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+#include <sys/param.h>
+#include <sys/assym.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+
+#include <machine/frame.h>
+#include <machine/pcb.h>
+#include <machine/vmparam.h>
+
+ASSYM(KERNBASE, KERNBASE);
+ASSYM(TDF_ASTPENDING, TDF_ASTPENDING);
+ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED);
+
+ASSYM(PCPU_SIZE, sizeof(struct pcpu));
+ASSYM(PC_CURPCB, offsetof(struct pcpu, pc_curpcb));
+ASSYM(PC_CURTHREAD, offsetof(struct pcpu, pc_curthread));
+
+/* Size of pcb, rounded to keep stack alignment */
+ASSYM(PCB_SIZE, roundup2(sizeof(struct pcb), STACKALIGNBYTES + 1));
+ASSYM(PCB_REGS, offsetof(struct pcb, pcb_x));
+ASSYM(PCB_SP, offsetof(struct pcb, pcb_sp));
+ASSYM(PCB_L1ADDR, offsetof(struct pcb, pcb_l1addr));
+ASSYM(PCB_ONFAULT, offsetof(struct pcb, pcb_onfault));
+
+ASSYM(SF_UC, offsetof(struct sigframe, sf_uc));
+
+ASSYM(TD_PCB, offsetof(struct thread, td_pcb));
+ASSYM(TD_FLAGS, offsetof(struct thread, td_flags));
+ASSYM(TD_LOCK, offsetof(struct thread, td_lock));
+
+ASSYM(TF_X, offsetof(struct trapframe, tf_x));
Index: sys/arm64/arm64/gic.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/gic.c
@@ -0,0 +1,396 @@
+/*-
+ * Copyright (c) 2011 The FreeBSD Foundation
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Developed by Damjan Marion <damjan.marion@gmail.com>
+ *
+ * Based on OMAP4 GIC code by Ben Gray
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company nor the name of the author may be used to
+ * endorse or promote products derived from this software without specific
+ * prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/module.h>
+#include <sys/rman.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/cpuset.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <machine/bus.h>
+#include <machine/intr.h>
+#include <machine/smp.h>
+
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/openfirm.h>
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+
+
+#include "pic_if.h"
+
+/* We are using GICv2 register naming */
+
+/* Distributor Registers */
+#define GICD_CTLR 0x000 /* v1 ICDDCR */
+#define GICD_TYPER 0x004 /* v1 ICDICTR */
+#define GICD_IIDR 0x008 /* v1 ICDIIDR */
+#define GICD_IGROUPR(n) (0x0080 + ((n) * 4)) /* v1 ICDISER */
+#define GICD_ISENABLER(n) (0x0100 + ((n) * 4)) /* v1 ICDISER */
+#define GICD_ICENABLER(n) (0x0180 + ((n) * 4)) /* v1 ICDICER */
+#define GICD_ISPENDR(n) (0x0200 + ((n) * 4)) /* v1 ICDISPR */
+#define GICD_ICPENDR(n) (0x0280 + ((n) * 4)) /* v1 ICDICPR */
+#define GICD_ICACTIVER(n) (0x0380 + ((n) * 4)) /* v1 ICDABR */
+#define GICD_IPRIORITYR(n) (0x0400 + ((n) * 4)) /* v1 ICDIPR */
+#define GICD_ITARGETSR(n) (0x0800 + ((n) * 4)) /* v1 ICDIPTR */
+#define GICD_ICFGR(n) (0x0C00 + ((n) * 4)) /* v1 ICDICFR */
+#define GICD_SGIR(n) (0x0F00 + ((n) * 4)) /* v1 ICDSGIR */
+
+/* CPU Registers */
+#define GICC_CTLR 0x0000 /* v1 ICCICR */
+#define GICC_PMR 0x0004 /* v1 ICCPMR */
+#define GICC_BPR 0x0008 /* v1 ICCBPR */
+#define GICC_IAR 0x000C /* v1 ICCIAR */
+#define GICC_EOIR 0x0010 /* v1 ICCEOIR */
+#define GICC_RPR 0x0014 /* v1 ICCRPR */
+#define GICC_HPPIR 0x0018 /* v1 ICCHPIR */
+#define GICC_ABPR 0x001C /* v1 ICCABPR */
+#define GICC_IIDR	0x00FC	/* v1 ICCIIDR */
+
+#define GIC_FIRST_IPI 0 /* Irqs 0-15 are SGIs/IPIs. */
+#define GIC_LAST_IPI 15
+#define GIC_FIRST_PPI 16 /* Irqs 16-31 are private (per */
+#define GIC_LAST_PPI 31 /* core) peripheral interrupts. */
+#define GIC_FIRST_SPI 32 /* Irqs 32+ are shared peripherals. */
+
+/* First bit is a polarity bit (0 - low, 1 - high) */
+#define GICD_ICFGR_POL_LOW (0 << 0)
+#define GICD_ICFGR_POL_HIGH (1 << 0)
+#define GICD_ICFGR_POL_MASK 0x1
+/* Second bit is a trigger bit (0 - level, 1 - edge) */
+#define GICD_ICFGR_TRIG_LVL (0 << 1)
+#define GICD_ICFGR_TRIG_EDGE (1 << 1)
+#define GICD_ICFGR_TRIG_MASK 0x2
+
+struct arm_gic_softc {
+ device_t gic_dev;
+ struct resource * gic_res[3];
+ bus_space_tag_t gic_c_bst;
+ bus_space_tag_t gic_d_bst;
+ bus_space_handle_t gic_c_bsh;
+ bus_space_handle_t gic_d_bsh;
+ uint8_t ver;
+ struct mtx mutex;
+ uint32_t nirqs;
+};
+
+static struct resource_spec arm_gic_spec[] = {
+ { SYS_RES_MEMORY, 0, RF_ACTIVE }, /* Distributor registers */
+ { SYS_RES_MEMORY, 1, RF_ACTIVE }, /* CPU Interrupt Intf. registers */
+ { -1, 0 }
+};
+
+static struct arm_gic_softc *arm_gic_sc = NULL;
+
+#define gic_c_read_4(_sc, _reg) \
+ bus_space_read_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg))
+#define gic_c_write_4(_sc, _reg, _val) \
+ bus_space_write_4((_sc)->gic_c_bst, (_sc)->gic_c_bsh, (_reg), (_val))
+#define gic_d_read_4(_sc, _reg) \
+ bus_space_read_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg))
+#define gic_d_write_4(_sc, _reg, _val) \
+ bus_space_write_4((_sc)->gic_d_bst, (_sc)->gic_d_bsh, (_reg), (_val))
+
+static pic_dispatch_t gic_dispatch;
+static pic_eoi_t gic_eoi;
+static pic_mask_t gic_mask_irq;
+static pic_unmask_t gic_unmask_irq;
+
+static struct ofw_compat_data compat_data[] = {
+ {"arm,gic", true}, /* Non-standard, used in FreeBSD dts. */
+ {"arm,gic-400", true},
+ {"arm,cortex-a15-gic", true},
+ {"arm,cortex-a9-gic", true},
+ {"arm,cortex-a7-gic", true},
+ {"arm,arm11mp-gic", true},
+ {"brcm,brahma-b15-gic", true},
+ {NULL, false}
+};
+
+static int
+arm_gic_probe(device_t dev)
+{
+
+ if (!ofw_bus_status_okay(dev))
+ return (ENXIO);
+
+ if (!ofw_bus_search_compatible(dev, compat_data)->ocd_data)
+ return (ENXIO);
+
+ device_set_desc(dev, "ARM Generic Interrupt Controller");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static void
+gic_init_secondary(device_t dev)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+ int i;
+
+ for (i = 0; i < sc->nirqs; i += 4)
+ gic_d_write_4(sc, GICD_IPRIORITYR(i >> 2), 0);
+
+ /* Set all the interrupts to be in Group 0 (secure) */
+ for (i = 0; i < sc->nirqs; i += 32) {
+ gic_d_write_4(sc, GICD_IGROUPR(i >> 5), 0);
+ }
+
+ /* Enable CPU interface */
+ gic_c_write_4(sc, GICC_CTLR, 1);
+
+ /* Set priority mask register. */
+ gic_c_write_4(sc, GICC_PMR, 0xff);
+
+ /* Enable interrupt distribution */
+ gic_d_write_4(sc, GICD_CTLR, 0x01);
+
+ /*
+ * Activate the timer interrupts: virtual, secure, and non-secure.
+ */
+ gic_d_write_4(sc, GICD_ISENABLER(27 >> 5), (1UL << (27 & 0x1F)));
+ gic_d_write_4(sc, GICD_ISENABLER(29 >> 5), (1UL << (29 & 0x1F)));
+ gic_d_write_4(sc, GICD_ISENABLER(30 >> 5), (1UL << (30 & 0x1F)));
+}
+
+static int
+arm_gic_attach(device_t dev)
+{
+ struct arm_gic_softc *sc;
+ int i;
+ uint32_t icciidr;
+
+ if (arm_gic_sc)
+ return (ENXIO);
+
+ sc = device_get_softc(dev);
+
+ if (bus_alloc_resources(dev, arm_gic_spec, sc->gic_res)) {
+ device_printf(dev, "could not allocate resources\n");
+ return (ENXIO);
+ }
+
+ sc->gic_dev = dev;
+ arm_gic_sc = sc;
+
+ /* Initialize mutex */
+ mtx_init(&sc->mutex, "GIC lock", "", MTX_SPIN);
+
+ /* Distributor Interface */
+ sc->gic_d_bst = rman_get_bustag(sc->gic_res[0]);
+ sc->gic_d_bsh = rman_get_bushandle(sc->gic_res[0]);
+
+ /* CPU Interface */
+ sc->gic_c_bst = rman_get_bustag(sc->gic_res[1]);
+ sc->gic_c_bsh = rman_get_bushandle(sc->gic_res[1]);
+
+ /* Disable interrupt forwarding to the CPU interface */
+ gic_d_write_4(sc, GICD_CTLR, 0x00);
+
+ /* Get the number of interrupts */
+ sc->nirqs = gic_d_read_4(sc, GICD_TYPER);
+ sc->nirqs = 32 * ((sc->nirqs & 0x1f) + 1);
+
+ arm_register_root_pic(dev, sc->nirqs);
+
+ icciidr = gic_c_read_4(sc, GICC_IIDR);
+ device_printf(dev,"pn 0x%x, arch 0x%x, rev 0x%x, implementer 0x%x irqs %u\n",
+ icciidr>>20, (icciidr>>16) & 0xF, (icciidr>>12) & 0xf,
+ (icciidr & 0xfff), sc->nirqs);
+
+ /* Set all global interrupts to be level triggered, active low. */
+ for (i = 32; i < sc->nirqs; i += 16) {
+ gic_d_write_4(sc, GICD_ICFGR(i >> 4), 0x00000000);
+ }
+
+ /* Disable all interrupts. */
+ for (i = 32; i < sc->nirqs; i += 32) {
+ gic_d_write_4(sc, GICD_ICENABLER(i >> 5), 0xFFFFFFFF);
+ }
+
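+ /*
+ * GICD_IPRIORITYR and GICD_ITARGETSR hold one byte per interrupt,
+ * four interrupts per 32-bit register; writing 0x01010101 to an
+ * ITARGETSR register routes all four interrupts to CPU 0.
+ */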
+ for (i = 0; i < sc->nirqs; i += 4) {
+ gic_d_write_4(sc, GICD_IPRIORITYR(i >> 2), 0);
+ gic_d_write_4(sc, GICD_ITARGETSR(i >> 2),
+ 1 << 0 | 1 << 8 | 1 << 16 | 1 << 24);
+ }
+
+ /* Set all the interrupts to be in Group 0 (secure) */
+ for (i = 0; i < sc->nirqs; i += 32) {
+ gic_d_write_4(sc, GICD_IGROUPR(i >> 5), 0);
+ }
+
+ /* Enable CPU interface */
+ gic_c_write_4(sc, GICC_CTLR, 1);
+
+ /* Set priority mask register. */
+ gic_c_write_4(sc, GICC_PMR, 0xff);
+
+ /* Enable interrupt distribution */
+ gic_d_write_4(sc, GICD_CTLR, 0x01);
+
+ return (0);
+}
+
+static void
+gic_dispatch(device_t dev, struct trapframe *frame)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+ uint32_t active_irq;
+ int first = 1;
+
+ while (1) {
+ active_irq = gic_c_read_4(sc, GICC_IAR);
+
+ /*
+ * Immediately EOI the SGIs, because doing so requires the other
+ * bits (i.e. the CPU number), not just the IRQ number, and we do
+ * not have this information later.
+ */
+
+ if ((active_irq & 0x3ff) <= GIC_LAST_IPI)
+ gic_c_write_4(sc, GICC_EOIR, active_irq);
+ active_irq &= 0x3FF;
+
+ if (active_irq == 0x3FF) {
+ if (first)
+ printf("Spurious interrupt detected\n");
+ return;
+ }
+
+ arm_dispatch_intr(active_irq, frame);
+ first = 0;
+ }
+}
+
+static void
+gic_eoi(device_t dev, u_int irq)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+
+ gic_c_write_4(sc, GICC_EOIR, irq);
+}
+
+static void
+gic_mask_irq(device_t dev, u_int irq)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+
+ gic_d_write_4(sc, GICD_ICENABLER(irq >> 5), (1UL << (irq & 0x1F)));
+ gic_c_write_4(sc, GICC_EOIR, irq);
+}
+
+static void
+gic_unmask_irq(device_t dev, u_int irq)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+
+ gic_d_write_4(sc, GICD_ISENABLER(irq >> 5), (1UL << (irq & 0x1F)));
+}
+
+#ifdef SMP
+static void
+gic_ipi_send(device_t dev, cpuset_t cpus, u_int ipi)
+{
+ struct arm_gic_softc *sc = device_get_softc(dev);
+ uint32_t val = 0, i;
+
+ for (i = 0; i < MAXCPU; i++)
+ if (CPU_ISSET(i, &cpus))
+ val |= 1 << (16 + i);
+
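+ /*
+ * GICD_SGIR: bits [23:16] are the CPU target list (one bit per
+ * CPU interface), bits [3:0] the SGI interrupt ID.
+ */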
+ gic_d_write_4(sc, GICD_SGIR(0), val | ipi);
+}
+
+static int
+arm_gic_ipi_read(device_t dev, int i)
+{
+
+ if (i != -1) {
+ /*
+ * The intr code will automagically give the frame pointer
+ * if the interrupt argument is 0.
+ */
+ if ((unsigned int)i > 16)
+ return (0);
+ return (i);
+ }
+
+ return (0x3ff);
+}
+
+static void
+arm_gic_ipi_clear(device_t dev, int ipi)
+{
+ /* no-op */
+}
+#endif
+
+static device_method_t arm_gic_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, arm_gic_probe),
+ DEVMETHOD(device_attach, arm_gic_attach),
+
+ /* pic_if */
+ DEVMETHOD(pic_dispatch, gic_dispatch),
+ DEVMETHOD(pic_eoi, gic_eoi),
+ DEVMETHOD(pic_mask, gic_mask_irq),
+ DEVMETHOD(pic_unmask, gic_unmask_irq),
+
+#ifdef SMP
+ DEVMETHOD(pic_init_secondary, gic_init_secondary),
+ DEVMETHOD(pic_ipi_send, gic_ipi_send),
+#endif
+
+ { 0, 0 }
+};
+
+static driver_t arm_gic_driver = {
+ "gic",
+ arm_gic_methods,
+ sizeof(struct arm_gic_softc),
+};
+
+static devclass_t arm_gic_devclass;
+
+EARLY_DRIVER_MODULE(gic, simplebus, arm_gic_driver, arm_gic_devclass, 0, 0,
+ BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
+EARLY_DRIVER_MODULE(gic, ofwbus, arm_gic_driver, arm_gic_devclass, 0, 0,
+ BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
Index: sys/arm64/arm64/identcpu.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/identcpu.c
@@ -0,0 +1,199 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Semihalf
+ * under sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/pcpu.h>
+#include <sys/sysctl.h>
+#include <sys/systm.h>
+
+#include <machine/cpu.h>
+#include <machine/cpufunc.h>
+
+char machine[] = "arm64";
+
+SYSCTL_STRING(_hw, HW_MACHINE, machine, CTLFLAG_RD, machine, 0,
+ "Machine class");
+
+/*
+ * Per-CPU affinity as provided in MPIDR_EL1
+ * Indexed by CPU number in logical order selected by the system.
+ * Relevant fields can be extracted using the CPU_AFFn macros;
+ * Aff3.Aff2.Aff1.Aff0 constructs a unique CPU address in the system.
+ *
+ * Fields used by us:
+ * Aff1 - Cluster number
+ * Aff0 - CPU number in Aff1 cluster
+ */
+uint64_t __cpu_affinity[MAXCPU];
+
+#define CPU_IMPL_ARM 0x41
+#define CPU_IMPL_BROADCOM 0x42
+#define CPU_IMPL_CAVIUM 0x43
+#define CPU_IMPL_DEC 0x44
+#define CPU_IMPL_INFINEON 0x49
+#define CPU_IMPL_FREESCALE 0x4D
+#define CPU_IMPL_NVIDIA 0x4E
+#define CPU_IMPL_APM 0x50
+#define CPU_IMPL_QUALCOMM 0x51
+#define CPU_IMPL_MARVELL 0x56
+#define CPU_IMPL_INTEL 0x69
+
+#define CPU_PART_THUNDER 0x0A1
+#define CPU_PART_FOUNDATION 0xD00
+#define CPU_PART_CORTEX_A53 0xD03
+#define CPU_PART_CORTEX_A57 0xD07
+
+#define CPU_IMPL(midr) (((midr) >> 24) & 0xff)
+#define CPU_PART(midr) (((midr) >> 4) & 0xfff)
+#define CPU_VAR(midr) (((midr) >> 20) & 0xf)
+#define CPU_REV(midr) (((midr) >> 0) & 0xf)
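+/*
+ * Example decode: a MIDR of 0x411FD071 yields implementer 0x41 (ARM),
+ * variant 1, part 0xD07 (Cortex-A57) and revision 1, i.e.
+ * "ARM Cortex-A57 r1p1".
+ */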
+
+struct cpu_desc {
+ u_int cpu_impl;
+ u_int cpu_part_num;
+ u_int cpu_variant;
+ u_int cpu_revision;
+ const char *cpu_impl_name;
+ const char *cpu_part_name;
+};
+
+struct cpu_desc cpu_desc[MAXCPU];
+
+struct cpu_parts {
+ u_int part_id;
+ const char *part_name;
+};
+#define CPU_PART_NONE { 0, "Unknown Processor" }
+
+struct cpu_implementers {
+ u_int impl_id;
+ const char *impl_name;
+ /*
+ * Part number is implementation defined
+ * so each vendor will have its own set of values and names.
+ */
+ const struct cpu_parts *cpu_parts;
+};
+#define CPU_IMPLEMENTER_NONE { 0, "Unknown Implementer", cpu_parts_none }
+
+/*
+ * Per-implementer table of (PartNum, CPU Name) pairs.
+ */
+/* ARM Ltd. */
+static const struct cpu_parts cpu_parts_arm[] = {
+ { 0xD00, "Foundation-Model" },
+ { 0xD03, "Cortex-A53" },
+ { 0xD07, "Cortex-A57" },
+ CPU_PART_NONE,
+};
+/* Cavium */
+static const struct cpu_parts cpu_parts_cavium[] = {
+ { 0x0A1, "Thunder" },
+ CPU_PART_NONE,
+};
+
+/* Unknown */
+static const struct cpu_parts cpu_parts_none[] = {
+ CPU_PART_NONE,
+};
+
+/*
+ * Implementers table.
+ */
+const struct cpu_implementers cpu_implementers[] = {
+ { CPU_IMPL_ARM, "ARM", cpu_parts_arm },
+ { CPU_IMPL_BROADCOM, "Broadcom", cpu_parts_none },
+ { CPU_IMPL_CAVIUM, "Cavium", cpu_parts_cavium },
+ { CPU_IMPL_DEC, "DEC", cpu_parts_none },
+ { CPU_IMPL_INFINEON, "IFX", cpu_parts_none },
+ { CPU_IMPL_FREESCALE, "Freescale", cpu_parts_none },
+ { CPU_IMPL_NVIDIA, "NVIDIA", cpu_parts_none },
+ { CPU_IMPL_APM, "APM", cpu_parts_none },
+ { CPU_IMPL_QUALCOMM, "Qualcomm", cpu_parts_none },
+ { CPU_IMPL_MARVELL, "Marvell", cpu_parts_none },
+ { CPU_IMPL_INTEL, "Intel", cpu_parts_none },
+ CPU_IMPLEMENTER_NONE,
+};
+
+void identify_cpu(void);
+
+void
+identify_cpu(void)
+{
+ u_int midr;
+ u_int impl_id;
+ u_int part_id;
+ u_int cpu;
+ uint64_t mpidr;
+ size_t i;
+ const struct cpu_parts *cpu_partsp = NULL;
+
+ cpu = PCPU_GET(cpuid);
+ midr = get_midr();
+
+ impl_id = CPU_IMPL(midr);
+ for (i = 0; i < nitems(cpu_implementers); i++) {
+ if (impl_id == cpu_implementers[i].impl_id ||
+ cpu_implementers[i].impl_id == 0) {
+ cpu_desc[cpu].cpu_impl = impl_id;
+ cpu_desc[cpu].cpu_impl_name = cpu_implementers[i].impl_name;
+ cpu_partsp = cpu_implementers[i].cpu_parts;
+ break;
+ }
+ }
+
+ part_id = CPU_PART(midr);
+ for (i = 0; &cpu_partsp[i] != NULL; i++) {
+ if (part_id == cpu_partsp[i].part_id ||
+ cpu_partsp[i].part_id == 0) {
+ cpu_desc[cpu].cpu_part_num = part_id;
+ cpu_desc[cpu].cpu_part_name = cpu_partsp[i].part_name;
+ break;
+ }
+ }
+
+ printf("CPU: %s %s r%dp%d\n", cpu_desc[cpu].cpu_impl_name,
+ cpu_desc[cpu].cpu_part_name, CPU_VAR(midr), CPU_REV(midr));
+
+ /*
+ * Save affinity for the boot CPU.
+ * (CPU0 in the internal system enumeration.)
+ */
+ mpidr = get_mpidr();
+ CPU_AFFINITY(0) = mpidr & CPU_AFF_MASK;
+
+ if (bootverbose)
+ printf("CPU%u affinity: %u.%u.%u.%u\n", 0, CPU_AFF0(mpidr),
+ CPU_AFF1(mpidr), CPU_AFF2(mpidr), CPU_AFF3(mpidr));
+}
Index: sys/arm64/arm64/in_cksum.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/in_cksum.c
@@ -0,0 +1,241 @@
+/* $NetBSD: in_cksum.c,v 1.7 1997/09/02 13:18:15 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1988, 1992, 1993
+ * The Regents of the University of California. All rights reserved.
+ * Copyright (c) 1996
+ * Matt Thomas <matt@3am-software.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)in_cksum.c 8.1 (Berkeley) 6/10/93
+ */
+
+#include <sys/cdefs.h> /* RCS ID & Copyright macro defns */
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers
+ * (Portable Alpha version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+#define ADDCARRY(x) (x > 65535 ? x -= 65535 : x)
+#define REDUCE32 \
+ { \
+ q_util.q = sum; \
+ sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ }
+#define REDUCE16 \
+ { \
+ q_util.q = sum; \
+ l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+ sum = l_util.s[0] + l_util.s[1]; \
+ ADDCARRY(sum); \
+ }
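+
+/*
+ * REDUCE32/REDUCE16 fold the 64-bit accumulator without losing carries:
+ * e.g. REDUCE16 splits the sum into four 16-bit words, adds them into a
+ * 32-bit value, folds that into 16 bits, and ADDCARRY absorbs the final
+ * end-around carry required by ones-complement arithmetic.
+ */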
+
+static const u_int32_t in_masks[] = {
+ /*0 bytes*/ /*1 byte*/ /*2 bytes*/ /*3 bytes*/
+ 0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
+ 0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
+ 0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
+ 0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
+};
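+/*
+ * in_masks is indexed as in_masks[(offset << 2) + bytes]: the mask keeping
+ * 'bytes' valid bytes of a 32-bit word starting 'offset' bytes in
+ * (little-endian); e.g. offset 1 with 2 bytes gives 0x00FFFF00.
+ */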
+
+union l_util {
+ u_int16_t s[2];
+ u_int32_t l;
+};
+union q_util {
+ u_int16_t s[4];
+ u_int32_t l[2];
+ u_int64_t q;
+};
+
+static u_int64_t
+in_cksumdata(const void *buf, int len)
+{
+ const u_int32_t *lw = (const u_int32_t *) buf;
+ u_int64_t sum = 0;
+ u_int64_t prefilled;
+ int offset;
+ union q_util q_util;
+
+ if ((3 & (long) lw) == 0 && len == 20) {
+ sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+ REDUCE32;
+ return sum;
+ }
+
+ if ((offset = 3 & (long) lw) != 0) {
+ const u_int32_t *masks = in_masks + (offset << 2);
+ lw = (u_int32_t *) (((long) lw) - offset);
+ sum = *lw++ & masks[len >= 3 ? 3 : len];
+ len -= 4 - offset;
+ if (len <= 0) {
+ REDUCE32;
+ return sum;
+ }
+ }
+#if 0
+ /*
+ * Force to cache line boundary.
+ */
+ offset = 32 - (0x1f & (long) lw);
+ if (offset < 32 && len > offset) {
+ len -= offset;
+ if (4 & offset) {
+ sum += (u_int64_t) lw[0];
+ lw += 1;
+ }
+ if (8 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1];
+ lw += 2;
+ }
+ if (16 & offset) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ }
+#endif
+ /*
+ * Access the next cache line early ("prefilling") to start its load,
+ * then add the current cache line and carry the prefilled word into
+ * the next loop iteration.
+ */
+ prefilled = lw[0];
+ while ((len -= 32) >= 4) {
+ u_int64_t prefilling = lw[8];
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ prefilled = prefilling;
+ }
+ if (len >= 0) {
+ sum += prefilled + lw[1] + lw[2] + lw[3]
+ + lw[4] + lw[5] + lw[6] + lw[7];
+ lw += 8;
+ } else {
+ len += 32;
+ }
+ while ((len -= 16) >= 0) {
+ sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+ lw += 4;
+ }
+ len += 16;
+ while ((len -= 4) >= 0) {
+ sum += (u_int64_t) *lw++;
+ }
+ len += 4;
+ if (len > 0)
+ sum += (u_int64_t) (in_masks[len] & *lw);
+ REDUCE32;
+ return sum;
+}
+
+u_short
+in_addword(u_short a, u_short b)
+{
+ u_int64_t sum = a + b;
+
+ ADDCARRY(sum);
+ return (sum);
+}
+
+u_short
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+{
+ u_int64_t sum;
+ union q_util q_util;
+ union l_util l_util;
+
+ sum = (u_int64_t) a + b + c;
+ REDUCE16;
+ return (sum);
+}
+
+u_short
+in_cksum_skip(struct mbuf *m, int len, int skip)
+{
+ u_int64_t sum = 0;
+ int mlen = 0;
+ int clen = 0;
+ caddr_t addr;
+ union q_util q_util;
+ union l_util l_util;
+
+ len -= skip;
+ for (; skip && m; m = m->m_next) {
+ if (m->m_len > skip) {
+ mlen = m->m_len - skip;
+ addr = mtod(m, caddr_t) + skip;
+ goto skip_start;
+ } else {
+ skip -= m->m_len;
+ }
+ }
+
+ for (; m && len; m = m->m_next) {
+ if (m->m_len == 0)
+ continue;
+ mlen = m->m_len;
+ addr = mtod(m, caddr_t);
+skip_start:
+ if (len < mlen)
+ mlen = len;
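+ /*
+ * If this chunk starts at odd byte parity relative to the bytes
+ * already summed, shift its partial sum left by 8; ones-complement
+ * folding makes this equivalent to swapping its bytes back into
+ * the correct lanes.
+ */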
+ if ((clen ^ (long) addr) & 1)
+ sum += in_cksumdata(addr, mlen) << 8;
+ else
+ sum += in_cksumdata(addr, mlen);
+
+ clen += mlen;
+ len -= mlen;
+ }
+ REDUCE16;
+ return (~sum & 0xffff);
+}
+
+u_int in_cksum_hdr(const struct ip *ip)
+{
+ u_int64_t sum = in_cksumdata(ip, sizeof(struct ip));
+ union q_util q_util;
+ union l_util l_util;
+ REDUCE16;
+ return (~sum & 0xffff);
+}
Index: sys/arm64/arm64/intr_machdep.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/intr_machdep.c
@@ -0,0 +1,503 @@
+/*-
+ * Copyright (c) 1991 The Regents of the University of California.
+ * Copyright (c) 2002 Benno Rice.
+ * Copyright (c) 2014 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * This code is derived from software contributed by
+ * William Jolitz (Berkeley) and Benno Rice.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * form: src/sys/powerpc/powerpc/intr_machdep.c, r271712 2014/09/17
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/cpuset.h>
+#include <sys/interrupt.h>
+#include <sys/queue.h>
+
+#include <machine/cpufunc.h>
+#include <machine/intr.h>
+
+#ifdef SMP
+#include <machine/smp.h>
+#endif
+
+#include "pic_if.h"
+
+#define MAX_STRAY_LOG 5
+#define INTRNAME_LEN (MAXCOMLEN + 1)
+
+#define NIRQS 1024 /* Maximum number of interrupts in the system */
+
+static MALLOC_DEFINE(M_INTR, "intr", "Interrupt Services");
+
+/*
+ * Linked list of interrupts that have been set up.
+ * Each element holds the interrupt description
+ * and has to be allocated and freed dynamically.
+ */
+static SLIST_HEAD(, arm64_intr_entry) irq_slist_head =
+ SLIST_HEAD_INITIALIZER(irq_slist_head);
+
+struct arm64_intr_entry {
+ SLIST_ENTRY(arm64_intr_entry) entries;
+ struct intr_event *i_event;
+
+ enum intr_trigger i_trig;
+ enum intr_polarity i_pol;
+
+ u_int i_hw_irq; /* Physical interrupt number */
+ u_int i_cntidx; /* Index in intrcnt table */
+ u_int i_handlers; /* Allocated handlers */
+ u_long *i_cntp; /* Interrupt hit counter */
+};
+
+/* Counts and names for statistics - see sys/sys/interrupt.h */
+/* Tables are indexed by i_cntidx */
+u_long intrcnt[NIRQS];
+char intrnames[NIRQS * INTRNAME_LEN];
+size_t sintrcnt = sizeof(intrcnt);
+size_t sintrnames = sizeof(intrnames);
+
+static u_int intrcntidx; /* Current index into intrcnt table */
+static u_int arm64_nintrs; /* Max interrupts number of the root PIC */
+static u_int arm64_nstray; /* Number of received stray interrupts */
+static device_t root_pic; /* PIC device for all incoming interrupts */
+static device_t msi_pic; /* Device which handles MSI/MSI-X interrupts */
+static struct mtx intr_list_lock;
+
+static void
+intr_init(void *dummy __unused)
+{
+
+ mtx_init(&intr_list_lock, "intr sources lock", NULL, MTX_DEF);
+}
+SYSINIT(intr_init, SI_SUB_INTR, SI_ORDER_FIRST, intr_init, NULL);
+
+/*
+ * Helper routines.
+ */
+
+/* Set interrupt name for statistics */
+static void
+intrcnt_setname(const char *name, u_int idx)
+{
+
+ snprintf(&intrnames[idx * INTRNAME_LEN], INTRNAME_LEN, "%-*s",
+ INTRNAME_LEN - 1, name);
+}
+
+/*
+ * Get the intr structure for the given interrupt number,
+ * allocating one the first time it is seen.
+ * (Similar to powerpc's intr_lookup(), but walking the SLIST of
+ * already-configured interrupts.)
+ */
+static struct arm64_intr_entry *
+intr_acquire(u_int hw_irq)
+{
+ struct arm64_intr_entry *intr;
+
+ mtx_lock(&intr_list_lock);
+
+ SLIST_FOREACH(intr, &irq_slist_head, entries) {
+ if (intr->i_hw_irq == hw_irq) {
+ break;
+ }
+ }
+ if (intr != NULL)
+ goto out;
+
+ /* Do not alloc another intr when max number of IRQs has been reached */
+ if (intrcntidx >= NIRQS)
+ goto out;
+
+ intr = malloc(sizeof(*intr), M_INTR, M_NOWAIT);
+ if (intr == NULL)
+ goto out;
+
+ intr->i_event = NULL;
+ intr->i_handlers = 0;
+ intr->i_trig = INTR_TRIGGER_CONFORM;
+ intr->i_pol = INTR_POLARITY_CONFORM;
+ intr->i_cntidx = atomic_fetchadd_int(&intrcntidx, 1);
+ intr->i_cntp = &intrcnt[intr->i_cntidx];
+ intr->i_hw_irq = hw_irq;
+ SLIST_INSERT_HEAD(&irq_slist_head, intr, entries);
+out:
+ mtx_unlock(&intr_list_lock);
+ return intr;
+}
+
+static void
+intr_pre_ithread(void *arg)
+{
+ struct arm64_intr_entry *intr = arg;
+
+ PIC_PRE_ITHREAD(root_pic, intr->i_hw_irq);
+}
+
+static void
+intr_post_ithread(void *arg)
+{
+ struct arm64_intr_entry *intr = arg;
+
+ PIC_POST_ITHREAD(root_pic, intr->i_hw_irq);
+}
+
+static void
+intr_post_filter(void *arg)
+{
+ struct arm64_intr_entry *intr = arg;
+
+ PIC_POST_FILTER(root_pic, intr->i_hw_irq);
+}
+
+/*
+ * Register PIC driver.
+ * This is intended to be called by the very first PIC driver
+ * at the end of the successful attach.
+ * Note that during boot this can be called after the first references
+ * to bus_setup_intr(), so root_pic must not be used unless it is
+ * known to be safe.
+ */
+void
+arm_register_root_pic(device_t dev, u_int nirq)
+{
+
+ KASSERT(root_pic == NULL, ("Unable to set the pic twice"));
+ KASSERT(nirq <= NIRQS, ("PIC is trying to handle too many IRQs"));
+
+ arm64_nintrs = NIRQS; /* Number of IRQs limited only by array size */
+ root_pic = dev;
+}
+
+/* Register device which allocates MSI interrupts */
+void
+arm_register_msi_pic(device_t dev)
+{
+
+ KASSERT(msi_pic == NULL, ("Unable to set msi_pic twice"));
+ msi_pic = dev;
+}
+
+int
+arm_alloc_msi(device_t pci_dev, int count, int *irqs)
+{
+
+ return PIC_ALLOC_MSI(msi_pic, pci_dev, count, irqs);
+}
+
+int
+arm_release_msi(device_t pci_dev, int count, int *irqs)
+{
+
+ return PIC_RELEASE_MSI(msi_pic, pci_dev, count, irqs);
+}
+
+int
+arm_map_msi(device_t pci_dev, int irq, uint64_t *addr, uint32_t *data)
+{
+
+ return PIC_MAP_MSI(msi_pic, pci_dev, irq, addr, data);
+}
+
+int
+arm_alloc_msix(device_t pci_dev, int *irq)
+{
+
+ return PIC_ALLOC_MSIX(msi_pic, pci_dev, irq);
+}
+
+int
+arm_release_msix(device_t pci_dev, int irq)
+{
+
+ return PIC_RELEASE_MSIX(msi_pic, pci_dev, irq);
+}
+
+int
+arm_map_msix(device_t pci_dev, int irq, uint64_t *addr, uint32_t *data)
+{
+
+ return PIC_MAP_MSIX(msi_pic, pci_dev, irq, addr, data);
+}
+
+/*
+ * Finalize interrupt bring-up (should be called from configure_final()).
+ * Enables all interrupts registered by bus_setup_intr() during boot
+ * and unblocks interrupt reception on the primary CPU.
+ */
+int
+arm_enable_intr(void)
+{
+ struct arm64_intr_entry *intr;
+
+ if (root_pic == NULL)
+ panic("Cannot enable interrupts. No PIC configured");
+
+ /*
+ * Iterate through all possible interrupts and perform
+ * configuration if the interrupt is registered.
+ */
+ SLIST_FOREACH(intr, &irq_slist_head, entries) {
+ /*
+ * XXX: If an interrupt whose number exceeds the maximum
+ * supported by the root PIC was set up, disable it and
+ * print a proper error message.
+ *
+ * This can happen only when calling bus_setup_intr()
+ * before the interrupt controller is attached.
+ */
+ if (intr->i_cntidx >= arm64_nintrs) {
+ /* Better to fail when INVARIANTS is enabled */
+ KASSERT(0, ("%s: Interrupt %u cannot be handled by the "
+ "registered PIC. Max interrupt number: %u", __func__,
+ intr->i_cntidx, arm64_nintrs - 1));
+ /* Print message and disable otherwise */
+ printf("ERROR: Cannot enable irq %u. Disabling.\n",
+ intr->i_cntidx);
+ PIC_MASK(root_pic, intr->i_hw_irq);
+ }
+
+ if (intr->i_trig != INTR_TRIGGER_CONFORM ||
+ intr->i_pol != INTR_POLARITY_CONFORM) {
+ PIC_CONFIG(root_pic, intr->i_hw_irq,
+ intr->i_trig, intr->i_pol);
+ }
+
+ if (intr->i_handlers > 0)
+ PIC_UNMASK(root_pic, intr->i_hw_irq);
+
+ }
+ /* Enable interrupt reception on this CPU */
+ intr_enable();
+
+ return (0);
+}
+
+int
+arm_setup_intr(const char *name, driver_filter_t *filt, driver_intr_t handler,
+ void *arg, u_int hw_irq, enum intr_type flags, void **cookiep)
+{
+ struct arm64_intr_entry *intr;
+ int error;
+
+ intr = intr_acquire(hw_irq);
+ if (intr == NULL)
+ return (ENOMEM);
+
+ /*
+ * Watch out for interrupt numbers.
+ * During boot, do not allow the interrupt table to overfill
+ * (such interrupts will be deconfigured in arm_enable_intr()).
+ */
+ if (intr->i_cntidx >= NIRQS)
+ return (EINVAL);
+
+ if (intr->i_event == NULL) {
+ error = intr_event_create(&intr->i_event, (void *)intr, 0,
+ hw_irq, intr_pre_ithread, intr_post_ithread,
+ intr_post_filter, NULL, "irq%u", hw_irq);
+ if (error)
+ return (error);
+ }
+
+ error = intr_event_add_handler(intr->i_event, name, filt, handler, arg,
+ intr_priority(flags), flags, cookiep);
+
+ if (!error) {
+ mtx_lock(&intr_list_lock);
+ intrcnt_setname(intr->i_event->ie_fullname, intr->i_cntidx);
+ intr->i_handlers++;
+
+ if (!cold && intr->i_handlers == 1) {
+ if (intr->i_trig != INTR_TRIGGER_CONFORM ||
+ intr->i_pol != INTR_POLARITY_CONFORM) {
+ PIC_CONFIG(root_pic, intr->i_hw_irq, intr->i_trig,
+ intr->i_pol);
+ }
+
+ PIC_UNMASK(root_pic, intr->i_hw_irq);
+ }
+ mtx_unlock(&intr_list_lock);
+ }
+
+ return (error);
+}
+
+int
+arm_teardown_intr(void *cookie)
+{
+ struct arm64_intr_entry *intr;
+ int error;
+
+ intr = intr_handler_source(cookie);
+ error = intr_event_remove_handler(cookie);
+ if (!error) {
+ mtx_lock(&intr_list_lock);
+ intr->i_handlers--;
+ if (intr->i_handlers == 0)
+ PIC_MASK(root_pic, intr->i_hw_irq);
+ intrcnt_setname(intr->i_event->ie_fullname, intr->i_cntidx);
+ mtx_unlock(&intr_list_lock);
+ }
+
+ return (error);
+}
+
+int
+arm_config_intr(u_int hw_irq, enum intr_trigger trig, enum intr_polarity pol)
+{
+ struct arm64_intr_entry *intr;
+
+ intr = intr_acquire(hw_irq);
+ if (intr == NULL)
+ return (ENOMEM);
+
+ intr->i_trig = trig;
+ intr->i_pol = pol;
+
+ if (!cold && root_pic != NULL)
+ PIC_CONFIG(root_pic, intr->i_hw_irq, trig, pol);
+
+ return (0);
+}
+
+void
+arm_dispatch_intr(u_int hw_irq, struct trapframe *tf)
+{
+ struct arm64_intr_entry *intr;
+
+ SLIST_FOREACH(intr, &irq_slist_head, entries) {
+ if (intr->i_hw_irq == hw_irq) {
+ break;
+ }
+ }
+
+ if (intr == NULL)
+ goto stray;
+
+ (*intr->i_cntp)++;
+
+ if (!intr_event_handle(intr->i_event, tf))
+ return;
+
+stray:
+ if (arm64_nstray < MAX_STRAY_LOG) {
+ arm64_nstray++;
+ printf("Stray IRQ %u\n", hw_irq);
+ if (arm64_nstray >= MAX_STRAY_LOG) {
+ printf("Got %d stray IRQs. Not logging anymore.\n",
+ MAX_STRAY_LOG);
+ }
+ }
+
+ if (intr != NULL)
+ PIC_MASK(root_pic, intr->i_hw_irq);
+}
+
+void
+arm_cpu_intr(struct trapframe *tf)
+{
+
+ critical_enter();
+ PIC_DISPATCH(root_pic, tf);
+ critical_exit();
+}
+
+#ifdef SMP
+void
+arm_setup_ipihandler(driver_filter_t *filt, u_int ipi)
+{
+
+ /* ARM64TODO: The hard coded 16 will be fixed with arm_intrng */
+ arm_setup_intr("ipi", filt, NULL, (void *)((uintptr_t)ipi | 1 << 16),
+ ipi + 16, INTR_TYPE_MISC | INTR_EXCL, NULL);
+ arm_unmask_ipi(ipi);
+}
+
+void
+arm_unmask_ipi(u_int ipi)
+{
+ PIC_UNMASK(root_pic, ipi + 16);
+}
+
+void
+arm_init_secondary(void)
+{
+
+ PIC_INIT_SECONDARY(root_pic);
+}
+
+/* Sending IPI */
+void
+ipi_all_but_self(u_int ipi)
+{
+
+ /* ARM64TODO: We should support this */
+ panic("ipi_all_but_self");
+}
+
+void
+ipi_cpu(int cpu, u_int ipi)
+{
+ cpuset_t cpus;
+
+ CPU_ZERO(&cpus);
+ CPU_SET(cpu, &cpus);
+
+ /* ARM64TODO: This will be fixed with arm_intrng */
+ ipi += 16;
+
+ CTR2(KTR_SMP, "ipi_cpu: cpu: %d, ipi: %x", cpu, ipi);
+ PIC_IPI_SEND(root_pic, cpus, ipi);
+}
+
+void
+ipi_selected(cpuset_t cpus, u_int ipi)
+{
+
+ /* ARM64TODO: This will be fixed with arm_intrng */
+ ipi += 16;
+
+ CTR1(KTR_SMP, "ipi_selected: ipi: %x", ipi);
+ PIC_IPI_SEND(root_pic, cpus, ipi);
+}
+
+#endif
Index: sys/arm64/arm64/locore.S
===================================================================
--- /dev/null
+++ sys/arm64/arm64/locore.S
@@ -0,0 +1,544 @@
+/*-
+ * Copyright (c) 2012-2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include "assym.s"
+#include <sys/syscall.h>
+#include <machine/asm.h>
+#include <machine/armreg.h>
+#include <machine/hypervisor.h>
+#include <machine/param.h>
+#include <machine/pte.h>
+
+#define VIRT_BITS 39
+
+ .globl kernbase
+ .set kernbase, KERNBASE
+
+#define DEVICE_MEM 0
+#define NORMAL_UNCACHED 1
+#define NORMAL_MEM 2
+
+/*
+ * We assume:
+ * MMU on with an identity map, or off
+ * D-Cache: off
+ * I-Cache: on or off
+ * We are loaded at a 2MiB aligned address
+ */
+
+#define INIT_STACK_SIZE (PAGE_SIZE * 4)
+
+ .text
+ .globl _start
+_start:
+ /* Drop to EL1 */
+ bl drop_to_el1
+
+ /*
+ * Disable the MMU. We may have entered the kernel with it on and
+ * will need to update the tables later. If this has been set up
+ * with anything other than a VA == PA map then this will fail,
+ * but in this case the code to find where we are running from
+ * would have also failed.
+ */
+ dsb sy
+ mrs x2, sctlr_el1
+ bic x2, x2, SCTLR_M
+ msr sctlr_el1, x2
+ isb
+
+
+ /* Get the virt -> phys offset */
+ bl get_virt_delta
+
+ /*
+ * At this point:
+ * x29 = PA - VA
+ * x28 = Our physical load address
+ */
+
+ /* Create the page tables */
+ bl create_pagetables
+
+ /*
+ * At this point:
+ * x27 = TTBR0 table
+ * x26 = TTBR1 table
+ */
+
+ /* Enable the mmu */
+ bl start_mmu
+
+ /* Jump to the virtual address space */
+ ldr x15, .Lvirtdone
+ br x15
+
+virtdone:
+ /* Set up the stack */
+ adr x25, initstack_end
+ mov sp, x25
+ sub sp, sp, #PCB_SIZE
+
+ /* Zero the BSS */
+ ldr x15, .Lbss
+ ldr x14, .Lend
+1:
+ str xzr, [x15], #8
+ cmp x15, x14
+ b.lo 1b
+
+ /* Backup the module pointer */
+ mov x1, x0
+
+ /* Make the page table base a virtual address */
+ sub x26, x26, x29
+
+ sub sp, sp, #(64 * 4)
+ mov x0, sp
+
+ /* Negate the delta so it is VA -> PA */
+ neg x29, x29
+
+ str x1, [x0] /* modulep */
+ str x26, [x0, 8] /* kern_l1pt */
+ str x29, [x0, 16] /* kern_delta */
+ str x25, [x0, 24] /* kern_stack */
+
+ /* trace back starts here */
+ mov fp, #0
+ /* Branch to C code */
+ bl initarm
+ bl mi_startup
+
+ /* We should not get here */
+ brk 0
+
+ .align 3
+.Lvirtdone:
+ .quad virtdone
+.Lbss:
+ .quad __bss_start
+.Lend:
+ .quad _end
+
+/*
+ * If we are started in EL2, configure the required hypervisor
+ * registers and drop to EL1.
+ */
+drop_to_el1:
+ mrs x1, CurrentEL
+ lsr x1, x1, #2
+ cmp x1, #0x2
+ b.eq 1f
+ ret
+1:
+ /* Configure the Hypervisor */
+ mov x2, #(HCR_RW)
+ msr hcr_el2, x2
+
+ /* Load the Virtualization Process ID Register */
+ mrs x2, midr_el1
+ msr vpidr_el2, x2
+
+ /* Load the Virtualization Multiprocess ID Register */
+ mrs x2, mpidr_el1
+ msr vmpidr_el2, x2
+
+ /* Set the bits that need to be 1 in sctlr_el1 */
+ ldr x2, .Lsctlr_res1
+ msr sctlr_el1, x2
+
+ /* Don't trap to EL2 for exceptions */
+ mov x2, #CPTR_RES1
+ msr cptr_el2, x2
+
+ /* Don't trap to EL2 for CP15 traps */
+ msr hstr_el2, xzr
+
+ /* Hypervisor trap functions */
+ adr x2, hyp_vectors
+ msr vbar_el2, x2
+
+ mov x2, #(PSR_F | PSR_I | PSR_A | PSR_D | PSR_M_EL1h)
+ msr spsr_el2, x2
+
+ /* Set the address to return to our return address */
+ msr elr_el2, x30
+
+ eret
+
+ .align 3
+.Lsctlr_res1:
+ .quad SCTLR_RES1
+
+#define VECT_EMPTY \
+ .align 7; \
+ 1: b 1b
+
+ .align 11
+hyp_vectors:
+ VECT_EMPTY /* Synchronous EL2t */
+ VECT_EMPTY /* IRQ EL2t */
+ VECT_EMPTY /* FIQ EL2t */
+ VECT_EMPTY /* Error EL2t */
+
+ VECT_EMPTY /* Synchronous EL2h */
+ VECT_EMPTY /* IRQ EL2h */
+ VECT_EMPTY /* FIQ EL2h */
+ VECT_EMPTY /* Error EL2h */
+
+ VECT_EMPTY /* Synchronous 64-bit EL1 */
+ VECT_EMPTY /* IRQ 64-bit EL1 */
+ VECT_EMPTY /* FIQ 64-bit EL1 */
+ VECT_EMPTY /* Error 64-bit EL1 */
+
+ VECT_EMPTY /* Synchronous 32-bit EL1 */
+ VECT_EMPTY /* IRQ 32-bit EL1 */
+ VECT_EMPTY /* FIQ 32-bit EL1 */
+ VECT_EMPTY /* Error 32-bit EL1 */
+
+/*
+ * Get the delta between the physical address we were loaded to and the
+ * virtual address we expect to run from. This is used when building the
+ * initial page table.
+ */
+get_virt_delta:
+ /* Load the physical address of virt_map */
+ adr x29, virt_map
+ /* Load the virtual address of virt_map stored in virt_map */
+ ldr x28, [x29]
+ /* Find PA - VA as PA' = VA' - VA + PA = VA' + (PA - VA) = VA' + x29 */
+ sub x29, x29, x28
+ /* Find the load address for the kernel */
+ mov x28, #(KERNBASE)
+ add x28, x28, x29
+ ret
+
+ .align 3
+virt_map:
+ .quad virt_map
+
+/*
+ * This builds the page tables containing the identity map, and the kernel
+ * virtual map.
+ *
+ * It relies on:
+ * We were loaded to an address that is on a 2MiB boundary
+ * All the memory must not cross a 1GiB boundary
+ * x28 contains the physical address we were loaded from
+ *
+ * TODO: This is out of date.
+ * There are at least 5 pages before that address for the page tables
+ * The pages used are:
+ * - The identity (PA = VA) table (TTBR0)
+ * - The Kernel L1 table (TTBR1)(not yet)
+ * - The PA != VA L2 table to jump into (not yet)
+ * - The FDT L2 table (not yet)
+ */
+create_pagetables:
+ /* Save the Link register */
+ mov x5, x30
+
+ /* Clean the page table */
+ adr x6, pagetable
+ mov x26, x6
+ adr x27, pagetable_end
+1:
+ stp xzr, xzr, [x6], #16
+ stp xzr, xzr, [x6], #16
+ stp xzr, xzr, [x6], #16
+ stp xzr, xzr, [x6], #16
+ cmp x6, x27
+ b.lo 1b
+
+ /*
+ * Build the TTBR1 maps.
+ */
+
+ /* Find the size of the kernel */
+ mov x6, #(KERNBASE)
+ ldr x7, .Lend
+ /* Find the end - begin */
+ sub x8, x7, x6
+ /* Get the number of l2 pages to allocate, rounded down */
+ lsr x10, x8, #(L2_SHIFT)
+ /* Add 4 MiB for any rounding above and the module data */
+ add x10, x10, #2
+
+ /* Create the kernel space L2 table */
+ mov x6, x26
+ mov x7, #NORMAL_MEM
+ mov x8, #(KERNBASE & L2_BLOCK_MASK)
+ mov x9, x28
+ bl build_block_pagetable
+
+ /* Move to the l1 table */
+ add x26, x26, #PAGE_SIZE
+
+ /* Link the l1 -> l2 table */
+ mov x9, x6
+ mov x6, x26
+ bl link_l1_pagetable
+
+
+ /*
+ * Build the TTBR0 maps.
+ */
+ add x27, x26, #PAGE_SIZE
+
+#if defined(SOCDEV_PA) && defined(SOCDEV_VA)
+ /* Create a table for the UART */
+ mov x6, x27 /* The initial page table */
+ mov x7, #DEVICE_MEM
+ mov x8, #(SOCDEV_VA) /* VA start */
+ mov x9, #(SOCDEV_PA) /* PA start */
+ bl build_section_pagetable
+#endif
+
+ /* Create the VA = PA map */
+ mov x6, x27 /* The initial page table */
+ mov x7, #NORMAL_UNCACHED /* Uncached as it's only needed early on */
+ mov x9, x27
+ mov x8, x9 /* VA start (== PA start) */
+ bl build_section_pagetable
+
+ /* Restore the Link register */
+ mov x30, x5
+ ret
+
+/*
+ * Builds a 1 GiB page table entry
+ * x6 = L1 table
+ * x7 = Type (0 = Device, 1 = Normal)
+ * x8 = VA start
+ * x9 = PA start (trashed)
+ * x11, x12 and x13 are trashed
+ */
+build_section_pagetable:
+ /*
+ * Build the L1 table entry.
+ */
+ /* Find the table index */
+ lsr x11, x8, #L1_SHIFT
+ and x11, x11, #Ln_ADDR_MASK
+
+ /* Build the L1 block entry */
+ lsl x12, x7, #2
+ orr x12, x12, #L1_BLOCK
+ orr x12, x12, #(ATTR_AF)
+
+ /* Only use the output address bits */
+ lsr x9, x9, #L1_SHIFT
+ orr x12, x12, x9, lsl #L1_SHIFT
+
+ /* Store the entry */
+ str x12, [x6, x11, lsl #3]
+
+ ret
+
+/*
+ * Builds an L1 -> L2 table descriptor
+ *
+ * This is a link for a 1GiB block of memory with up to 2MiB regions mapped
+ * within it by build_block_pagetable.
+ *
+ * x6 = L1 table
+ * x8 = Virtual Address
+ * x9 = L2 PA (trashed)
+ * x11, x12 and x13 are trashed
+ */
+link_l1_pagetable:
+ /*
+ * Link an L1 -> L2 table entry.
+ */
+ /* Find the table index */
+ lsr x11, x8, #L1_SHIFT
+ and x11, x11, #Ln_ADDR_MASK
+
+ /* Build the L1 block entry */
+ mov x12, #L1_TABLE
+
+ /* Only use the output address bits */
+ lsr x9, x9, #12
+ orr x12, x12, x9, lsl #12
+
+ /* Store the entry */
+ str x12, [x6, x11, lsl #3]
+
+ ret
+
+/*
+ * Builds 'count' (x10) 2 MiB page table entries
+ * x6 = L2 table
+ * x7 = Type (0 = Device, 1 = Normal)
+ * x8 = VA start
+ * x9 = PA start (trashed)
+ * x10 = Entry count (TODO)
+ * x11, x12 and x13 are trashed
+ */
+build_block_pagetable:
+ /*
+ * Build the L2 table entry.
+ */
+ /* Find the table index */
+ lsr x11, x8, #L2_SHIFT
+ and x11, x11, #Ln_ADDR_MASK
+
+ /* Build the L2 block entry */
+ lsl x12, x7, #2
+ orr x12, x12, #L2_BLOCK
+ orr x12, x12, #(ATTR_AF)
+
+ /* Only use the output address bits */
+ lsr x9, x9, #L2_SHIFT
+
+ /* Set the physical address for this virtual address */
+1: orr x12, x12, x9, lsl #L2_SHIFT
+
+ /* Store the entry */
+ str x12, [x6, x11, lsl #3]
+
+ /* Clear the address bits */
+ and x12, x12, #ATTR_MASK_L
+
+ sub x10, x10, #1
+ add x11, x11, #1
+ add x9, x9, #1
+ cbnz x10, 1b
+
+2: ret
+
+start_mmu:
+ dsb sy
+
+ /* Load the exception vectors */
+ ldr x2, =exception_vectors
+ msr vbar_el1, x2
+
+ /* Load ttbr0 and ttbr1 */
+ msr ttbr0_el1, x27
+ msr ttbr1_el1, x26
+ isb
+
+ /* Clear the Monitor Debug System control register */
+ msr mdscr_el1, xzr
+
+ /* Invalidate the TLB */
+ tlbi vmalle1is
+
+ ldr x2, mair
+ msr mair_el1, x2
+
+ /* Setup TCR according to PARange bits from ID_AA64MMFR0_EL1 */
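+ /*
+ * The bfi below copies ID_AA64MMFR0_EL1.PARange (bits [2:0]) into
+ * the TCR_EL1 IPS field (bits [34:32]).
+ */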
+ ldr x2, tcr
+ mrs x3, id_aa64mmfr0_el1
+ bfi x2, x3, #32, #3
+ msr tcr_el1, x2
+
+ /* Setup SCTLR */
+ ldr x2, sctlr_set
+ ldr x3, sctlr_clear
+ mrs x1, sctlr_el1
+ bic x1, x1, x3 /* Clear the required bits */
+ orr x1, x1, x2 /* Set the required bits */
+ msr sctlr_el1, x1
+ isb
+
+ ret
+
+ .align 3
+mair:
+ /* Device Normal, no cache Normal, write-back */
+ .quad MAIR_ATTR(0x00, 0) | MAIR_ATTR(0x44, 1) | MAIR_ATTR(0xff, 2)
+tcr:
+ .quad (TCR_TxSZ(64 - VIRT_BITS) | TCR_ASID_16 | TCR_TG1_4K)
+sctlr_set:
+ /* Bits to set */
+ .quad (SCTLR_UCI | SCTLR_nTWE | SCTLR_nTWI | SCTLR_UCT | SCTLR_DZE | \
+ SCTLR_I | SCTLR_SED | SCTLR_C | SCTLR_M)
+sctlr_clear:
+ /* Bits to clear */
+ .quad (SCTLR_EE | SCTLR_EOE | SCTLR_WXN | SCTLR_UMA | SCTLR_ITD | \
+ SCTLR_THEE | SCTLR_CP15BEN | SCTLR_SA0 | SCTLR_SA | SCTLR_A)
+
+ .globl abort
+abort:
+ b abort
+
+ //.section .init_pagetable
+ .align 12 /* 4KiB aligned */
+ /*
+ * 3 initial tables (in the following order):
+ * L2 for kernel (High addresses)
+ * L1 for kernel
+ * L1 for user (Low addresses)
+ */
+pagetable:
+ .space PAGE_SIZE
+pagetable_l1_ttbr1:
+ .space PAGE_SIZE
+pagetable_l1_ttbr0:
+ .space PAGE_SIZE
+pagetable_end:
+
+el2_pagetable:
+ .space PAGE_SIZE
+
+ .globl init_pt_va
+init_pt_va:
+ .quad pagetable /* XXX: Keep page tables VA */
+
+ .align 4
+initstack:
+ .space (PAGE_SIZE * KSTACK_PAGES)
+initstack_end:
+
+
+ENTRY(sigcode)
+ mov x0, sp
+ add x0, x0, #SF_UC
+
+1:
+ mov x8, #SYS_sigreturn
+ svc 0
+
+ /* sigreturn failed, exit */
+ mov x8, #SYS_exit
+ svc 0
+
+ b 1b
+END(sigcode)
+ /* This may be copied to the stack, keep it 16-byte aligned */
+ .align 3
+esigcode:
+
+ .data
+ .align 3
+ .global szsigcode
+szsigcode:
+ .quad esigcode - sigcode
Index: sys/arm64/arm64/machdep.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/machdep.c
@@ -0,0 +1,854 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "opt_platform.h"
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/buf.h>
+#include <sys/bus.h>
+#include <sys/cons.h>
+#include <sys/cpu.h>
+#include <sys/efi.h>
+#include <sys/exec.h>
+#include <sys/imgact.h>
+#include <sys/kdb.h>
+#include <sys/kernel.h>
+#include <sys/limits.h>
+#include <sys/linker.h>
+#include <sys/msgbuf.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+#include <sys/reboot.h>
+#include <sys/rwlock.h>
+#include <sys/sched.h>
+#include <sys/signalvar.h>
+#include <sys/syscallsubr.h>
+#include <sys/sysent.h>
+#include <sys/sysproto.h>
+#include <sys/ucontext.h>
+
+#include <vm/vm.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_pager.h>
+
+#include <machine/armreg.h>
+#include <machine/cpu.h>
+#include <machine/debug_monitor.h>
+#include <machine/kdb.h>
+#include <machine/devmap.h>
+#include <machine/machdep.h>
+#include <machine/metadata.h>
+#include <machine/pcb.h>
+#include <machine/reg.h>
+#include <machine/vmparam.h>
+
+#ifdef VFP
+#include <machine/vfp.h>
+#endif
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include <dev/ofw/openfirm.h>
+#endif
+
+struct pcpu __pcpu[MAXCPU];
+
+static struct trapframe proc0_tf;
+
+vm_paddr_t phys_avail[PHYS_AVAIL_SIZE + 2];
+vm_paddr_t dump_avail[PHYS_AVAIL_SIZE + 2];
+
+int early_boot = 1;
+int cold = 1;
+long realmem = 0;
+long Maxmem = 0;
+
+#define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1))
+vm_paddr_t physmap[PHYSMAP_SIZE];
+u_int physmap_idx;
+
+struct kva_md_info kmi;
+
+int64_t dcache_line_size; /* The minimum D cache line size */
+int64_t icache_line_size; /* The minimum I cache line size */
+int64_t idcache_line_size; /* The minimum cache line size */
+
+static void
+cpu_startup(void *dummy)
+{
+
+ identify_cpu();
+
+ vm_ksubmap_init(&kmi);
+ bufinit();
+ vm_pager_bufferinit();
+}
+
+SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);
+
+void
+bzero(void *buf, size_t len)
+{
+ uint8_t *p;
+
+ p = buf;
+ while(len-- > 0)
+ *p++ = 0;
+}
+
+int
+fill_regs(struct thread *td, struct reg *regs)
+{
+ struct trapframe *frame;
+
+ frame = td->td_frame;
+ regs->sp = frame->tf_sp;
+ regs->lr = frame->tf_lr;
+ regs->elr = frame->tf_elr;
+ regs->spsr = frame->tf_spsr;
+
+ memcpy(regs->x, frame->tf_x, sizeof(regs->x));
+
+ return (0);
+}
+
+int
+set_regs(struct thread *td, struct reg *regs)
+{
+ struct trapframe *frame;
+
+ frame = td->td_frame;
+ frame->tf_sp = regs->sp;
+ frame->tf_lr = regs->lr;
+ frame->tf_elr = regs->elr;
+ frame->tf_spsr = regs->spsr;
+
+ memcpy(frame->tf_x, regs->x, sizeof(frame->tf_x));
+
+ return (0);
+}
+
+int
+fill_fpregs(struct thread *td, struct fpreg *regs)
+{
+#ifdef VFP
+ struct pcb *pcb;
+
+ pcb = td->td_pcb;
+ if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
+ /*
+ * If we have just been running VFP instructions we will
+ * need to save the state to memcpy it below.
+ */
+ vfp_save_state(td);
+
+ memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
+ regs->fp_cr = pcb->pcb_fpcr;
+ regs->fp_sr = pcb->pcb_fpsr;
+ } else
+#endif
+ memset(regs->fp_q, 0, sizeof(regs->fp_q));
+ return (0);
+}
+
+int
+set_fpregs(struct thread *td, struct fpreg *regs)
+{
+#ifdef VFP
+ struct pcb *pcb;
+
+ pcb = td->td_pcb;
+ memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
+ pcb->pcb_fpcr = regs->fp_cr;
+ pcb->pcb_fpsr = regs->fp_sr;
+#endif
+ return (0);
+}
+
+int
+fill_dbregs(struct thread *td, struct dbreg *regs)
+{
+
+ panic("fill_dbregs");
+}
+
+int
+set_dbregs(struct thread *td, struct dbreg *regs)
+{
+
+ panic("set_dbregs");
+}
+
+int
+ptrace_set_pc(struct thread *td, u_long addr)
+{
+
+ panic("ptrace_set_pc");
+ return (0);
+}
+
+int
+ptrace_single_step(struct thread *td)
+{
+
+ /* TODO */
+ return (0);
+}
+
+int
+ptrace_clear_single_step(struct thread *td)
+{
+
+ /* TODO */
+ return (0);
+}
+
+void
+exec_setregs(struct thread *td, struct image_params *imgp, u_long stack)
+{
+ struct trapframe *tf = td->td_frame;
+
+ memset(tf, 0, sizeof(struct trapframe));
+
+ tf->tf_sp = stack;
+ tf->tf_lr = imgp->entry_addr;
+ tf->tf_elr = imgp->entry_addr;
+}
+
+/* Sanity check these are the same size, they will be memcpy'd to and fro */
+CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
+ sizeof((struct gpregs *)0)->gp_x);
+CTASSERT(sizeof(((struct trapframe *)0)->tf_x) ==
+ sizeof((struct reg *)0)->x);
+
+int
+get_mcontext(struct thread *td, mcontext_t *mcp, int clear_ret)
+{
+ struct trapframe *tf = td->td_frame;
+
+ if (clear_ret & GET_MC_CLEAR_RET)
+ mcp->mc_gpregs.gp_x[0] = 0;
+ else
+ mcp->mc_gpregs.gp_x[0] = tf->tf_x[0];
+
+ memcpy(&mcp->mc_gpregs.gp_x[1], &tf->tf_x[1],
+ sizeof(mcp->mc_gpregs.gp_x[1]) * (nitems(mcp->mc_gpregs.gp_x) - 1));
+
+ mcp->mc_gpregs.gp_sp = tf->tf_sp;
+ mcp->mc_gpregs.gp_lr = tf->tf_lr;
+ mcp->mc_gpregs.gp_elr = tf->tf_elr;
+ mcp->mc_gpregs.gp_spsr = tf->tf_spsr;
+
+ return (0);
+}
+
+int
+set_mcontext(struct thread *td, mcontext_t *mcp)
+{
+ struct trapframe *tf = td->td_frame;
+
+ memcpy(tf->tf_x, mcp->mc_gpregs.gp_x, sizeof(tf->tf_x));
+
+ tf->tf_sp = mcp->mc_gpregs.gp_sp;
+ tf->tf_lr = mcp->mc_gpregs.gp_lr;
+ tf->tf_elr = mcp->mc_gpregs.gp_elr;
+ tf->tf_spsr = mcp->mc_gpregs.gp_spsr;
+
+ return (0);
+}
+
+static void
+get_fpcontext(struct thread *td, mcontext_t *mcp)
+{
+#ifdef VFP
+ struct pcb *curpcb;
+
+ critical_enter();
+
+ curpcb = curthread->td_pcb;
+
+ if ((curpcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
+ /*
+ * If we have just been running VFP instructions we will
+ * need to save the state to memcpy it below.
+ */
+ vfp_save_state(td);
+
+ memcpy(mcp->mc_fpregs.fp_q, curpcb->pcb_vfp,
+ sizeof(mcp->mc_fpregs));
+ mcp->mc_fpregs.fp_cr = curpcb->pcb_fpcr;
+ mcp->mc_fpregs.fp_sr = curpcb->pcb_fpsr;
+ mcp->mc_fpregs.fp_flags = curpcb->pcb_fpflags;
+ mcp->mc_flags |= _MC_FP_VALID;
+ }
+
+ critical_exit();
+#endif
+}
+
+static void
+set_fpcontext(struct thread *td, mcontext_t *mcp)
+{
+#ifdef VFP
+ struct pcb *curpcb;
+
+ critical_enter();
+
+ if ((mcp->mc_flags & _MC_FP_VALID) != 0) {
+ curpcb = curthread->td_pcb;
+
+ /*
+ * Discard any vfp state for the current thread, we
+ * are about to override it.
+ */
+ vfp_discard(td);
+
+ memcpy(curpcb->pcb_vfp, mcp->mc_fpregs.fp_q,
+ sizeof(mcp->mc_fpregs));
+ curpcb->pcb_fpcr = mcp->mc_fpregs.fp_cr;
+ curpcb->pcb_fpsr = mcp->mc_fpregs.fp_sr;
+ curpcb->pcb_fpflags = mcp->mc_fpregs.fp_flags;
+ }
+
+ critical_exit();
+#endif
+}
+
+void
+cpu_idle(int busy)
+{
+
+ spinlock_enter();
+ if (!busy)
+ cpu_idleclock();
+ if (!sched_runnable())
+ __asm __volatile(
+ "dsb sy \n"
+ "wfi \n");
+ if (!busy)
+ cpu_activeclock();
+ spinlock_exit();
+}
+
+void
+cpu_halt(void)
+{
+
+ panic("cpu_halt");
+}
+
+/*
+ * Flush the D-cache for non-DMA I/O so that the I-cache can
+ * be made coherent later.
+ */
+void
+cpu_flush_dcache(void *ptr, size_t len)
+{
+
+ /* TBD */
+}
+
+/* Get current clock frequency for the given CPU ID. */
+int
+cpu_est_clockrate(int cpu_id, uint64_t *rate)
+{
+
+ panic("cpu_est_clockrate");
+}
+
+void
+cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
+{
+}
+
+void
+spinlock_enter(void)
+{
+ struct thread *td;
+ register_t daif;
+
+ td = curthread;
+ if (td->td_md.md_spinlock_count == 0) {
+ daif = intr_disable();
+ td->td_md.md_spinlock_count = 1;
+ td->td_md.md_saved_daif = daif;
+ } else
+ td->td_md.md_spinlock_count++;
+ critical_enter();
+}
+
+void
+spinlock_exit(void)
+{
+ struct thread *td;
+ register_t daif;
+
+ td = curthread;
+ critical_exit();
+ daif = td->td_md.md_saved_daif;
+ td->td_md.md_spinlock_count--;
+ if (td->td_md.md_spinlock_count == 0)
+ intr_restore(daif);
+}
+
+#ifndef _SYS_SYSPROTO_H_
+struct sigreturn_args {
+ ucontext_t *sigcntxp;
+};
+#endif
+
+int
+sys_sigreturn(struct thread *td, struct sigreturn_args *uap)
+{
+ ucontext_t uc;
+ uint32_t spsr;
+
+ if (uap == NULL)
+ return (EFAULT);
+ if (copyin(uap->sigcntxp, &uc, sizeof(uc)))
+ return (EFAULT);
+
+ spsr = uc.uc_mcontext.mc_gpregs.gp_spsr;
+ if ((spsr & PSR_M_MASK) != PSR_M_EL0t ||
+ (spsr & (PSR_F | PSR_I | PSR_A | PSR_D)) != 0)
+ return (EINVAL);
+
+ set_mcontext(td, &uc.uc_mcontext);
+ set_fpcontext(td, &uc.uc_mcontext);
+
+ /* Restore signal mask. */
+ kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);
+
+ return (EJUSTRETURN);
+}
+
+/*
+ * Construct a PCB from a trapframe. This is called from kdb_trap() where
+ * we want to start a backtrace from the function that caused us to enter
+ * the debugger. We have the context in the trapframe, but base the trace
+ * on the PCB. The PCB doesn't have to be perfect, as long as it contains
+ * enough for a backtrace.
+ */
+void
+makectx(struct trapframe *tf, struct pcb *pcb)
+{
+ int i;
+
+ for (i = 0; i < PCB_LR; i++)
+ pcb->pcb_x[i] = tf->tf_x[i];
+
+ pcb->pcb_x[PCB_LR] = tf->tf_lr;
+ pcb->pcb_pc = tf->tf_elr;
+ pcb->pcb_sp = tf->tf_sp;
+}
+
+void
+sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
+{
+ struct thread *td;
+ struct proc *p;
+ struct trapframe *tf;
+ struct sigframe *fp, frame;
+ struct sigacts *psp;
+ int code, onstack, sig;
+
+ td = curthread;
+ p = td->td_proc;
+ PROC_LOCK_ASSERT(p, MA_OWNED);
+
+ sig = ksi->ksi_signo;
+ code = ksi->ksi_code;
+ psp = p->p_sigacts;
+ mtx_assert(&psp->ps_mtx, MA_OWNED);
+
+ tf = td->td_frame;
+ onstack = sigonstack(tf->tf_sp);
+
+ CTR4(KTR_SIG, "sendsig: td=%p (%s) catcher=%p sig=%d", td, p->p_comm,
+ catcher, sig);
+
+ /* Allocate and validate space for the signal handler context. */
+ if ((td->td_pflags & TDP_ALTSTACK) != 0 && !onstack &&
+ SIGISMEMBER(psp->ps_sigonstack, sig)) {
+ fp = (struct sigframe *)(td->td_sigstk.ss_sp +
+ td->td_sigstk.ss_size);
+#if defined(COMPAT_43)
+ td->td_sigstk.ss_flags |= SS_ONSTACK;
+#endif
+ } else {
+ fp = (struct sigframe *)td->td_frame->tf_sp;
+ }
+
+ /* Make room, keeping the stack aligned */
+ fp--;
+ fp = (struct sigframe *)STACKALIGN(fp);
+
+ /* Fill in the frame to copy out */
+ get_mcontext(td, &frame.sf_uc.uc_mcontext, 0);
+ get_fpcontext(td, &frame.sf_uc.uc_mcontext);
+ frame.sf_si = ksi->ksi_info;
+ frame.sf_uc.uc_sigmask = *mask;
+ frame.sf_uc.uc_stack = td->td_sigstk;
+ frame.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) ?
+ ((onstack) ? SS_ONSTACK : 0) : SS_DISABLE;
+ mtx_unlock(&psp->ps_mtx);
+ PROC_UNLOCK(td->td_proc);
+
+ /* Copy the sigframe out to the user's stack. */
+ if (copyout(&frame, fp, sizeof(*fp)) != 0) {
+ /* Process has trashed its stack. Kill it. */
+ CTR2(KTR_SIG, "sendsig: sigexit td=%p fp=%p", td, fp);
+ PROC_LOCK(p);
+ sigexit(td, SIGILL);
+ }
+
+ /* Translate the signal if appropriate. */
+ if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
+ sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
+
+ tf->tf_x[0] = sig;
+ tf->tf_x[1] = (register_t)&fp->sf_si;
+ tf->tf_x[2] = (register_t)&fp->sf_uc;
+
+ tf->tf_elr = (register_t)catcher;
+ tf->tf_sp = (register_t)fp;
+ tf->tf_lr = (register_t)(PS_STRINGS - *(p->p_sysent->sv_szsigcode));
+
+ CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_elr,
+ tf->tf_sp);
+
+ PROC_LOCK(p);
+ mtx_lock(&psp->ps_mtx);
+}
+
+static void
+init_proc0(vm_offset_t kstack)
+{
+ struct pcpu *pcpup = &__pcpu[0];
+
+ proc_linkup0(&proc0, &thread0);
+ thread0.td_kstack = kstack;
+ thread0.td_pcb = (struct pcb *)(thread0.td_kstack) - 1;
+ thread0.td_pcb->pcb_fpflags = 0;
+ thread0.td_pcb->pcb_vfpcpu = UINT_MAX;
+ thread0.td_frame = &proc0_tf;
+ pcpup->pc_curpcb = thread0.td_pcb;
+}
+
+typedef struct {
+ uint32_t type;
+ uint64_t phys_start;
+ uint64_t virt_start;
+ uint64_t num_pages;
+ uint64_t attr;
+} EFI_MEMORY_DESCRIPTOR;
+
+static int
+add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap,
+ u_int *physmap_idxp)
+{
+ u_int i, insert_idx, _physmap_idx;
+
+ _physmap_idx = *physmap_idxp;
+
+ if (length == 0)
+ return (1);
+
+ /*
+ * Find insertion point while checking for overlap. Start off by
+ * assuming the new entry will be added to the end.
+ */
+ insert_idx = _physmap_idx;
+ for (i = 0; i <= _physmap_idx; i += 2) {
+ if (base < physmap[i + 1]) {
+ if (base + length <= physmap[i]) {
+ insert_idx = i;
+ break;
+ }
+ if (boothowto & RB_VERBOSE)
+ printf(
+ "Overlapping memory regions, ignoring second region\n");
+ return (1);
+ }
+ }
+
+ /* See if we can prepend to the next entry. */
+ if (insert_idx <= _physmap_idx &&
+ base + length == physmap[insert_idx]) {
+ physmap[insert_idx] = base;
+ return (1);
+ }
+
+ /* See if we can append to the previous entry. */
+ if (insert_idx > 0 && base == physmap[insert_idx - 1]) {
+ physmap[insert_idx - 1] += length;
+ return (1);
+ }
+
+ _physmap_idx += 2;
+ *physmap_idxp = _physmap_idx;
+ if (_physmap_idx == PHYSMAP_SIZE) {
+ printf(
+ "Too many segments in the physical address map, giving up\n");
+ return (0);
+ }
+
+ /*
+ * Move the last 'N' entries down to make room for the new
+ * entry if needed.
+ */
+ for (i = _physmap_idx; i > insert_idx; i -= 2) {
+ physmap[i] = physmap[i - 2];
+ physmap[i + 1] = physmap[i - 1];
+ }
+
+ /* Insert the new entry. */
+ physmap[insert_idx] = base;
+ physmap[insert_idx + 1] = base + length;
+ return (1);
+}
+
+#define efi_next_descriptor(ptr, size) \
+ ((struct efi_md *)(((uint8_t *) ptr) + size))
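+/*
+ * Descriptors must be walked using the firmware-reported descriptor_size
+ * rather than sizeof(struct efi_md), since UEFI may use larger
+ * descriptors than the structure we define.
+ */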
+
+static void
+add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap,
+ u_int *physmap_idxp)
+{
+ struct efi_md *map, *p;
+ const char *type;
+ size_t efisz;
+ int ndesc, i;
+
+ static const char *types[] = {
+ "Reserved",
+ "LoaderCode",
+ "LoaderData",
+ "BootServicesCode",
+ "BootServicesData",
+ "RuntimeServicesCode",
+ "RuntimeServicesData",
+ "ConventionalMemory",
+ "UnusableMemory",
+ "ACPIReclaimMemory",
+ "ACPIMemoryNVS",
+ "MemoryMappedIO",
+ "MemoryMappedIOPortSpace",
+ "PalCode"
+ };
+
+ /*
+ * Memory map data provided by UEFI via the GetMemoryMap
+ * Boot Services API.
+ */
+ efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf;
+ map = (struct efi_md *)((uint8_t *)efihdr + efisz);
+
+ if (efihdr->descriptor_size == 0)
+ return;
+ ndesc = efihdr->memory_size / efihdr->descriptor_size;
+
+ if (boothowto & RB_VERBOSE)
+ printf("%23s %12s %12s %8s %4s\n",
+ "Type", "Physical", "Virtual", "#Pages", "Attr");
+
+ for (i = 0, p = map; i < ndesc; i++,
+ p = efi_next_descriptor(p, efihdr->descriptor_size)) {
+ if (boothowto & RB_VERBOSE) {
+ if (p->md_type <= EFI_MD_TYPE_PALCODE)
+ type = types[p->md_type];
+ else
+ type = "<INVALID>";
+ printf("%23s %012lx %12p %08lx ", type, p->md_phys,
+ p->md_virt, p->md_pages);
+ if (p->md_attr & EFI_MD_ATTR_UC)
+ printf("UC ");
+ if (p->md_attr & EFI_MD_ATTR_WC)
+ printf("WC ");
+ if (p->md_attr & EFI_MD_ATTR_WT)
+ printf("WT ");
+ if (p->md_attr & EFI_MD_ATTR_WB)
+ printf("WB ");
+ if (p->md_attr & EFI_MD_ATTR_UCE)
+ printf("UCE ");
+ if (p->md_attr & EFI_MD_ATTR_WP)
+ printf("WP ");
+ if (p->md_attr & EFI_MD_ATTR_RP)
+ printf("RP ");
+ if (p->md_attr & EFI_MD_ATTR_XP)
+ printf("XP ");
+ if (p->md_attr & EFI_MD_ATTR_RT)
+ printf("RUNTIME");
+ printf("\n");
+ }
+
+ switch (p->md_type) {
+ case EFI_MD_TYPE_CODE:
+ case EFI_MD_TYPE_DATA:
+ case EFI_MD_TYPE_BS_CODE:
+ case EFI_MD_TYPE_BS_DATA:
+ case EFI_MD_TYPE_FREE:
+ /*
+ * We're allowed to use any entry with these types.
+ */
+ break;
+ default:
+ continue;
+ }
+
+ if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE),
+ physmap, physmap_idxp))
+ break;
+ }
+}
+
+#ifdef FDT
+static void
+try_load_dtb(caddr_t kmdp)
+{
+ vm_offset_t dtbp;
+
+ dtbp = MD_FETCH(kmdp, MODINFOMD_DTBP, vm_offset_t);
+ if (dtbp == (vm_offset_t)NULL) {
+ printf("ERROR loading DTB\n");
+ return;
+ }
+
+ if (OF_install(OFW_FDT, 0) == FALSE)
+ panic("Cannot install FDT");
+
+ if (OF_init((void *)dtbp) != 0)
+ panic("OF_init failed with the found device tree");
+}
+#endif
+
+static void
+cache_setup(void)
+{
+ int dcache_line_shift, icache_line_shift;
+ uint32_t ctr_el0;
+
+ ctr_el0 = READ_SPECIALREG(ctr_el0);
+
+ /* Read the log2 words in each D cache line */
+ dcache_line_shift = CTR_DLINE_SIZE(ctr_el0);
+ /* Get the D cache line size */
+ dcache_line_size = sizeof(int) << dcache_line_shift;
+
+ /* And the same for the I cache */
+ icache_line_shift = CTR_ILINE_SIZE(ctr_el0);
+ icache_line_size = sizeof(int) << icache_line_shift;
+
+ idcache_line_size = MIN(dcache_line_size, icache_line_size);
+}
+
+void
+initarm(struct arm64_bootparams *abp)
+{
+ struct efi_map_header *efihdr;
+ struct pcpu *pcpup;
+ vm_offset_t lastaddr;
+ caddr_t kmdp;
+ vm_paddr_t mem_len;
+ int i;
+
+ /* Set the module data location */
+ preload_metadata = (caddr_t)(uintptr_t)(abp->modulep);
+
+ /* Find the kernel address */
+ kmdp = preload_search_by_type("elf kernel");
+ if (kmdp == NULL)
+ kmdp = preload_search_by_type("elf64 kernel");
+
+ boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
+ kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *);
+
+#ifdef FDT
+ try_load_dtb(kmdp);
+#endif
+
+ /* Find the address to start allocating from */
+ lastaddr = MD_FETCH(kmdp, MODINFOMD_KERNEND, vm_offset_t);
+
+ /* Load the physical memory ranges */
+ physmap_idx = 0;
+ efihdr = (struct efi_map_header *)preload_search_info(kmdp,
+ MODINFO_METADATA | MODINFOMD_EFI_MAP);
+ add_efi_map_entries(efihdr, physmap, &physmap_idx);
+
+	/* Sum up the physical memory regions */
+ mem_len = 0;
+ for (i = 0; i < physmap_idx; i += 2)
+ mem_len += physmap[i + 1] - physmap[i];
+
+ /* Set the pcpu data, this is needed by pmap_bootstrap */
+ pcpup = &__pcpu[0];
+ pcpu_init(pcpup, 0, sizeof(struct pcpu));
+
+ /*
+ * Set the pcpu pointer with a backup in tpidr_el1 to be
+ * loaded when entering the kernel from userland.
+ */
+ __asm __volatile(
+ "mov x18, %0 \n"
+ "msr tpidr_el1, %0" :: "r"(pcpup));
+
+ PCPU_SET(curthread, &thread0);
+
+ /* Do basic tuning, hz etc */
+ init_param1();
+
+ cache_setup();
+
+ /* Bootstrap enough of pmap to enter the kernel proper */
+ pmap_bootstrap(abp->kern_l1pt, KERNBASE - abp->kern_delta,
+ lastaddr - KERNBASE);
+
+ arm_devmap_bootstrap(0, NULL);
+
+ cninit();
+
+ init_proc0(abp->kern_stack);
+ msgbufinit(msgbufp, msgbufsize);
+ mutex_init();
+ init_param2(physmem);
+
+ dbg_monitor_init();
+ kdb_init();
+
+ early_boot = 0;
+}
+
Index: sys/arm64/arm64/mem.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/mem.c
@@ -0,0 +1,47 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/malloc.h>
+#include <sys/memrange.h>
+
+#include <machine/memdev.h>
+
+struct mem_range_softc mem_range_softc;
+
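+/*
+ * Read/write handler for /dev/mem and /dev/kmem; not yet implemented,
+ * so any access panics.
+ */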
+int
+memrw(struct cdev *dev, struct uio *uio, int flags)
+{
+
+ panic("memrw");
+}
+
Index: sys/arm64/arm64/minidump_machdep.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/minidump_machdep.c
@@ -0,0 +1,50 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_watchdog.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/kernel.h>
+#include <sys/kerneldump.h>
+
+#include <machine/md_var.h>
+
+int
+minidumpsys(struct dumperinfo *di)
+{
+
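+	/* Minidump support is not yet implemented on arm64. */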
+ printf("minidumpsys\n");
+ while (1);
+}
+
Index: sys/arm64/arm64/nexus.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/nexus.c
@@ -0,0 +1,334 @@
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This code implements a `root nexus' for Arm Architecture
+ * machines. The function of the root nexus is to serve as an
+ * attachment point for both processors and buses, and to manage
+ * resources which are common to all of them. In particular,
+ * this code implements the core resource managers for interrupt
+ * requests, DMA requests (which rightfully should be a part of the
+ * ISA code but it's easier to do it here for now), I/O port addresses,
+ * and I/O memory address space.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/module.h>
+#include <machine/bus.h>
+#include <sys/rman.h>
+#include <sys/interrupt.h>
+
+#include <machine/vmparam.h>
+#include <machine/pcb.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#include <machine/resource.h>
+#include <machine/intr.h>
+
+#include "opt_platform.h"
+
+#ifdef FDT
+#include <dev/fdt/fdt_common.h>
+#include "ofw_bus_if.h"
+#endif
+
+extern struct bus_space memmap_bus;
+
+static MALLOC_DEFINE(M_NEXUSDEV, "nexusdev", "Nexus device");
+
+struct nexus_device {
+ struct resource_list nx_resources;
+};
+
+#define DEVTONX(dev) ((struct nexus_device *)device_get_ivars(dev))
+
+static struct rman mem_rman;
+
+static int nexus_probe(device_t);
+static int nexus_attach(device_t);
+static int nexus_print_child(device_t, device_t);
+static device_t nexus_add_child(device_t, u_int, const char *, int);
+static struct resource *nexus_alloc_resource(device_t, device_t, int, int *,
+ u_long, u_long, u_long, u_int);
+static int nexus_activate_resource(device_t, device_t, int, int,
+ struct resource *);
+static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig,
+ enum intr_polarity pol);
+static int nexus_deactivate_resource(device_t, device_t, int, int,
+ struct resource *);
+
+static int nexus_setup_intr(device_t dev, device_t child, struct resource *res,
+ int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep);
+static int nexus_teardown_intr(device_t, device_t, struct resource *, void *);
+
+#ifdef FDT
+static int nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent,
+ int icells, pcell_t *intr);
+#endif
+
+static device_method_t nexus_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, nexus_probe),
+ DEVMETHOD(device_attach, nexus_attach),
+ /* Bus interface */
+ DEVMETHOD(bus_print_child, nexus_print_child),
+ DEVMETHOD(bus_add_child, nexus_add_child),
+ DEVMETHOD(bus_alloc_resource, nexus_alloc_resource),
+ DEVMETHOD(bus_activate_resource, nexus_activate_resource),
+ DEVMETHOD(bus_config_intr, nexus_config_intr),
+ DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource),
+ DEVMETHOD(bus_setup_intr, nexus_setup_intr),
+ DEVMETHOD(bus_teardown_intr, nexus_teardown_intr),
+#ifdef FDT
+ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr),
+#endif
+ { 0, 0 }
+};
+
+static devclass_t nexus_devclass;
+static driver_t nexus_driver = {
+ "nexus",
+ nexus_methods,
+ 1 /* no softc */
+};
+DRIVER_MODULE(nexus, root, nexus_driver, nexus_devclass, 0, 0);
+
+static int
+nexus_probe(device_t dev)
+{
+
+ device_quiet(dev); /* suppress attach message for neatness */
+
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+nexus_attach(device_t dev)
+{
+
+ mem_rman.rm_start = 0;
+ mem_rman.rm_end = ~0ul;
+ mem_rman.rm_type = RMAN_ARRAY;
+ mem_rman.rm_descr = "I/O memory addresses";
+	if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, 0, ~0ul))
+ panic("nexus_probe mem_rman");
+
+ /*
+ * First, deal with the children we know about already
+ */
+ bus_generic_probe(dev);
+ bus_generic_attach(dev);
+
+ return (0);
+}
+
+static int
+nexus_print_child(device_t bus, device_t child)
+{
+ int retval = 0;
+
+ retval += bus_print_child_header(bus, child);
+ retval += printf("\n");
+
+ return (retval);
+}
+
+static device_t
+nexus_add_child(device_t bus, u_int order, const char *name, int unit)
+{
+ device_t child;
+ struct nexus_device *ndev;
+
+ ndev = malloc(sizeof(struct nexus_device), M_NEXUSDEV, M_NOWAIT|M_ZERO);
+ if (!ndev)
+ return (0);
+ resource_list_init(&ndev->nx_resources);
+
+ child = device_add_child_ordered(bus, order, name, unit);
+
+ /* should we free this in nexus_child_detached? */
+ device_set_ivars(child, ndev);
+
+ return (child);
+}
+
+/*
+ * Allocate a resource on behalf of child. NB: child is usually going to be a
+ * child of one of our descendants, not a direct child of nexus0.
+ * (Exceptions include footbridge.)
+ */
+static struct resource *
+nexus_alloc_resource(device_t bus, device_t child, int type, int *rid,
+ u_long start, u_long end, u_long count, u_int flags)
+{
+ struct resource *rv;
+ struct rman *rm;
+ int needactivate = flags & RF_ACTIVE;
+
+ switch (type) {
+ case SYS_RES_MEMORY:
+ case SYS_RES_IOPORT:
+ rm = &mem_rman;
+ break;
+
+ default:
+ return (0);
+ }
+
+ rv = rman_reserve_resource(rm, start, end, count, flags, child);
+ if (rv == 0)
+ return (0);
+
+ rman_set_rid(rv, *rid);
+ rman_set_bushandle(rv, rman_get_start(rv));
+
+ if (needactivate) {
+ if (bus_activate_resource(child, type, *rid, rv)) {
+ rman_release_resource(rv);
+ return (0);
+ }
+ }
+
+ return (rv);
+}
+
+static int
+nexus_config_intr(device_t dev, int irq, enum intr_trigger trig,
+ enum intr_polarity pol)
+{
+
+ return (arm_config_intr(irq, trig, pol));
+}
+
+static int
+nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags,
+ driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep)
+{
+ int error;
+
+ if ((rman_get_flags(res) & RF_SHAREABLE) == 0)
+ flags |= INTR_EXCL;
+
+ /* We depend here on rman_activate_resource() being idempotent. */
+ error = rman_activate_resource(res);
+ if (error)
+ return (error);
+
+ error = arm_setup_intr(device_get_nameunit(child), filt, intr,
+ arg, rman_get_start(res), flags, cookiep);
+
+ return (error);
+}
+
+static int
+nexus_teardown_intr(device_t dev, device_t child, struct resource *r, void *ih)
+{
+
+ return (arm_teardown_intr(ih));
+}
+
+static int
+nexus_activate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ int err;
+ bus_addr_t paddr;
+ bus_size_t psize;
+ bus_space_handle_t vaddr;
+
+ if ((err = rman_activate_resource(r)) != 0)
+ return (err);
+
+ /*
+ * If this is a memory resource, map it into the kernel.
+ */
+ if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) {
+ paddr = (bus_addr_t)rman_get_start(r);
+ psize = (bus_size_t)rman_get_size(r);
+ err = bus_space_map(&memmap_bus, paddr, psize, 0, &vaddr);
+ if (err != 0) {
+ rman_deactivate_resource(r);
+ return (err);
+ }
+ rman_set_bustag(r, &memmap_bus);
+ rman_set_virtual(r, (void *)vaddr);
+ rman_set_bushandle(r, vaddr);
+ }
+ return (0);
+}
+
+static int
+nexus_deactivate_resource(device_t bus, device_t child, int type, int rid,
+ struct resource *r)
+{
+ bus_size_t psize;
+ bus_space_handle_t vaddr;
+
+ psize = (bus_size_t)rman_get_size(r);
+ vaddr = rman_get_bushandle(r);
+
+ if (vaddr != 0) {
+ bus_space_unmap(&memmap_bus, vaddr, psize);
+ rman_set_virtual(r, NULL);
+ rman_set_bushandle(r, 0);
+ }
+
+ return (rman_deactivate_resource(r));
+}
+
+#ifdef FDT
+static int
+nexus_ofw_map_intr(device_t dev, device_t child, phandle_t iparent, int icells,
+ pcell_t *intr)
+{
+ int irq;
+
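+	/*
+	 * Assume a 3-cell specifier is a GIC one: the first cell selects
+	 * SPI (0) or PPI (1), the second gives the number within that
+	 * range, so e.g. <0 5 4> decodes to SPI 5, i.e. irq 37.
+	 */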
+ if (icells == 3) {
+ irq = intr[1];
+ if (intr[0] == 0)
+ irq += 32; /* SPI */
+ else
+ irq += 16; /* PPI */
+ } else
+ irq = intr[0];
+
+ return (irq);
+}
+#endif
+
Index: sys/arm64/arm64/pic_if.m
===================================================================
--- /dev/null
+++ sys/arm64/arm64/pic_if.m
@@ -0,0 +1,180 @@
+#-
+# Copyright (c) 1998 Doug Rabson
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# from: src/sys/kern/bus_if.m,v 1.21 2002/04/21 11:16:10 markm Exp
+# $FreeBSD: projects/arm64/sys/powerpc/powerpc/pic_if.m 257059 2013-10-24 15:37:32Z nwhitehorn $
+#
+
+#include <sys/bus.h>
+#include <sys/cpuset.h>
+#include <machine/frame.h>
+
+INTERFACE pic;
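+
+#
+# Each root interrupt controller driver (e.g. a GIC driver) implements
+# this interface; machine-independent interrupt code invokes the methods
+# through the PIC_*() wrappers generated from this file.
+#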
+
+CODE {
+ static pic_translate_code_t pic_translate_code_default;
+
+ static void pic_translate_code_default(device_t dev, u_int irq,
+ int code, enum intr_trigger *trig, enum intr_polarity *pol)
+ {
+ *trig = INTR_TRIGGER_CONFORM;
+ *pol = INTR_POLARITY_CONFORM;
+ }
+
+ static void pic_pre_ithread(device_t dev, u_int irq)
+ {
+ PIC_MASK(dev, irq);
+ PIC_EOI(dev, irq);
+ }
+
+ static void pic_post_ithread(device_t dev, u_int irq)
+ {
+ PIC_UNMASK(dev, irq);
+ }
+
+ static void pic_post_filter(device_t dev, u_int irq)
+ {
+ PIC_EOI(dev, irq);
+ }
+};
+
+METHOD void bind {
+ device_t dev;
+ u_int irq;
+ cpuset_t cpumask;
+};
+
+METHOD void translate_code {
+ device_t dev;
+ u_int irq;
+ int code;
+ enum intr_trigger *trig;
+ enum intr_polarity *pol;
+} DEFAULT pic_translate_code_default;
+
+METHOD void config {
+ device_t dev;
+ u_int irq;
+ enum intr_trigger trig;
+ enum intr_polarity pol;
+};
+
+METHOD void dispatch {
+ device_t dev;
+ struct trapframe *tf;
+};
+
+METHOD void enable {
+ device_t dev;
+ u_int irq;
+ u_int vector;
+};
+
+METHOD void pre_ithread {
+ device_t dev;
+ u_int irq;
+} DEFAULT pic_pre_ithread;
+
+METHOD void post_ithread {
+ device_t dev;
+ u_int irq;
+} DEFAULT pic_post_ithread;
+
+METHOD void post_filter {
+ device_t dev;
+ u_int irq;
+} DEFAULT pic_post_filter;
+
+METHOD void eoi {
+ device_t dev;
+ u_int irq;
+};
+
+METHOD void ipi {
+ device_t dev;
+ u_int cpu;
+};
+
+METHOD void mask {
+ device_t dev;
+ u_int irq;
+};
+
+METHOD void unmask {
+ device_t dev;
+ u_int irq;
+};
+
+METHOD void init_secondary {
+ device_t dev;
+};
+
+METHOD void ipi_send {
+ device_t dev;
+ cpuset_t cpus;
+ u_int ipi;
+};
+
+METHOD int alloc_msi {
+ device_t dev;
+ device_t pci_dev;
+ int count;
+ int *irqs;
+};
+
+METHOD int alloc_msix {
+ device_t dev;
+ device_t pci_dev;
+ int *irq;
+};
+
+METHOD int map_msi {
+ device_t dev;
+ device_t pci_dev;
+ int irq;
+ uint64_t *addr;
+ uint32_t *data;
+};
+
+METHOD int map_msix {
+ device_t dev;
+ device_t pci_dev;
+ int irq;
+ uint64_t *addr;
+ uint32_t *data;
+};
+
+METHOD int release_msi {
+ device_t dev;
+ device_t pci_dev;
+ int count;
+ int *irqs;
+};
+
+METHOD int release_msix {
+ device_t dev;
+ device_t pci_dev;
+ int irq;
+};
Index: sys/arm64/arm64/pmap.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/pmap.c
@@ -0,0 +1,3037 @@
+/*-
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ * Copyright (c) 1994 John S. Dyson
+ * All rights reserved.
+ * Copyright (c) 1994 David Greenman
+ * All rights reserved.
+ * Copyright (c) 2003 Peter Wemm
+ * All rights reserved.
+ * Copyright (c) 2005-2010 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ * Copyright (c) 2014 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * This software was developed by Andrew Turner under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: @(#)pmap.c 7.7 (Berkeley) 5/12/91
+ */
+/*-
+ * Copyright (c) 2003 Networks Associates Technology, Inc.
+ * All rights reserved.
+ *
+ * This software was developed for the FreeBSD Project by Jake Burkholder,
+ * Safeport Network Services, and Network Associates Laboratories, the
+ * Security Research Division of Network Associates, Inc. under
+ * DARPA/SPAWAR contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA
+ * CHATS research program.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+/*
+ * Manages physical address maps.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ * this module may delay invalidate or reduced protection
+ * operations until such time as they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/ktr.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mman.h>
+#include <sys/msgbuf.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rwlock.h>
+#include <sys/sx.h>
+#include <sys/vmem.h>
+#include <sys/vmmeter.h>
+#include <sys/sched.h>
+#include <sys/sysctl.h>
+#include <sys/_unrhdr.h>
+#include <sys/smp.h>
+
+#include <vm/vm.h>
+#include <vm/vm_param.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_pageout.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_radix.h>
+#include <vm/vm_reserv.h>
+#include <vm/uma.h>
+
+#include <machine/machdep.h>
+#include <machine/md_var.h>
+#include <machine/pcb.h>
+
+#define NPDEPG (PAGE_SIZE/(sizeof (pd_entry_t)))
+#define NUPDE (NPDEPG * NPDEPG)
+#define NUSERPGTBLS (NUPDE + NPDEPG)
+
+#if !defined(DIAGNOSTIC)
+#ifdef __GNUC_GNU_INLINE__
+#define PMAP_INLINE __attribute__((__gnu_inline__)) inline
+#else
+#define PMAP_INLINE extern inline
+#endif
+#else
+#define PMAP_INLINE
+#endif
+
+/*
+ * These index the memory attribute fields of the mair_el1 register,
+ * which is set up in locore.S.
+ */
+#define DEVICE_MEMORY 0
+#define UNCACHED_MEMORY 1
+#define CACHED_MEMORY 2
+
+#ifdef PV_STATS
+#define PV_STAT(x) do { x ; } while (0)
+#else
+#define PV_STAT(x) do { } while (0)
+#endif
+
+#define pmap_l2_pindex(v) ((v) >> L2_SHIFT)
+
+#define NPV_LIST_LOCKS MAXCPU
+
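+/* pv list locks are chosen by hashing on the page's physical address. */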
+#define PHYS_TO_PV_LIST_LOCK(pa) \
+ (&pv_list_locks[pa_index(pa) % NPV_LIST_LOCKS])
+
+#define CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa) do { \
+ struct rwlock **_lockp = (lockp); \
+ struct rwlock *_new_lock; \
+ \
+ _new_lock = PHYS_TO_PV_LIST_LOCK(pa); \
+ if (_new_lock != *_lockp) { \
+ if (*_lockp != NULL) \
+ rw_wunlock(*_lockp); \
+ *_lockp = _new_lock; \
+ rw_wlock(*_lockp); \
+ } \
+} while (0)
+
+#define CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m) \
+ CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, VM_PAGE_TO_PHYS(m))
+
+#define RELEASE_PV_LIST_LOCK(lockp) do { \
+ struct rwlock **_lockp = (lockp); \
+ \
+ if (*_lockp != NULL) { \
+ rw_wunlock(*_lockp); \
+ *_lockp = NULL; \
+ } \
+} while (0)
+
+#define VM_PAGE_TO_PV_LIST_LOCK(m) \
+ PHYS_TO_PV_LIST_LOCK(VM_PAGE_TO_PHYS(m))
+
+struct pmap kernel_pmap_store;
+
+vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */
+vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */
+vm_offset_t kernel_vm_end = 0;
+
+struct msgbuf *msgbufp = NULL;
+
+static struct rwlock_padalign pvh_global_lock;
+
+/*
+ * Data for the pv entry allocation mechanism
+ */
+static TAILQ_HEAD(pch, pv_chunk) pv_chunks = TAILQ_HEAD_INITIALIZER(pv_chunks);
+static struct mtx pv_chunks_mutex;
+static struct rwlock pv_list_locks[NPV_LIST_LOCKS];
+
+static void free_pv_chunk(struct pv_chunk *pc);
+static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
+static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
+static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
+static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
+static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
+ vm_offset_t va);
+static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
+ vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
+static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
+ pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
+static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
+ vm_page_t m, struct rwlock **lockp);
+
+static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
+ struct rwlock **lockp);
+
+static void _pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ struct spglist *free);
+static int pmap_unuse_l3(pmap_t, vm_offset_t, pd_entry_t, struct spglist *);
+
+/********************/
+/* Inline functions */
+/********************/
+
+static __inline void
+pagecopy(void *s, void *d)
+{
+
+ memcpy(d, s, PAGE_SIZE);
+}
+
+static __inline void
+pagezero(void *p)
+{
+
+ bzero(p, PAGE_SIZE);
+}
+
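+/*
+ * With 4KiB pages the pmap uses a three level page table: an L1 entry
+ * maps 1GiB, an L2 entry 2MiB and an L3 entry a single 4KiB page.
+ * The helpers below walk from the pmap's L1 table down to the entry
+ * covering a given virtual address.
+ */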
+#define pmap_l1_index(va) (((va) >> L1_SHIFT) & Ln_ADDR_MASK)
+#define pmap_l2_index(va) (((va) >> L2_SHIFT) & Ln_ADDR_MASK)
+#define pmap_l3_index(va) (((va) >> L3_SHIFT) & Ln_ADDR_MASK)
+
+static __inline pd_entry_t *
+pmap_l1(pmap_t pmap, vm_offset_t va)
+{
+
+ return (&pmap->pm_l1[pmap_l1_index(va)]);
+}
+
+static __inline pd_entry_t *
+pmap_l1_to_l2(pd_entry_t *l1, vm_offset_t va)
+{
+ pd_entry_t *l2;
+
+ l2 = (pd_entry_t *)PHYS_TO_DMAP(*l1 & ~ATTR_MASK);
+ return (&l2[pmap_l2_index(va)]);
+}
+
+static __inline pd_entry_t *
+pmap_l2(pmap_t pmap, vm_offset_t va)
+{
+ pd_entry_t *l1;
+
+ l1 = pmap_l1(pmap, va);
+ if ((*l1 & ATTR_DESCR_MASK) != L1_TABLE)
+ return (NULL);
+
+ return (pmap_l1_to_l2(l1, va));
+}
+
+static __inline pt_entry_t *
+pmap_l2_to_l3(pd_entry_t *l2, vm_offset_t va)
+{
+ pt_entry_t *l3;
+
+	l3 = (pt_entry_t *)PHYS_TO_DMAP(*l2 & ~ATTR_MASK);
+ return (&l3[pmap_l3_index(va)]);
+}
+
+static __inline pt_entry_t *
+pmap_l3(pmap_t pmap, vm_offset_t va)
+{
+ pd_entry_t *l2;
+
+ l2 = pmap_l2(pmap, va);
+ if (l2 == NULL || (*l2 & ATTR_DESCR_MASK) != L2_TABLE)
+ return (NULL);
+
+ return (pmap_l2_to_l3(l2, va));
+}
+
+/*
+ * These load the old table data and store the new value.
+ * They need to be atomic as the System MMU may write to the table at
+ * the same time as the CPU.
+ */
+#define pmap_load_store(table, entry) atomic_swap_64(table, entry)
+#define pmap_set(table, mask) atomic_set_64(table, mask)
+#define pmap_load_clear(table) atomic_swap_64(table, 0)
+#define pmap_load(table) (*table)
+
+static __inline int
+pmap_is_current(pmap_t pmap)
+{
+
+ return ((pmap == pmap_kernel()) ||
+ (pmap == curthread->td_proc->p_vmspace->vm_map.pmap));
+}
+
+static __inline int
+pmap_l3_valid(pt_entry_t l3)
+{
+
+ return ((l3 & ATTR_DESCR_MASK) == L3_PAGE);
+}
+
+static __inline int
+pmap_l3_valid_cacheable(pt_entry_t l3)
+{
+
+ return (((l3 & ATTR_DESCR_MASK) == L3_PAGE) &&
+ ((l3 & ATTR_IDX_MASK) == ATTR_IDX(CACHED_MEMORY)));
+}
+
+#define PTE_SYNC(pte) cpu_dcache_wb_range((vm_offset_t)pte, sizeof(*pte))
+
+/*
+ * Checks if the page is dirty. We currently lack proper tracking of this
+ * on arm64, so for now assume that a page mapped read/write which has
+ * been accessed is dirty.
+ */
+static inline int
+pmap_page_dirty(pt_entry_t pte)
+{
+
+ return ((pte & (ATTR_AF | ATTR_AP_RW_BIT)) ==
+ (ATTR_AF | ATTR_AP(ATTR_AP_RW)));
+}
+
+static __inline void
+pmap_resident_count_inc(pmap_t pmap, int count)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ pmap->pm_stats.resident_count += count;
+}
+
+static __inline void
+pmap_resident_count_dec(pmap_t pmap, int count)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ KASSERT(pmap->pm_stats.resident_count >= count,
+ ("pmap %p resident count underflow %ld %d", pmap,
+ pmap->pm_stats.resident_count, count));
+ pmap->pm_stats.resident_count -= count;
+}
+
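+/*
+ * Return the bootstrap L2 table, built in locore.S, along with the L1
+ * and L2 slots covering the given early kernel virtual address.
+ */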
+static pt_entry_t *
+pmap_early_page_idx(vm_offset_t l1pt, vm_offset_t va, u_int *l1_slot,
+ u_int *l2_slot)
+{
+ pt_entry_t *l2;
+ pd_entry_t *l1;
+
+ l1 = (pd_entry_t *)l1pt;
+ *l1_slot = (va >> L1_SHIFT) & Ln_ADDR_MASK;
+
+	/* Check that locore used an L1 table mapping */
+ KASSERT((l1[*l1_slot] & ATTR_DESCR_MASK) == L1_TABLE,
+ ("Invalid bootstrap L1 table"));
+ /* Find the address of the L2 table */
+ l2 = (pt_entry_t *)init_pt_va;
+ *l2_slot = pmap_l2_index(va);
+
+ return (l2);
+}
+
+static vm_paddr_t
+pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va)
+{
+ u_int l1_slot, l2_slot;
+ pt_entry_t *l2;
+
+ l2 = pmap_early_page_idx(l1pt, va, &l1_slot, &l2_slot);
+
+ return ((l2[l2_slot] & ~ATTR_MASK) + (va & L2_OFFSET));
+}
+
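+/*
+ * Build the direct map: cover physical memory with 1GiB L1 block
+ * entries so PHYS_TO_DMAP() works before the VM system is up.
+ */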
+static void
+pmap_bootstrap_dmap(vm_offset_t l1pt)
+{
+ vm_offset_t va;
+ vm_paddr_t pa;
+ pd_entry_t *l1;
+ u_int l1_slot;
+
+ va = DMAP_MIN_ADDRESS;
+ l1 = (pd_entry_t *)l1pt;
+ l1_slot = pmap_l1_index(DMAP_MIN_ADDRESS);
+
+ for (pa = 0; va < DMAP_MAX_ADDRESS;
+ pa += L1_SIZE, va += L1_SIZE, l1_slot++) {
+ KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
+
+ /*
+ * TODO: Turn the cache on here when we have cache
+ * flushing code.
+ */
+ pmap_load_store(&l1[l1_slot],
+ (pa & ~L1_OFFSET) | ATTR_AF | L1_BLOCK |
+ ATTR_IDX(CACHED_MEMORY));
+ }
+
+ cpu_dcache_wb_range((vm_offset_t)l1, PAGE_SIZE);
+ cpu_tlb_flushID();
+}
+
+static vm_offset_t
+pmap_bootstrap_l2(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l2_start)
+{
+ vm_offset_t l2pt;
+ vm_paddr_t pa;
+ pd_entry_t *l1;
+ u_int l1_slot;
+
+ KASSERT((va & L1_OFFSET) == 0, ("Invalid virtual address"));
+
+ l1 = (pd_entry_t *)l1pt;
+ l1_slot = pmap_l1_index(va);
+ l2pt = l2_start;
+
+ for (; va < VM_MAX_KERNEL_ADDRESS; l1_slot++, va += L1_SIZE) {
+ KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index"));
+
+ pa = pmap_early_vtophys(l1pt, l2pt);
+ pmap_load_store(&l1[l1_slot],
+ (pa & ~Ln_TABLE_MASK) | L1_TABLE);
+ l2pt += PAGE_SIZE;
+ }
+
+ /* Clean the L2 page table */
+ memset((void *)l2_start, 0, l2pt - l2_start);
+ cpu_dcache_wb_range(l2_start, l2pt - l2_start);
+
+ /* Flush the l1 table to ram */
+ cpu_dcache_wb_range((vm_offset_t)l1, PAGE_SIZE);
+
+	return (l2pt);
+}
+
+static vm_offset_t
+pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm_offset_t l3_start)
+{
+ vm_offset_t l2pt, l3pt;
+ vm_paddr_t pa;
+ pd_entry_t *l2;
+ u_int l2_slot;
+
+ KASSERT((va & L2_OFFSET) == 0, ("Invalid virtual address"));
+
+ l2 = pmap_l2(kernel_pmap, va);
+ l2 = (pd_entry_t *)((uintptr_t)l2 & ~(PAGE_SIZE - 1));
+ l2pt = (vm_offset_t)l2;
+ l2_slot = pmap_l2_index(va);
+ l3pt = l3_start;
+
+ for (; va < VM_MAX_KERNEL_ADDRESS; l2_slot++, va += L2_SIZE) {
+ KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index"));
+
+ pa = pmap_early_vtophys(l1pt, l3pt);
+ pmap_load_store(&l2[l2_slot],
+ (pa & ~Ln_TABLE_MASK) | L2_TABLE);
+ l3pt += PAGE_SIZE;
+ }
+
+	/* Clean the L3 page tables */
+ memset((void *)l3_start, 0, l3pt - l3_start);
+ cpu_dcache_wb_range(l3_start, l3pt - l3_start);
+
+ cpu_dcache_wb_range((vm_offset_t)l2, PAGE_SIZE);
+
+	return (l3pt);
+}
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ */
+void
+pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen)
+{
+ u_int l1_slot, l2_slot, avail_slot, map_slot, used_map_slot;
+ uint64_t kern_delta;
+ pt_entry_t *l2;
+ vm_offset_t va, freemempos;
+ vm_offset_t dpcpu, msgbufpv;
+ vm_paddr_t pa;
+
+ kern_delta = KERNBASE - kernstart;
+ physmem = 0;
+
+ printf("pmap_bootstrap %lx %lx %lx\n", l1pt, kernstart, kernlen);
+ printf("%lx\n", l1pt);
+ printf("%lx\n", (KERNBASE >> L1_SHIFT) & Ln_ADDR_MASK);
+
+ /* Set this early so we can use the pagetable walking functions */
+ kernel_pmap_store.pm_l1 = (pd_entry_t *)l1pt;
+ PMAP_LOCK_INIT(kernel_pmap);
+
+ /*
+ * Initialize the global pv list lock.
+ */
+ rw_init(&pvh_global_lock, "pmap pv global");
+
+ /* Create a direct map region early so we can use it for pa -> va */
+ pmap_bootstrap_dmap(l1pt);
+
+ va = KERNBASE;
+ pa = KERNBASE - kern_delta;
+
+ /*
+	 * Start to initialize phys_avail by copying from physmap
+ * up to the physical address KERNBASE points at.
+ */
+ map_slot = avail_slot = 0;
+ for (; map_slot < (physmap_idx * 2); map_slot += 2) {
+ if (physmap[map_slot] == physmap[map_slot + 1])
+ continue;
+
+ if (physmap[map_slot] <= pa &&
+ physmap[map_slot + 1] > pa)
+ break;
+
+ phys_avail[avail_slot] = physmap[map_slot];
+ phys_avail[avail_slot + 1] = physmap[map_slot + 1];
+ physmem += (phys_avail[avail_slot + 1] -
+ phys_avail[avail_slot]) >> PAGE_SHIFT;
+ avail_slot += 2;
+ }
+
+ /* Add the memory before the kernel */
+ if (physmap[avail_slot] < pa) {
+ phys_avail[avail_slot] = physmap[map_slot];
+ phys_avail[avail_slot + 1] = pa;
+ physmem += (phys_avail[avail_slot + 1] -
+ phys_avail[avail_slot]) >> PAGE_SHIFT;
+ avail_slot += 2;
+ }
+ used_map_slot = map_slot;
+
+ /*
+ * Read the page table to find out what is already mapped.
+ * This assumes we have mapped a block of memory from KERNBASE
+ * using a single L1 entry.
+ */
+ l2 = pmap_early_page_idx(l1pt, KERNBASE, &l1_slot, &l2_slot);
+
+ /* Sanity check the index, KERNBASE should be the first VA */
+ KASSERT(l2_slot == 0, ("The L2 index is non-zero"));
+
+ /* Find how many pages we have mapped */
+ for (; l2_slot < Ln_ENTRIES; l2_slot++) {
+ if ((l2[l2_slot] & ATTR_DESCR_MASK) == 0)
+ break;
+
+ /* Check locore used L2 blocks */
+ KASSERT((l2[l2_slot] & ATTR_DESCR_MASK) == L2_BLOCK,
+ ("Invalid bootstrap L2 table"));
+ KASSERT((l2[l2_slot] & ~ATTR_MASK) == pa,
+ ("Incorrect PA in L2 table"));
+
+ va += L2_SIZE;
+ pa += L2_SIZE;
+ }
+
+ va = roundup2(va, L1_SIZE);
+
+ freemempos = KERNBASE + kernlen;
+ freemempos = roundup2(freemempos, PAGE_SIZE);
+ /* Create the l2 tables up to VM_MAX_KERNEL_ADDRESS */
+ freemempos = pmap_bootstrap_l2(l1pt, va, freemempos);
+ /* And the l3 tables for the early devmap */
+ freemempos = pmap_bootstrap_l3(l1pt,
+ VM_MAX_KERNEL_ADDRESS - L2_SIZE, freemempos);
+
+ cpu_tlb_flushID();
+
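+/*
+ * Carve zeroed allocations for early data structures out of the free
+ * memory directly after the kernel image.
+ */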
+#define alloc_pages(var, np) \
+ (var) = freemempos; \
+	freemempos += ((np) * PAGE_SIZE);	\
+ memset((char *)(var), 0, ((np) * PAGE_SIZE));
+
+ /* Allocate dynamic per-cpu area. */
+ alloc_pages(dpcpu, DPCPU_SIZE / PAGE_SIZE);
+ dpcpu_init((void *)dpcpu, 0);
+
+ /* Allocate memory for the msgbuf, e.g. for /sbin/dmesg */
+ alloc_pages(msgbufpv, round_page(msgbufsize) / PAGE_SIZE);
+ msgbufp = (void *)msgbufpv;
+
+ virtual_avail = roundup2(freemempos, L1_SIZE);
+ virtual_end = VM_MAX_KERNEL_ADDRESS - L2_SIZE;
+ kernel_vm_end = virtual_avail;
+
+ pa = pmap_early_vtophys(l1pt, freemempos);
+
+	/* Finish initializing phys_avail */
+ map_slot = used_map_slot;
+ for (; avail_slot < (PHYS_AVAIL_SIZE - 2) &&
+ map_slot < (physmap_idx * 2); map_slot += 2) {
+ if (physmap[map_slot] == physmap[map_slot + 1])
+ continue;
+
+ /* Have we used the current range? */
+ if (physmap[map_slot + 1] <= pa)
+ continue;
+
+ /* Do we need to split the entry? */
+ if (physmap[map_slot] < pa) {
+ phys_avail[avail_slot] = pa;
+ phys_avail[avail_slot + 1] = physmap[map_slot + 1];
+ } else {
+ phys_avail[avail_slot] = physmap[map_slot];
+ phys_avail[avail_slot + 1] = physmap[map_slot + 1];
+ }
+ physmem += (phys_avail[avail_slot + 1] -
+ phys_avail[avail_slot]) >> PAGE_SHIFT;
+
+ avail_slot += 2;
+ }
+ phys_avail[avail_slot] = 0;
+ phys_avail[avail_slot + 1] = 0;
+
+ /*
+ * Maxmem isn't the "maximum memory", it's one larger than the
+ * highest page of the physical address space. It should be
+ * called something like "Maxphyspage".
+ */
+ Maxmem = atop(phys_avail[avail_slot - 1]);
+
+ cpu_tlb_flushID();
+}
+
+/*
+ * Initialize a vm_page's machine-dependent fields.
+ */
+void
+pmap_page_init(vm_page_t m)
+{
+
+ TAILQ_INIT(&m->md.pv_list);
+ m->md.pv_memattr = VM_MEMATTR_WRITE_BACK;
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void
+pmap_init(void)
+{
+ int i;
+
+ /*
+ * Initialize the pv chunk list mutex.
+ */
+ mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
+
+ /*
+ * Initialize the pool of pv list locks.
+ */
+ for (i = 0; i < NPV_LIST_LOCKS; i++)
+ rw_init(&pv_list_locks[i], "pmap pv list");
+}
+
+/*
+ * Normal, non-SMP, invalidation functions.
+ * We inline these within pmap.c for speed.
+ */
+PMAP_INLINE void
+pmap_invalidate_page(pmap_t pmap, vm_offset_t va)
+{
+
+ sched_pin();
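+	/*
+	 * The tlbi instruction takes the VA shifted right by the page
+	 * shift; vaae1is invalidates that address for all ASIDs at EL1,
+	 * inner shareable.
+	 */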
+ __asm __volatile(
+ "dsb sy \n"
+ "tlbi vaae1is, %0 \n"
+ "dsb sy \n"
+ "isb \n"
+ : : "r"(va >> PAGE_SHIFT));
+ sched_unpin();
+}
+
+PMAP_INLINE void
+pmap_invalidate_range(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ vm_offset_t addr;
+
+ sched_pin();
+ sva >>= PAGE_SHIFT;
+ eva >>= PAGE_SHIFT;
+ __asm __volatile("dsb sy");
+ for (addr = sva; addr < eva; addr++) {
+ __asm __volatile(
+ "tlbi vaae1is, %0" : : "r"(addr));
+ }
+ __asm __volatile(
+ "dsb sy \n"
+ "isb \n");
+ sched_unpin();
+}
+
+PMAP_INLINE void
+pmap_invalidate_all(pmap_t pmap)
+{
+
+ sched_pin();
+ __asm __volatile(
+ "dsb sy \n"
+ "tlbi vmalle1is \n"
+ "dsb sy \n"
+ "isb \n");
+ sched_unpin();
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+vm_paddr_t
+pmap_extract(pmap_t pmap, vm_offset_t va)
+{
+ pd_entry_t *l2p, l2;
+ pt_entry_t *l3p, l3;
+ vm_paddr_t pa;
+
+ pa = 0;
+ PMAP_LOCK(pmap);
+ /*
+	 * Start with the l2 table. We are unable to allocate
+ * pages in the l1 table.
+ */
+ l2p = pmap_l2(pmap, va);
+ if (l2p != NULL) {
+ l2 = *l2p;
+ if ((l2 & ATTR_DESCR_MASK) == L2_TABLE) {
+ l3p = pmap_l2_to_l3(l2p, va);
+ if (l3p != NULL) {
+ l3 = *l3p;
+
+ if ((l3 & ATTR_DESCR_MASK) == L3_PAGE)
+ pa = (l3 & ~ATTR_MASK) |
+ (va & L3_OFFSET);
+ }
+ } else if ((l2 & ATTR_DESCR_MASK) == L2_BLOCK)
+ pa = (l2 & ~ATTR_MASK) | (va & L2_OFFSET);
+ }
+ PMAP_UNLOCK(pmap);
+ return (pa);
+}
+
+/*
+ * Routine: pmap_extract_and_hold
+ * Function:
+ * Atomically extract and hold the physical page
+ * with the given pmap and virtual address pair
+ * if that mapping permits the given protection.
+ */
+vm_page_t
+pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+{
+ pt_entry_t *l3p, l3;
+ vm_paddr_t pa;
+ vm_page_t m;
+
+ pa = 0;
+ m = NULL;
+ PMAP_LOCK(pmap);
+retry:
+ l3p = pmap_l3(pmap, va);
+ if (l3p != NULL && (l3 = *l3p) != 0) {
+ if (((l3 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) ||
+ ((prot & VM_PROT_WRITE) == 0)) {
+ if (vm_page_pa_tryrelock(pmap, l3 & ~ATTR_MASK, &pa))
+ goto retry;
+ m = PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK);
+ vm_page_hold(m);
+ }
+ }
+ PA_UNLOCK_COND(pa);
+ PMAP_UNLOCK(pmap);
+ return (m);
+}
+
+vm_paddr_t
+pmap_kextract(vm_offset_t va)
+{
+ pd_entry_t *l2;
+ pt_entry_t *l3;
+ vm_paddr_t pa;
+
+ if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS) {
+ pa = DMAP_TO_PHYS(va);
+ } else {
+ l2 = pmap_l2(kernel_pmap, va);
+ if (l2 == NULL)
+ panic("pmap_kextract: No l2");
+ if ((*l2 & ATTR_DESCR_MASK) == L2_BLOCK)
+ return ((*l2 & ~ATTR_MASK) | (va & L2_OFFSET));
+
+ l3 = pmap_l2_to_l3(l2, va);
+ if (l3 == NULL)
+ panic("pmap_kextract: No l3...");
+ pa = (*l3 & ~ATTR_MASK) | (va & PAGE_MASK);
+ }
+ return (pa);
+}
+
+/***************************************************
+ * Low level mapping routines.....
+ ***************************************************/
+
+void
+pmap_kenter_device(vm_offset_t va, vm_paddr_t pa)
+{
+ pt_entry_t *l3;
+
+ KASSERT((pa & L3_OFFSET) == 0,
+ ("pmap_kenter_device: Invalid physical address"));
+ KASSERT((va & L3_OFFSET) == 0,
+ ("pmap_kenter_device: Invalid virtual address"));
+ l3 = pmap_l3(kernel_pmap, va);
+ KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va));
+ pmap_load_store(l3, (pa & ~L3_OFFSET) | ATTR_AF | L3_PAGE |
+ ATTR_IDX(DEVICE_MEMORY));
+ PTE_SYNC(l3);
+}
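+
+/*
+ * A minimal usage sketch (hypothetical addresses): mapping a page-sized
+ * UART at physical 0x09000000 would be
+ *
+ *	pmap_kenter_device(va, 0x09000000);
+ *
+ * with both addresses page-aligned, as the KASSERTs above require;
+ * larger ranges are mapped a page at a time, as in
+ * arm_devmap_bootstrap().
+ */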
+
+/*
+ * Remove a page from the kernel pagetables.
+ * Note: not SMP coherent.
+ */
+PMAP_INLINE void
+pmap_kremove(vm_offset_t va)
+{
+ pt_entry_t *l3;
+
+ l3 = pmap_l3(kernel_pmap, va);
+ KASSERT(l3 != NULL, ("pmap_kremove: Invalid address"));
+
+ if (pmap_l3_valid_cacheable(pmap_load(l3)))
+ cpu_dcache_wb_range(va, L3_SIZE);
+ pmap_load_clear(l3);
+ PTE_SYNC(l3);
+}
+
+/*
+ * Used to map a range of physical addresses into kernel
+ * virtual address space.
+ *
+ * The value passed in '*virt' is a suggested virtual address for
+ * the mapping. Architectures which can support a direct-mapped
+ * physical to virtual region can return the appropriate address
+ * within that region, leaving '*virt' unchanged. Other
+ * architectures should map the pages starting at '*virt' and
+ * update '*virt' with the first usable address after the mapped
+ * region.
+ */
+vm_offset_t
+pmap_map(vm_offset_t *virt, vm_paddr_t start, vm_paddr_t end, int prot)
+{
+	return (PHYS_TO_DMAP(start));
+}
+
+/*
+ * Add a list of wired pages to the kva.
+ * This routine is only used for temporary
+ * kernel mappings that do not need to have
+ * page modification or references recorded.
+ * Note that old mappings are simply written
+ * over. The page *must* be wired.
+ * Note: SMP coherent. Uses a ranged shootdown IPI.
+ */
+void
+pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
+{
+ pt_entry_t *l3, pa;
+ vm_offset_t va;
+ vm_page_t m;
+ int i;
+
+ va = sva;
+ for (i = 0; i < count; i++) {
+ m = ma[i];
+ pa = VM_PAGE_TO_PHYS(m) | ATTR_AF |
+ ATTR_IDX(m->md.pv_memattr) | ATTR_AP(ATTR_AP_RW) | L3_PAGE;
+ l3 = pmap_l3(kernel_pmap, va);
+ pmap_load_store(l3, pa);
+ PTE_SYNC(l3);
+
+ va += L3_SIZE;
+ }
+}
+
+/*
+ * This routine tears out page mappings from the
+ * kernel -- it is meant only for temporary mappings.
+ * Note: SMP coherent. Uses a ranged shootdown IPI.
+ */
+void
+pmap_qremove(vm_offset_t sva, int count)
+{
+ vm_offset_t va;
+
+ va = sva;
+ while (count-- > 0) {
+ KASSERT(va >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", va));
+ pmap_kremove(va);
+ va += PAGE_SIZE;
+ }
+ pmap_invalidate_range(kernel_pmap, sva, va);
+}
+
+/***************************************************
+ * Page table page management routines.....
+ ***************************************************/
+static __inline void
+pmap_free_zero_pages(struct spglist *free)
+{
+ vm_page_t m;
+
+ while ((m = SLIST_FIRST(free)) != NULL) {
+ SLIST_REMOVE_HEAD(free, plinks.s.ss);
+ /* Preserve the page's PG_ZERO setting. */
+ vm_page_free_toq(m);
+ }
+}
+
+/*
+ * Schedule the specified unused page table page to be freed. Specifically,
+ * add the page to the specified list of pages that will be released to the
+ * physical memory manager after the TLB has been updated.
+ */
+static __inline void
+pmap_add_delayed_free_list(vm_page_t m, struct spglist *free,
+ boolean_t set_PG_ZERO)
+{
+
+ if (set_PG_ZERO)
+ m->flags |= PG_ZERO;
+ else
+ m->flags &= ~PG_ZERO;
+ SLIST_INSERT_HEAD(free, m, plinks.s.ss);
+}
+
+/*
+ * Decrements a page table page's wire count, which is used to record the
+ * number of valid page table entries within the page. If the wire count
+ * drops to zero, then the page table page is unmapped. Returns TRUE if the
+ * page table page was unmapped and FALSE otherwise.
+ */
+static inline boolean_t
+pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
+{
+
+ --m->wire_count;
+ if (m->wire_count == 0) {
+ _pmap_unwire_l3(pmap, va, m, free);
+ return (TRUE);
+ } else
+ return (FALSE);
+}
+
+static void
+_pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free)
+{
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ /*
+ * unmap the page table page
+ */
+ if (m->pindex >= NUPDE) {
+ /* PD page */
+ pd_entry_t *l1;
+ l1 = pmap_l1(pmap, va);
+ pmap_load_clear(l1);
+ PTE_SYNC(l1);
+ } else {
+ /* PTE page */
+ pd_entry_t *l2;
+ l2 = pmap_l2(pmap, va);
+ pmap_load_clear(l2);
+ PTE_SYNC(l2);
+ }
+ pmap_resident_count_dec(pmap, 1);
+ if (m->pindex < NUPDE) {
+ /* We just released a PT, unhold the matching PD */
+ vm_page_t pdpg;
+
+ pdpg = PHYS_TO_VM_PAGE(*pmap_l1(pmap, va) & ~ATTR_MASK);
+ pmap_unwire_l3(pmap, va, pdpg, free);
+ }
+
+ /*
+ * This is a release store so that the ordinary store unmapping
+ * the page table page is globally performed before TLB shoot-
+ * down is begun.
+ */
+ atomic_subtract_rel_int(&vm_cnt.v_wire_count, 1);
+
+ /*
+ * Put page on a list so that it is released after
+ * *ALL* TLB shootdown is done
+ */
+ pmap_add_delayed_free_list(m, free, TRUE);
+}
+
+/*
+ * After removing an l3 entry, this routine is used to
+ * conditionally free the page, and manage the hold/wire counts.
+ */
+static int
+pmap_unuse_l3(pmap_t pmap, vm_offset_t va, pd_entry_t ptepde,
+ struct spglist *free)
+{
+ vm_page_t mpte;
+
+ if (va >= VM_MAXUSER_ADDRESS)
+ return (0);
+ KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
+ mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK);
+ return (pmap_unwire_l3(pmap, va, mpte, free));
+}
+
+void
+pmap_pinit0(pmap_t pmap)
+{
+
+ PMAP_LOCK_INIT(pmap);
+ bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
+ pmap->pm_l1 = kernel_pmap->pm_l1;
+}
+
+int
+pmap_pinit(pmap_t pmap)
+{
+ vm_paddr_t l1phys;
+ vm_page_t l1pt;
+
+ /*
+ * allocate the l1 page
+ */
+ while ((l1pt = vm_page_alloc(NULL, 0xdeadbeef, VM_ALLOC_NORMAL |
+ VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL)
+ VM_WAIT;
+
+ l1phys = VM_PAGE_TO_PHYS(l1pt);
+ pmap->pm_l1 = (pd_entry_t *)PHYS_TO_DMAP(l1phys);
+
+ if ((l1pt->flags & PG_ZERO) == 0)
+ pagezero(pmap->pm_l1);
+
+ bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
+
+ return (1);
+}
+
+/*
+ * This routine is called if the desired page table page does not exist.
+ *
+ * If page table page allocation fails, this routine may sleep before
+ * returning NULL. It sleeps only if a lock pointer was given.
+ *
+ * Note: If a page allocation fails at page table level two or three,
+ * one or two pages may be held during the wait, only to be released
+ * afterwards. This conservative approach is easily argued to avoid
+ * race conditions.
+ */
+static vm_page_t
+_pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp)
+{
+ vm_page_t m, /*pdppg, */pdpg;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ /*
+ * Allocate a page table page.
+ */
+ if ((m = vm_page_alloc(NULL, ptepindex, VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO)) == NULL) {
+ if (lockp != NULL) {
+ RELEASE_PV_LIST_LOCK(lockp);
+ PMAP_UNLOCK(pmap);
+ rw_runlock(&pvh_global_lock);
+ VM_WAIT;
+ rw_rlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+ }
+
+ /*
+ * Indicate the need to retry. While waiting, the page table
+ * page may have been allocated.
+ */
+ return (NULL);
+ }
+ if ((m->flags & PG_ZERO) == 0)
+ pmap_zero_page(m);
+
+ /*
+ * Map the pagetable page into the process address space, if
+ * it isn't already there.
+ */
+
+ if (ptepindex >= NUPDE) {
+ pd_entry_t *l1;
+ vm_pindex_t l1index;
+
+ l1index = ptepindex - NUPDE;
+ l1 = &pmap->pm_l1[l1index];
+ pmap_load_store(l1, VM_PAGE_TO_PHYS(m) | L1_TABLE);
+ PTE_SYNC(l1);
+
+ } else {
+ vm_pindex_t l1index;
+ pd_entry_t *l1, *l2;
+
+ l1index = ptepindex >> (L1_SHIFT - L2_SHIFT);
+ l1 = &pmap->pm_l1[l1index];
+ if (*l1 == 0) {
+ /* recurse for allocating page dir */
+ if (_pmap_alloc_l3(pmap, NUPDE + l1index,
+ lockp) == NULL) {
+ --m->wire_count;
+ atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+ vm_page_free_zero(m);
+ return (NULL);
+ }
+ } else {
+ pdpg = PHYS_TO_VM_PAGE(*l1 & ~ATTR_MASK);
+ pdpg->wire_count++;
+ }
+
+ l2 = (pd_entry_t *)PHYS_TO_DMAP(*l1 & ~ATTR_MASK);
+ l2 = &l2[ptepindex & Ln_ADDR_MASK];
+ pmap_load_store(l2, VM_PAGE_TO_PHYS(m) | ATTR_AF |
+ ATTR_IDX(CACHED_MEMORY) | L2_TABLE);
+ PTE_SYNC(l2);
+ }
+
+ pmap_resident_count_inc(pmap, 1);
+
+ return (m);
+}
+
+static vm_page_t
+pmap_alloc_l3(pmap_t pmap, vm_offset_t va, struct rwlock **lockp)
+{
+ vm_pindex_t ptepindex;
+ pd_entry_t *l2;
+ vm_page_t m;
+
+ /*
+ * Calculate pagetable page index
+ */
+ ptepindex = pmap_l2_pindex(va);
+retry:
+ /*
+ * Get the page directory entry
+ */
+ l2 = pmap_l2(pmap, va);
+
+ /*
+ * If the page table page is mapped, we just increment the
+ * hold count, and activate it.
+ */
+ if (l2 != NULL && *l2 != 0) {
+ m = PHYS_TO_VM_PAGE(*l2 & ~ATTR_MASK);
+ m->wire_count++;
+ } else {
+ /*
+ * Here if the pte page isn't mapped, or if it has been
+ * deallocated.
+ */
+ m = _pmap_alloc_l3(pmap, ptepindex, lockp);
+ if (m == NULL && lockp != NULL)
+ goto retry;
+ }
+ /*
+ * XXXARM64: I'm not sure why we need this but it fixes a crash
+ * when running things from a shell script.
+ */
+ pmap_invalidate_all(pmap);
+ return (m);
+}
+
+/***************************************************
+ * Pmap allocation/deallocation routines.
+ ***************************************************/
+
+/*
+ * Release any resources held by the given physical map.
+ * Called when a pmap initialized by pmap_pinit is being released.
+ * Should only be called if the map contains no valid mappings.
+ */
+void
+pmap_release(pmap_t pmap)
+{
+ vm_page_t m;
+
+ KASSERT(pmap->pm_stats.resident_count == 0,
+ ("pmap_release: pmap resident count %ld != 0",
+ pmap->pm_stats.resident_count));
+
+ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pmap->pm_l1));
+
+ m->wire_count--;
+ atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+ vm_page_free_zero(m);
+}
+
+#if 0
+static int
+kvm_size(SYSCTL_HANDLER_ARGS)
+{
+ unsigned long ksize = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
+
+ return sysctl_handle_long(oidp, &ksize, 0, req);
+}
+SYSCTL_PROC(_vm, OID_AUTO, kvm_size, CTLTYPE_LONG|CTLFLAG_RD,
+ 0, 0, kvm_size, "LU", "Size of KVM");
+
+static int
+kvm_free(SYSCTL_HANDLER_ARGS)
+{
+ unsigned long kfree = VM_MAX_KERNEL_ADDRESS - kernel_vm_end;
+
+ return sysctl_handle_long(oidp, &kfree, 0, req);
+}
+SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
+ 0, 0, kvm_free, "LU", "Amount of KVM free");
+#endif /* 0 */
+
+/*
+ * grow the number of kernel page table entries, if needed
+ */
+void
+pmap_growkernel(vm_offset_t addr)
+{
+ vm_paddr_t paddr;
+ vm_page_t nkpg;
+ pd_entry_t *l1, *l2;
+
+ mtx_assert(&kernel_map->system_mtx, MA_OWNED);
+
+ addr = roundup2(addr, L2_SIZE);
+ if (addr - 1 >= kernel_map->max_offset)
+ addr = kernel_map->max_offset;
+ while (kernel_vm_end < addr) {
+ l1 = pmap_l1(kernel_pmap, kernel_vm_end);
+ if (*l1 == 0) {
+ /* We need a new PDP entry */
+ nkpg = vm_page_alloc(NULL, kernel_vm_end >> L1_SHIFT,
+ VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ if (nkpg == NULL)
+ panic("pmap_growkernel: no memory to grow kernel");
+ if ((nkpg->flags & PG_ZERO) == 0)
+ pmap_zero_page(nkpg);
+ paddr = VM_PAGE_TO_PHYS(nkpg);
+ pmap_load_store(l1, paddr | L1_TABLE);
+ PTE_SYNC(l1);
+ continue; /* try again */
+ }
+ l2 = pmap_l1_to_l2(l1, kernel_vm_end);
+ if ((*l2 & ATTR_AF) != 0) {
+ kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
+ if (kernel_vm_end - 1 >= kernel_map->max_offset) {
+ kernel_vm_end = kernel_map->max_offset;
+ break;
+ }
+ continue;
+ }
+
+ nkpg = vm_page_alloc(NULL, kernel_vm_end >> L2_SHIFT,
+ VM_ALLOC_INTERRUPT | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+ VM_ALLOC_ZERO);
+ if (nkpg == NULL)
+ panic("pmap_growkernel: no memory to grow kernel");
+ if ((nkpg->flags & PG_ZERO) == 0)
+ pmap_zero_page(nkpg);
+ paddr = VM_PAGE_TO_PHYS(nkpg);
+ pmap_load_store(l2, paddr | L2_TABLE);
+ PTE_SYNC(l2);
+
+ kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
+ if (kernel_vm_end - 1 >= kernel_map->max_offset) {
+ kernel_vm_end = kernel_map->max_offset;
+ break;
+ }
+ }
+}
+
+/***************************************************
+ * page management routines.
+ ***************************************************/
+
+CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
+CTASSERT(_NPCM == 3);
+CTASSERT(_NPCPV == 168);
+
+static __inline struct pv_chunk *
+pv_to_chunk(pv_entry_t pv)
+{
+
+ return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
+}
+
+#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
+
+#define PC_FREE0 0xfffffffffffffffful
+#define PC_FREE1 0xfffffffffffffffful
+#define PC_FREE2 0x000000fffffffffful
+
+static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
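
A note on the chunk geometry that the CTASSERTs and masks above encode: a pv chunk fills exactly one 4 KB page, and assuming a 24-byte pv_entry and a 64-byte chunk header (consistent with the asserts), that leaves (4096 - 64) / 24 = 168 slots tracked by three 64-bit words. Words 0 and 1 cover 128 slots, so word 2 needs only its low 40 bits, which is exactly PC_FREE2. A standalone sketch of the index arithmetic:

#include <stdint.h>
#include <stdio.h>

#define NPCPV	168	/* pv entries per chunk, matching _NPCPV above */

/* Map a pv index to its bitmap word and bit, as free_pv_entry() does. */
static void
idx_to_field_bit(int idx, int *field, int *bit)
{
	*field = idx / 64;
	*bit = idx % 64;
}

int
main(void)
{
	int field, bit;

	idx_to_field_bit(NPCPV - 1, &field, &bit);
	/* Prints "2 39": the last entry lives in word 2, bit 39, so the
	 * free mask for word 2 is the low 40 bits: 0x000000ffffffffff. */
	printf("%d %d\n", field, bit);
	return (0);
}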
+
+#if 0
+#ifdef PV_STATS
+static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
+
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_count, CTLFLAG_RD, &pc_chunk_count, 0,
+ "Current number of pv entry chunks");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_allocs, CTLFLAG_RD, &pc_chunk_allocs, 0,
+ "Current number of pv entry chunks allocated");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_frees, CTLFLAG_RD, &pc_chunk_frees, 0,
+ "Current number of pv entry chunks frees");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pc_chunk_tryfail, CTLFLAG_RD, &pc_chunk_tryfail, 0,
+ "Number of times tried to get a chunk page but failed.");
+
+static long pv_entry_frees, pv_entry_allocs, pv_entry_count;
+static int pv_entry_spare;
+
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_frees, CTLFLAG_RD, &pv_entry_frees, 0,
+ "Current number of pv entry frees");
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_allocs, CTLFLAG_RD, &pv_entry_allocs, 0,
+ "Current number of pv entry allocs");
+SYSCTL_LONG(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
+ "Current number of pv entries");
+SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_spare, CTLFLAG_RD, &pv_entry_spare, 0,
+ "Current number of spare pv entries");
+#endif
+#endif /* 0 */
+
+/*
+ * We are in a serious low memory condition. Resort to
+ * drastic measures to free some pages so we can allocate
+ * another pv entry chunk.
+ *
+ * Returns NULL if PV entries were reclaimed from the specified pmap.
+ *
+ * We do not, however, unmap 2mpages because subsequent accesses will
+ * allocate per-page pv entries until repromotion occurs, thereby
+ * exacerbating the shortage of free pv entries.
+ */
+static vm_page_t
+reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp)
+{
+
+ panic("reclaim_pv_chunk");
+}
+
+/*
+ * free the pv_entry back to the free list
+ */
+static void
+free_pv_entry(pmap_t pmap, pv_entry_t pv)
+{
+ struct pv_chunk *pc;
+ int idx, field, bit;
+
+ rw_assert(&pvh_global_lock, RA_LOCKED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PV_STAT(atomic_add_long(&pv_entry_frees, 1));
+ PV_STAT(atomic_add_int(&pv_entry_spare, 1));
+ PV_STAT(atomic_subtract_long(&pv_entry_count, 1));
+ pc = pv_to_chunk(pv);
+ idx = pv - &pc->pc_pventry[0];
+ field = idx / 64;
+ bit = idx % 64;
+ pc->pc_map[field] |= 1ul << bit;
+ if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
+ pc->pc_map[2] != PC_FREE2) {
+ /* 98% of the time, pc is already at the head of the list. */
+ if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ }
+ return;
+ }
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ free_pv_chunk(pc);
+}
+
+static void
+free_pv_chunk(struct pv_chunk *pc)
+{
+ vm_page_t m;
+
+ mtx_lock(&pv_chunks_mutex);
+ TAILQ_REMOVE(&pv_chunks, pc, pc_lru);
+ mtx_unlock(&pv_chunks_mutex);
+ PV_STAT(atomic_subtract_int(&pv_entry_spare, _NPCPV));
+ PV_STAT(atomic_subtract_int(&pc_chunk_count, 1));
+ PV_STAT(atomic_add_int(&pc_chunk_frees, 1));
+ /* entire chunk is free, return it */
+ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pc));
+#if 0 /* TODO: For minidump */
+ dump_drop_page(m->phys_addr);
+#endif
+ vm_page_unwire(m, PQ_INACTIVE);
+ vm_page_free(m);
+}
+
+/*
+ * Returns a new PV entry, allocating a new PV chunk from the system when
+ * needed. If this PV chunk allocation fails and a PV list lock pointer was
+ * given, a PV chunk is reclaimed from an arbitrary pmap. Otherwise, NULL is
+ * returned.
+ *
+ * The given PV list lock may be released.
+ */
+static pv_entry_t
+get_pv_entry(pmap_t pmap, struct rwlock **lockp)
+{
+ int bit, field;
+ pv_entry_t pv;
+ struct pv_chunk *pc;
+ vm_page_t m;
+
+ rw_assert(&pvh_global_lock, RA_LOCKED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ PV_STAT(atomic_add_long(&pv_entry_allocs, 1));
+retry:
+ pc = TAILQ_FIRST(&pmap->pm_pvchunk);
+ if (pc != NULL) {
+ for (field = 0; field < _NPCM; field++) {
+ if (pc->pc_map[field]) {
+ bit = ffsl(pc->pc_map[field]) - 1;
+ break;
+ }
+ }
+ if (field < _NPCM) {
+ pv = &pc->pc_pventry[field * 64 + bit];
+ pc->pc_map[field] &= ~(1ul << bit);
+ /* If this was the last item, move it to tail */
+ if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
+ pc->pc_map[2] == 0) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc,
+ pc_list);
+ }
+ PV_STAT(atomic_add_long(&pv_entry_count, 1));
+ PV_STAT(atomic_subtract_int(&pv_entry_spare, 1));
+ return (pv);
+ }
+ }
+ /* No free items, allocate another chunk */
+ m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
+ VM_ALLOC_WIRED);
+ if (m == NULL) {
+ if (lockp == NULL) {
+ PV_STAT(pc_chunk_tryfail++);
+ return (NULL);
+ }
+ m = reclaim_pv_chunk(pmap, lockp);
+ if (m == NULL)
+ goto retry;
+ }
+ PV_STAT(atomic_add_int(&pc_chunk_count, 1));
+ PV_STAT(atomic_add_int(&pc_chunk_allocs, 1));
+#if 0 /* TODO: This is for minidump */
+ dump_add_page(m->phys_addr);
+#endif
+ pc = (void *)PHYS_TO_DMAP(m->phys_addr);
+ pc->pc_pmap = pmap;
+ pc->pc_map[0] = PC_FREE0 & ~1ul; /* preallocated bit 0 */
+ pc->pc_map[1] = PC_FREE1;
+ pc->pc_map[2] = PC_FREE2;
+ mtx_lock(&pv_chunks_mutex);
+ TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
+ mtx_unlock(&pv_chunks_mutex);
+ pv = &pc->pc_pventry[0];
+ TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
+ PV_STAT(atomic_add_long(&pv_entry_count, 1));
+ PV_STAT(atomic_add_int(&pv_entry_spare, _NPCPV - 1));
+ return (pv);
+}
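
The first-fit scan in get_pv_entry() relies on ffsl() returning the 1-based position of the lowest set bit. A minimal userland sketch of the same claim-the-lowest-free-slot loop (standalone; the freemask contents are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffsl() */

#define NPCM	3	/* bitmap words per chunk, matching _NPCM above */

/* Claim the lowest free slot, mirroring the loop in get_pv_entry(). */
static int
claim_lowest_free(uint64_t map[NPCM])
{
	int bit, field;

	for (field = 0; field < NPCM; field++) {
		if (map[field] != 0) {
			bit = ffsl(map[field]) - 1;	/* lowest set bit */
			map[field] &= ~(1UL << bit);	/* mark it in use */
			return (field * 64 + bit);
		}
	}
	return (-1);	/* chunk is full */
}

int
main(void)
{
	uint64_t map[NPCM] = { 0, 0x6, 0 };	/* only bits 65, 66 free */

	printf("%d\n", claim_lowest_free(map));	/* prints 65 */
	printf("%d\n", claim_lowest_free(map));	/* prints 66 */
	return (0);
}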
+
+/*
+ * First find and then remove the pv entry for the specified pmap and virtual
+ * address from the specified pv list. Returns the pv entry if found and NULL
+ * otherwise. This operation can be performed on pv lists for either 4KB or
+ * 2MB page mappings.
+ */
+static __inline pv_entry_t
+pmap_pvh_remove(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
+{
+ pv_entry_t pv;
+
+ rw_assert(&pvh_global_lock, RA_LOCKED);
+ TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
+ if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
+ TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
+ pvh->pv_gen++;
+ break;
+ }
+ }
+ return (pv);
+}
+
+/*
+ * First find and then destroy the pv entry for the specified pmap and virtual
+ * address. This operation can be performed on pv lists for either 4KB or 2MB
+ * page mappings.
+ */
+static void
+pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va)
+{
+ pv_entry_t pv;
+
+ pv = pmap_pvh_remove(pvh, pmap, va);
+ KASSERT(pv != NULL, ("pmap_pvh_free: pv not found"));
+ free_pv_entry(pmap, pv);
+}
+
+/*
+ * Conditionally create the PV entry for a 4KB page mapping if the required
+ * memory can be allocated without resorting to reclamation.
+ */
+static boolean_t
+pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ struct rwlock **lockp)
+{
+ pv_entry_t pv;
+
+ rw_assert(&pvh_global_lock, RA_LOCKED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ /* Pass NULL instead of the lock pointer to disable reclamation. */
+ if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
+ pv->pv_va = va;
+ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ return (TRUE);
+ } else
+ return (FALSE);
+}
+
+/*
+ * pmap_remove_l3: do the things to unmap a page in a process
+ */
+static int
+pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t va,
+ pd_entry_t l2e, struct spglist *free, struct rwlock **lockp)
+{
+ pt_entry_t old_l3;
+ vm_page_t m;
+
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+ if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(pmap_load(l3)))
+ cpu_dcache_wb_range(va, L3_SIZE);
+ old_l3 = pmap_load_clear(l3);
+ PTE_SYNC(l3);
+ if (old_l3 & ATTR_SW_WIRED)
+ pmap->pm_stats.wired_count -= 1;
+ pmap_resident_count_dec(pmap, 1);
+ if (old_l3 & ATTR_SW_MANAGED) {
+ m = PHYS_TO_VM_PAGE(old_l3 & ~ATTR_MASK);
+ if (pmap_page_dirty(old_l3))
+ vm_page_dirty(m);
+ if (old_l3 & ATTR_AF)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m);
+ pmap_pvh_free(&m->md, pmap, va);
+ }
+ return (pmap_unuse_l3(pmap, va, l2e, free));
+}
+
+/*
+ * Remove the given range of addresses from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the page size.
+ */
+void
+pmap_remove(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ struct rwlock *lock;
+ vm_offset_t va, va_next;
+ pd_entry_t *l1, *l2;
+ pt_entry_t l3_paddr, *l3;
+ struct spglist free;
+ int anyvalid;
+
+ /*
+ * Perform an unsynchronized read. This is, however, safe.
+ */
+ if (pmap->pm_stats.resident_count == 0)
+ return;
+
+ anyvalid = 0;
+ SLIST_INIT(&free);
+
+ rw_rlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+
+ lock = NULL;
+ for (; sva < eva; sva = va_next) {
+ if (pmap->pm_stats.resident_count == 0)
+ break;
+
+ l1 = pmap_l1(pmap, sva);
+ if (*l1 == 0) {
+ va_next = (sva + L1_SIZE) & ~L1_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ /*
+ * Calculate index for next page table.
+ */
+ va_next = (sva + L2_SIZE) & ~L2_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+
+ l2 = pmap_l1_to_l2(l1, sva);
+ if (l2 == NULL)
+ continue;
+
+ l3_paddr = *l2;
+
+ /*
+ * Weed out invalid mappings.
+ */
+ if ((l3_paddr & ATTR_DESCR_MASK) != L2_TABLE)
+ continue;
+
+ /*
+ * Limit our scan to either the end of the va represented
+ * by the current page table page, or to the end of the
+ * range being removed.
+ */
+ if (va_next > eva)
+ va_next = eva;
+
+ va = va_next;
+ for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
+ sva += L3_SIZE) {
+ if (l3 == NULL)
+ panic("l3 == NULL");
+ if (*l3 == 0) {
+ if (va != va_next) {
+ pmap_invalidate_range(pmap, va, sva);
+ va = va_next;
+ }
+ continue;
+ }
+ if (va == va_next)
+ va = sva;
+ if (pmap_remove_l3(pmap, l3, sva, l3_paddr, &free,
+ &lock)) {
+ sva += L3_SIZE;
+ break;
+ }
+ }
+ if (va != va_next)
+ pmap_invalidate_range(pmap, va, sva);
+ }
+ if (lock != NULL)
+ rw_wunlock(lock);
+ if (anyvalid)
+ pmap_invalidate_all(pmap);
+ rw_runlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+ pmap_free_zero_pages(&free);
+}
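
One subtlety in the walk above: every va_next computation is followed by an `if (va_next < sva)` test because the addition can wrap past zero in the last L1 or L2 block of the address space. A tiny standalone demonstration, with L1_SIZE assumed to be a 1 GB block:

#include <stdint.h>
#include <stdio.h>

#define L1_SIZE		(1UL << 30)	/* assumed: 1 GB per L1 entry */
#define L1_OFFSET	(L1_SIZE - 1)

int
main(void)
{
	uint64_t sva = 0xffffffffc0000000UL;	/* inside the last L1 block */
	uint64_t eva = 0xffffffffffffffffUL;
	uint64_t va_next;

	va_next = (sva + L1_SIZE) & ~L1_OFFSET;	/* wraps around to 0 */
	if (va_next < sva)			/* detect the wrap... */
		va_next = eva;			/* ...and clamp to eva */
	printf("%#lx\n", (unsigned long)va_next);	/* the clamped eva */
	return (0);
}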
+
+/*
+ * Routine: pmap_remove_all
+ * Function:
+ * Removes this physical page from
+ * all physical maps in which it resides.
+ * Reflects back modify bits to the pager.
+ *
+ * Notes:
+ * Original versions of this routine were very
+ * inefficient because they iteratively called
+ * pmap_remove (slow...)
+ */
+void
+pmap_remove_all(vm_page_t m)
+{
+ pv_entry_t pv;
+ pmap_t pmap;
+ pt_entry_t *l3, tl3;
+ pd_entry_t *l2;
+ struct spglist free;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_remove_all: page %p is not managed", m));
+ SLIST_INIT(&free);
+ rw_wlock(&pvh_global_lock);
+ while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
+ pmap = PV_PMAP(pv);
+ PMAP_LOCK(pmap);
+ pmap_resident_count_dec(pmap, 1);
+ l2 = pmap_l2(pmap, pv->pv_va);
+ KASSERT((*l2 & ATTR_DESCR_MASK) == L2_TABLE,
+ ("pmap_remove_all: found a table when expecting "
+ "a block in %p's pv list", m));
+ l3 = pmap_l2_to_l3(l2, pv->pv_va);
+ if (pmap_is_current(pmap) &&
+ pmap_l3_valid_cacheable(pmap_load(l3)))
+ cpu_dcache_wb_range(pv->pv_va, L3_SIZE);
+ tl3 = pmap_load_clear(l3);
+ PTE_SYNC(l3);
+ if (tl3 & ATTR_SW_WIRED)
+ pmap->pm_stats.wired_count--;
+ if ((tl3 & ATTR_AF) != 0)
+ vm_page_aflag_set(m, PGA_REFERENCED);
+
+ /*
+ * Update the vm_page_t clean and reference bits.
+ */
+ if (pmap_page_dirty(tl3))
+ vm_page_dirty(m);
+ pmap_unuse_l3(pmap, pv->pv_va, *l2, &free);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ free_pv_entry(pmap, pv);
+ PMAP_UNLOCK(pmap);
+ }
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ rw_wunlock(&pvh_global_lock);
+ pmap_free_zero_pages(&free);
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ */
+void
+pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
+{
+ vm_offset_t va, va_next;
+ pd_entry_t *l1, *l2;
+ pt_entry_t *l3p, l3;
+
+ if ((prot & VM_PROT_READ) == VM_PROT_NONE) {
+ pmap_remove(pmap, sva, eva);
+ return;
+ }
+
+ if ((prot & VM_PROT_WRITE) == VM_PROT_WRITE)
+ return;
+
+ PMAP_LOCK(pmap);
+ for (; sva < eva; sva = va_next) {
+ l1 = pmap_l1(pmap, sva);
+ if (*l1 == 0) {
+ va_next = (sva + L1_SIZE) & ~L1_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ va_next = (sva + L2_SIZE) & ~L2_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+
+ l2 = pmap_l1_to_l2(l1, sva);
+ if (l2 == NULL || (*l2 & ATTR_DESCR_MASK) != L2_TABLE)
+ continue;
+
+ if (va_next > eva)
+ va_next = eva;
+
+ va = va_next;
+ for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
+ sva += L3_SIZE) {
+ l3 = pmap_load(l3p);
+ if (pmap_l3_valid(l3)) {
+ pmap_set(l3p, ATTR_AP(ATTR_AP_RO));
+ PTE_SYNC(l3p);
+ }
+ }
+ }
+ PMAP_UNLOCK(pmap);
+
+ /* TODO: Only invalidate entries we are touching */
+ pmap_invalidate_all(pmap);
+}
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+int
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
+ u_int flags, int8_t psind __unused)
+{
+ struct rwlock *lock;
+ pd_entry_t *l1, *l2;
+ pt_entry_t new_l3, orig_l3;
+ pt_entry_t *l3;
+ pv_entry_t pv;
+ vm_paddr_t opa, pa, l2_pa, l3_pa;
+ vm_page_t mpte, om, l2_m, l3_m;
+ boolean_t nosleep;
+
+ va = trunc_page(va);
+ if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+ VM_OBJECT_ASSERT_LOCKED(m->object);
+ pa = VM_PAGE_TO_PHYS(m);
+ new_l3 = (pt_entry_t)(pa | ATTR_AF | L3_PAGE);
+ if ((prot & VM_PROT_WRITE) == 0)
+ new_l3 |= ATTR_AP(ATTR_AP_RO);
+ if ((flags & PMAP_ENTER_WIRED) != 0)
+ new_l3 |= ATTR_SW_WIRED;
+ if ((va >> 63) == 0)
+ new_l3 |= ATTR_AP(ATTR_AP_USER);
+ new_l3 |= ATTR_IDX(m->md.pv_memattr);
+
+ mpte = NULL;
+
+ lock = NULL;
+ rw_rlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+
+ if (va < VM_MAXUSER_ADDRESS) {
+ nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
+ mpte = pmap_alloc_l3(pmap, va, nosleep ? NULL : &lock);
+ if (mpte == NULL && nosleep) {
+ if (lock != NULL)
+ rw_wunlock(lock);
+ rw_runlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ l3 = pmap_l3(pmap, va);
+ } else {
+ l3 = pmap_l3(pmap, va);
+ /* TODO: This is not optimal, but should mostly work */
+ if (l3 == NULL) {
+ l2 = pmap_l2(pmap, va);
+
+ if (l2 == NULL) {
+ l2_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
+ VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
+ VM_ALLOC_ZERO);
+ if (l2_m == NULL)
+ panic("pmap_enter: l2 pte_m == NULL");
+ if ((l2_m->flags & PG_ZERO) == 0)
+ pmap_zero_page(l2_m);
+
+ l2_pa = VM_PAGE_TO_PHYS(l2_m);
+ l1 = pmap_l1(pmap, va);
+ pmap_load_store(l1, l2_pa | L1_TABLE);
+ PTE_SYNC(l1);
+ l2 = pmap_l1_to_l2(l1, va);
+ }
+
+ KASSERT(l2 != NULL,
+ ("No l2 table after allocating one"));
+
+ l3_m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
+ VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
+ if (l3_m == NULL)
+ panic("pmap_enter: l3 pte_m == NULL");
+ if ((l3_m->flags & PG_ZERO) == 0)
+ pmap_zero_page(l3_m);
+
+ l3_pa = VM_PAGE_TO_PHYS(l3_m);
+ pmap_load_store(l2, l3_pa | L2_TABLE);
+ PTE_SYNC(l2);
+ l3 = pmap_l2_to_l3(l2, va);
+ }
+ }
+
+ om = NULL;
+ orig_l3 = pmap_load(l3);
+ opa = orig_l3 & ~ATTR_MASK;
+
+ /*
+ * Is the specified virtual address already mapped?
+ */
+ if (pmap_l3_valid(orig_l3)) {
+ /*
+ * Wiring change, just update stats. We don't worry about
+ * wiring PT pages as they remain resident as long as there
+ * are valid mappings in them. Hence, if a user page is wired,
+ * the PT page will be also.
+ */
+ if ((flags & PMAP_ENTER_WIRED) != 0 &&
+ (orig_l3 & ATTR_SW_WIRED) == 0)
+ pmap->pm_stats.wired_count++;
+ else if ((flags & PMAP_ENTER_WIRED) == 0 &&
+ (orig_l3 & ATTR_SW_WIRED) != 0)
+ pmap->pm_stats.wired_count--;
+
+ /*
+ * Remove the extra PT page reference.
+ */
+ if (mpte != NULL) {
+ mpte->wire_count--;
+ KASSERT(mpte->wire_count > 0,
+ ("pmap_enter: missing reference to page table page,"
+ " va: 0x%lx", va));
+ }
+
+ /*
+ * Has the physical page changed?
+ */
+ if (opa == pa) {
+ /*
+ * No, might be a protection or wiring change.
+ */
+ if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
+ new_l3 |= ATTR_SW_MANAGED;
+ if ((new_l3 & ATTR_AP(ATTR_AP_RW)) ==
+ ATTR_AP(ATTR_AP_RW)) {
+ vm_page_aflag_set(m, PGA_WRITEABLE);
+ }
+ }
+ goto validate;
+ }
+
+ /* Flush the cache, there might be uncommitted data in it */
+ if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3))
+ cpu_dcache_wb_range(va, L3_SIZE);
+ } else {
+ /*
+ * Increment the counters.
+ */
+ if ((new_l3 & ATTR_SW_WIRED) != 0)
+ pmap->pm_stats.wired_count++;
+ pmap_resident_count_inc(pmap, 1);
+ }
+ /*
+ * Enter on the PV list if part of our managed memory.
+ */
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
+ new_l3 |= ATTR_SW_MANAGED;
+ pv = get_pv_entry(pmap, &lock);
+ pv->pv_va = va;
+ CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ if ((new_l3 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW))
+ vm_page_aflag_set(m, PGA_WRITEABLE);
+ }
+
+ /*
+ * Update the L3 entry.
+ */
+ if (orig_l3 != 0) {
+validate:
+ orig_l3 = pmap_load_store(l3, new_l3);
+ PTE_SYNC(l3);
+ opa = orig_l3 & ~ATTR_MASK;
+
+ if (opa != pa) {
+ if ((orig_l3 & ATTR_SW_MANAGED) != 0) {
+ om = PHYS_TO_VM_PAGE(opa);
+ if (pmap_page_dirty(orig_l3))
+ vm_page_dirty(om);
+ if ((orig_l3 & ATTR_AF) != 0)
+ vm_page_aflag_set(om, PGA_REFERENCED);
+ CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
+ pmap_pvh_free(&om->md, pmap, va);
+ }
+ } else if (pmap_page_dirty(orig_l3)) {
+ if ((orig_l3 & ATTR_SW_MANAGED) != 0)
+ vm_page_dirty(m);
+ }
+ if ((orig_l3 & ATTR_AF) != 0)
+ pmap_invalidate_page(pmap, va);
+ } else {
+ pmap_load_store(l3, new_l3);
+ PTE_SYNC(l3);
+ }
+ if ((pmap != pmap_kernel()) && (pmap == &curproc->p_vmspace->vm_pmap))
+ cpu_icache_sync_range(va, PAGE_SIZE);
+
+ if (lock != NULL)
+ rw_wunlock(lock);
+ rw_runlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+ return (KERN_SUCCESS);
+}
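
For reference, the new_l3 value built at the top of pmap_enter() is plain bit assembly: attribute fields are OR-ed onto the page-aligned physical address, and `(va >> 63) == 0` identifies a user (TTBR0-half) address. A standalone sketch with the descriptor fields written out; the constants below are assumptions matching the ARMv8 level-3 descriptor layout, not copied from machine/pte.h:

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed ARMv8 level-3 descriptor fields: type in bits 1:0,
 * AP[2:1] in bits 7:6, the access flag (AF) in bit 10.
 */
#define L3_PAGE		0x3UL			/* valid page descriptor */
#define ATTR_AP(x)	((uint64_t)(x) << 6)
#define ATTR_AP_USER	0x1			/* AP[1]: EL0 accessible */
#define ATTR_AP_RO	0x2			/* AP[2]: read-only */
#define ATTR_AF		(1UL << 10)		/* access flag */

int
main(void)
{
	uint64_t pa = 0x40123000UL;		/* page-aligned physical addr */
	uint64_t va = 0x0000000000400000UL;	/* user VA: bit 63 is clear */
	uint64_t new_l3 = pa | ATTR_AF | L3_PAGE;

	if ((va >> 63) == 0)		/* TTBR0 (user) half of the VA space */
		new_l3 |= ATTR_AP(ATTR_AP_USER);
	printf("new_l3 = %#lx\n", (unsigned long)new_l3);
	return (0);
}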
+
+/*
+ * Maps a sequence of resident pages belonging to the same object.
+ * The sequence begins with the given page m_start. This page is
+ * mapped at the given virtual address start. Each subsequent page is
+ * mapped at a virtual address that is offset from start by the same
+ * amount as the page is offset from m_start within the object. The
+ * last page in the sequence is the page with the largest offset from
+ * m_start that can be mapped at a virtual address less than the given
+ * virtual address end. Not every virtual page between start and end
+ * is mapped; only those for which a resident page exists with the
+ * corresponding offset from m_start are mapped.
+ */
+void
+pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
+ vm_page_t m_start, vm_prot_t prot)
+{
+ struct rwlock *lock;
+ vm_offset_t va;
+ vm_page_t m, mpte;
+ vm_pindex_t diff, psize;
+
+ VM_OBJECT_ASSERT_LOCKED(m_start->object);
+
+ psize = atop(end - start);
+ mpte = NULL;
+ m = m_start;
+ lock = NULL;
+ rw_rlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+ while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
+ va = start + ptoa(diff);
+ mpte = pmap_enter_quick_locked(pmap, va, m, prot, mpte, &lock);
+ m = TAILQ_NEXT(m, listq);
+ }
+ if (lock != NULL)
+ rw_wunlock(lock);
+ rw_runlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * this code makes some *MAJOR* assumptions:
+ * 1. Current pmap & pmap exists.
+ * 2. Not wired.
+ * 3. Read access.
+ * 4. No page table pages.
+ * but is *MUCH* faster than pmap_enter...
+ */
+void
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
+{
+ struct rwlock *lock;
+
+ lock = NULL;
+ rw_rlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+ (void)pmap_enter_quick_locked(pmap, va, m, prot, NULL, &lock);
+ if (lock != NULL)
+ rw_wunlock(lock);
+ rw_runlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+}
+
+static vm_page_t
+pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp)
+{
+ struct spglist free;
+ pd_entry_t *l2;
+ pt_entry_t *l3;
+ vm_paddr_t pa;
+
+ KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva ||
+ (m->oflags & VPO_UNMANAGED) != 0,
+ ("pmap_enter_quick_locked: managed mapping within the clean submap"));
+ rw_assert(&pvh_global_lock, RA_LOCKED);
+ PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+
+ /*
+ * In the case that a page table page is not
+ * resident, we are creating it here.
+ */
+ if (va < VM_MAXUSER_ADDRESS) {
+ vm_pindex_t l2pindex;
+
+ /*
+ * Calculate pagetable page index
+ */
+ l2pindex = pmap_l2_pindex(va);
+ if (mpte && (mpte->pindex == l2pindex)) {
+ mpte->wire_count++;
+ } else {
+ /*
+ * Get the l2 entry
+ */
+ l2 = pmap_l2(pmap, va);
+
+ /*
+ * If the page table page is mapped, we just increment
+ * the hold count, and activate it. Otherwise, we
+ * attempt to allocate a page table page. If this
+ * attempt fails, we don't retry. Instead, we give up.
+ */
+ if (l2 != NULL && *l2 != 0) {
+ mpte = PHYS_TO_VM_PAGE(*l2 & ~ATTR_MASK);
+ mpte->wire_count++;
+ } else {
+ /*
+ * Pass NULL instead of the PV list lock
+ * pointer, because we don't intend to sleep.
+ */
+ mpte = _pmap_alloc_l3(pmap, l2pindex, NULL);
+ if (mpte == NULL)
+ return (mpte);
+ }
+ }
+ l3 = (pt_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mpte));
+ l3 = &l3[pmap_l3_index(va)];
+ } else {
+ mpte = NULL;
+ l3 = pmap_l3(kernel_pmap, va);
+ }
+ if (l3 == NULL)
+ panic("pmap_enter_quick_locked: No l3");
+ if (*l3) {
+ if (mpte != NULL) {
+ mpte->wire_count--;
+ mpte = NULL;
+ }
+ return (mpte);
+ }
+
+ /*
+ * Enter on the PV list if part of our managed memory.
+ */
+ if ((m->oflags & VPO_UNMANAGED) == 0 &&
+ !pmap_try_insert_pv_entry(pmap, va, m, lockp)) {
+ if (mpte != NULL) {
+ SLIST_INIT(&free);
+ if (pmap_unwire_l3(pmap, va, mpte, &free)) {
+ pmap_invalidate_page(pmap, va);
+ pmap_free_zero_pages(&free);
+ }
+ mpte = NULL;
+ }
+ return (mpte);
+ }
+
+ /*
+ * Increment counters
+ */
+ pmap_resident_count_inc(pmap, 1);
+
+ pa = VM_PAGE_TO_PHYS(m) | ATTR_AF | ATTR_IDX(m->md.pv_memattr) |
+ ATTR_AP(ATTR_AP_RW) | L3_PAGE;
+
+ /*
+ * Now validate mapping with RO protection
+ */
+ if ((m->oflags & VPO_UNMANAGED) == 0)
+ pa |= ATTR_SW_MANAGED;
+ pmap_load_store(l3, pa);
+ PTE_SYNC(l3);
+ pmap_invalidate_page(pmap, va);
+ return (mpte);
+}
+
+/*
+ * This code maps large physical mmap regions into the
+ * processor address space. Note that some shortcuts
+ * are taken, but the code works.
+ */
+void
+pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object,
+ vm_pindex_t pindex, vm_size_t size)
+{
+
+ panic("pmap_object_init_pt");
+}
+
+/*
+ * Clear the wired attribute from the mappings for the specified range of
+ * addresses in the given pmap. Every valid mapping within that range
+ * must have the wired attribute set. In contrast, invalid mappings
+ * cannot have the wired attribute set, so they are ignored.
+ *
+ * The wired attribute of the page table entry is not a hardware feature,
+ * so there is no need to invalidate any TLB entries.
+ */
+void
+pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
+{
+ vm_offset_t va_next;
+ pd_entry_t *l1, *l2;
+ pt_entry_t *l3;
+ boolean_t pv_lists_locked;
+
+ pv_lists_locked = FALSE;
+ PMAP_LOCK(pmap);
+ for (; sva < eva; sva = va_next) {
+ l1 = pmap_l1(pmap, sva);
+ if (*l1 == 0) {
+ va_next = (sva + L1_SIZE) & ~L1_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+ continue;
+ }
+
+ va_next = (sva + L2_SIZE) & ~L2_OFFSET;
+ if (va_next < sva)
+ va_next = eva;
+
+ l2 = pmap_l1_to_l2(l1, sva);
+ if (*l2 == 0)
+ continue;
+
+ if (va_next > eva)
+ va_next = eva;
+ for (l3 = pmap_l2_to_l3(l2, sva); sva != va_next; l3++,
+ sva += L3_SIZE) {
+ if (*l3 == 0)
+ continue;
+ if ((*l3 & ATTR_SW_WIRED) == 0)
+ panic("pmap_unwire: l3 %#jx is missing "
+ "ATTR_SW_WIRED", (uintmax_t)*l3);
+
+ /*
+ * PG_W must be cleared atomically. Although the pmap
+ * lock synchronizes access to PG_W, another processor
+ * could be setting PG_M and/or PG_A concurrently.
+ */
+ atomic_clear_long(l3, ATTR_SW_WIRED);
+ pmap->pm_stats.wired_count--;
+ }
+ }
+ if (pv_lists_locked)
+ rw_runlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+void
+pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
+ vm_offset_t src_addr)
+{
+}
+
+/*
+ * pmap_zero_page zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents.
+ */
+void
+pmap_zero_page(vm_page_t m)
+{
+ vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+
+ pagezero((void *)va);
+}
+
+/*
+ * pmap_zero_page_area zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents.
+ *
+ * off and size may not cover an area beyond a single hardware page.
+ */
+void
+pmap_zero_page_area(vm_page_t m, int off, int size)
+{
+ vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+
+ if (off == 0 && size == PAGE_SIZE)
+ pagezero((void *)va);
+ else
+ bzero((char *)va + off, size);
+}
+
+/*
+ * pmap_zero_page_idle zeros the specified hardware page by mapping
+ * the page into KVM and using bzero to clear its contents. This
+ * is intended to be called from the vm_pagezero process only and
+ * outside of Giant.
+ */
+void
+pmap_zero_page_idle(vm_page_t m)
+{
+ vm_offset_t va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+
+ pagezero((void *)va);
+}
+
+/*
+ * pmap_copy_page copies the specified (machine independent)
+ * page by mapping the page into virtual memory and using
+ * bcopy to copy the page, one machine dependent page at a
+ * time.
+ */
+void
+pmap_copy_page(vm_page_t msrc, vm_page_t mdst)
+{
+ vm_offset_t src = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(msrc));
+ vm_offset_t dst = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(mdst));
+
+ pagecopy((void *)src, (void *)dst);
+}
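
pmap_zero_page(), pmap_zero_page_area(), and pmap_copy_page() above never build temporary mappings: the direct map covers physical memory at a constant virtual offset, so PHYS_TO_DMAP() is pure arithmetic. A standalone sketch of the translation, with an assumed base address (the real one comes from <machine/vmparam.h>):

#include <stdint.h>
#include <stdio.h>

/* Assumed direct-map base; illustrative only. */
#define DMAP_MIN_ADDRESS	0xffffffc000000000UL

#define PHYS_TO_DMAP(pa)	((pa) + DMAP_MIN_ADDRESS)
#define DMAP_TO_PHYS(va)	((va) - DMAP_MIN_ADDRESS)

int
main(void)
{
	uint64_t pa = 0x80000000UL;	/* hypothetical physical page */
	uint64_t va = PHYS_TO_DMAP(pa);

	/* The translations are inverses; no page-table walk is involved. */
	printf("va %#lx -> pa %#lx\n",
	    (unsigned long)va, (unsigned long)DMAP_TO_PHYS(va));
	return (0);
}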
+
+int unmapped_buf_allowed = 1;
+
+void
+pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[],
+ vm_offset_t b_offset, int xfersize)
+{
+ void *a_cp, *b_cp;
+ vm_page_t m_a, m_b;
+ vm_paddr_t p_a, p_b;
+ vm_offset_t a_pg_offset, b_pg_offset;
+ int cnt;
+
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ m_a = ma[a_offset >> PAGE_SHIFT];
+ p_a = m_a->phys_addr;
+ b_pg_offset = b_offset & PAGE_MASK;
+ m_b = mb[b_offset >> PAGE_SHIFT];
+ p_b = m_b->phys_addr;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ if (__predict_false(!PHYS_IN_DMAP(p_a))) {
+ panic("!DMAP a %lx", p_a);
+ } else {
+ a_cp = (char *)PHYS_TO_DMAP(p_a) + a_pg_offset;
+ }
+ if (__predict_false(!PHYS_IN_DMAP(p_b))) {
+ panic("!DMAP b %lx", p_b);
+ } else {
+ b_cp = (char *)PHYS_TO_DMAP(p_b) + b_pg_offset;
+ }
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+}
+
+/*
+ * Returns true if the pmap's pv is one of the first
+ * 16 pvs linked to from this page. This count may
+ * be changed upwards or downwards in the future; it
+ * is only necessary that true be returned for a small
+ * subset of pmaps for proper page aging.
+ */
+boolean_t
+pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
+{
+ struct rwlock *lock;
+ pv_entry_t pv;
+ int loops = 0;
+ boolean_t rv;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_page_exists_quick: page %p is not managed", m));
+ rv = FALSE;
+ rw_rlock(&pvh_global_lock);
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ if (PV_PMAP(pv) == pmap) {
+ rv = TRUE;
+ break;
+ }
+ loops++;
+ if (loops >= 16)
+ break;
+ }
+ rw_runlock(lock);
+ rw_runlock(&pvh_global_lock);
+ return (rv);
+}
+
+/*
+ * pmap_page_wired_mappings:
+ *
+ * Return the number of managed mappings to the given physical page
+ * that are wired.
+ */
+int
+pmap_page_wired_mappings(vm_page_t m)
+{
+ struct rwlock *lock;
+ pmap_t pmap;
+ pt_entry_t *l3;
+ pv_entry_t pv;
+ int count, md_gen;
+
+ if ((m->oflags & VPO_UNMANAGED) != 0)
+ return (0);
+ rw_rlock(&pvh_global_lock);
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
+restart:
+ count = 0;
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ rw_runlock(lock);
+ PMAP_LOCK(pmap);
+ rw_rlock(lock);
+ if (md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ l3 = pmap_l3(pmap, pv->pv_va);
+ if (l3 != NULL && (*l3 & ATTR_SW_WIRED) != 0)
+ count++;
+ PMAP_UNLOCK(pmap);
+ }
+ rw_runlock(lock);
+ rw_runlock(&pvh_global_lock);
+ return (count);
+}
+
+/*
+ * Destroy all managed, non-wired mappings in the given user-space
+ * pmap. This pmap cannot be active on any processor besides the
+ * caller.
+ *
+ * This function cannot be applied to the kernel pmap. Moreover, it
+ * is not intended for general use. It is only to be used during
+ * process termination. Consequently, it can be implemented in ways
+ * that make it faster than pmap_remove(). First, it can more quickly
+ * destroy mappings by iterating over the pmap's collection of PV
+ * entries, rather than searching the page table. Second, it doesn't
+ * have to test and clear the page table entries atomically, because
+ * no processor is currently accessing the user address space. In
+ * particular, a page table entry's dirty bit won't change state once
+ * this function starts.
+ */
+void
+pmap_remove_pages(pmap_t pmap)
+{
+ pd_entry_t ptepde, *l2;
+ pt_entry_t *l3, tl3;
+ struct spglist free;
+ vm_page_t m;
+ pv_entry_t pv;
+ struct pv_chunk *pc, *npc;
+ struct rwlock *lock;
+ int64_t bit;
+ uint64_t inuse, bitmask;
+ int allfree, field, freed, idx;
+ vm_paddr_t pa;
+
+ lock = NULL;
+
+ SLIST_INIT(&free);
+ rw_rlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+ TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) {
+ allfree = 1;
+ freed = 0;
+ for (field = 0; field < _NPCM; field++) {
+ inuse = ~pc->pc_map[field] & pc_freemask[field];
+ while (inuse != 0) {
+ bit = ffsl(inuse) - 1;
+ bitmask = 1UL << bit;
+ idx = field * 64 + bit;
+ pv = &pc->pc_pventry[idx];
+ inuse &= ~bitmask;
+
+ l2 = pmap_l2(pmap, pv->pv_va);
+ ptepde = pmap_load(l2);
+ l3 = pmap_l2_to_l3(l2, pv->pv_va);
+ tl3 = pmap_load(l3);
+
+/*
+ * We cannot remove wired pages from a process' mapping at this time
+ */
+ if (tl3 & ATTR_SW_WIRED) {
+ allfree = 0;
+ continue;
+ }
+
+ pa = tl3 & ~ATTR_MASK;
+
+ m = PHYS_TO_VM_PAGE(pa);
+ KASSERT(m->phys_addr == pa,
+ ("vm_page_t %p phys_addr mismatch %016jx %016jx",
+ m, (uintmax_t)m->phys_addr,
+ (uintmax_t)tl3));
+
+ KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
+ m < &vm_page_array[vm_page_array_size],
+ ("pmap_remove_pages: bad l3 %#jx",
+ (uintmax_t)tl3));
+
+ if (pmap_is_current(pmap) &&
+ pmap_l3_valid_cacheable(pmap_load(l3)))
+ cpu_dcache_wb_range(pv->pv_va, L3_SIZE);
+ pmap_load_clear(l3);
+ PTE_SYNC(l3);
+
+ /*
+ * Update the vm_page_t clean/reference bits.
+ */
+ if ((tl3 & ATTR_AP_RW_BIT) ==
+ ATTR_AP(ATTR_AP_RW))
+ vm_page_dirty(m);
+
+ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, m);
+
+ /* Mark free */
+ pc->pc_map[field] |= bitmask;
+
+ pmap_resident_count_dec(pmap, 1);
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+
+ pmap_unuse_l3(pmap, pv->pv_va, ptepde, &free);
+ freed++;
+ }
+ }
+ PV_STAT(atomic_add_long(&pv_entry_frees, freed));
+ PV_STAT(atomic_add_int(&pv_entry_spare, freed));
+ PV_STAT(atomic_subtract_long(&pv_entry_count, freed));
+ if (allfree) {
+ TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
+ free_pv_chunk(pc);
+ }
+ }
+ pmap_invalidate_all(pmap);
+ if (lock != NULL)
+ rw_wunlock(lock);
+ rw_runlock(&pvh_global_lock);
+ PMAP_UNLOCK(pmap);
+ pmap_free_zero_pages(&free);
+}
+
+/*
+ * This is used to check if a page has been accessed or modified. As we
+ * don't have a bit to see if it has been modified we have to assume it
+ * has been if the page is read/write.
+ */
+static boolean_t
+pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified)
+{
+ struct rwlock *lock;
+ pv_entry_t pv;
+ pt_entry_t *l3, mask, value;
+ pmap_t pmap;
+ int md_gen;
+ boolean_t rv;
+
+ rv = FALSE;
+ rw_rlock(&pvh_global_lock);
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+ rw_rlock(lock);
+restart:
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ rw_runlock(lock);
+ PMAP_LOCK(pmap);
+ rw_rlock(lock);
+ if (md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto restart;
+ }
+ }
+ l3 = pmap_l3(pmap, pv->pv_va);
+ mask = 0;
+ value = 0;
+ if (modified) {
+ mask |= ATTR_AP_RW_BIT;
+ value |= ATTR_AP(ATTR_AP_RW);
+ }
+ if (accessed) {
+ mask |= ATTR_AF | ATTR_DESCR_MASK;
+ value |= ATTR_AF | L3_PAGE;
+ }
+ rv = (pmap_load(l3) & mask) == value;
+ PMAP_UNLOCK(pmap);
+ if (rv)
+ goto out;
+ }
+out:
+ rw_runlock(lock);
+ rw_runlock(&pvh_global_lock);
+ return (rv);
+}
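
The mask/value pair built in pmap_page_test_mappings() is a compact multi-field test: each requested check contributes the bits it cares about to mask and their required settings to value, and a single AND-and-compare answers both questions. As the comment notes, "modified" reduces to "mapped read/write" because there is no hardware dirty bit. A standalone sketch of the idiom, with hypothetical field encodings:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field encodings, for the sketch only. */
#define F_TYPE_MASK	0x3UL
#define F_TYPE_PAGE	0x3UL
#define F_RO_BIT	(1UL << 7)	/* set = read-only, clear = RW */
#define F_AF		(1UL << 10)	/* access flag */

static bool
test_mappings(uint64_t pte, bool accessed, bool modified)
{
	uint64_t mask = 0, value = 0;

	if (modified) {		/* "modified" == currently mapped RW */
		mask |= F_RO_BIT;
		value |= 0;	/* RW means the read-only bit is clear */
	}
	if (accessed) {		/* "accessed" == a valid page with AF set */
		mask |= F_AF | F_TYPE_MASK;
		value |= F_AF | F_TYPE_PAGE;
	}
	return ((pte & mask) == value);
}

int
main(void)
{
	uint64_t pte = F_AF | F_TYPE_PAGE;	/* an accessed, RW page */

	/* Prints "1 1": the page tests both referenced and modified. */
	printf("%d %d\n", test_mappings(pte, true, false),
	    test_mappings(pte, false, true));
	return (0);
}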
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page was modified
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_modified(vm_page_t m)
+{
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_is_modified: page %p is not managed", m));
+
+ /*
+ * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+ * concurrently set while the object is locked. Thus, if PGA_WRITEABLE
+ * is clear, no PTEs can have PG_M set.
+ */
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
+ if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+ return (FALSE);
+ return (pmap_page_test_mappings(m, FALSE, TRUE));
+}
+
+/*
+ * pmap_is_prefaultable:
+ *
+ * Return whether or not the specified virtual address is eligible
+ * for prefault.
+ */
+boolean_t
+pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
+{
+ pt_entry_t *l3;
+ boolean_t rv;
+
+ rv = FALSE;
+ PMAP_LOCK(pmap);
+ l3 = pmap_l3(pmap, addr);
+ if (l3 != NULL && *l3 != 0) {
+ rv = TRUE;
+ }
+ PMAP_UNLOCK(pmap);
+ return (rv);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page was referenced
+ * in any physical maps.
+ */
+boolean_t
+pmap_is_referenced(vm_page_t m)
+{
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_is_referenced: page %p is not managed", m));
+ return (pmap_page_test_mappings(m, TRUE, FALSE));
+}
+
+/*
+ * Clear the write and modified bits in each of the given page's mappings.
+ */
+void
+pmap_remove_write(vm_page_t m)
+{
+ pmap_t pmap;
+ struct rwlock *lock;
+ pv_entry_t pv;
+ pt_entry_t *l3, oldl3;
+ int md_gen;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_remove_write: page %p is not managed", m));
+
+ /*
+ * If the page is not exclusive busied, then PGA_WRITEABLE cannot be
+ * set by another thread while the object is locked. Thus,
+ * if PGA_WRITEABLE is clear, no page table entries need updating.
+ */
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
+ if (!vm_page_xbusied(m) && (m->aflags & PGA_WRITEABLE) == 0)
+ return;
+ rw_rlock(&pvh_global_lock);
+ lock = VM_PAGE_TO_PV_LIST_LOCK(m);
+retry_pv_loop:
+ rw_wlock(lock);
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ rw_wunlock(lock);
+ goto retry_pv_loop;
+ }
+ }
+ l3 = pmap_l3(pmap, pv->pv_va);
+retry:
+ oldl3 = *l3;
+ if ((oldl3 & ATTR_AP_RW_BIT) == ATTR_AP(ATTR_AP_RW)) {
+ if (!atomic_cmpset_long(l3, oldl3,
+ oldl3 | ATTR_AP(ATTR_AP_RO)))
+ goto retry;
+ if ((oldl3 & ATTR_AF) != 0)
+ vm_page_dirty(m);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ }
+ PMAP_UNLOCK(pmap);
+ }
+ rw_wunlock(lock);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
+ rw_runlock(&pvh_global_lock);
+}
+
+static __inline boolean_t
+safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte)
+{
+
+ return (FALSE);
+}
+
+#define PMAP_TS_REFERENCED_MAX 5
+
+/*
+ * pmap_ts_referenced:
+ *
+ * Return a count of reference bits for a page, clearing those bits.
+ * It is not necessary for every reference bit to be cleared, but it
+ * is necessary that 0 only be returned when there are truly no
+ * reference bits set.
+ *
+ * XXX: The exact number of bits to check and clear is a matter that
+ * should be tested and standardized at some point in the future for
+ * optimal aging of shared pages.
+ */
+int
+pmap_ts_referenced(vm_page_t m)
+{
+ pv_entry_t pv, pvf;
+ pmap_t pmap;
+ struct rwlock *lock;
+ pd_entry_t *l2;
+ pt_entry_t *l3;
+ vm_paddr_t pa;
+ int cleared, md_gen, not_cleared;
+ struct spglist free;
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_ts_referenced: page %p is not managed", m));
+ SLIST_INIT(&free);
+ cleared = 0;
+ pa = VM_PAGE_TO_PHYS(m);
+ lock = PHYS_TO_PV_LIST_LOCK(pa);
+ rw_rlock(&pvh_global_lock);
+ rw_wlock(lock);
+retry:
+ not_cleared = 0;
+ if ((pvf = TAILQ_FIRST(&m->md.pv_list)) == NULL)
+ goto out;
+ pv = pvf;
+ do {
+ if (pvf == NULL)
+ pvf = pv;
+ pmap = PV_PMAP(pv);
+ if (!PMAP_TRYLOCK(pmap)) {
+ md_gen = m->md.pv_gen;
+ rw_wunlock(lock);
+ PMAP_LOCK(pmap);
+ rw_wlock(lock);
+ if (md_gen != m->md.pv_gen) {
+ PMAP_UNLOCK(pmap);
+ goto retry;
+ }
+ }
+ l2 = pmap_l2(pmap, pv->pv_va);
+ KASSERT((*l2 & ATTR_DESCR_MASK) == L2_TABLE,
+ ("pmap_ts_referenced: found an invalid l2 table"));
+ l3 = pmap_l2_to_l3(l2, pv->pv_va);
+ if ((*l3 & ATTR_AF) != 0) {
+ if (safe_to_clear_referenced(pmap, *l3)) {
+ /*
+ * TODO: We don't handle the access flag
+ * at all. We need to be able to set it in
+ * the exception handler.
+ */
+ panic("TODO: safe_to_clear_referenced\n");
+ } else if ((*l3 & ATTR_SW_WIRED) == 0) {
+ /*
+ * Wired pages cannot be paged out so
+ * doing accessed bit emulation for
+ * them is wasted effort. We do the
+ * hard work for unwired pages only.
+ */
+ pmap_remove_l3(pmap, l3, pv->pv_va,
+ *l2, &free, &lock);
+ pmap_invalidate_page(pmap, pv->pv_va);
+ cleared++;
+ if (pvf == pv)
+ pvf = NULL;
+ pv = NULL;
+ KASSERT(lock == VM_PAGE_TO_PV_LIST_LOCK(m),
+ ("inconsistent pv lock %p %p for page %p",
+ lock, VM_PAGE_TO_PV_LIST_LOCK(m), m));
+ } else
+ not_cleared++;
+ }
+ PMAP_UNLOCK(pmap);
+ /* Rotate the PV list if it has more than one entry. */
+ if (pv != NULL && TAILQ_NEXT(pv, pv_next) != NULL) {
+ TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+ TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+ m->md.pv_gen++;
+ }
+ } while ((pv = TAILQ_FIRST(&m->md.pv_list)) != pvf && cleared +
+ not_cleared < PMAP_TS_REFERENCED_MAX);
+out:
+ rw_wunlock(lock);
+ rw_runlock(&pvh_global_lock);
+ pmap_free_zero_pages(&free);
+ return (cleared + not_cleared);
+}
+
+/*
+ * Apply the given advice to the specified range of addresses within the
+ * given pmap. Depending on the advice, clear the referenced and/or
+ * modified flags in each mapping and set the mapped page's dirty field.
+ */
+void
+pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice)
+{
+}
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+void
+pmap_clear_modify(vm_page_t m)
+{
+
+ KASSERT((m->oflags & VPO_UNMANAGED) == 0,
+ ("pmap_clear_modify: page %p is not managed", m));
+ VM_OBJECT_ASSERT_WLOCKED(m->object);
+ KASSERT(!vm_page_xbusied(m),
+ ("pmap_clear_modify: page %p is exclusive busied", m));
+
+ /*
+ * If the page is not PGA_WRITEABLE, then no PTEs can have PG_M set.
+ * If the object containing the page is locked and the page is not
+ * exclusive busied, then PGA_WRITEABLE cannot be concurrently set.
+ */
+ if ((m->aflags & PGA_WRITEABLE) == 0)
+ return;
+ panic("pmap_clear_modify");
+}
+
+/*
+ * Sets the memory attribute for the specified page.
+ */
+void
+pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
+{
+
+ panic("pmap_page_set_memattr");
+}
+
+/*
+ * perform the pmap work for mincore
+ */
+int
+pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
+{
+
+ panic("pmap_mincore");
+}
+
+void
+pmap_activate(struct thread *td)
+{
+ pmap_t pmap;
+
+ critical_enter();
+ pmap = vmspace_pmap(td->td_proc->p_vmspace);
+ td->td_pcb->pcb_l1addr = vtophys(pmap->pm_l1);
+ __asm __volatile("msr ttbr0_el1, %0" : : "r"(td->td_pcb->pcb_l1addr));
+ critical_exit();
+}
+
+void
+pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz)
+{
+
+ panic("pmap_sync_icache");
+}
+
+/*
+ * Increase the starting virtual address of the given mapping if a
+ * different alignment might result in more superpage mappings.
+ */
+void
+pmap_align_superpage(vm_object_t object, vm_ooffset_t offset,
+ vm_offset_t *addr, vm_size_t size)
+{
+}
+
+/**
+ * Get the kernel virtual address of a set of physical pages. If there are
+ * physical addresses not covered by the DMAP perform a transient mapping
+ * that will be removed when calling pmap_unmap_io_transient.
+ *
+ * \param page The pages the caller wishes to obtain the virtual
+ * address on the kernel memory map.
+ * \param vaddr On return contains the kernel virtual memory address
+ * of the pages passed in the page parameter.
+ * \param count Number of pages passed in.
+ * \param can_fault TRUE if the thread using the mapped pages can take
+ * page faults, FALSE otherwise.
+ *
+ * \returns TRUE if the caller must call pmap_unmap_io_transient when
+ * finished or FALSE otherwise.
+ *
+ */
+boolean_t
+pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
+ boolean_t can_fault)
+{
+ vm_paddr_t paddr;
+ boolean_t needs_mapping;
+ int error, i;
+
+ /*
+ * Allocate any KVA space that we need, this is done in a separate
+ * loop to prevent calling vmem_alloc while pinned.
+ */
+ needs_mapping = FALSE;
+ for (i = 0; i < count; i++) {
+ paddr = VM_PAGE_TO_PHYS(page[i]);
+ if (__predict_false(paddr >= DMAP_MAX_PHYSADDR)) {
+ error = vmem_alloc(kernel_arena, PAGE_SIZE,
+ M_BESTFIT | M_WAITOK, &vaddr[i]);
+ KASSERT(error == 0, ("vmem_alloc failed: %d", error));
+ needs_mapping = TRUE;
+ } else {
+ vaddr[i] = PHYS_TO_DMAP(paddr);
+ }
+ }
+
+ /* Exit early if everything is covered by the DMAP */
+ if (!needs_mapping)
+ return (FALSE);
+
+ /*
+ * NB: The sequence of updating a page table followed by accesses
+ * to the corresponding pages used in the !DMAP case is subject to
+ * the situation described in the "AMD64 Architecture Programmer's
+ * Manual Volume 2: System Programming" rev. 3.23, "7.3.1 Special
+ * Coherency Considerations". Therefore, issuing the INVLPG right
+ * after modifying the PTE bits is crucial.
+ */
+ if (!can_fault)
+ sched_pin();
+ for (i = 0; i < count; i++) {
+ paddr = VM_PAGE_TO_PHYS(page[i]);
+ if (paddr >= DMAP_MAX_PHYSADDR) {
+ panic(
+ "pmap_map_io_transient: TODO: Map out of DMAP data");
+ }
+ }
+
+ return (needs_mapping);
+}
+
+void
+pmap_unmap_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
+ boolean_t can_fault)
+{
+ vm_paddr_t paddr;
+ int i;
+
+ if (!can_fault)
+ sched_unpin();
+ for (i = 0; i < count; i++) {
+ paddr = VM_PAGE_TO_PHYS(page[i]);
+ if (paddr >= DMAP_MAX_PHYSADDR) {
+ panic("pmap_unmap_io_transient: TODO: Unmap data");
+ }
+ }
+}
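
A sketch of the calling convention the comment above documents; peek_page is a hypothetical kernel-context caller, not code from the patch:

/*
 * Bracket the access with the map/unmap pair; the boolean_t result of
 * the map call decides whether the unmap half is needed at all.
 */
static void
peek_page(vm_page_t m)
{
	vm_page_t pages[1];
	vm_offset_t vaddr[1];
	boolean_t mapped;

	pages[0] = m;
	mapped = pmap_map_io_transient(pages, vaddr, 1, FALSE);
	/* ... access the page through (void *)vaddr[0] here ... */
	if (mapped)
		pmap_unmap_io_transient(pages, vaddr, 1, FALSE);
}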
Index: sys/arm64/arm64/stack_machdep.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/stack_machdep.c
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 2005 Antoine Brodin
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/proc.h>
+#include <sys/stack.h>
+
+#include <machine/vmparam.h>
+#include <machine/pcb.h>
+#include <machine/stack.h>
+
+void
+stack_save_td(struct stack *st, struct thread *td)
+{
+ panic("stack_save_td");
+}
+
+void
+stack_save(struct stack *st)
+{
+ panic("stack_save");
+}
Index: sys/arm64/arm64/support.S
===================================================================
--- /dev/null
+++ sys/arm64/arm64/support.S
@@ -0,0 +1,255 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014-2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <machine/asm.h>
+__FBSDID("$FreeBSD$");
+
+#include <machine/setjmp.h>
+
+#include "assym.s"
+
+/*
+ * One of the fu* or su* functions failed, return -1.
+ */
+ENTRY(fsu_fault)
+ SET_FAULT_HANDLER(xzr, x1) /* Reset the handler function */
+ mov x0, #-1
+ ret
+END(fsu_fault)
+
+/*
+ * int casueword32(volatile uint32_t *, uint32_t, uint32_t *, uint32_t)
+ */
+ENTRY(casueword32)
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x4) /* And set it */
+1: ldxr w4, [x0] /* Load-exclusive the data */
+ cmp w4, w1 /* Compare */
+ b.ne 2f /* Not equal, exit */
+ stxr w5, w3, [x0] /* Store the new data */
+ cbnz w5, 1b /* Retry on failure */
+2: SET_FAULT_HANDLER(xzr, x5) /* Reset the fault handler */
+ str w4, [x2] /* Store the read data */
+ mov x0, #0 /* Success */
+ ret /* Return */
+END(casueword32)
+
+/*
+ * int casueword(volatile u_long *, u_long, u_long *, u_long)
+ */
+ENTRY(casueword)
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x4) /* And set it */
+1: ldxr x4, [x0] /* Load-exclusive the data */
+ cmp x4, x1 /* Compare */
+ b.ne 2f /* Not equal, exit */
+ stxr w5, x3, [x0] /* Store the new data */
+ cbnz w5, 1b /* Retry on failure */
+2: SET_FAULT_HANDLER(xzr, x5) /* Reset the fault handler */
+ str x4, [x2] /* Store the read data */
+ mov x0, #0 /* Success */
+ ret /* Return */
+END(casueword)
+
+/*
+ * int fubyte(volatile const void *)
+ */
+ENTRY(fubyte)
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x1) /* And set it */
+ ldrb w0, [x0] /* Try loading the data */
+ SET_FAULT_HANDLER(xzr, x1) /* Reset the fault handler */
+ ret /* Return */
+END(fubyte)
+
+/*
+ * int fuword16(volatile const void *)
+ */
+ENTRY(fuword16)
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x1) /* And set it */
+ ldrh w0, [x0] /* Try loading the data */
+ SET_FAULT_HANDLER(xzr, x1) /* Reset the fault handler */
+ ret /* Return */
+END(fuword16)
+
+/*
+ * int fueword32(volatile const void *, int32_t *)
+ */
+ENTRY(fueword32)
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ ldr w0, [x0] /* Try loading the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ str w0, [x1] /* Save the data in kernel space */
+ mov w0, #0 /* Success */
+ ret /* Return */
+END(fueword32)
+
+/*
+ * int fueword(volatile const void *, int64_t *)
+ * int fueword64(volatile const void *, int64_t *)
+ */
+ENTRY(fueword)
+EENTRY(fueword64)
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ ldr x0, [x0] /* Try loading the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ str x0, [x1] /* Save the data in kernel space */
+ mov x0, #0 /* Success */
+ ret /* Return */
+EEND(fueword64)
+END(fueword)
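
For context, the fu*/su* routines above are the kernel's guarded user-memory accessors: the fault handler installed on entry turns a bad user pointer into a -1 return instead of a panic. A sketch of the C-side calling convention (read_user_long is a hypothetical kernel-context caller, not code from the patch):

/*
 * fueword() reserves -1 for faults and delivers the fetched value
 * through its second argument, so data and error never collide.
 */
static int
read_user_long(const void *uaddr, long *out)
{
	int64_t val;

	if (fueword(uaddr, &val) == -1)
		return (EFAULT);	/* the fault handler fired */
	*out = (long)val;
	return (0);
}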
+
+/*
+ * int subyte(volatile void *, int)
+ */
+ENTRY(subyte)
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ strb w1, [x0] /* Try storing the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ mov x0, #0 /* Success */
+ ret /* Return */
+END(subyte)
+
+/*
+ * int suword16(volatile void *, int)
+ */
+ENTRY(suword16)
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ strh w1, [x0] /* Try storing the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ mov x0, #0 /* Success */
+ ret /* Return */
+END(suword16)
+
+/*
+ * int suword32(volatile void *, int)
+ */
+ENTRY(suword32)
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ str w1, [x0] /* Try storing the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ mov x0, #0 /* Success */
+ ret /* Return */
+END(suword32)
+
+/*
+ * int suword(volatile void *, long)
+ */
+ENTRY(suword)
+EENTRY(suword64)
+ adr x6, fsu_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ str x1, [x0] /* Try storing the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ mov x0, #0 /* Success */
+ ret /* Return */
+EEND(suword64)
+END(suword)
+
+/*
+ * fuswintr and suswintr are just like fusword and susword except that if
+ * the page is not in memory or would cause a trap, then we return an error.
+ * The important thing is to prevent sleep() and switch().
+ */
+
+/*
+ * Special handler so the trap code knows not to sleep.
+ */
+ENTRY(fsu_intr_fault)
+ SET_FAULT_HANDLER(xzr, x1) /* Reset the handler function */
+ mov x0, #-1
+ ret
+END(fsu_intr_fault)
+
+/*
+ * int fuswintr(void *)
+ */
+ENTRY(fuswintr)
+ adr x6, fsu_intr_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x1) /* And set it */
+ ldr w0, [x0] /* Try loading the data */
+ SET_FAULT_HANDLER(xzr, x1) /* Reset the fault handler */
+ ret /* Return */
+END(fuswintr)
+
+/*
+ * int suswintr(void *base, int word)
+ */
+ENTRY(suswintr)
+ adr x6, fsu_intr_fault /* Load the fault handler */
+ SET_FAULT_HANDLER(x6, x2) /* And set it */
+ str w1, [x0] /* Try storing the data */
+ SET_FAULT_HANDLER(xzr, x2) /* Reset the fault handler */
+ mov x0, #0 /* Success */
+ ret /* Return */
+END(suswintr)
+
+ENTRY(setjmp)
+ /* Store the stack pointer */
+ mov x8, sp
+ str x8, [x0]
+
+ /* Store the general purpose registers and lr */
+ stp x19, x20, [x0], #16
+ stp x21, x22, [x0], #16
+ stp x23, x24, [x0], #16
+ stp x25, x26, [x0], #16
+ stp x27, x28, [x0], #16
+ stp x29, lr, [x0], #16
+
+ /* Return value */
+ mov x0, #0
+ ret
+END(setjmp)
+
+ENTRY(longjmp)
+ /* Restore the stack pointer */
+ ldr x8, [x0], #8
+ mov sp, x8
+
+ /* Restore the general purpose registers and lr */
+ ldp x19, x20, [x0], #16
+ ldp x21, x22, [x0], #16
+ ldp x23, x24, [x0], #16
+ ldp x25, x26, [x0], #16
+ ldp x27, x28, [x0], #16
+ ldp x29, lr, [x0], #16
+
+ /* Load the return value */
+ mov x0, x1
+ ret
+END(longjmp)
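
The kernel setjmp()/longjmp() above implement the usual contract: setjmp() returns 0 when called directly, and longjmp() resumes at the saved point with x0 taken from its second argument (the final `mov x0, x1`). A minimal userland analogue of the control flow, using the standard C library routines:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;

static void
fail(void)
{
	longjmp(env, 42);	/* unwind; setjmp() now "returns" 42 */
}

int
main(void)
{
	int rv = setjmp(env);	/* 0 on the initial, direct return */

	if (rv == 0)
		fail();		/* never returns */
	printf("recovered, rv = %d\n", rv);	/* prints 42 */
	return (0);
}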
Index: sys/arm64/arm64/swtch.S
===================================================================
--- /dev/null
+++ sys/arm64/arm64/swtch.S
@@ -0,0 +1,255 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * Copyright (c) 2014 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include "assym.s"
+
+#include <machine/asm.h>
+
+__FBSDID("$FreeBSD$");
+
+/*
+ * void cpu_throw(struct thread *old, struct thread *new)
+ */
+ENTRY(cpu_throw)
+#ifdef SMP
+#error cpu_throw needs to be ported to support SMP
+#endif
+
+#ifdef VFP
+ /* Backup the new thread pointer around a call to C code */
+ mov x19, x1
+ bl vfp_discard
+ mov x1, x19
+#endif
+
+ /* Store the new curthread */
+ str x1, [x18, #PC_CURTHREAD]
+ /* And the new pcb */
+ ldr x4, [x1, #TD_PCB]
+ str x4, [x18, #PC_CURPCB]
+
+ /*
+ * TODO: We may need to flush the cache here.
+ */
+
+ /* Switch to the new pmap */
+ ldr x5, [x4, #PCB_L1ADDR]
+ msr ttbr0_el1, x5
+ isb
+
+ /* Invalidate the TLB */
+ dsb sy
+ tlbi vmalle1is
+ dsb sy
+ isb
+
+ /* Restore the registers */
+ ldp x5, x6, [x4, #PCB_SP]
+ mov sp, x5
+ msr tpidr_el0, x6
+ ldp x8, x9, [x4, #PCB_REGS + 8 * 8]
+ ldp x10, x11, [x4, #PCB_REGS + 10 * 8]
+ ldp x12, x13, [x4, #PCB_REGS + 12 * 8]
+ ldp x14, x15, [x4, #PCB_REGS + 14 * 8]
+ ldp x16, x17, [x4, #PCB_REGS + 16 * 8]
+ ldr x19, [x4, #PCB_REGS + 19 * 8]
+ ldp x20, x21, [x4, #PCB_REGS + 20 * 8]
+ ldp x22, x23, [x4, #PCB_REGS + 22 * 8]
+ ldp x24, x25, [x4, #PCB_REGS + 24 * 8]
+ ldp x26, x27, [x4, #PCB_REGS + 26 * 8]
+ ldp x28, x29, [x4, #PCB_REGS + 28 * 8]
+ ldr x30, [x4, #PCB_REGS + 30 * 8]
+
+ ret
+END(cpu_throw)
+
+/*
+ * void cpu_switch(struct thread *old, struct thread *new, struct mtx *mtx)
+ *
+ * x0 = old
+ * x1 = new
+ * x2 = mtx
+ * x3 to x7, x16 and x17 are caller saved
+ */
+ENTRY(cpu_switch)
+#ifdef SMP
+#error cpu_switch needs to be ported to support SMP
+#endif
+
+ /* Store the new curthread */
+ str x1, [x18, #PC_CURTHREAD]
+ /* And the new pcb */
+ ldr x4, [x1, #TD_PCB]
+ str x4, [x18, #PC_CURPCB]
+
+ /*
+ * Save the old context.
+ */
+ ldr x4, [x0, #TD_PCB]
+
+ /* Store the callee-saved registers */
+ stp x8, x9, [x4, #PCB_REGS + 8 * 8]
+ stp x10, x11, [x4, #PCB_REGS + 10 * 8]
+ stp x12, x13, [x4, #PCB_REGS + 12 * 8]
+ stp x14, x15, [x4, #PCB_REGS + 14 * 8]
+ stp x16, x17, [x4, #PCB_REGS + 16 * 8]
+ stp x18, x19, [x4, #PCB_REGS + 18 * 8]
+ stp x20, x21, [x4, #PCB_REGS + 20 * 8]
+ stp x22, x23, [x4, #PCB_REGS + 22 * 8]
+ stp x24, x25, [x4, #PCB_REGS + 24 * 8]
+ stp x26, x27, [x4, #PCB_REGS + 26 * 8]
+ stp x28, x29, [x4, #PCB_REGS + 28 * 8]
+ str x30, [x4, #PCB_REGS + 30 * 8]
+ /* And the old stack pointer */
+ mov x5, sp
+ mrs x6, tpidr_el0
+ stp x5, x6, [x4, #PCB_SP]
+
+#ifdef VFP
+ mov x19, x0
+ mov x20, x1
+ mov x21, x2
+ bl vfp_save_state
+ mov x2, x21
+ mov x1, x20
+ mov x0, x19
+#endif
+
+ /*
+ * Restore the saved context.
+ */
+ ldr x4, [x1, #TD_PCB]
+
+ /*
+ * TODO: We may need to flush the cache here if switching
+ * to a user process.
+ */
+
+ /* Switch to the new pmap */
+ ldr x5, [x4, #PCB_L1ADDR]
+ msr ttbr0_el1, x5
+ isb
+
+ /* Invalidate the TLB */
+ dsb sy
+ tlbi vmalle1is
+ dsb sy
+ isb
+
+ /* Release the old thread */
+ str x2, [x0, #TD_LOCK]
+#if defined(SCHED_ULE) && defined(SMP)
+#error We may need to wait for the lock here
+#endif
+
+ /* Restore the registers */
+ ldp x5, x6, [x4, #PCB_SP]
+ mov sp, x5
+ msr tpidr_el0, x6
+ ldp x8, x9, [x4, #PCB_REGS + 8 * 8]
+ ldp x10, x11, [x4, #PCB_REGS + 10 * 8]
+ ldp x12, x13, [x4, #PCB_REGS + 12 * 8]
+ ldp x14, x15, [x4, #PCB_REGS + 14 * 8]
+ ldp x16, x17, [x4, #PCB_REGS + 16 * 8]
+ ldr x19, [x4, #PCB_REGS + 19 * 8]
+ ldp x20, x21, [x4, #PCB_REGS + 20 * 8]
+ ldp x22, x23, [x4, #PCB_REGS + 22 * 8]
+ ldp x24, x25, [x4, #PCB_REGS + 24 * 8]
+ ldp x26, x27, [x4, #PCB_REGS + 26 * 8]
+ ldp x28, x29, [x4, #PCB_REGS + 28 * 8]
+ ldr x30, [x4, #PCB_REGS + 30 * 8]
+
+ str xzr, [x4, #PCB_REGS + 18 * 8]
+ ret
+.Lcpu_switch_panic_str:
+ .asciz "cpu_switch: %p\0"
+END(cpu_switch)
+
+ENTRY(fork_trampoline)
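+ /*
+ * cpu_fork() and cpu_set_fork_handler() place the child's start function
+ * in pcb_x[8] and its argument in pcb_x[9]; cpu_switch() restored them
+ * into x8 and x9 before returning here.
+ */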
+ mov x0, x8
+ mov x1, x9
+ mov x2, sp
+ mov fp, #0 /* Stack traceback stops here. */
+ bl _C_LABEL(fork_exit)
+
+ /* Restore sp and lr */
+ ldp x0, x1, [sp]
+ msr sp_el0, x0
+ mov lr, x1
+
+ /* Restore the registers other than x0 and x1 */
+ ldp x2, x3, [sp, #TF_X + 2 * 8]
+ ldp x4, x5, [sp, #TF_X + 4 * 8]
+ ldp x6, x7, [sp, #TF_X + 6 * 8]
+ ldp x8, x9, [sp, #TF_X + 8 * 8]
+ ldp x10, x11, [sp, #TF_X + 10 * 8]
+ ldp x12, x13, [sp, #TF_X + 12 * 8]
+ ldp x14, x15, [sp, #TF_X + 14 * 8]
+ ldp x16, x17, [sp, #TF_X + 16 * 8]
+ ldr x19, [sp, #TF_X + 19 * 8]
+ ldp x20, x21, [sp, #TF_X + 20 * 8]
+ ldp x22, x23, [sp, #TF_X + 22 * 8]
+ ldp x24, x25, [sp, #TF_X + 24 * 8]
+ ldp x26, x27, [sp, #TF_X + 26 * 8]
+ ldp x28, x29, [sp, #TF_X + 28 * 8]
+ /* Skip x30 as it was restored above as lr */
+
+ /*
+ * Disable interrupts to avoid
+ * overwriting spsr_el1 by an IRQ exception.
+ */
+ msr daifset, #2
+
+ /* Restore elr and spsr */
+ ldp x0, x1, [sp, #16]
+ msr elr_el1, x0
+ msr spsr_el1, x1
+
+ /* Finally x0 and x1 */
+ ldp x0, x1, [sp, #TF_X + 0 * 8]
+ ldr x18, [sp, #TF_X + 18 * 8]
+
+ /*
+ * No need for interrupts reenabling since PSR
+ * will be set to the desired value anyway.
+ */
+ eret
+
+END(fork_trampoline)
+
+ENTRY(savectx)
+ adr x0, .Lsavectx_panic_str
+ bl panic
+ ret
+.Lsavectx_panic_str:
+ .asciz "savectx"
+END(savectx)
+
Index: sys/arm64/arm64/sys_machdep.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/sys_machdep.c
@@ -0,0 +1,57 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/capability.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/sysproto.h>
+
+#include <machine/md_var.h>
+#include <machine/sysarch.h>
+
+#ifndef _SYS_SYSPROTO_H_
+struct sysarch_args {
+ int op;
+ char *parms;
+};
+#endif
+
+int
+sysarch(struct thread *td, struct sysarch_args *uap)
+{
+
+ return (ENOTSUP);
+}
+
Index: sys/arm64/arm64/trap.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/trap.c
@@ -0,0 +1,311 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/pioctl.h>
+#include <sys/proc.h>
+#include <sys/ptrace.h>
+#include <sys/syscall.h>
+#include <sys/sysent.h>
+#ifdef KDB
+#include <sys/kdb.h>
+#endif
+
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_map.h>
+#include <vm/vm_extern.h>
+
+#include <machine/frame.h>
+#include <machine/pcb.h>
+#include <machine/pcpu.h>
+#include <machine/vmparam.h>
+
+#ifdef VFP
+#include <machine/vfp.h>
+#endif
+
+#ifdef KDB
+#include <machine/db_machdep.h>
+#endif
+
+#ifdef DDB
+#include <ddb/db_output.h>
+#endif
+
+extern uintptr_t fsu_intr_fault;
+
+/* Called from exception.S */
+void do_el1h_sync(struct trapframe *);
+void do_el0_sync(struct trapframe *);
+void do_el0_error(struct trapframe *);
+
+static __inline void
+call_trapsignal(struct thread *td, int sig, u_long code)
+{
+ ksiginfo_t ksi;
+
+ ksiginfo_init_trap(&ksi);
+ ksi.ksi_signo = sig;
+ ksi.ksi_code = (int)code;
+ trapsignal(td, &ksi);
+}
+
+int
+cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
+{
+ struct proc *p;
+ register_t *ap;
+ int nap;
+
+ nap = 8;
+ p = td->td_proc;
+ ap = td->td_frame->tf_x;
+
+ sa->code = td->td_frame->tf_x[8];
+
+ if (sa->code == SYS_syscall || sa->code == SYS___syscall) {
+ sa->code = *ap++;
+ nap--;
+ }
+
+ if (p->p_sysent->sv_mask)
+ sa->code &= p->p_sysent->sv_mask;
+ if (sa->code >= p->p_sysent->sv_size)
+ sa->callp = &p->p_sysent->sv_table[0];
+ else
+ sa->callp = &p->p_sysent->sv_table[sa->code];
+
+ sa->narg = sa->callp->sy_narg;
+ memcpy(sa->args, ap, nap * sizeof(register_t));
+ if (sa->narg > nap)
+ panic("TODO: Could we have more then 8 args?");
+
+ td->td_retval[0] = 0;
+ td->td_retval[1] = 0;
+
+ return (0);
+}
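+
+/*
+ * Illustrative example of the convention above: write(fd, buf, len) arrives
+ * with SYS_write in x8 and its arguments in x0-x2, while the indirect form
+ * syscall(SYS_write, fd, buf, len) arrives with SYS_syscall in x8, so the
+ * real code is taken from x0 and the remaining arguments shift down by one
+ * register.
+ */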
+
+#include "../../kern/subr_syscall.c"
+
+static void
+svc_handler(struct trapframe *frame)
+{
+ struct syscall_args sa;
+ struct thread *td;
+ int error;
+
+ td = curthread;
+ td->td_frame = frame;
+
+ error = syscallenter(td, &sa);
+ syscallret(td, error, &sa);
+}
+
+static void
+data_abort(struct trapframe *frame, uint64_t esr, int lower)
+{
+ struct vm_map *map;
+ struct thread *td;
+ struct proc *p;
+ struct pcb *pcb;
+ vm_prot_t ftype;
+ vm_offset_t va;
+ uint64_t far;
+ int error, sig;
+
+ td = curthread;
+ pcb = td->td_pcb;
+
+ /*
+ * Special case for fuswintr and suswintr. These can't sleep so
+ * handle them early on in the trap handler.
+ */
+ if (__predict_false(pcb->pcb_onfault == fsu_intr_fault)) {
+ frame->tf_elr = pcb->pcb_onfault;
+ return;
+ }
+
+ far = READ_SPECIALREG(far_el1);
+ p = td->td_proc;
+
+ if (lower)
+ map = &td->td_proc->p_vmspace->vm_map;
+ else {
+ /* The top bit tells us which range to use */
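+ /*
+ * (The kernel runs in the upper, TTBR1-translated half of the
+ * address space, so a set bit 63 indicates a kernel address.)
+ */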
+ if ((far >> 63) == 1)
+ map = kernel_map;
+ else
+ map = &td->td_proc->p_vmspace->vm_map;
+ }
+
+ va = trunc_page(far);
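+ /* Bit 6 of the ESR ISS is WnR: set for a write, clear for a read. */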
+ ftype = ((esr >> 6) & 1) ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
+
+ if (map != kernel_map) {
+ /*
+ * Keep swapout from messing with us during this
+ * critical time.
+ */
+ PROC_LOCK(p);
+ ++p->p_lock;
+ PROC_UNLOCK(p);
+
+ /* Fault in the user page: */
+ error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
+
+ PROC_LOCK(p);
+ --p->p_lock;
+ PROC_UNLOCK(p);
+ } else {
+ /*
+ * Don't have to worry about process locking or stacks in the
+ * kernel.
+ */
+ error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
+ }
+
+ if (error != 0) {
+ if (lower) {
+ if (error == ENOMEM)
+ sig = SIGKILL;
+ else
+ sig = SIGSEGV;
+ call_trapsignal(td, sig, 0);
+ } else {
+ if (td->td_intr_nesting_level == 0 &&
+ pcb->pcb_onfault != 0) {
+ frame->tf_x[0] = error;
+ frame->tf_elr = pcb->pcb_onfault;
+ return;
+ }
+ panic("vm_fault failed: %lx", frame->tf_elr);
+ }
+ }
+
+ if (lower)
+ userret(td, frame);
+}
+
+void
+do_el1h_sync(struct trapframe *frame)
+{
+ uint32_t exception;
+ uint64_t esr;
+
+ /* Read the esr register to get the exception details */
+ esr = READ_SPECIALREG(esr_el1);
+ exception = ESR_ELx_EXCEPTION(esr);
+
+ /*
+ * Sanity check that we are in an exception we can handle. The IL bit
+ * is used to indicate the instruction length, except in a few
+ * exceptions described in the ARMv8 ARM.
+ *
+ * It is unclear in some cases if the bit is implementation defined.
+ * The Foundation Model and QEMU disagree on whether the IL bit should
+ * be set when we are in a data fault from the same EL and the ISV
+ * bit (bit 24) is also set.
+ */
+ KASSERT((esr & ESR_ELx_IL) == ESR_ELx_IL ||
+ (exception == EXCP_DATA_ABORT && ((esr & ISS_DATA_ISV) == 0)),
+ ("Invalid instruction length in exception"));
+
+ switch(exception) {
+ case EXCP_FP_SIMD:
+ case EXCP_TRAP_FP:
+ panic("VFP exception in the kernel");
+ case EXCP_DATA_ABORT:
+ data_abort(frame, esr, 0);
+ break;
+ case EXCP_BRK:
+ case EXCP_WATCHPT_EL1:
+ case EXCP_SOFTSTP_EL1:
+#ifdef KDB
+ kdb_trap(exception, 0, frame);
+#else
+ panic("No debugger in kernel.\n");
+#endif
+ break;
+ default:
+ panic("Unknown kernel exception %x esr_el1 %lx\n", exception,
+ esr);
+ }
+}
+
+void
+do_el0_sync(struct trapframe *frame)
+{
+ uint32_t exception;
+ uint64_t esr;
+
+ /* Check we have a sane environment when entering from userland */
+ KASSERT((uintptr_t)get_pcpu() >= VM_MIN_KERNEL_ADDRESS,
+ ("Invalid pcpu address from userland: %p (tpidr %lx)",
+ get_pcpu(), READ_SPECIALREG(tpidr_el1)));
+
+ esr = READ_SPECIALREG(esr_el1);
+ exception = ESR_ELx_EXCEPTION(esr);
+
+ switch(exception) {
+ case EXCP_FP_SIMD:
+ case EXCP_TRAP_FP:
+#ifdef VFP
+ vfp_restore_state();
+#else
+ panic("VFP exception in userland");
+#endif
+ break;
+ case EXCP_SVC:
+ svc_handler(frame);
+ break;
+ case EXCP_INSN_ABORT_L:
+ case EXCP_DATA_ABORT_L:
+ data_abort(frame, esr, 1);
+ break;
+ default:
+ panic("Unknown userland exception %x esr_el1 %lx\n", exception,
+ esr);
+ }
+}
+
+void
+do_el0_error(struct trapframe *frame)
+{
+
+ panic("do_el0_error");
+}
+
Index: sys/arm64/arm64/uio_machdep.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/uio_machdep.c
@@ -0,0 +1,134 @@
+/*-
+ * Copyright (c) 2004 Alan L. Cox <alc@cs.rice.edu>
+ * Copyright (c) 1982, 1986, 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ * (c) UNIX System Laboratories, Inc.
+ * All or some portions of this file are derived from material licensed
+ * to the University of California by American Telephone and Telegraph
+ * Co. or Unix System Laboratories, Inc. and are reproduced herein with
+ * the permission of UNIX System Laboratories, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)kern_subr.c 8.3 (Berkeley) 1/21/94
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+#include <sys/uio.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+
+#include <machine/vmparam.h>
+
+/*
+ * Implement uiomove(9) from physical memory using the direct map to
+ * avoid the creation and destruction of ephemeral mappings.
+ */
+int
+uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
+{
+ struct thread *td = curthread;
+ struct iovec *iov;
+ void *cp;
+ vm_offset_t page_offset, vaddr;
+ size_t cnt;
+ int error = 0;
+ int save = 0;
+ boolean_t mapped;
+
+ KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
+ ("uiomove_fromphys: mode"));
+ KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
+ ("uiomove_fromphys proc"));
+ save = td->td_pflags & TDP_DEADLKTREAT;
+ td->td_pflags |= TDP_DEADLKTREAT;
+ mapped = FALSE;
+ while (n > 0 && uio->uio_resid) {
+ iov = uio->uio_iov;
+ cnt = iov->iov_len;
+ if (cnt == 0) {
+ uio->uio_iov++;
+ uio->uio_iovcnt--;
+ continue;
+ }
+ if (cnt > n)
+ cnt = n;
+ page_offset = offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - page_offset);
+ if (uio->uio_segflg != UIO_NOCOPY) {
+ mapped = pmap_map_io_transient(
+ &ma[offset >> PAGE_SHIFT], &vaddr, 1, TRUE);
+ cp = (char *)vaddr + page_offset;
+ }
+ switch (uio->uio_segflg) {
+ case UIO_USERSPACE:
+ maybe_yield();
+ if (uio->uio_rw == UIO_READ)
+ error = copyout(cp, iov->iov_base, cnt);
+ else
+ error = copyin(iov->iov_base, cp, cnt);
+ if (error)
+ goto out;
+ break;
+ case UIO_SYSSPACE:
+ if (uio->uio_rw == UIO_READ)
+ bcopy(cp, iov->iov_base, cnt);
+ else
+ bcopy(iov->iov_base, cp, cnt);
+ break;
+ case UIO_NOCOPY:
+ break;
+ }
+ if (__predict_false(mapped)) {
+ pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT],
+ &vaddr, 1, TRUE);
+ mapped = FALSE;
+ }
+ iov->iov_base = (char *)iov->iov_base + cnt;
+ iov->iov_len -= cnt;
+ uio->uio_resid -= cnt;
+ uio->uio_offset += cnt;
+ offset += cnt;
+ n -= cnt;
+ }
+out:
+ if (__predict_false(mapped)) {
+ panic("TODO 3");
+ pmap_unmap_io_transient(&ma[offset >> PAGE_SHIFT], &vaddr, 1,
+ TRUE);
+ }
+ if (save == 0)
+ td->td_pflags &= ~TDP_DEADLKTREAT;
+ return (error);
+}
Index: sys/arm64/arm64/vfp.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/vfp.c
@@ -0,0 +1,194 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#ifdef VFP
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/pcpu.h>
+#include <sys/proc.h>
+
+#include <machine/armreg.h>
+#include <machine/pcb.h>
+#include <machine/vfp.h>
+
+/* Sanity check we can store all the VFP registers */
+CTASSERT(sizeof(((struct pcb *)0)->pcb_vfp) == 16 * 32);
+
+static void
+vfp_enable(void)
+{
+ uint32_t cpacr;
+
+ cpacr = READ_SPECIALREG(cpacr_el1);
+ cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_NONE;
+ WRITE_SPECIALREG(cpacr_el1, cpacr);
+ isb();
+}
+
+static void
+vfp_disable(void)
+{
+ uint32_t cpacr;
+
+ cpacr = READ_SPECIALREG(cpacr_el1);
+ cpacr = (cpacr & ~CPACR_FPEN_MASK) | CPACR_FPEN_TRAP_ALL1;
+ WRITE_SPECIALREG(cpacr_el1, cpacr);
+ isb();
+}
+
+/*
+ * Called when the thread is dying. If the thread was the last to use the
+ * VFP unit, mark it as unused to tell the kernel the fp state is unowned.
+ * Ensure the VFP unit is off so we get an exception on the next access.
+ */
+void
+vfp_discard(struct thread *td)
+{
+
+ if (PCPU_GET(fpcurthread) == td)
+ PCPU_SET(fpcurthread, NULL);
+
+ vfp_disable();
+}
+
+void
+vfp_save_state(struct thread *td)
+{
+ __int128_t *vfp_state;
+ uint64_t fpcr, fpsr;
+ uint32_t cpacr;
+
+ /*
+ * Only store the registers if the VFP is enabled,
+ * i.e. return if we are trapping on FP access.
+ */
+ cpacr = READ_SPECIALREG(cpacr_el1);
+ if ((cpacr & CPACR_FPEN_MASK) != CPACR_FPEN_TRAP_NONE)
+ return;
+
+ vfp_state = td->td_pcb->pcb_vfp;
+ __asm __volatile(
+ "mrs %0, fpcr \n"
+ "mrs %1, fpsr \n"
+ "stp q0, q1, [%2, #16 * 0]\n"
+ "stp q2, q3, [%2, #16 * 2]\n"
+ "stp q4, q5, [%2, #16 * 4]\n"
+ "stp q6, q7, [%2, #16 * 6]\n"
+ "stp q8, q9, [%2, #16 * 8]\n"
+ "stp q10, q11, [%2, #16 * 10]\n"
+ "stp q12, q13, [%2, #16 * 12]\n"
+ "stp q14, q15, [%2, #16 * 14]\n"
+ "stp q16, q17, [%2, #16 * 16]\n"
+ "stp q18, q19, [%2, #16 * 18]\n"
+ "stp q20, q21, [%2, #16 * 20]\n"
+ "stp q22, q23, [%2, #16 * 22]\n"
+ "stp q24, q25, [%2, #16 * 24]\n"
+ "stp q26, q27, [%2, #16 * 26]\n"
+ "stp q28, q29, [%2, #16 * 28]\n"
+ "stp q30, q31, [%2, #16 * 30]\n"
+ : "=&r"(fpcr), "=&r"(fpsr) : "r"(vfp_state));
+
+ td->td_pcb->pcb_fpcr = fpcr;
+ td->td_pcb->pcb_fpsr = fpsr;
+
+ dsb();
+ vfp_disable();
+}
+
+void
+vfp_restore_state(void)
+{
+ __int128_t *vfp_state;
+ uint64_t fpcr, fpsr;
+ struct pcb *curpcb;
+ u_int cpu;
+
+ critical_enter();
+
+ cpu = PCPU_GET(cpuid);
+ curpcb = curthread->td_pcb;
+ curpcb->pcb_fpflags |= PCB_FP_STARTED;
+
+ vfp_enable();
+
+ if (PCPU_GET(fpcurthread) != curthread || cpu != curpcb->pcb_vfpcpu) {
+
+ vfp_state = curthread->td_pcb->pcb_vfp;
+ fpcr = curthread->td_pcb->pcb_fpcr;
+ fpsr = curthread->td_pcb->pcb_fpsr;
+
+ __asm __volatile(
+ "ldp q0, q1, [%2, #16 * 0]\n"
+ "ldp q2, q3, [%2, #16 * 2]\n"
+ "ldp q4, q5, [%2, #16 * 4]\n"
+ "ldp q6, q7, [%2, #16 * 6]\n"
+ "ldp q8, q9, [%2, #16 * 8]\n"
+ "ldp q10, q11, [%2, #16 * 10]\n"
+ "ldp q12, q13, [%2, #16 * 12]\n"
+ "ldp q14, q15, [%2, #16 * 14]\n"
+ "ldp q16, q17, [%2, #16 * 16]\n"
+ "ldp q18, q19, [%2, #16 * 18]\n"
+ "ldp q20, q21, [%2, #16 * 20]\n"
+ "ldp q22, q23, [%2, #16 * 22]\n"
+ "ldp q24, q25, [%2, #16 * 24]\n"
+ "ldp q26, q27, [%2, #16 * 26]\n"
+ "ldp q28, q29, [%2, #16 * 28]\n"
+ "ldp q30, q31, [%2, #16 * 30]\n"
+ "msr fpcr, %0 \n"
+ "msr fpsr, %1 \n"
+ : : "r"(fpcr), "r"(fpsr), "r"(vfp_state));
+
+ PCPU_SET(fpcurthread, curthread);
+ curpcb->pcb_vfpcpu = cpu;
+ }
+
+ critical_exit();
+}
+
+void
+vfp_init(void)
+{
+ uint64_t pfr;
+
+ /* Check if there is a vfp unit present */
+ pfr = READ_SPECIALREG(id_aa64pfr0_el1);
+ if ((pfr & ID_AA64PFR0_FP_MASK) == ID_AA64PFR0_FP_NONE)
+ return;
+
+ /* Disable to be enabled when it's used */
+ vfp_disable();
+}
+
+SYSINIT(vfp, SI_SUB_CPU, SI_ORDER_ANY, vfp_init, NULL);
+
+#endif
Index: sys/arm64/arm64/vm_machdep.c
===================================================================
--- /dev/null
+++ sys/arm64/arm64/vm_machdep.c
@@ -0,0 +1,248 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/limits.h>
+#include <sys/proc.h>
+#include <sys/sf_buf.h>
+#include <sys/signal.h>
+#include <sys/unistd.h>
+
+#include <vm/vm.h>
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#include <vm/uma.h>
+#include <vm/uma_int.h>
+
+#include <machine/armreg.h>
+#include <machine/cpu.h>
+#include <machine/pcb.h>
+#include <machine/frame.h>
+
+/*
+ * Finish a fork operation, with process p2 nearly set up.
+ * Copy and update the pcb, set up the stack so that the child
+ * ready to run and return to user mode.
+ */
+void
+cpu_fork(struct thread *td1, struct proc *p2, struct thread *td2, int flags)
+{
+ struct pcb *pcb2;
+ struct trapframe *tf;
+
+ if ((flags & RFPROC) == 0)
+ return;
+
+ pcb2 = (struct pcb *)(td2->td_kstack +
+ td2->td_kstack_pages * PAGE_SIZE) - 1;
+
+ td2->td_pcb = pcb2;
+ bcopy(td1->td_pcb, pcb2, sizeof(*pcb2));
+
+ td2->td_pcb->pcb_l1addr =
+ vtophys(vmspace_pmap(td2->td_proc->p_vmspace)->pm_l1);
+
+ tf = (struct trapframe *)STACKALIGN((struct trapframe *)pcb2 - 1);
+ bcopy(td1->td_frame, tf, sizeof(*tf));
+ tf->tf_x[0] = 0;
+ tf->tf_x[1] = 0;
+ tf->tf_spsr = 0;
+
+ td2->td_frame = tf;
+
+ /* Set the return value registers for fork() */
+ td2->td_pcb->pcb_x[8] = (uintptr_t)fork_return;
+ td2->td_pcb->pcb_x[9] = (uintptr_t)td2;
+ td2->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline;
+ td2->td_pcb->pcb_sp = (uintptr_t)td2->td_frame;
+ td2->td_pcb->pcb_vfpcpu = UINT_MAX;
+
+ /* Setup to release spin count in fork_exit(). */
+ td2->td_md.md_spinlock_count = 1;
+ td2->td_md.md_saved_daif = 0;
+}
+
+void
+cpu_reset(void)
+{
+
+ printf("cpu_reset");
+ while(1)
+ __asm volatile("wfi" ::: "memory");
+}
+
+void
+cpu_thread_swapin(struct thread *td)
+{
+}
+
+void
+cpu_thread_swapout(struct thread *td)
+{
+}
+
+void
+cpu_set_syscall_retval(struct thread *td, int error)
+{
+ struct trapframe *frame;
+
+ frame = td->td_frame;
+
+ switch (error) {
+ case 0:
+ frame->tf_x[0] = td->td_retval[0];
+ frame->tf_x[1] = td->td_retval[1];
+ frame->tf_spsr &= ~PSR_C; /* carry bit */
+ break;
+ case ERESTART:
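+ /*
+ * Every A64 instruction is 4 bytes wide; step the exception
+ * return address back over the svc so it is re-executed.
+ */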
+ frame->tf_elr -= 4;
+ break;
+ case EJUSTRETURN:
+ break;
+ default:
+ frame->tf_spsr |= PSR_C; /* carry bit */
+ frame->tf_x[0] = error;
+ break;
+ }
+}
+
+/*
+ * Initialize machine state (pcb and trap frame) for a new thread about to
+ * upcall. Put enough state in the new thread's PCB to get it to go back to
+ * userret(), where we can intercept it again to set the return (upcall)
+ * address and stack, along with those from upcalls that are from other
+ * sources such as those generated in thread_userret() itself.
+ */
+void
+cpu_set_upcall(struct thread *td, struct thread *td0)
+{
+ bcopy(td0->td_frame, td->td_frame, sizeof(struct trapframe));
+ bcopy(td0->td_pcb, td->td_pcb, sizeof(struct pcb));
+
+ td->td_pcb->pcb_x[8] = (uintptr_t)fork_return;
+ td->td_pcb->pcb_x[9] = (uintptr_t)td;
+ td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline;
+ td->td_pcb->pcb_sp = (uintptr_t)td->td_frame;
+ td->td_pcb->pcb_vfpcpu = UINT_MAX;
+
+ /* Setup to release spin count in fork_exit(). */
+ td->td_md.md_spinlock_count = 1;
+ td->td_md.md_saved_daif = 0;
+}
+
+/*
+ * Set the machine state for performing an upcall that has to
+ * be done in thread_userret() so that those upcalls generated
+ * in thread_userret() itself can be done as well.
+ */
+void
+cpu_set_upcall_kse(struct thread *td, void (*entry)(void *), void *arg,
+ stack_t *stack)
+{
+
+ panic("cpu_set_upcall_kse");
+}
+
+int
+cpu_set_user_tls(struct thread *td, void *tls_base)
+{
+
+ panic("cpu_set_user_tls");
+}
+
+void
+cpu_thread_exit(struct thread *td)
+{
+}
+
+void
+cpu_thread_alloc(struct thread *td)
+{
+
+ td->td_pcb = (struct pcb *)(td->td_kstack +
+ td->td_kstack_pages * PAGE_SIZE) - 1;
+ td->td_frame = (struct trapframe *)STACKALIGN(
+ td->td_pcb - 1);
+}
+
+void
+cpu_thread_free(struct thread *td)
+{
+}
+
+void
+cpu_thread_clean(struct thread *td)
+{
+}
+
+/*
+ * Intercept the return address from a freshly forked process that has NOT
+ * been scheduled yet.
+ *
+ * This is needed to make kernel threads stay in kernel mode.
+ */
+void
+cpu_set_fork_handler(struct thread *td, void (*func)(void *), void *arg)
+{
+
+ td->td_pcb->pcb_x[8] = (uintptr_t)func;
+ td->td_pcb->pcb_x[9] = (uintptr_t)arg;
+ td->td_pcb->pcb_x[PCB_LR] = (uintptr_t)fork_trampoline;
+ td->td_pcb->pcb_sp = (uintptr_t)td->td_frame;
+ td->td_pcb->pcb_vfpcpu = UINT_MAX;
+}
+
+void
+cpu_exit(struct thread *td)
+{
+}
+
+void
+swi_vm(void *v)
+{
+
+ /* Nothing to do here - busdma bounce buffers are not implemented. */
+}
+
+void *
+uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
+{
+
+ panic("uma_small_alloc");
+}
+
+void
+uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
+{
+
+ panic("uma_small_free");
+}
+
Index: sys/arm64/conf/DEFAULTS
===================================================================
--- /dev/null
+++ sys/arm64/conf/DEFAULTS
@@ -0,0 +1,14 @@
+#
+# DEFAULTS -- Default kernel configuration file for FreeBSD/arm64
+#
+# $FreeBSD$
+
+machine arm64
+
+# Pseudo devices.
+device mem # Memory and kernel memory devices
+
+# Default partitioning schemes
+options GEOM_PART_BSD
+options GEOM_PART_MBR
+
Index: sys/arm64/conf/GENERIC
===================================================================
--- /dev/null
+++ sys/arm64/conf/GENERIC
@@ -0,0 +1,93 @@
+#
+# GENERIC -- Generic kernel configuration file for FreeBSD/arm64
+#
+# For more information on this file, please read the config(5) manual page,
+# and/or the handbook section on Kernel Configuration Files:
+#
+# http://www.FreeBSD.org/doc/en_US.ISO8859-1/books/handbook/kernelconfig-config.html
+#
+# The handbook is also available locally in /usr/share/doc/handbook
+# if you've installed the doc distribution, otherwise always see the
+# FreeBSD World Wide Web server (http://www.FreeBSD.org/) for the
+# latest information.
+#
+# An exhaustive list of options and more detailed explanations of the
+# device lines is also present in the ../../conf/NOTES and NOTES files.
+# If you are in doubt as to the purpose or necessity of a line, check first
+# in NOTES.
+#
+# $FreeBSD$
+
+cpu arm64
+ident GENERIC
+
+makeoptions DEBUG=-g # Build kernel with gdb(1) debug symbols
+makeoptions NO_MODULES=1 # We don't yet support modules on arm64
+
+options SCHED_ULE # ULE scheduler
+options PREEMPTION # Enable kernel thread preemption
+options INET # InterNETworking
+options INET6 # IPv6 communications protocols
+options TCP_OFFLOAD # TCP offload
+options SCTP # Stream Control Transmission Protocol
+options FFS # Berkeley Fast Filesystem
+options SOFTUPDATES # Enable FFS soft updates support
+options UFS_ACL # Support for access control lists
+options UFS_DIRHASH # Improve performance on big directories
+options UFS_GJOURNAL # Enable gjournal-based UFS journaling
+options QUOTA # Enable disk quotas for UFS
+options MD_ROOT # MD is a potential root device
+options NFSCL # New Network Filesystem Client
+options NFSD # New Network Filesystem Server
+options NFSLOCKD # Network Lock Manager
+options NFS_ROOT # NFS usable as /, requires NFSCL
+options MSDOSFS # MSDOS Filesystem
+options CD9660 # ISO 9660 Filesystem
+options PROCFS # Process filesystem (requires PSEUDOFS)
+options PSEUDOFS # Pseudo-filesystem framework
+options GEOM_PART_GPT # GUID Partition Tables.
+options GEOM_RAID # Soft RAID functionality.
+options GEOM_LABEL # Provides labelization
+options SCSI_DELAY=5000 # Delay (in ms) before probing SCSI
+options KTRACE # ktrace(1) support
+options STACK # stack(9) support
+options SYSVSHM # SYSV-style shared memory
+options SYSVMSG # SYSV-style message queues
+options SYSVSEM # SYSV-style semaphores
+options _KPOSIX_PRIORITY_SCHEDULING # POSIX P1003_1B real-time extensions
+options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed.
+options KBD_INSTALL_CDEV # install a CDEV entry in /dev
+options HWPMC_HOOKS # Necessary kernel hooks for hwpmc(4)
+options AUDIT # Security event auditing
+options CAPABILITY_MODE # Capsicum capability mode
+options CAPABILITIES # Capsicum capabilities
+options MAC # TrustedBSD MAC Framework
+options KDTRACE_FRAME # Ensure frames are compiled in
+options KDTRACE_HOOKS # Kernel DTrace hooks
+options VFP # Floating-point support
+
+device virtio
+device virtio_mmio
+device virtio_blk
+device vtnet
+
+# Serial (COM) ports
+device uart # Generic UART driver
+device pl011
+
+# Pseudo devices.
+device loop # Network loopback
+device random # Entropy device
+device ether # Ethernet support
+device vlan # 802.1Q VLAN support
+device tun # Packet tunnel.
+device md # Memory "disks"
+device gif # IPv6 and IPv4 tunneling
+device firmware # firmware assist module
+
+# The `bpf' device enables the Berkeley Packet Filter.
+# Be aware of the administrative consequences of enabling this!
+# Note that 'bpf' is required for DHCP.
+device bpf # Berkeley packet filter
+
+options FDT
Index: sys/arm64/include/_bus.h
===================================================================
--- /dev/null
+++ sys/arm64/include/_bus.h
@@ -0,0 +1,46 @@
+/*-
+ * Copyright (c) 2005 M. Warner Losh.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions, and the following disclaimer,
+ * without modification, immediately at the beginning of the file.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE__BUS_H_
+#define _MACHINE__BUS_H_
+
+/*
+ * Addresses (in bus space).
+ */
+typedef u_long bus_addr_t;
+typedef u_long bus_size_t;
+
+/*
+ * Access methods for bus space.
+ */
+typedef u_long bus_space_handle_t;
+typedef struct bus_space *bus_space_tag_t;
+
+#endif /* !_MACHINE__BUS_H_ */
Index: sys/arm64/include/armreg.h
===================================================================
--- /dev/null
+++ sys/arm64/include/armreg.h
@@ -0,0 +1,194 @@
+/*-
+ * Copyright (c) 2013, 2014 Andrew Turner
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ARMREG_H_
+#define _MACHINE_ARMREG_H_
+
+#define READ_SPECIALREG(reg) \
+({ uint64_t val; \
+ __asm __volatile("mrs %0, " __STRING(reg) : "=&r" (val)); \
+ val; \
+})
+#define WRITE_SPECIALREG(reg, val) \
+ __asm __volatile("msr " __STRING(reg) ", %0" : : "r"((uint64_t)val))
+
+/* CPACR_EL1 */
+#define CPACR_FPEN_MASK (0x3 << 20)
+#define CPACR_FPEN_TRAP_ALL1 (0x0 << 20) /* Traps from EL0 and EL1 */
+#define CPACR_FPEN_TRAP_EL0 (0x1 << 20) /* Traps from EL0 */
+#define CPACR_FPEN_TRAP_ALL2 (0x2 << 20) /* Traps from EL0 and EL1 */
+#define CPACR_FPEN_TRAP_NONE (0x3 << 20) /* No traps */
+#define CPACR_TTA (0x1 << 28)
+
+/* CTR_EL0 - Cache Type Register */
+#define CTR_DLINE_SHIFT 16
+#define CTR_DLINE_MASK (0xf << CTR_DLINE_SHIFT)
+#define CTR_DLINE_SIZE(reg) (((reg) & CTR_DLINE_MASK) >> CTR_DLINE_SHIFT)
+#define CTR_ILINE_SHIFT 0
+#define CTR_ILINE_MASK (0xf << CTR_ILINE_SHIFT)
+#define CTR_ILINE_SIZE(reg) (((reg) & CTR_ILINE_MASK) >> CTR_ILINE_SHIFT)
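+/* Both fields encode log2 of the cache line size in 4-byte words */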
+
+/* ESR_ELx */
+#define ESR_ELx_ISS_MASK 0x01ffffff
+#define ISS_INSN_FnV (0x01 << 10)
+#define ISS_INSN_EA (0x01 << 9)
+#define ISS_INSN_S1PTW (0x01 << 7)
+#define ISS_INSN_IFSC_MASK (0x1f << 0)
+#define ISS_DATA_ISV (0x01 << 24)
+#define ISS_DATA_SAS_MASK (0x03 << 22)
+#define ISS_DATA_SSE (0x01 << 21)
+#define ISS_DATA_SRT_MASK (0x1f << 16)
+#define ISS_DATA_SF (0x01 << 15)
+#define ISS_DATA_AR (0x01 << 14)
+#define ISS_DATA_FnV (0x01 << 10)
+#define ISS_DATA_EA (0x01 << 9)
+#define ISS_DATA_CM (0x01 << 8)
+#define ISS_DATA_S1PTW (0x01 << 7)
+#define ISS_DATA_WnR (0x01 << 6)
+#define ISS_DATA_DFSC_MASK (0x1f << 0)
+#define ESR_ELx_IL (0x01 << 25)
+#define ESR_ELx_EC_SHIFT 26
+#define ESR_ELx_EC_MASK (0x3f << 26)
+#define ESR_ELx_EXCEPTION(esr) (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
+#define EXCP_UNKNOWN 0x00 /* Unknown exception */
+#define EXCP_FP_SIMD 0x07 /* VFP/SIMD trap */
+#define EXCP_ILL_STATE 0x0e /* Illegal execution state */
+#define EXCP_SVC 0x15 /* SVC trap */
+#define EXCP_MSR 0x18 /* MSR/MRS trap */
+#define EXCP_INSN_ABORT_L 0x20 /* Instruction abort, from lower EL */
+#define EXCP_INSN_ABORT 0x21 /* Instruction abort, from same EL */
+#define EXCP_PC_ALIGN 0x22 /* PC alignment fault */
+#define EXCP_DATA_ABORT_L 0x24 /* Data abort, from lower EL */
+#define EXCP_DATA_ABORT 0x25 /* Data abort, from same EL */
+#define EXCP_SP_ALIGN 0x26 /* SP alignment fault */
+#define EXCP_TRAP_FP 0x2c /* Trapped FP exception */
+#define EXCP_SERROR 0x2f /* SError interrupt */
+#define EXCP_SOFTSTP_EL1 0x33 /* Software Step, from same EL */
+#define EXCP_WATCHPT_EL1 0x35 /* Watchpoint, from same EL */
+#define EXCP_BRK 0x3c /* Breakpoint */
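+/*
+ * Example: the trap.c handlers read esr_el1 and dispatch on
+ * ESR_ELx_EXCEPTION(esr) against the EXCP_* values above.
+ */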
+
+/* ID_AA64PFR0_EL1 */
+#define ID_AA64PFR0_EL0_MASK (0xf << 0)
+#define ID_AA64PFR0_EL1_MASK (0xf << 4)
+#define ID_AA64PFR0_EL2_MASK (0xf << 8)
+#define ID_AA64PFR0_EL3_MASK (0xf << 12)
+#define ID_AA64PFR0_FP_MASK (0xf << 16)
+#define ID_AA64PFR0_FP_IMPL (0x0 << 16) /* Floating-point implemented */
+#define ID_AA64PFR0_FP_NONE (0xf << 16) /* Floating-point not implemented */
+#define ID_AA64PFR0_ADV_SIMD_MASK (0xf << 20)
+#define ID_AA64PFR0_GIC_MASK (0xf << 24)
+
+/* MAIR_EL1 - Memory Attribute Indirection Register */
+#define MAIR_ATTR_MASK(idx) (0xff << ((idx) * 8))
+#define MAIR_ATTR(attr, idx) ((attr) << ((idx) * 8))
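+/* e.g. MAIR_ATTR(0xff, 2) puts the Normal Write-Back attribute in index 2 */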
+
+/* SCTLR_EL1 - System Control Register */
+#define SCTLR_RES0 0xc8222400 /* Reserved, write 0 */
+#define SCTLR_RES1 0x30d00800 /* Reserved, write 1 */
+
+#define SCTLR_M 0x00000001
+#define SCTLR_A 0x00000002
+#define SCTLR_C 0x00000004
+#define SCTLR_SA 0x00000008
+#define SCTLR_SA0 0x00000010
+#define SCTLR_CP15BEN 0x00000020
+#define SCTLR_THEE 0x00000040
+#define SCTLR_ITD 0x00000080
+#define SCTLR_SED 0x00000100
+#define SCTLR_UMA 0x00000200
+#define SCTLR_I 0x00001000
+#define SCTLR_DZE 0x00004000
+#define SCTLR_UCT 0x00008000
+#define SCTLR_nTWI 0x00010000
+#define SCTLR_nTWE 0x00040000
+#define SCTLR_WXN 0x00080000
+#define SCTLR_EOE 0x01000000
+#define SCTLR_EE 0x02000000
+#define SCTLR_UCI 0x04000000
+
+/* SPSR_EL1 */
+/*
+ * When the exception is taken in AArch64:
+ * M[4] is 0 for AArch64 mode
+ * M[3:2] is the exception level
+ * M[1] is unused
+ * M[0] is the SP select:
+ * 0: always SP0
+ * 1: current EL's SP
+ */
+#define PSR_M_EL0t 0x00000000
+#define PSR_M_EL1t 0x00000004
+#define PSR_M_EL1h 0x00000005
+#define PSR_M_EL2t 0x00000008
+#define PSR_M_EL2h 0x00000009
+#define PSR_M_MASK 0x0000001f
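+/* e.g. PSR_M_EL1h (0b00101) is AArch64 EL1 using the EL1 stack pointer */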
+
+#define PSR_F 0x00000040
+#define PSR_I 0x00000080
+#define PSR_A 0x00000100
+#define PSR_D 0x00000200
+#define PSR_IL 0x00100000
+#define PSR_SS 0x00200000
+#define PSR_V 0x10000000
+#define PSR_C 0x20000000
+#define PSR_Z 0x40000000
+#define PSR_N 0x80000000
+
+/* TCR_EL1 - Translation Control Register */
+#define TCR_ASID_16 (1UL << 36)
+
+#define TCR_IPS_SHIFT 32
+#define TCR_IPS_32BIT (0UL << TCR_IPS_SHIFT)
+#define TCR_IPS_36BIT (1UL << TCR_IPS_SHIFT)
+#define TCR_IPS_40BIT (2UL << TCR_IPS_SHIFT)
+#define TCR_IPS_42BIT (3UL << TCR_IPS_SHIFT)
+#define TCR_IPS_44BIT (4UL << TCR_IPS_SHIFT)
+#define TCR_IPS_48BIT (5UL << TCR_IPS_SHIFT)
+
+#define TCR_TG1_SHIFT 30
+#define TCR_TG1_16K (1UL << TCR_TG1_SHIFT)
+#define TCR_TG1_4K (2UL << TCR_TG1_SHIFT)
+#define TCR_TG1_64K (3UL << TCR_TG1_SHIFT)
+
+#define TCR_T1SZ_SHIFT 16
+#define TCR_T0SZ_SHIFT 0
+#define TCR_TxSZ(x) (((x) << TCR_T1SZ_SHIFT) | ((x) << TCR_T0SZ_SHIFT))
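+/* e.g. TCR_TxSZ(64 - 39) selects 39-bit (512GB) regions for both TTBRs */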
+
+/* Saved Program Status Register */
+#define DBG_SPSR_SS (0x1 << 21)
+
+/* Monitor Debug System Control Register */
+#define DBG_MDSCR_SS (0x1 << 0)
+#define DBG_MDSCR_KDE (0x1 << 13)
+#define DBG_MDSCR_MDE (0x1 << 15)
+
+#endif /* !_MACHINE_ARMREG_H_ */
Index: sys/arm64/include/atomic.h
===================================================================
--- sys/arm64/include/atomic.h
+++ sys/arm64/include/atomic.h
@@ -160,7 +160,6 @@
#define atomic_set_int atomic_set_32
#define atomic_subtract_int atomic_subtract_32
-
static __inline void
atomic_add_acq_32(volatile uint32_t *p, uint32_t val)
{
@@ -170,7 +169,7 @@
__asm __volatile(
"1: ldaxr %w0, [%2] \n"
" add %w0, %w0, %w3 \n"
- " stlxr %w1, %w0, [%2] \n"
+ " stxr %w1, %w0, [%2] \n"
" cbnz %w1, 1b \n"
"2:"
: "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
@@ -186,7 +185,7 @@
__asm __volatile(
"1: ldaxr %w0, [%2] \n"
" bic %w0, %w0, %w3 \n"
- " stlxr %w1, %w0, [%2] \n"
+ " stxr %w1, %w0, [%2] \n"
" cbnz %w1, 1b \n"
: "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
);
@@ -203,7 +202,7 @@
" ldaxr %w0, [%2] \n"
" cmp %w0, %w3 \n"
" b.ne 2f \n"
- " stlxr %w1, %w4, [%2] \n"
+ " stxr %w1, %w4, [%2] \n"
" cbnz %w1, 1b \n"
"2:"
: "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (cmpval), "+r" (newval)
@@ -218,8 +217,9 @@
{
uint32_t ret;
- ret = *p;
- dmb();
+ __asm __volatile(
+ "ldar %w0, [%1] \n"
+ : "=&r" (ret) : "r" (p) : "memory");
return (ret);
}
@@ -233,7 +233,7 @@
__asm __volatile(
"1: ldaxr %w0, [%2] \n"
" orr %w0, %w0, %w3 \n"
- " stlxr %w1, %w0, [%2] \n"
+ " stxr %w1, %w0, [%2] \n"
" cbnz %w1, 1b \n"
: "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
);
@@ -248,6 +248,82 @@
__asm __volatile(
"1: ldaxr %w0, [%2] \n"
" sub %w0, %w0, %w3 \n"
+ " stxr %w1, %w0, [%2] \n"
+ " cbnz %w1, 1b \n"
+ : "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
+ );
+}
+
+#define atomic_add_acq_int atomic_add_acq_32
+#define atomic_clear_acq_int atomic_clear_acq_32
+#define atomic_cmpset_acq_int atomic_cmpset_acq_32
+#define atomic_load_acq_int atomic_load_acq_32
+#define atomic_set_acq_int atomic_set_acq_32
+#define atomic_subtract_acq_int atomic_subtract_acq_32
+
+/* Release (_rel_) variants of the 32-bit atomic operations. */
+
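+/*
+ * These pair a plain load-exclusive (ldxr) with a store-release-exclusive
+ * (stlxr); the acquire variants above instead pair a load-acquire-exclusive
+ * (ldaxr) with a plain store-exclusive (stxr).
+ */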
+static __inline void
+atomic_add_rel_32(volatile uint32_t *p, uint32_t val)
+{
+ uint32_t tmp;
+ int res;
+
+ __asm __volatile(
+ "1: ldxr %w0, [%2] \n"
+ " add %w0, %w0, %w3 \n"
+ " stlxr %w1, %w0, [%2] \n"
+ " cbnz %w1, 1b \n"
+ "2:"
+ : "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
+ );
+}
+
+static __inline void
+atomic_clear_rel_32(volatile uint32_t *p, uint32_t val)
+{
+ uint32_t tmp;
+ int res;
+
+ __asm __volatile(
+ "1: ldxr %w0, [%2] \n"
+ " bic %w0, %w0, %w3 \n"
+ " stlxr %w1, %w0, [%2] \n"
+ " cbnz %w1, 1b \n"
+ : "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
+ );
+}
+
+static __inline int
+atomic_cmpset_rel_32(volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
+{
+ uint32_t tmp;
+ int res;
+
+ __asm __volatile(
+ "1: mov %w1, #1 \n"
+ " ldxr %w0, [%2] \n"
+ " cmp %w0, %w3 \n"
+ " b.ne 2f \n"
+ " stlxr %w1, %w4, [%2] \n"
+ " cbnz %w1, 1b \n"
+ "2:"
+ : "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (cmpval), "+r" (newval)
+ : : "cc", "memory"
+ );
+
+ return (!res);
+}
+
+static __inline void
+atomic_set_rel_32(volatile uint32_t *p, uint32_t val)
+{
+ uint32_t tmp;
+ int res;
+
+ __asm __volatile(
+ "1: ldxr %w0, [%2] \n"
+ " orr %w0, %w0, %w3 \n"
" stlxr %w1, %w0, [%2] \n"
" cbnz %w1, 1b \n"
: "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
@@ -258,23 +334,25 @@
atomic_store_rel_32(volatile uint32_t *p, uint32_t val)
{
- dmb();
- *p = val;
+ __asm __volatile(
+ "stlr %w0, [%1] \n"
+ : : "r" (val), "r" (p) : "memory");
}
-#define atomic_add_acq_int atomic_add_acq_32
-#define atomic_clear_acq_int atomic_add_acq_32
-#define atomic_cmpset_acq_int atomic_cmpset_acq_32
-#define atomic_load_acq_int atomic_load_acq_32
-#define atomic_set_acq_int atomic_set_acq_32
-#define atomic_subtract_acq_int atomic_subtract_acq_32
+static __inline void
+atomic_subtract_rel_32(volatile uint32_t *p, uint32_t val)
+{
+ uint32_t tmp;
+ int res;
-/* The atomic functions currently are both acq and rel, we should fix this. */
-#define atomic_add_rel_32 atomic_add_acq_32
-#define atomic_clear_rel_32 atomic_add_acq_32
-#define atomic_cmpset_rel_32 atomic_cmpset_acq_32
-#define atomic_set_rel_32 atomic_set_acq_32
-#define atomic_subtract_rel_32 atomic_subtract_acq_32
+ __asm __volatile(
+ "1: ldxr %w0, [%2] \n"
+ " sub %w0, %w0, %w3 \n"
+ " stlxr %w1, %w0, [%2] \n"
+ " cbnz %w1, 1b \n"
+ : "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
+ );
+}
#define atomic_add_rel_int atomic_add_rel_32
#define atomic_clear_rel_int atomic_add_rel_32
@@ -440,7 +518,7 @@
__asm __volatile(
"1: ldaxr %0, [%2] \n"
" add %0, %0, %3 \n"
- " stlxr %w1, %0, [%2] \n"
+ " stxr %w1, %0, [%2] \n"
" cbnz %w1, 1b \n"
"2:"
: "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
@@ -456,7 +534,7 @@
__asm __volatile(
"1: ldaxr %0, [%2] \n"
" bic %0, %0, %3 \n"
- " stlxr %w1, %0, [%2] \n"
+ " stxr %w1, %0, [%2] \n"
" cbnz %w1, 1b \n"
: "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
);
@@ -473,7 +551,7 @@
" ldaxr %0, [%2] \n"
" cmp %0, %3 \n"
" b.ne 2f \n"
- " stlxr %w1, %4, [%2] \n"
+ " stxr %w1, %4, [%2] \n"
" cbnz %w1, 1b \n"
"2:"
: "=&r" (tmp), "=&r" (res), "+r" (p), "+r" (cmpval), "+r" (newval)
@@ -488,8 +566,9 @@
{
uint64_t ret;
- ret = *p;
- dmb();
+ __asm __volatile(
+ "ldar %0, [%1] \n"
+ : "=&r" (ret) : "r" (p) : "memory");
return (ret);
}
@@ -503,7 +582,7 @@
__asm __volatile(
"1: ldaxr %0, [%2] \n"
" orr %0, %0, %3 \n"
- " stlxr %w1, %0, [%2] \n"
+ " stxr %w1, %0, [%2] \n"
" cbnz %w1, 1b \n"
: "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
);
@@ -518,20 +597,12 @@
__asm __volatile(
"1: ldaxr %0, [%2] \n"
" sub %0, %0, %3 \n"
- " stlxr %w1, %0, [%2] \n"
+ " stxr %w1, %0, [%2] \n"
" cbnz %w1, 1b \n"
: "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
);
}
-static __inline void
-atomic_store_rel_64(volatile uint64_t *p, uint64_t val)
-{
-
- dmb();
- *p = val;
-}
-
#define atomic_add_acq_long atomic_add_acq_64
#define atomic_clear_acq_long atomic_add_acq_64
#define atomic_cmpset_acq_long atomic_cmpset_acq_64
@@ -550,21 +621,106 @@
* TODO: The atomic functions currently are both acq and rel, we should fix
* this.
*/
-#define atomic_add_rel_64 atomic_add_acq_64
-#define atomic_clear_rel_64 atomic_add_acq_64
-#define atomic_cmpset_rel_64 atomic_cmpset_acq_64
-#define atomic_set_rel_64 atomic_set_acq_64
-#define atomic_subtract_rel_64 atomic_subtract_acq_64
+static __inline void
+atomic_add_rel_64(volatile uint64_t *p, uint64_t val)
+{
+ uint64_t tmp;
+ int res;
+
+ __asm __volatile(
+ "1: ldxr %0, [%2] \n"
+ " add %0, %0, %3 \n"
+ " stlxr %w1, %0, [%2] \n"
+ " cbnz %w1, 1b \n"
+ "2:"
+ : "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
+ );
+}
+
+static __inline void
+atomic_clear_rel_64(volatile uint64_t *p, uint64_t val)
+{
+ uint64_t tmp;
+ int res;
+
+ __asm __volatile(
+ "1: ldxr %0, [%2] \n"
+ " bic %0, %0, %3 \n"
+ " stlxr %w1, %0, [%2] \n"
+ " cbnz %w1, 1b \n"
+ : "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
+ );
+}
+
+static __inline int
+atomic_cmpset_rel_64(volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
+{
+ uint64_t tmp;
+ int res;
+
+ __asm __volatile(
+ "1: mov %w1, #1 \n"
+ " ldxr %0, [%2] \n"
+ " cmp %0, %3 \n"
+ " b.ne 2f \n"
+ " stlxr %w1, %4, [%2] \n"
+ " cbnz %w1, 1b \n"
+ "2:"
+ : "=&r" (tmp), "=&r" (res), "+r" (p), "+r" (cmpval), "+r" (newval)
+ : : "cc", "memory"
+ );
+
+ return (!res);
+}
+
+static __inline void
+atomic_set_rel_64(volatile uint64_t *p, uint64_t val)
+{
+ uint64_t tmp;
+ int res;
+
+ __asm __volatile(
+ "1: ldxr %0, [%2] \n"
+ " orr %0, %0, %3 \n"
+ " stlxr %w1, %0, [%2] \n"
+ " cbnz %w1, 1b \n"
+ : "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
+ );
+}
+
+static __inline void
+atomic_store_rel_64(volatile uint64_t *p, uint64_t val)
+{
+
+ __asm __volatile(
+ "stlr %0, [%1] \n"
+ : : "r" (val), "r" (p) : "memory");
+}
+
+static __inline void
+atomic_subtract_rel_64(volatile uint64_t *p, uint64_t val)
+{
+ uint64_t tmp;
+ int res;
+
+ __asm __volatile(
+ "1: ldxr %0, [%2] \n"
+ " sub %0, %0, %3 \n"
+ " stlxr %w1, %0, [%2] \n"
+ " cbnz %w1, 1b \n"
+ : "=&r"(tmp), "=&r"(res), "+r" (p), "+r" (val) : : "cc", "memory"
+ );
+}
#define atomic_add_rel_long atomic_add_rel_64
-#define atomic_clear_rel_long atomic_add_rel_64
+#define atomic_clear_rel_long atomic_clear_rel_64
#define atomic_cmpset_rel_long atomic_cmpset_rel_64
#define atomic_set_rel_long atomic_set_rel_64
#define atomic_subtract_rel_long atomic_subtract_rel_64
#define atomic_store_rel_long atomic_store_rel_64
#define atomic_add_rel_ptr atomic_add_rel_64
-#define atomic_clear_rel_ptr atomic_add_rel_64
+#define atomic_clear_rel_ptr atomic_clear_rel_64
#define atomic_cmpset_rel_ptr atomic_cmpset_rel_64
#define atomic_set_rel_ptr atomic_set_rel_64
#define atomic_subtract_rel_ptr atomic_subtract_rel_64
Index: sys/arm64/include/bus.h
===================================================================
--- /dev/null
+++ sys/arm64/include/bus.h
@@ -0,0 +1,469 @@
+/* $NetBSD: bus.h,v 1.11 2003/07/28 17:35:54 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1996, 1997, 1998, 2001 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
+ * NASA Ames Research Center.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*-
+ * Copyright (c) 1996 Charles M. Hannum. All rights reserved.
+ * Copyright (c) 1996 Christopher G. Demetriou. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by Christopher G. Demetriou
+ * for the NetBSD Project.
+ * 4. The name of the author may not be used to endorse or promote products
+ * derived from this software without specific prior written permission
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * From: sys/arm/include/bus.h
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_BUS_H_
+#define _MACHINE_BUS_H_
+
+#include <machine/_bus.h>
+
+#define BUS_SPACE_ALIGNED_POINTER(p, t) ALIGNED_POINTER(p, t)
+
+#define BUS_SPACE_MAXADDR_24BIT 0xFFFFFFUL
+#define BUS_SPACE_MAXADDR_32BIT 0xFFFFFFFFUL
+#define BUS_SPACE_MAXSIZE_24BIT 0xFFFFFFUL
+#define BUS_SPACE_MAXSIZE_32BIT 0xFFFFFFFFUL
+
+#define BUS_SPACE_MAXADDR 0xFFFFFFFFFFFFFFFFUL
+#define BUS_SPACE_MAXSIZE 0xFFFFFFFFFFFFFFFFUL
+
+#define BUS_SPACE_MAP_CACHEABLE 0x01
+#define BUS_SPACE_MAP_LINEAR 0x02
+#define BUS_SPACE_MAP_PREFETCHABLE 0x04
+
+#define BUS_SPACE_UNRESTRICTED (~0)
+
+#define BUS_SPACE_BARRIER_READ 0x01
+#define BUS_SPACE_BARRIER_WRITE 0x02
+
+struct bus_space {
+ /* cookie */
+ void *bs_cookie;
+
+ /* mapping/unmapping */
+ int (*bs_map) (void *, bus_addr_t, bus_size_t,
+ int, bus_space_handle_t *);
+ void (*bs_unmap) (void *, bus_space_handle_t, bus_size_t);
+ int (*bs_subregion) (void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, bus_space_handle_t *);
+
+ /* allocation/deallocation */
+ int (*bs_alloc) (void *, bus_addr_t, bus_addr_t,
+ bus_size_t, bus_size_t, bus_size_t, int,
+ bus_addr_t *, bus_space_handle_t *);
+ void (*bs_free) (void *, bus_space_handle_t,
+ bus_size_t);
+
+ /* get kernel virtual address */
+ /* barrier */
+ void (*bs_barrier) (void *, bus_space_handle_t,
+ bus_size_t, bus_size_t, int);
+
+ /* read single */
+ u_int8_t (*bs_r_1) (void *, bus_space_handle_t, bus_size_t);
+ u_int16_t (*bs_r_2) (void *, bus_space_handle_t, bus_size_t);
+ u_int32_t (*bs_r_4) (void *, bus_space_handle_t, bus_size_t);
+ u_int64_t (*bs_r_8) (void *, bus_space_handle_t, bus_size_t);
+
+ /* read multiple */
+ void (*bs_rm_1) (void *, bus_space_handle_t, bus_size_t,
+ u_int8_t *, bus_size_t);
+ void (*bs_rm_2) (void *, bus_space_handle_t, bus_size_t,
+ u_int16_t *, bus_size_t);
+ void (*bs_rm_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rm_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* read region */
+ void (*bs_rr_1) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*bs_rr_2) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t);
+ void (*bs_rr_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rr_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* write single */
+ void (*bs_w_1) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t);
+ void (*bs_w_2) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t);
+ void (*bs_w_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t);
+ void (*bs_w_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t);
+
+ /* write multiple */
+ void (*bs_wm_1) (void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wm_2) (void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wm_4) (void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wm_8) (void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+
+ /* write region */
+ void (*bs_wr_1) (void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wr_2) (void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wr_4) (void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wr_8) (void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+
+ /* set multiple */
+ void (*bs_sm_1) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t);
+ void (*bs_sm_2) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t);
+ void (*bs_sm_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t);
+ void (*bs_sm_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t);
+
+ /* set region */
+ void (*bs_sr_1) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t, bus_size_t);
+ void (*bs_sr_2) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t, bus_size_t);
+ void (*bs_sr_4) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t, bus_size_t);
+ void (*bs_sr_8) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t, bus_size_t);
+
+ /* copy */
+ void (*bs_c_1) (void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*bs_c_2) (void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*bs_c_4) (void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+ void (*bs_c_8) (void *, bus_space_handle_t, bus_size_t,
+ bus_space_handle_t, bus_size_t, bus_size_t);
+
+ /* read single stream */
+ u_int8_t (*bs_r_1_s) (void *, bus_space_handle_t, bus_size_t);
+ u_int16_t (*bs_r_2_s) (void *, bus_space_handle_t, bus_size_t);
+ u_int32_t (*bs_r_4_s) (void *, bus_space_handle_t, bus_size_t);
+ u_int64_t (*bs_r_8_s) (void *, bus_space_handle_t, bus_size_t);
+
+ /* read multiple stream */
+ void (*bs_rm_1_s) (void *, bus_space_handle_t, bus_size_t,
+ u_int8_t *, bus_size_t);
+ void (*bs_rm_2_s) (void *, bus_space_handle_t, bus_size_t,
+ u_int16_t *, bus_size_t);
+ void (*bs_rm_4_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rm_8_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* read region stream */
+ void (*bs_rr_1_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t *, bus_size_t);
+ void (*bs_rr_2_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t *, bus_size_t);
+ void (*bs_rr_4_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t *, bus_size_t);
+ void (*bs_rr_8_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t *, bus_size_t);
+
+ /* write single stream */
+ void (*bs_w_1_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int8_t);
+ void (*bs_w_2_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int16_t);
+ void (*bs_w_4_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int32_t);
+ void (*bs_w_8_s) (void *, bus_space_handle_t,
+ bus_size_t, u_int64_t);
+
+ /* write multiple stream */
+ void (*bs_wm_1_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wm_2_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wm_4_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wm_8_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+
+ /* write region stream */
+ void (*bs_wr_1_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int8_t *, bus_size_t);
+ void (*bs_wr_2_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int16_t *, bus_size_t);
+ void (*bs_wr_4_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int32_t *, bus_size_t);
+ void (*bs_wr_8_s) (void *, bus_space_handle_t,
+ bus_size_t, const u_int64_t *, bus_size_t);
+};
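+
+/*
+ * A bus_space_tag_t is expected to point at one of these tables; the
+ * bus_space_*() access macros below simply indirect through the matching
+ * member, handing bs_cookie back to the implementation.
+ */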
+
+
+/*
+ * Utility macros; INTERNAL USE ONLY.
+ */
+#define __bs_c(a,b) __CONCAT(a,b)
+#define __bs_opname(op,size) __bs_c(__bs_c(__bs_c(bs_,op),_),size)
+
+#define __bs_rs(sz, t, h, o) \
+ (*(t)->__bs_opname(r,sz))((t)->bs_cookie, h, o)
+#define __bs_ws(sz, t, h, o, v) \
+ (*(t)->__bs_opname(w,sz))((t)->bs_cookie, h, o, v)
+#define __bs_nonsingle(type, sz, t, h, o, a, c) \
+ (*(t)->__bs_opname(type,sz))((t)->bs_cookie, h, o, a, c)
+#define __bs_set(type, sz, t, h, o, v, c) \
+ (*(t)->__bs_opname(type,sz))((t)->bs_cookie, h, o, v, c)
+#define __bs_copy(sz, t, h1, o1, h2, o2, cnt) \
+ (*(t)->__bs_opname(c,sz))((t)->bs_cookie, h1, o1, h2, o2, cnt)
+
+#define __bs_opname_s(op,size) __bs_c(__bs_c(__bs_c(__bs_c(bs_,op),_),size),_s)
+#define __bs_rs_s(sz, t, h, o) \
+ (*(t)->__bs_opname_s(r,sz))((t)->bs_cookie, h, o)
+#define __bs_ws_s(sz, t, h, o, v) \
+ (*(t)->__bs_opname_s(w,sz))((t)->bs_cookie, h, o, v)
+#define __bs_nonsingle_s(type, sz, t, h, o, a, c) \
+ (*(t)->__bs_opname_s(type,sz))((t)->bs_cookie, h, o, a, c)
+
+
+/*
+ * Mapping and unmapping operations.
+ */
+#define bus_space_map(t, a, s, c, hp) \
+ (*(t)->bs_map)((t)->bs_cookie, (a), (s), (c), (hp))
+#define bus_space_unmap(t, h, s) \
+ (*(t)->bs_unmap)((t)->bs_cookie, (h), (s))
+#define bus_space_subregion(t, h, o, s, hp) \
+ (*(t)->bs_subregion)((t)->bs_cookie, (h), (o), (s), (hp))
+
+
+/*
+ * Allocation and deallocation operations.
+ */
+#define bus_space_alloc(t, rs, re, s, a, b, c, ap, hp) \
+ (*(t)->bs_alloc)((t)->bs_cookie, (rs), (re), (s), (a), (b), \
+ (c), (ap), (hp))
+#define bus_space_free(t, h, s) \
+ (*(t)->bs_free)((t)->bs_cookie, (h), (s))
+
+/*
+ * Bus barrier operations.
+ */
+#define bus_space_barrier(t, h, o, l, f) \
+ (*(t)->bs_barrier)((t)->bs_cookie, (h), (o), (l), (f))
+
+
+/*
+ * Bus read (single) operations.
+ */
+#define bus_space_read_1(t, h, o) __bs_rs(1,(t),(h),(o))
+#define bus_space_read_2(t, h, o) __bs_rs(2,(t),(h),(o))
+#define bus_space_read_4(t, h, o) __bs_rs(4,(t),(h),(o))
+#define bus_space_read_8(t, h, o) __bs_rs(8,(t),(h),(o))
+
+#define bus_space_read_stream_1(t, h, o) __bs_rs_s(1,(t), (h), (o))
+#define bus_space_read_stream_2(t, h, o) __bs_rs_s(2,(t), (h), (o))
+#define bus_space_read_stream_4(t, h, o) __bs_rs_s(4,(t), (h), (o))
+#define bus_space_read_stream_8(t, h, o) __bs_rs_s(8,(t), (h), (o))
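+
+/*
+ * Illustrative use (not part of this change): a driver that has obtained
+ * a tag/handle pair, e.g. via bus_alloc_resource(9) followed by
+ * rman_get_bustag()/rman_get_bushandle(), accesses a hypothetical 32-bit
+ * register at byte offset 0x10 as follows:
+ *
+ *	uint32_t v;
+ *
+ *	v = bus_space_read_4(bst, bsh, 0x10);
+ *	bus_space_write_4(bst, bsh, 0x10, v | 0x1);
+ */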
+
+/*
+ * Bus read multiple operations.
+ */
+#define bus_space_read_multi_1(t, h, o, a, c) \
+ __bs_nonsingle(rm,1,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_2(t, h, o, a, c) \
+ __bs_nonsingle(rm,2,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_4(t, h, o, a, c) \
+ __bs_nonsingle(rm,4,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_8(t, h, o, a, c) \
+ __bs_nonsingle(rm,8,(t),(h),(o),(a),(c))
+
+#define bus_space_read_multi_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,1,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,2,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,4,(t),(h),(o),(a),(c))
+#define bus_space_read_multi_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(rm,8,(t),(h),(o),(a),(c))
+
+
+/*
+ * Bus read region operations.
+ */
+#define bus_space_read_region_1(t, h, o, a, c) \
+ __bs_nonsingle(rr,1,(t),(h),(o),(a),(c))
+#define bus_space_read_region_2(t, h, o, a, c) \
+ __bs_nonsingle(rr,2,(t),(h),(o),(a),(c))
+#define bus_space_read_region_4(t, h, o, a, c) \
+ __bs_nonsingle(rr,4,(t),(h),(o),(a),(c))
+#define bus_space_read_region_8(t, h, o, a, c) \
+ __bs_nonsingle(rr,8,(t),(h),(o),(a),(c))
+
+#define bus_space_read_region_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,1,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,2,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,4,(t),(h),(o),(a),(c))
+#define bus_space_read_region_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(rr,8,(t),(h),(o),(a),(c))
+
+
+/*
+ * Bus write (single) operations.
+ */
+#define bus_space_write_1(t, h, o, v) __bs_ws(1,(t),(h),(o),(v))
+#define bus_space_write_2(t, h, o, v) __bs_ws(2,(t),(h),(o),(v))
+#define bus_space_write_4(t, h, o, v) __bs_ws(4,(t),(h),(o),(v))
+#define bus_space_write_8(t, h, o, v) __bs_ws(8,(t),(h),(o),(v))
+
+#define bus_space_write_stream_1(t, h, o, v) __bs_ws_s(1,(t),(h),(o),(v))
+#define bus_space_write_stream_2(t, h, o, v) __bs_ws_s(2,(t),(h),(o),(v))
+#define bus_space_write_stream_4(t, h, o, v) __bs_ws_s(4,(t),(h),(o),(v))
+#define bus_space_write_stream_8(t, h, o, v) __bs_ws_s(8,(t),(h),(o),(v))
+
+
+/*
+ * Bus write multiple operations.
+ */
+#define bus_space_write_multi_1(t, h, o, a, c) \
+ __bs_nonsingle(wm,1,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_2(t, h, o, a, c) \
+ __bs_nonsingle(wm,2,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_4(t, h, o, a, c) \
+ __bs_nonsingle(wm,4,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_8(t, h, o, a, c) \
+ __bs_nonsingle(wm,8,(t),(h),(o),(a),(c))
+
+#define bus_space_write_multi_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,1,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,2,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,4,(t),(h),(o),(a),(c))
+#define bus_space_write_multi_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(wm,8,(t),(h),(o),(a),(c))
+
+
+/*
+ * Bus write region operations.
+ */
+#define bus_space_write_region_1(t, h, o, a, c) \
+ __bs_nonsingle(wr,1,(t),(h),(o),(a),(c))
+#define bus_space_write_region_2(t, h, o, a, c) \
+ __bs_nonsingle(wr,2,(t),(h),(o),(a),(c))
+#define bus_space_write_region_4(t, h, o, a, c) \
+ __bs_nonsingle(wr,4,(t),(h),(o),(a),(c))
+#define bus_space_write_region_8(t, h, o, a, c) \
+ __bs_nonsingle(wr,8,(t),(h),(o),(a),(c))
+
+#define bus_space_write_region_stream_1(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,1,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_2(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,2,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_4(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,4,(t),(h),(o),(a),(c))
+#define bus_space_write_region_stream_8(t, h, o, a, c) \
+ __bs_nonsingle_s(wr,8,(t),(h),(o),(a),(c))
+
+
+/*
+ * Set multiple operations.
+ */
+#define bus_space_set_multi_1(t, h, o, v, c) \
+ __bs_set(sm,1,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_2(t, h, o, v, c) \
+ __bs_set(sm,2,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_4(t, h, o, v, c) \
+ __bs_set(sm,4,(t),(h),(o),(v),(c))
+#define bus_space_set_multi_8(t, h, o, v, c) \
+ __bs_set(sm,8,(t),(h),(o),(v),(c))
+
+
+/*
+ * Set region operations.
+ */
+#define bus_space_set_region_1(t, h, o, v, c) \
+ __bs_set(sr,1,(t),(h),(o),(v),(c))
+#define bus_space_set_region_2(t, h, o, v, c) \
+ __bs_set(sr,2,(t),(h),(o),(v),(c))
+#define bus_space_set_region_4(t, h, o, v, c) \
+ __bs_set(sr,4,(t),(h),(o),(v),(c))
+#define bus_space_set_region_8(t, h, o, v, c) \
+ __bs_set(sr,8,(t),(h),(o),(v),(c))
+
+
+/*
+ * Copy operations.
+ */
+#define bus_space_copy_region_1(t, h1, o1, h2, o2, c) \
+ __bs_copy(1, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_2(t, h1, o1, h2, o2, c) \
+ __bs_copy(2, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_4(t, h1, o1, h2, o2, c) \
+ __bs_copy(4, t, h1, o1, h2, o2, c)
+#define bus_space_copy_region_8(t, h1, o1, h2, o2, c) \
+ __bs_copy(8, t, h1, o1, h2, o2, c)
+
+#include <machine/bus_dma.h>
+
+#endif /* _MACHINE_BUS_H_ */
Index: sys/arm64/include/bus_dma.h
===================================================================
--- /dev/null
+++ sys/arm64/include/bus_dma.h
@@ -0,0 +1,34 @@
+/*-
+ * Copyright (c) 2005 Scott Long
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* $FreeBSD$ */
+
+#ifndef _MACHINE_BUS_DMA_H_
+#define _MACHINE_BUS_DMA_H_
+
+#include <sys/bus_dma.h>
+
+#endif /* !_MACHINE_BUS_DMA_H_ */
Index: sys/arm64/include/clock.h
===================================================================
--- /dev/null
+++ sys/arm64/include/clock.h
@@ -0,0 +1 @@
+/* $FreeBSD$ */
Index: sys/arm64/include/counter.h
===================================================================
--- /dev/null
+++ sys/arm64/include/counter.h
@@ -0,0 +1,93 @@
+/*-
+ * Copyright (c) 2012 Konstantin Belousov <kib@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_COUNTER_H_
+#define _MACHINE_COUNTER_H_
+
+#include <sys/pcpu.h>
+#ifdef INVARIANTS
+#include <sys/proc.h>
+#endif
+
+#define counter_enter() critical_enter()
+#define counter_exit() critical_exit()
+
+#ifdef IN_SUBR_COUNTER_C
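+/*
+ * Per-CPU counter storage shadows struct pcpu, so CPU n's copy of a
+ * counter lives sizeof(struct pcpu) * n bytes past CPU 0's.  A fetch
+ * sums those per-CPU copies; zeroing rewrites each slot on its own CPU.
+ */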
+static inline uint64_t
+counter_u64_read_one(uint64_t *p, int cpu)
+{
+
+ return (*(uint64_t *)((char *)p + sizeof(struct pcpu) * cpu));
+}
+
+static inline uint64_t
+counter_u64_fetch_inline(uint64_t *p)
+{
+ uint64_t r;
+ int i;
+
+ r = 0;
+ for (i = 0; i < mp_ncpus; i++)
+ r += counter_u64_read_one((uint64_t *)p, i);
+
+ return (r);
+}
+
+/* XXXKIB: the zeroing may interrupt an increment in progress on this CPU */
+static void
+counter_u64_zero_one_cpu(void *arg)
+{
+
+ *((uint64_t *)((char *)arg + sizeof(struct pcpu) *
+ PCPU_GET(cpuid))) = 0;
+}
+
+static inline void
+counter_u64_zero_inline(counter_u64_t c)
+{
+
+ smp_rendezvous(smp_no_rendevous_barrier, counter_u64_zero_one_cpu,
+ smp_no_rendevous_barrier, c);
+}
+#endif
+
+#define counter_u64_add_protected(c, inc) do { \
+ CRITICAL_ASSERT(curthread); \
+ *(uint64_t *)zpcpu_get(c) += (inc); \
+} while (0)
+
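+/*
+ * With no reserved per-CPU atomic increment available here, the plain
+ * load/add/store is made safe against preemption (though not against
+ * interrupts, see above) by a critical section.
+ */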
+static inline void
+counter_u64_add(counter_u64_t c, int64_t inc)
+{
+
+ counter_enter();
+ counter_u64_add_protected(c, inc);
+ counter_exit();
+}
+
+#endif /* ! _MACHINE_COUNTER_H_ */
Index: sys/arm64/include/cpufunc.h
===================================================================
--- sys/arm64/include/cpufunc.h
+++ sys/arm64/include/cpufunc.h
@@ -109,6 +109,7 @@
}
#define cpu_nullop() arm64_nullop()
+#define cpufunc_nullop() arm64_nullop()
#define cpu_setttb(a) arm64_setttb(a)
#define cpu_tlb_flushID() arm64_tlb_flushID()
Index: sys/arm64/include/db_machdep.h
===================================================================
--- /dev/null
+++ sys/arm64/include/db_machdep.h
@@ -0,0 +1,123 @@
+/*-
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ *
+ * from: FreeBSD: src/sys/i386/include/db_machdep.h,v 1.16 1999/10/04
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_DB_MACHDEP_H_
+#define _MACHINE_DB_MACHDEP_H_
+
+#include <machine/armreg.h>
+#include <machine/frame.h>
+#include <machine/trap.h>
+
+#define T_BREAKPOINT (EXCP_BRK)
+#define T_WATCHPOINT (EXCP_WATCHPT_EL1)
+
+typedef vm_offset_t db_addr_t;
+typedef long db_expr_t;
+
+#define PC_REGS() ((db_addr_t)kdb_thrctx->pcb_pc)
+
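+/* AArch64 "brk #0": BRK #imm16 encodes as 0xd4200000 | ((imm16) << 5). */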
+#define BKPT_INST (0xd4200000)
+#define BKPT_SIZE (4)
+#define BKPT_SET(inst) (BKPT_INST)
+
+#define BKPT_SKIP do { \
+ kdb_frame->tf_elr += BKPT_SIZE; \
+} while (0)
+
+#define db_clear_single_step kdb_cpu_clear_singlestep
+#define db_set_single_step kdb_cpu_set_singlestep
+
+#define IS_BREAKPOINT_TRAP(type, code) (type == T_BREAKPOINT)
+#define IS_WATCHPOINT_TRAP(type, code) (type == T_WATCHPOINT)
+
+#define inst_trap_return(ins) (0)
+/* ret */
+#define inst_return(ins) (((ins) & 0xfffffc1fu) == 0xd65f0000)
+#define inst_call(ins) (((ins) & 0xfc000000u) == 0x94000000u || /* BL */ \
+ ((ins) & 0xfffffc1fu) == 0xd63f0000u) /* BLR */
+
+#define inst_load(ins) ({ \
+ uint32_t tmp_instr = db_get_value(PC_REGS(), sizeof(uint32_t), FALSE); \
+ is_load_instr(tmp_instr); \
+})
+
+#define inst_store(ins) ({ \
+ uint32_t tmp_instr = db_get_value(PC_REGS(), sizeof(uint32_t), FALSE); \
+ is_store_instr(tmp_instr); \
+})
+
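+/*
+ * Classify an instruction word as a load or a store with mask/match tests
+ * against the AArch64 load/store encoding groups; the comment on each
+ * line names the addressing form being matched.
+ */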
+#define is_load_instr(ins) ((((ins) & 0x3b000000u) == 0x18000000u) || /* literal */ \
+ (((ins) & 0x3f400000u) == 0x08400000u) || /* exclusive */ \
+ (((ins) & 0x3bc00000u) == 0x28400000u) || /* no-allocate pair */ \
+ ((((ins) & 0x3b200c00u) == 0x38000400u) && \
+ (((ins) & 0x3be00c00u) != 0x38000400u) && \
+ (((ins) & 0xffe00c00u) != 0x3c800400u)) || /* immediate post-indexed */ \
+ ((((ins) & 0x3b200c00u) == 0x38000c00u) && \
+ (((ins) & 0x3be00c00u) != 0x38000c00u) && \
+ (((ins) & 0xffe00c00u) != 0x3c800c00u)) || /* immediate pre-indexed */ \
+ ((((ins) & 0x3b200c00u) == 0x38200800u) && \
+ (((ins) & 0x3be00c00u) != 0x38200800u) && \
+ (((ins) & 0xffe00c00u) != 0x3ca00c80u)) || /* register offset */ \
+ ((((ins) & 0x3b200c00u) == 0x38000800u) && \
+ (((ins) & 0x3be00c00u) != 0x38000800u)) || /* unprivileged */ \
+ ((((ins) & 0x3b200c00u) == 0x38000000u) && \
+ (((ins) & 0x3be00c00u) != 0x38000000u) && \
+ (((ins) & 0xffe00c00u) != 0x3c800000u)) || /* unscaled immediate */ \
+ ((((ins) & 0x3b000000u) == 0x39000000u) && \
+ (((ins) & 0x3bc00000u) != 0x39000000u) && \
+ (((ins) & 0xffc00000u) != 0x3d800000u)) || /* unsigned immediate */ \
+ (((ins) & 0x3bc00000u) == 0x28400000u) || /* pair (offset) */ \
+ (((ins) & 0x3bc00000u) == 0x28c00000u) || /* pair (post-indexed) */ \
+ (((ins) & 0x3bc00000u) == 0x29800000u)) /* pair (pre-indexed) */
+
+#define is_store_instr(ins) ((((ins) & 0x3f400000u) == 0x08000000u) || /* exclusive */ \
+ (((ins) & 0x3bc00000u) == 0x28000000u) || /* no-allocate pair */ \
+ ((((ins) & 0x3be00c00u) == 0x38000400u) || \
+ (((ins) & 0xffe00c00u) == 0x3c800400u)) || /* immediate post-indexed */ \
+ ((((ins) & 0x3be00c00u) == 0x38000c00u) || \
+ (((ins) & 0xffe00c00u) == 0x3c800c00u)) || /* immediate pre-indexed */ \
+ ((((ins) & 0x3be00c00u) == 0x38200800u) || \
+ (((ins) & 0xffe00c00u) == 0x3ca00800u)) || /* register offset */ \
+ (((ins) & 0x3be00c00u) == 0x38000800u) || /* unprivileged */ \
+ ((((ins) & 0x3be00c00u) == 0x38000000u) || \
+ (((ins) & 0xffe00c00u) == 0x3c800000u)) || /* unscaled immediate */ \
+ ((((ins) & 0x3bc00000u) == 0x39000000u) || \
+ (((ins) & 0xffc00000u) == 0x3d800000u)) || /* unsigned immediate */ \
+ (((ins) & 0x3bc00000u) == 0x28000000u) || /* pair (offset) */ \
+ (((ins) & 0x3bc00000u) == 0x28800000u) || /* pair (post-indexed) */ \
+ (((ins) & 0x3bc00000u) == 0x29800000u)) /* pair (pre-indexed) */
+
+#define next_instr_address(pc, bd) ((bd) ? (pc) : ((pc) + 4))
+
+#define DB_SMALL_VALUE_MAX (0x7fffffff)
+#define DB_SMALL_VALUE_MIN (-0x40001)
+
+#define DB_ELFSIZE 64
+
+#endif /* !_MACHINE_DB_MACHDEP_H_ */
Index: sys/arm64/include/debug_monitor.h
===================================================================
--- /dev/null
+++ sys/arm64/include/debug_monitor.h
@@ -0,0 +1,61 @@
+/*-
+ * Copyright (c) 2014 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under
+ * the sponsorship of the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef _MACHINE_DEBUG_MONITOR_H_
+#define _MACHINE_DEBUG_MONITOR_H_
+
+#ifdef KDB
+
+#include <machine/db_machdep.h>
+
+enum dbg_el_t {
+ DBG_FROM_EL0 = 0,
+ DBG_FROM_EL1 = 1,
+};
+
+enum dbg_access_t {
+ HW_BREAKPOINT_X = 0,
+ HW_BREAKPOINT_R = 1,
+ HW_BREAKPOINT_W = 2,
+ HW_BREAKPOINT_RW = HW_BREAKPOINT_R | HW_BREAKPOINT_W,
+};
+
+void dbg_monitor_init(void);
+void dbg_show_watchpoint(void);
+int dbg_setup_watchpoint(db_expr_t addr, db_expr_t size, enum dbg_el_t el,
+ enum dbg_access_t access);
+int dbg_remove_watchpoint(db_expr_t addr, db_expr_t size, enum dbg_el_t el);
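+
+/*
+ * Illustrative call (the watched address is hypothetical): monitor an
+ * 8-byte kernel variable for writes with
+ *
+ *	dbg_setup_watchpoint(addr, 8, DBG_FROM_EL1, HW_BREAKPOINT_W);
+ */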
+#else
+static __inline void
+dbg_monitor_init(void)
+{
+}
+#endif
+
+#endif /* _MACHINE_DEBUG_MONITOR_H_ */
Index: sys/arm64/include/devmap.h
===================================================================
--- /dev/null
+++ sys/arm64/include/devmap.h
@@ -0,0 +1,93 @@
+/*-
+ * Copyright (c) 2013 Ian Lepore <ian@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_DEVMAP_H_
+#define _MACHINE_DEVMAP_H_
+
+/*
+ * This structure is used by MD code to describe static mappings of devices
+ * which are established as part of bringing up the MMU early in the boot.
+ */
+struct arm_devmap_entry {
+ vm_offset_t pd_va; /* virtual address */
+ vm_paddr_t pd_pa; /* physical address */
+ vm_size_t pd_size; /* size of region */
+ vm_prot_t pd_prot; /* protection code */
+ int pd_cache; /* cache attributes */
+};
+
+/*
+ * Return the lowest KVA address used in any entry in the registered devmap
+ * table. This works with whatever table is registered, including the internal
+ * table used by arm_devmap_add_entry() if that routine was used. Platforms can
+ * implement initarm_lastaddr() by calling this if static device mappings are
+ * their only use of high KVA space.
+ */
+vm_offset_t arm_devmap_lastaddr(void);
+
+/*
+ * Automatically allocate KVA (from the top of the address space downwards) and
+ * make static device mapping entries in an internal table. The internal table
+ * is automatically registered on the first call to this.
+ */
+void arm_devmap_add_entry(vm_paddr_t pa, vm_size_t sz);
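+
+/*
+ * Typical early-boot use (the physical address and size here are
+ * hypothetical):
+ *
+ *	arm_devmap_add_entry(0x1c090000, 0x1000);
+ *
+ * followed by arm_devmap_bootstrap() once all entries have been added.
+ */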
+
+/*
+ * Register a platform-local table to be bootstrapped by the generic
+ * initarm() in arm/machdep.c. This is used by newer code that allocates and
+ * fills in its own local table but does not have its own initarm() routine.
+ */
+void arm_devmap_register_table(const struct arm_devmap_entry * _table);
+
+/*
+ * Establish mappings for all the entries in the table. This is called
+ * automatically from the common initarm() in arm/machdep.c, and also from the
+ * custom initarm() routines in older code. If the table pointer is NULL, this
+ * will use the table installed previously by arm_devmap_register_table().
+ */
+void arm_devmap_bootstrap(vm_offset_t _l1pt,
+ const struct arm_devmap_entry *_table);
+
+/*
+ * Translate between virtual and physical addresses within a region that is
+ * static-mapped by the devmap code. If the given address range isn't
+ * static-mapped, then ptov returns NULL and vtop returns DEVMAP_PADDR_NOTFOUND.
+ * The latter implies that you can't vtop just the last byte of physical address
+ * space. This is not as limiting as it might sound, because even if a device
+ * occupies the end of the physical address space, you're only prevented from
+ * doing vtop for that single byte. If you vtop a size bigger than 1 it works.
+ */
+#define DEVMAP_PADDR_NOTFOUND ((vm_paddr_t)(-1))
+
+void * arm_devmap_ptov(vm_paddr_t _pa, vm_size_t _sz);
+vm_paddr_t arm_devmap_vtop(void * _va, vm_size_t _sz);
+
+/* Print the static mapping table; used for bootverbose output. */
+void arm_devmap_print_table(void);
+
+#endif /* !_MACHINE_DEVMAP_H_ */
Index: sys/arm64/include/dump.h
===================================================================
--- /dev/null
+++ sys/arm64/include/dump.h
@@ -0,0 +1,74 @@
+/*-
+ * Copyright (c) 2014 EMC Corp.
+ * Author: Conrad Meyer <conrad.meyer@isilon.com>
+ * Copyright (c) 2015 The FreeBSD Foundation.
+ * All rights reserved.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_DUMP_H_
+#define _MACHINE_DUMP_H_
+
+#define KERNELDUMP_ARCH_VERSION KERNELDUMP_AARCH64_VERSION
+#define EM_VALUE EM_AARCH64
+/* XXX: I suppose 20 should be enough. */
+#define DUMPSYS_MD_PA_NPAIRS 20
+#define DUMPSYS_NUM_AUX_HDRS 1
+
+void dumpsys_wbinv_all(void);
+int dumpsys_write_aux_headers(struct dumperinfo *di);
+
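+/* The inline hooks below defer to the dumpsys_gen_*() defaults. */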
+static inline void
+dumpsys_pa_init(void)
+{
+
+ dumpsys_gen_pa_init();
+}
+
+static inline struct dump_pa *
+dumpsys_pa_next(struct dump_pa *p)
+{
+
+ return (dumpsys_gen_pa_next(p));
+}
+
+static inline void
+dumpsys_unmap_chunk(vm_paddr_t pa, size_t s, void *va)
+{
+
+ dumpsys_gen_unmap_chunk(pa, s, va);
+}
+
+static inline int
+dumpsys(struct dumperinfo *di)
+{
+
+ return (dumpsys_generic(di));
+}
+
+#endif /* !_MACHINE_DUMP_H_ */
Index: sys/arm64/include/hypervisor.h
===================================================================
--- /dev/null
+++ sys/arm64/include/hypervisor.h
@@ -0,0 +1,85 @@
+/*-
+ * Copyright (c) 2013, 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_HYPERVISOR_H_
+#define _MACHINE_HYPERVISOR_H_
+
+/*
+ * These registers are only useful when in hypervisor context,
+ * e.g. specific to EL2, or controlling the hypervisor.
+ */
+
+/*
+ * Architecture feature trap register
+ */
+#define CPTR_RES0 0x7fefc800
+#define CPTR_RES1 0x000033ff
+#define CPTR_TFP 0x00000400
+#define CPTR_TTA 0x00100000
+#define CPTR_TCPAC 0x80000000
+
+/*
+ * Hypervisor Config Register
+ */
+
+#define HCR_VM 0x0000000000000001
+#define HCR_SWIO 0x0000000000000002
+#define HCR_PTW 0x0000000000000004
+#define HCR_FMO 0x0000000000000008
+#define HCR_IMO 0x0000000000000010
+#define HCR_AMO 0x0000000000000020
+#define HCR_VF 0x0000000000000040
+#define HCR_VI 0x0000000000000080
+#define HCR_VSE 0x0000000000000100
+#define HCR_FB 0x0000000000000200
+#define HCR_BSU_MASK 0x0000000000000c00
+#define HCR_DC 0x0000000000001000
+#define HCR_TWI 0x0000000000002000
+#define HCR_TWE 0x0000000000004000
+#define HCR_TID0 0x0000000000008000
+#define HCR_TID1 0x0000000000010000
+#define HCR_TID2 0x0000000000020000
+#define HCR_TID3 0x0000000000040000
+#define HCR_TSC 0x0000000000080000
+#define HCR_TIDCP 0x0000000000100000
+#define HCR_TACR 0x0000000000200000
+#define HCR_TSW 0x0000000000400000
+#define HCR_TPC 0x0000000000800000
+#define HCR_TPU 0x0000000001000000
+#define HCR_TTLB 0x0000000002000000
+#define HCR_TVM 0x0000000004000000
+#define HCR_TGE 0x0000000008000000
+#define HCR_TDZ 0x0000000010000000
+#define HCR_HCD 0x0000000020000000
+#define HCR_TRVM 0x0000000040000000
+#define HCR_RW 0x0000000080000000
+#define HCR_CD 0x0000000100000000
+#define HCR_ID 0x0000000200000000
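+
+/*
+ * As a minimal example, boot code entered at EL2 typically programs
+ * HCR_EL2 with just HCR_RW before dropping to EL1, so the kernel runs
+ * in AArch64 state with no other traps enabled.
+ */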
+
+#endif /* !_MACHINE_HYPERVISOR_H_ */
Index: sys/arm64/include/in_cksum.h
===================================================================
--- /dev/null
+++ sys/arm64/include/in_cksum.h
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from tahoe: in_cksum.c 1.2 86/01/05
+ * from: @(#)in_cksum.c 1.3 (Berkeley) 1/19/91
+ * from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define _MACHINE_IN_CKSUM_H_ 1
+
+#include <sys/cdefs.h>
+
+#ifdef _KERNEL
+#define in_cksum(m, len) in_cksum_skip(m, len, 0)
+u_short in_addword(u_short sum, u_short b);
+u_short in_cksum_skip(struct mbuf *m, int len, int skip);
+u_int do_cksum(const void *, int);
+#if defined(IPVERSION) && (IPVERSION == 4)
+u_int in_cksum_hdr(const struct ip *);
+#endif
+
+u_short in_pseudo(u_int sum, u_int b, u_int c);
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_IN_CKSUM_H_ */
Index: sys/arm64/include/intr.h
===================================================================
--- /dev/null
+++ sys/arm64/include/intr.h
@@ -0,0 +1,56 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner <andrew@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_INTR_H_
+#define _MACHINE_INTR_H_
+
+int arm_config_intr(u_int, enum intr_trigger, enum intr_polarity);
+void arm_cpu_intr(struct trapframe *);
+void arm_dispatch_intr(u_int, struct trapframe *);
+int arm_enable_intr(void);
+void arm_mask_irq(u_int);
+void arm_register_root_pic(device_t, u_int);
+void arm_register_msi_pic(device_t);
+int arm_alloc_msi(device_t, int, int *);
+int arm_release_msi(device_t, int, int *);
+int arm_alloc_msix(device_t, int *);
+int arm_release_msix(device_t, int);
+int arm_map_msi(device_t, int, uint64_t *, uint32_t *);
+int arm_map_msix(device_t, int, uint64_t *, uint32_t *);
+int arm_setup_intr(const char *, driver_filter_t *, driver_intr_t,
+ void *, u_int, enum intr_type, void **);
+int arm_teardown_intr(void *);
+void arm_unmask_irq(u_int);
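+
+/*
+ * Illustrative registration of a filter-only handler (the name, softc
+ * and IRQ number are hypothetical):
+ *
+ *	void *cookie;
+ *	arm_setup_intr("mydev", mydev_filter, NULL, sc, irq,
+ *	    INTR_TYPE_MISC, &cookie);
+ */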
+
+#ifdef SMP
+void arm_init_secondary(void);
+void arm_setup_ipihandler(driver_filter_t *, u_int);
+void arm_unmask_ipi(u_int);
+#endif
+
+#endif /* _MACHINE_INTR_H_ */
Index: sys/arm64/include/kdb.h
===================================================================
--- /dev/null
+++ sys/arm64/include/kdb.h
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 2004 Marcel Moolenaar
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_KDB_H_
+#define _MACHINE_KDB_H_
+
+#include <machine/cpufunc.h>
+
+#define KDB_STOPPEDPCB(pc) &stoppcbs[pc->pc_cpuid]
+
+void kdb_cpu_clear_singlestep(void);
+void kdb_cpu_set_singlestep(void);
+
+static __inline void
+kdb_cpu_sync_icache(unsigned char *addr, size_t size)
+{
+}
+
+static __inline void
+kdb_cpu_trap(int type, int code)
+{
+}
+
+#endif /* _MACHINE_KDB_H_ */
Index: sys/arm64/include/machdep.h
===================================================================
--- /dev/null
+++ sys/arm64/include/machdep.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2013 Andrew Turner <andrew@freebsd.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MACHDEP_H_
+#define _MACHINE_MACHDEP_H_
+
+struct arm64_bootparams {
+ vm_offset_t modulep;
+ vm_offset_t kern_l1pt; /* L1 page table for the kernel */
+ uint64_t kern_delta;
+ vm_offset_t kern_stack;
+};
+
+extern vm_paddr_t physmap[];
+extern u_int physmap_idx;
+
+void initarm(struct arm64_bootparams *);
+
+#endif /* _MACHINE_MACHDEP_H_ */
Index: sys/arm64/include/md_var.h
===================================================================
--- /dev/null
+++ sys/arm64/include/md_var.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 1995 Bruce D. Evans.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the author nor the names of contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * from: FreeBSD: src/sys/i386/include/md_var.h,v 1.40 2001/07/12
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MD_VAR_H_
+#define _MACHINE_MD_VAR_H_
+
+extern long Maxmem;
+extern char sigcode[];
+extern int szsigcode;
+
+struct dumperinfo;
+int minidumpsys(struct dumperinfo *);
+void busdma_swi(void);
+
+#endif /* !_MACHINE_MD_VAR_H_ */
Index: sys/arm64/include/memdev.h
===================================================================
--- /dev/null
+++ sys/arm64/include/memdev.h
@@ -0,0 +1,40 @@
+/*-
+ * Copyright (c) 2004 Mark R V Murray
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer
+ * in this position and unchanged.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_MEMDEV_H_
+#define _MACHINE_MEMDEV_H_
+
+#define CDEV_MINOR_MEM 0
+#define CDEV_MINOR_KMEM 1
+
+d_open_t memopen;
+d_read_t memrw;
+#define memioctl (d_ioctl_t *)NULL
+#define memmmap (d_mmap_t *)NULL
+
+#endif /* _MACHINE_MEMDEV_H_ */
Index: sys/arm64/include/metadata.h
===================================================================
--- /dev/null
+++ sys/arm64/include/metadata.h
@@ -0,0 +1,41 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner <andrew@FreeBSD.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_METADATA_H_
+#define _MACHINE_METADATA_H_
+
+#define MODINFOMD_EFI_MAP 0x1001
+#define MODINFOMD_DTBP 0x1002
+
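+/*
+ * The MODINFOMD_EFI_MAP payload is this header followed immediately by
+ * memory_size bytes of EFI memory descriptors, each descriptor_size
+ * bytes long.
+ */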
+struct efi_map_header {
+ size_t memory_size;
+ size_t descriptor_size;
+ uint32_t descriptor_version;
+};
+
+#endif /* !_MACHINE_METADATA_H_ */
Index: sys/arm64/include/ofw_machdep.h
===================================================================
--- /dev/null
+++ sys/arm64/include/ofw_machdep.h
@@ -0,0 +1,44 @@
+/*-
+ * Copyright (c) 2009 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_OFW_MACHDEP_H_
+#define _MACHINE_OFW_MACHDEP_H_
+
+#include <vm/vm.h>
+
+typedef uint32_t cell_t;
+
+struct mem_region {
+ vm_offset_t mr_start;
+ vm_size_t mr_size;
+};
+
+#endif /* _MACHINE_OFW_MACHDEP_H_ */
Index: sys/arm64/include/param.h
===================================================================
--- sys/arm64/include/param.h
+++ sys/arm64/include/param.h
@@ -118,8 +118,4 @@
#define pgtok(x) ((unsigned long)(x) * (PAGE_SIZE / 1024))
-#ifdef _KERNEL
-#define NO_FUEWORD 1
-#endif
-
#endif /* !_MACHINE_PARAM_H_ */
Index: sys/arm64/include/resource.h
===================================================================
--- /dev/null
+++ sys/arm64/include/resource.h
@@ -0,0 +1,46 @@
+/*-
+ * Copyright 1998 Massachusetts Institute of Technology
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby
+ * granted, provided that both the above copyright notice and this
+ * permission notice appear in all copies, that both the above
+ * copyright notice and this permission notice appear in all
+ * supporting documentation, and that the name of M.I.T. not be used
+ * in advertising or publicity pertaining to distribution of the
+ * software without specific, written prior permission. M.I.T. makes
+ * no representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied
+ * warranty.
+ *
+ * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''. M.I.T. DISCLAIMS
+ * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
+ * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_RESOURCE_H_
+#define _MACHINE_RESOURCE_H_ 1
+
+/*
+ * Definitions of resource types for arm64 machines.  The numbering
+ * follows the convention shared by the other FreeBSD platforms.
+ */
+
+#define SYS_RES_IRQ 1 /* interrupt lines */
+#define SYS_RES_DRQ 2 /* isa dma lines */
+#define SYS_RES_MEMORY 3 /* i/o memory */
+#define SYS_RES_IOPORT 4 /* i/o ports */
+#define SYS_RES_GPIO 5 /* general purpose i/o */
+
+#endif /* !_MACHINE_RESOURCE_H_ */
Index: sys/arm64/include/sf_buf.h
===================================================================
--- /dev/null
+++ sys/arm64/include/sf_buf.h
@@ -0,0 +1,51 @@
+/*-
+ * Copyright (c) 2003, 2005 Alan L. Cox <alc@cs.rice.edu>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_SF_BUF_H_
+#define _MACHINE_SF_BUF_H_
+
+/*
+ * On this machine, the only purpose for which sf_buf is used is to implement
+ * an opaque pointer required by the machine-independent parts of the kernel.
+ * That pointer references the vm_page that is "mapped" by the sf_buf. The
+ * actual mapping is provided by the direct virtual-to-physical mapping.
+ */
+static inline vm_offset_t
+sf_buf_kva(struct sf_buf *sf)
+{
+
+ return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS((vm_page_t)sf)));
+}
+
+static inline vm_page_t
+sf_buf_page(struct sf_buf *sf)
+{
+
+ return ((vm_page_t)sf);
+}
+#endif /* !_MACHINE_SF_BUF_H_ */
Index: sys/arm64/include/smp.h
===================================================================
--- /dev/null
+++ sys/arm64/include/smp.h
@@ -0,0 +1 @@
+/* $FreeBSD$ */
Index: sys/arm64/include/stack.h
===================================================================
--- /dev/null
+++ sys/arm64/include/stack.h
@@ -0,0 +1,35 @@
+/*-
+ * Copyright (c) 2001 Jake Burkholder.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_STACK_H_
+#define _MACHINE_STACK_H_
+
+#define INKERNEL(va) \
+ ((va) >= VM_MIN_KERNEL_ADDRESS && (va) <= VM_MAX_KERNEL_ADDRESS)
+
+#endif /* !_MACHINE_STACK_H_ */
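INKERNEL() is the bounds check a stack unwinder applies before chasing a frame pointer. A hedged sketch of that pattern — the loop below is illustrative, not the stack_machdep.c implementation, and it assumes the saved frame pointer sits at offset 0 of each frame:

/*
 * Illustrative unwinder skeleton.  Walking stops as soon as the
 * frame pointer leaves the kernel address range INKERNEL() describes.
 */
static void
stack_walk_sketch(uintptr_t fp)
{
	while (INKERNEL(fp)) {
		/* ... record the frame here ... */
		fp = *(uintptr_t *)fp;	/* follow the saved frame pointer */
	}
}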
Index: sys/arm64/include/trap.h
===================================================================
--- /dev/null
+++ sys/arm64/include/trap.h
@@ -0,0 +1 @@
+/* $FreeBSD$ */
Index: sys/arm64/include/vfp.h
===================================================================
--- /dev/null
+++ sys/arm64/include/vfp.h
@@ -0,0 +1,46 @@
+/*-
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_VFP_H_
+#define _MACHINE_VFP_H_
+
+#ifdef _KERNEL
+
+#ifndef LOCORE
+void vfp_init(void);
+void vfp_discard(struct thread *);
+void vfp_restore_state(void);
+void vfp_save_state(struct thread *);
+#endif
+
+#endif
+
+#endif /* !_MACHINE_VFP_H_ */
Index: sys/conf/Makefile.arm64
===================================================================
--- /dev/null
+++ sys/conf/Makefile.arm64
@@ -0,0 +1,60 @@
+# Makefile.arm64 -- with config changes.
+# Copyright 1990 W. Jolitz
+# from: @(#)Makefile.i386 7.1 5/10/91
+# from FreeBSD: src/sys/conf/Makefile.i386,v 1.255 2002/02/20 23:35:49
+# $FreeBSD$
+#
+# Makefile for FreeBSD
+#
+# This makefile is constructed from a machine description:
+# config machineid
+# Most changes should be made in the machine description
+# /sys/arm64/conf/``machineid''
+# after which you should do
+# config machineid
+# Generic makefile changes should be made in
+# /sys/conf/Makefile.arm64
+# after which config should be rerun for all machines.
+#
+
+# Which version of config(8) is required.
+%VERSREQ= 600012
+
+STD8X16FONT?= iso
+
+.if !defined(S)
+.if exists(./@/.)
+S= ./@
+.else
+S= ../../..
+.endif
+.endif
+.include "$S/conf/kern.pre.mk"
+
+INCLUDES+= -I$S/contrib/libfdt
+
+# We generally don't want fpu instructions in the kernel.
+CFLAGS += -mgeneral-regs-only
+
+# Reserve x18 for pcpu data
+CFLAGS += -ffixed-x18
+
+.if !empty(DDB_ENABLED)
+CFLAGS += -fno-omit-frame-pointer -mno-omit-leaf-frame-pointer
+.endif
+
+%BEFORE_DEPEND
+
+%OBJS
+
+%FILES.c
+
+%FILES.s
+
+%FILES.m
+
+%CLEAN
+
+%RULES
+
+.include "$S/conf/kern.post.mk"
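Two of the flags above encode arm64 kernel invariants: -mgeneral-regs-only keeps compiler-generated FP/SIMD instructions out of the kernel (the VFP state is saved and restored per thread, see vfp.h above), and -ffixed-x18 stops the compiler from allocating x18 so it can permanently hold the per-CPU data pointer. A hedged sketch of what the reserved register buys — the real accessor belongs in machine/pcpu.h; this one is illustrative:

/*
 * Illustrative only: with x18 reserved by -ffixed-x18, fetching the
 * current CPU's pcpu pointer is a single register move, with no
 * memory access and no interrupt-disable dance.
 */
static inline struct pcpu *
get_pcpu_sketch(void)
{
	struct pcpu *pcpu;

	__asm __volatile("mov %0, x18" : "=r"(pcpu));
	return (pcpu);
}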
Index: sys/conf/files.arm64
===================================================================
--- /dev/null
+++ sys/conf/files.arm64
@@ -0,0 +1,49 @@
+# $FreeBSD$
+arm/arm/devmap.c standard
+arm/arm/generic_timer.c standard
+arm64/arm64/autoconf.c standard
+arm64/arm64/bcopy.c standard
+arm64/arm64/bus_machdep.c standard
+arm64/arm64/bus_space_asm.S standard
+arm64/arm64/busdma_machdep.c standard
+arm64/arm64/clock.c standard
+arm64/arm64/copyinout.S standard
+arm64/arm64/copystr.c standard
+arm64/arm64/cpufunc_asm.S standard
+arm64/arm64/dump_machdep.c standard
+arm64/arm64/elf_machdep.c standard
+arm64/arm64/exception.S standard
+arm64/arm64/gic.c standard
+arm64/arm64/identcpu.c standard
+arm64/arm64/intr_machdep.c standard
+arm64/arm64/in_cksum.c optional inet | inet6
+arm64/arm64/locore.S standard no-obj
+arm64/arm64/machdep.c standard
+arm64/arm64/mem.c standard
+arm64/arm64/minidump_machdep.c standard
+arm64/arm64/nexus.c standard
+arm64/arm64/pic_if.m standard
+arm64/arm64/pmap.c standard
+arm64/arm64/stack_machdep.c standard
+arm64/arm64/support.S standard
+arm64/arm64/swtch.S standard
+arm64/arm64/sys_machdep.c standard
+arm64/arm64/trap.c standard
+arm64/arm64/uio_machdep.c standard
+arm64/arm64/vfp.c standard
+arm64/arm64/vm_machdep.c standard
+dev/fdt/fdt_arm64.c optional fdt
+dev/ofw/ofw_cpu.c optional fdt
+dev/uart/uart_bus_fdt.c optional uart fdt
+dev/uart/uart_cpu_fdt.c optional uart fdt
+dev/uart/uart_dev_pl011.c optional uart pl011
+kern/kern_clocksource.c standard
+kern/subr_dummy_vdso_tc.c standard
+libkern/bcmp.c standard
+libkern/ffs.c standard
+libkern/ffsl.c standard
+libkern/fls.c standard
+libkern/flsl.c standard
+libkern/flsll.c standard
+libkern/memmove.c standard
+libkern/memset.c standard
Index: sys/conf/ldscript.arm64
===================================================================
--- /dev/null
+++ sys/conf/ldscript.arm64
@@ -0,0 +1,149 @@
+/* $FreeBSD$ */
+OUTPUT_ARCH(aarch64)
+ENTRY(_start)
+
+SEARCH_DIR(/usr/lib);
+SECTIONS
+{
+ /* Read-only sections, merged into text segment: */
+ . = kernbase + SIZEOF_HEADERS;
+ .text :
+ {
+ *(.text)
+ *(.stub)
+ /* .gnu.warning sections are handled specially by elf32.em. */
+ *(.gnu.warning)
+ *(.gnu.linkonce.t*)
+ } =0x9090
+ _etext = .;
+ PROVIDE (etext = .);
+ .fini : { *(.fini) } =0x9090
+ .rodata : { *(.rodata) *(.gnu.linkonce.r*) }
+ .rodata1 : { *(.rodata1) }
+ .interp : { *(.interp) }
+ .hash : { *(.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+ .rel.text :
+ { *(.rel.text) *(.rel.gnu.linkonce.t*) }
+ .rela.text :
+ { *(.rela.text) *(.rela.gnu.linkonce.t*) }
+ .rel.data :
+ { *(.rel.data) *(.rel.gnu.linkonce.d*) }
+ .rela.data :
+ { *(.rela.data) *(.rela.gnu.linkonce.d*) }
+ .rel.rodata :
+ { *(.rel.rodata) *(.rel.gnu.linkonce.r*) }
+ .rela.rodata :
+ { *(.rela.rodata) *(.rela.gnu.linkonce.r*) }
+ .rel.got : { *(.rel.got) }
+ .rela.got : { *(.rela.got) }
+ .rel.ctors : { *(.rel.ctors) }
+ .rela.ctors : { *(.rela.ctors) }
+ .rel.dtors : { *(.rel.dtors) }
+ .rela.dtors : { *(.rela.dtors) }
+ .rel.init : { *(.rel.init) }
+ .rela.init : { *(.rela.init) }
+ .rel.fini : { *(.rel.fini) }
+ .rela.fini : { *(.rela.fini) }
+ .rel.bss : { *(.rel.bss) }
+ .rela.bss : { *(.rela.bss) }
+ .rel.plt : { *(.rel.plt) }
+ .rela.plt : { *(.rela.plt) }
+ .init : { *(.init) } =0x9090
+ .plt : { *(.plt) }
+
+ . = ALIGN(4);
+ _extab_start = .;
+ PROVIDE(extab_start = .);
+ .ARM.extab : { *(.ARM.extab) }
+ _extab_end = .;
+ PROVIDE(extab_end = .);
+
+ _exidx_start = .;
+ PROVIDE(exidx_start = .);
+ .ARM.exidx : { *(.ARM.exidx) }
+ _exidx_end = .;
+ PROVIDE(exidx_end = .);
+
+ /* Adjust the address for the data segment. We want to adjust up to
+ the same address within the page on the next page up. */
+ . = ALIGN(0x1000) + (. & (0x1000 - 1)) ;
+ .data :
+ {
+ *(.data)
+ *(.gnu.linkonce.d*)
+ CONSTRUCTORS
+ }
+ .data1 : { *(.data1) }
+ . = ALIGN(32 / 8);
+ _start_ctors = .;
+ PROVIDE (start_ctors = .);
+ .ctors :
+ {
+ *(.ctors)
+ }
+ _stop_ctors = .;
+ PROVIDE (stop_ctors = .);
+ .dtors :
+ {
+ *(.dtors)
+ }
+ .got : { *(.got.plt) *(.got) }
+ .dynamic : { *(.dynamic) }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ . = ALIGN(8);
+ .sdata : { *(.sdata) }
+ _edata = .;
+ PROVIDE (edata = .);
+ __bss_start = .;
+ .sbss : { *(.sbss) *(.scommon) }
+ .bss :
+ {
+ *(.dynbss)
+ *(.bss)
+ *(COMMON)
+ }
+ . = ALIGN(8);
+ _end = . ;
+ PROVIDE (end = .);
+ /* Stabs debugging sections. */
+ .stab 0 : { *(.stab) }
+ .stabstr 0 : { *(.stabstr) }
+ .stab.excl 0 : { *(.stab.excl) }
+ .stab.exclstr 0 : { *(.stab.exclstr) }
+ .stab.index 0 : { *(.stab.index) }
+ .stab.indexstr 0 : { *(.stab.indexstr) }
+ .comment 0 : { *(.comment) }
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to the beginning
+ of the section so we begin them at 0. */
+ /* DWARF 1 */
+ .debug 0 : { *(.debug) }
+ .line 0 : { *(.line) }
+ /* GNU DWARF 1 extensions */
+ .debug_srcinfo 0 : { *(.debug_srcinfo) }
+ .debug_sfnames 0 : { *(.debug_sfnames) }
+ /* DWARF 1.1 and DWARF 2 */
+ .debug_aranges 0 : { *(.debug_aranges) }
+ .debug_pubnames 0 : { *(.debug_pubnames) }
+ /* DWARF 2 */
+ .debug_info 0 : { *(.debug_info) }
+ .debug_abbrev 0 : { *(.debug_abbrev) }
+ .debug_line 0 : { *(.debug_line) }
+ .debug_frame 0 : { *(.debug_frame) }
+ .debug_str 0 : { *(.debug_str) }
+ .debug_loc 0 : { *(.debug_loc) }
+ .debug_macinfo 0 : { *(.debug_macinfo) }
+ /* SGI/MIPS DWARF 2 extensions */
+ .debug_weaknames 0 : { *(.debug_weaknames) }
+ .debug_funcnames 0 : { *(.debug_funcnames) }
+ .debug_typenames 0 : { *(.debug_typenames) }
+ .debug_varnames 0 : { *(.debug_varnames) }
+ /* These must appear regardless of . */
+}
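The data-segment expression above (". = ALIGN(0x1000) + (. & (0x1000 - 1))") advances the location counter to the next page while preserving its in-page offset, so the data segment's file offsets and virtual addresses stay congruent modulo the page size. A worked sketch of the arithmetic, using a made-up location counter value:

#include <stdio.h>

int
main(void)
{
	/* Hypothetical location counter; 0x1000 is the page size. */
	unsigned long dot = 0x212345;
	unsigned long aligned = (dot + 0xfff) & ~0xfffUL; /* ALIGN(0x1000) */
	unsigned long newdot = aligned + (dot & 0xfff);   /* keep offset  */

	/* Prints 0x212345 -> 0x213345: next page, same in-page offset. */
	printf("%#lx -> %#lx\n", dot, newdot);
	return (0);
}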
Index: sys/conf/options.arm64
===================================================================
--- /dev/null
+++ sys/conf/options.arm64
@@ -0,0 +1,2 @@
+ARM64 opt_global.h
+VFP opt_global.h
Index: sys/dev/fdt/fdt_arm64.c
===================================================================
--- /dev/null
+++ sys/dev/fdt/fdt_arm64.c
@@ -0,0 +1,49 @@
+/*-
+ * Copyright (c) 2009-2010 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Semihalf under sponsorship from
+ * the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+#include <sys/bus.h>
+
+#include <dev/ofw/ofw_bus.h>
+#include <dev/ofw/ofw_bus_subr.h>
+#include <dev/ofw/openfirm.h>
+
+#include "ofw_bus_if.h"
+#include "fdt_common.h"
+
+struct fdt_fixup_entry fdt_fixup_table[] = {
+ { NULL, NULL }
+};
+
Index: sys/dev/uart/uart_cpu_fdt.c
===================================================================
--- sys/dev/uart/uart_cpu_fdt.c
+++ sys/dev/uart/uart_cpu_fdt.c
@@ -42,7 +42,9 @@
#include <vm/pmap.h>
#include <machine/bus.h>
+#ifndef __aarch64__
#include <machine/fdt.h>
+#endif
#include <dev/fdt/fdt_common.h>
#include <dev/ofw/ofw_bus.h>
@@ -52,6 +54,13 @@
#include <dev/uart/uart_cpu.h>
#include <dev/uart/uart_cpu_fdt.h>
+#ifdef __aarch64__
+extern bus_space_tag_t fdtbus_bs_tag;
+#define DEFAULT_SHIFT 2
+#else
+#define DEFAULT_SHIFT 0
+#endif
+
/*
* UART console routines.
*/
@@ -83,7 +92,7 @@
{
if ((OF_getencprop(node, "reg-shift", cell, sizeof(*cell))) <= 0)
- *cell = 0;
+ *cell = DEFAULT_SHIFT;
return (0);
}
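The DEFAULT_SHIFT change above gives aarch64 a reg-shift of 2 when the FDT omits the property, presumably to match the 4-byte register spacing of the PL011 console these kernels boot with (uart_dev_pl011.c is added in files.arm64 above). A hedged sketch of how a reg-shift turns a register index into a byte offset — uart_getreg_sketch() is illustrative; the real arithmetic lives in the uart core:

/*
 * Illustrative: with regshft = 2, register N is read from byte
 * offset N << 2, i.e. consecutive 32-bit registers.
 */
static inline uint32_t
uart_getreg_sketch(bus_space_tag_t bst, bus_space_handle_t bsh,
    int regnum, u_int regshft)
{
	return (bus_space_read_4(bst, bsh, (bus_size_t)regnum << regshft));
}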
Index: sys/sys/kerneldump.h
===================================================================
--- sys/sys/kerneldump.h
+++ sys/sys/kerneldump.h
@@ -66,6 +66,7 @@
uint32_t version;
#define KERNELDUMPVERSION 1
uint32_t architectureversion;
+#define KERNELDUMP_AARCH64_VERSION 1
#define KERNELDUMP_AMD64_VERSION 2
#define KERNELDUMP_ARM_VERSION 1
#define KERNELDUMP_I386_VERSION 2
