D9433.diff
Index: head/sys/powerpc/booke/booke_machdep.c
===================================================================
--- head/sys/powerpc/booke/booke_machdep.c
+++ head/sys/powerpc/booke/booke_machdep.c
@@ -216,7 +216,7 @@
ivor_setup(void)
{
- mtspr(SPR_IVPR, ((uintptr_t)&interrupt_vector_base) & 0xffff0000);
+ mtspr(SPR_IVPR, ((uintptr_t)&interrupt_vector_base) & ~0xffffUL);
SET_TRAP(SPR_IVOR0, int_critical_input);
SET_TRAP(SPR_IVOR1, int_machine_check);
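
The IVPR fix above matters on LP64: the constant 0xffff0000 has type unsigned int and zero-extends when combined with a 64-bit uintptr_t, so the old mask silently cleared the upper 32 bits of the vector base, while ~0xffffUL preserves them. A minimal stand-alone C sketch for an LP64 host (the address is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t base = 0xc000000000001234UL;  /* hypothetical vector base */

        /* old mask zero-extends to 0x00000000ffff0000: upper word lost */
        printf("old: %#lx\n", base & 0xffff0000);
        /* new mask is 0xffffffffffff0000: upper word preserved */
        printf("new: %#lx\n", base & ~0xffffUL);
        return (0);
    }
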
@@ -250,6 +250,11 @@
SET_TRAP(SPR_IVOR32, int_vec);
break;
}
+
+#ifdef __powerpc64__
+ /* Set 64-bit interrupt mode. */
+ mtspr(SPR_EPCR, mfspr(SPR_EPCR) | EPCR_ICM);
+#endif
}
static int
@@ -353,7 +358,7 @@
}
#define RES_GRANULE 32
-extern uint32_t tlb0_miss_locks[];
+extern uintptr_t tlb0_miss_locks[];
/* Initialise a struct pcpu. */
void
@@ -363,8 +368,8 @@
pcpu->pc_tid_next = TID_MIN;
#ifdef SMP
- uint32_t *ptr;
- int words_per_gran = RES_GRANULE / sizeof(uint32_t);
+ uintptr_t *ptr;
+ int words_per_gran = RES_GRANULE / sizeof(uintptr_t);
ptr = &tlb0_miss_locks[cpuid * words_per_gran];
pcpu->pc_booke_tlb_lock = ptr;
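
Widening tlb0_miss_locks to uintptr_t keeps each lock entry pointer-sized without changing RES_GRANULE, so a 32-byte granule now holds 4 words per CPU instead of 8. A trivial check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    #define RES_GRANULE 32  /* bytes per CPU, as in the diff */

    int main(void)
    {
        /* prints 8 on ILP32, 4 on LP64 */
        printf("words per granule: %zu\n", RES_GRANULE / sizeof(uintptr_t));
        return (0);
    }
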
Index: head/sys/powerpc/booke/locore.S
===================================================================
--- head/sys/powerpc/booke/locore.S
+++ head/sys/powerpc/booke/locore.S
@@ -41,6 +41,39 @@
#define TMPSTACKSZ 16384
+#ifdef __powerpc64__
+#define GET_TOCBASE(r) \
+ mfspr r, SPR_SPRG8
+#define TOC_RESTORE nop
+#define CMPI cmpdi
+#define CMPL cmpld
+#define LOAD ld
+#define LOADX ldarx
+#define STORE std
+#define STOREX stdcx.
+#define STU stdu
+#define CALLSIZE 48
+#define REDZONE 288
+#define THREAD_REG %r13
+#define ADDR(x) \
+ .llong x
+#else
+#define GET_TOCBASE(r)
+#define TOC_RESTORE
+#define CMPI cmpwi
+#define CMPL cmplw
+#define LOAD lwz
+#define LOADX lwarx
+#define STOREX stwcx.
+#define STORE stw
+#define STU stwu
+#define CALLSIZE 8
+#define REDZONE 0
+#define THREAD_REG %r2
+#define ADDR(x) \
+ .long x
+#endif
+
.text
.globl btext
btext:
@@ -101,6 +134,9 @@
* Initial cleanup
*/
li %r3, PSL_DE /* Keep debug exceptions for CodeWarrior. */
+#ifdef __powerpc64__
+ oris %r3, %r3, PSL_CM@h
+#endif
mtmsr %r3
isync
@@ -200,11 +236,8 @@
mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
isync
- lis %r3, KERNBASE@h
- ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
-#ifdef SMP
+ LOAD_ADDR(%r3, KERNBASE)
ori %r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
-#endif
mtspr SPR_MAS2, %r3
isync
@@ -224,11 +257,19 @@
/* Switch to the above TLB1[1] mapping */
bl 4f
4: mflr %r4
- rlwinm %r4, %r4, 0, 8, 31 /* Current offset from kernel load address */
+#ifdef __powerpc64__
+ clrldi %r4, %r4, 38
+ clrrdi %r3, %r3, 12
+#else
+ rlwinm %r4, %r4, 0, 6, 31 /* Current offset from kernel load address */
rlwinm %r3, %r3, 0, 0, 19
+#endif
add %r4, %r4, %r3 /* Convert to kernel virtual address */
addi %r4, %r4, (5f - 4b)
li %r3, PSL_DE /* Note AS=0 */
+#ifdef __powerpc64__
+ oris %r3, %r3, PSL_CM@h
+#endif
mtspr SPR_SRR0, %r4
mtspr SPR_SRR1, %r3
rfi
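
For reference, the new 64-bit mask instructions compute the following (an illustrative C rendering, not code from the tree): clrldi %r4, %r4, 38 clears the upper 38 bits, keeping a 26-bit offset from the kernel load address (the 32-bit rlwinm mask widens correspondingly from 24 to 26 bits), and clrrdi %r3, %r3, 12 clears the low 12 bits to page-align the EPN.

    #include <stdint.h>

    /* clrldi r4, r4, 38: keep the low 26 bits (offset within 64 MB) */
    static uint64_t keep_low26(uint64_t pc) { return (pc & ((1UL << 26) - 1)); }
    /* clrrdi r3, r3, 12: clear the low 12 bits (4 KB page-align) */
    static uint64_t page_align(uint64_t va) { return (va & ~0xfffUL); }
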
@@ -242,6 +283,33 @@
done_mapping:
+#ifdef __powerpc64__
+ /* Set up the TOC pointer */
+ b 0f
+ .align 3
+0: nop
+ bl 1f
+ .llong __tocbase + 0x8000 - .
+1: mflr %r2
+ ld %r1,0(%r2)
+ add %r2,%r1,%r2
+ mtspr SPR_SPRG8, %r2
+
+ /* Get load offset */
+ ld %r31,-0x8000(%r2) /* First TOC entry is TOC base */
+ subf %r31,%r31,%r2 /* Subtract from real TOC base to get base */
+
+ /* Set up the stack pointer */
+ ld %r1,TOC_REF(tmpstack)(%r2)
+ addi %r1,%r1,TMPSTACKSZ-96
+ add %r1,%r1,%r31
+ bl 1f
+ .llong _DYNAMIC-.
+1: mflr %r3
+ ld %r4,0(%r3)
+ add %r3,%r4,%r3
+ mr %r4,%r31
+#else
/*
* Setup a temporary stack
*/
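
The TOC setup above is a position-independent idiom: the doubleword emitted in the instruction stream holds a self-relative offset (__tocbase + 0x8000 - .), bl/mflr recovers that word's own address, and adding the two yields the absolute TOC pointer regardless of where the kernel was loaded. The 0x8000 bias lets signed 16-bit displacements reach the full 64 KB TOC. A C sketch of the arithmetic (names illustrative only):

    #include <stdint.h>

    /* slot holds "target - .", as emitted by .llong target - . */
    static uintptr_t
    resolve_self_relative(const int64_t *slot)
    {
        return ((uintptr_t)slot + (uintptr_t)*slot);
    }

The same pattern recurs below for _DYNAMIC, bp_kernload, and kernel_pmap_store.
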
@@ -265,12 +333,15 @@
add %r4,%r4,%r5
lwz %r4,4(%r4) /* got[0] is _DYNAMIC link addr */
subf %r4,%r4,%r3 /* subtract to calculate relocbase */
- bl elf_reloc_self
+#endif
+ bl CNAME(elf_reloc_self)
+ TOC_RESTORE
/*
* Initialise exception vector offsets
*/
- bl ivor_setup
+ bl CNAME(ivor_setup)
+ TOC_RESTORE
/*
* Set up arguments and jump to system initialization code
@@ -279,15 +350,17 @@
mr %r4, %r31
/* Prepare core */
- bl booke_init
+ bl CNAME(booke_init)
+ TOC_RESTORE
/* Switch to thread0.td_kstack now */
mr %r1, %r3
li %r3, 0
- stw %r3, 0(%r1)
+ STORE %r3, 0(%r1)
/* Machine independent part, does not return */
- bl mi_startup
+ bl CNAME(mi_startup)
+ TOC_RESTORE
/* NOT REACHED */
5: b 5b
@@ -364,6 +437,9 @@
mfmsr %r3
ori %r3, %r3, (PSL_IS | PSL_DS)
+#ifdef __powerpc64__
+ oris %r3, %r3, PSL_CM@h
+#endif
bl 3f
3: mflr %r4
addi %r4, %r4, (4f - 3b)
@@ -393,20 +469,31 @@
mtspr SPR_MAS1, %r3 /* note TS was not filled, so it's TS=0 */
isync
- lis %r3, KERNBASE@h
- ori %r3, %r3, KERNBASE@l /* EPN = KERNBASE */
+ LOAD_ADDR(%r3, KERNBASE)
ori %r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
mtspr SPR_MAS2, %r3
isync
/* Retrieve kernel load [physical] address from bp_kernload */
- bl 5f
- .long bp_kernload
- .long __boot_page
+#ifdef __powerpc64__
+ b 0f
+ .align 3
+0:
+ nop
+#endif
+ bl 5f
+ ADDR(bp_kernload)
+ ADDR(__boot_page)
5: mflr %r3
+#ifdef __powerpc64__
+ ld %r4, 0(%r3)
+ ld %r5, 8(%r3)
+ clrrdi %r3, %r3, 12
+#else
lwz %r4, 0(%r3)
lwz %r5, 4(%r3)
rlwinm %r3, %r3, 0, 0, 19
+#endif
sub %r4, %r4, %r5 /* offset of bp_kernload within __boot_page */
lwzx %r3, %r4, %r3
@@ -426,7 +513,11 @@
rlwinm %r3, %r3, 0, 0xfff /* Offset from boot page start */
add %r3, %r3, %r5 /* Make this virtual address */
addi %r3, %r3, (7f - 6b)
+#ifdef __powerpc64__
+ lis %r4, PSL_CM@h /* Note AS=0 */
+#else
li %r4, 0 /* Note AS=0 */
+#endif
mtspr SPR_SRR0, %r3
mtspr SPR_SRR1, %r4
rfi
@@ -444,6 +535,27 @@
mr %r3, %r28
bl tlb1_inval_entry
+#ifdef __powerpc64__
+ /* Set up the TOC pointer */
+ b 0f
+ .align 3
+0: nop
+ bl 1f
+ .llong __tocbase + 0x8000 - .
+1: mflr %r2
+ ld %r1,0(%r2)
+ add %r2,%r1,%r2
+ mtspr SPR_SPRG8, %r2
+
+ /* Get load offset */
+ ld %r31,-0x8000(%r2) /* First TOC entry is TOC base */
+ subf %r31,%r31,%r2 /* Subtract from real TOC base to get base */
+
+ /* Set up the stack pointer */
+ ld %r1,TOC_REF(tmpstack)(%r2)
+ addi %r1,%r1,TMPSTACKSZ-96
+ add %r1,%r1,%r31
+#else
/*
* Setup a temporary stack
*/
@@ -454,11 +566,13 @@
add %r1,%r1,%r2
stw %r1, 0(%r1)
addi %r1, %r1, (TMPSTACKSZ - 16)
+#endif
/*
* Initialise exception vector offsets
*/
- bl ivor_setup
+ bl CNAME(ivor_setup)
+ TOC_RESTORE
/*
* Assign our pcpu instance
@@ -468,16 +582,19 @@
1: mflr %r4
lwz %r3, 0(%r4)
add %r3, %r3, %r4
- lwz %r3, 0(%r3)
+ LOAD %r3, 0(%r3)
mtsprg0 %r3
- bl pmap_bootstrap_ap
+ bl CNAME(pmap_bootstrap_ap)
+ TOC_RESTORE
- bl cpudep_ap_bootstrap
+ bl CNAME(cpudep_ap_bootstrap)
+ TOC_RESTORE
/* Switch to the idle thread's kstack */
mr %r1, %r3
- bl machdep_ap_bootstrap
+ bl CNAME(machdep_ap_bootstrap)
+ TOC_RESTORE
/* NOT REACHED */
6: b 6b
@@ -594,7 +711,6 @@
* r3-r5 scratched
*/
tlb1_inval_all_but_current:
- mr %r6, %r3
mfspr %r3, SPR_TLB1CFG /* Get number of entries */
andi. %r3, %r3, TLBCFG_NENTRY_MASK@l
li %r4, 0 /* Start from Entry 0 */
@@ -864,14 +980,18 @@
.data
.align 3
GLOBAL(__startkernel)
- .long begin
+ ADDR(begin)
GLOBAL(__endkernel)
- .long end
+ ADDR(end)
.align 4
tmpstack:
.space TMPSTACKSZ
tmpstackbound:
.space 10240 /* XXX: this really should not be necessary */
+#ifdef __powerpc64__
+TOC_ENTRY(tmpstack)
+TOC_ENTRY(bp_kernload)
+#endif
/*
* Compiled KERNBASE locations
Index: head/sys/powerpc/booke/mp_cpudep.c
===================================================================
--- head/sys/powerpc/booke/mp_cpudep.c
+++ head/sys/powerpc/booke/mp_cpudep.c
@@ -50,7 +50,8 @@
uintptr_t
cpudep_ap_bootstrap()
{
- uint32_t msr, sp, csr;
+ uint32_t msr, csr;
+ uintptr_t sp;
/* Enable L1 caches */
csr = mfspr(SPR_L1CSR0);
@@ -66,7 +67,11 @@
}
/* Set MSR */
+#ifdef __powerpc64__
+ msr = PSL_CM | PSL_ME;
+#else
msr = PSL_ME;
+#endif
mtmsr(msr);
/* Assign pcpu fields, return ptr to this AP's idle thread kstack */
Index: head/sys/powerpc/booke/pmap.c
===================================================================
--- head/sys/powerpc/booke/pmap.c
+++ head/sys/powerpc/booke/pmap.c
@@ -34,18 +34,42 @@
* Kernel and user threads run within one common virtual address space
* defined by AS=0.
*
+ * 32-bit pmap:
* Virtual address space layout:
* -----------------------------
- * 0x0000_0000 - 0xafff_ffff : user process
- * 0xb000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
+ * 0x0000_0000 - 0x7fff_ffff : user process
+ * 0x8000_0000 - 0xbfff_ffff : pmap_mapdev()-ed area (PCI/PCIE etc.)
* 0xc000_0000 - 0xc0ff_ffff : kernel reserved
* 0xc000_0000 - data_end : kernel code+data, env, metadata etc.
- * 0xc100_0000 - 0xfeef_ffff : KVA
+ * 0xc100_0000 - 0xffff_ffff : KVA
* 0xc100_0000 - 0xc100_3fff : reserved for page zero/copy
* 0xc100_4000 - 0xc200_3fff : reserved for ptbl bufs
* 0xc200_4000 - 0xc200_8fff : guard page + kstack0
* 0xc200_9000 - 0xfeef_ffff : actual free KVA space
- * 0xfef0_0000 - 0xffff_ffff : I/O devices region
+ *
+ * 64-bit pmap:
+ * Virtual address space layout:
+ * -----------------------------
+ * 0x0000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : user process
+ * 0x0000_0000_0000_0000 - 0x8fff_ffff_ffff_ffff : text, data, heap, maps, libraries
+ * 0x9000_0000_0000_0000 - 0xafff_ffff_ffff_ffff : mmio region
+ * 0xb000_0000_0000_0000 - 0xbfff_ffff_ffff_ffff : stack
+ * 0xc000_0000_0000_0000 - 0xcfff_ffff_ffff_ffff : kernel reserved
+ * 0xc000_0000_0000_0000 - endkernel-1 : kernel code & data
+ * endkernel - msgbufp-1 : flat device tree
+ * msgbufp - ptbl_bufs-1 : message buffer
+ * ptbl_bufs - kernel_pdir-1 : kernel page tables
+ * kernel_pdir - kernel_pp2d-1 : kernel page directory
+ * kernel_pp2d - . : kernel pointers to page directory
+ * pmap_zero_copy_min - crashdumpmap-1 : reserved for page zero/copy
+ * crashdumpmap - ptbl_buf_pool_vabase-1 : reserved for ptbl bufs
+ * ptbl_buf_pool_vabase - virtual_avail-1 : user page directories and page tables
+ * virtual_avail - 0xcfff_ffff_ffff_ffff : actual free KVA space
+ * 0xd000_0000_0000_0000 - 0xdfff_ffff_ffff_ffff : coprocessor region
+ * 0xe000_0000_0000_0000 - 0xefff_ffff_ffff_ffff : mmio region
+ * 0xf000_0000_0000_0000 - 0xffff_ffff_ffff_ffff : direct map
+ * 0xf000_0000_0000_0000 - +Maxmem : physmem map
+ * - 0xffff_ffff_ffff_ffff : device direct map
*/
#include <sys/cdefs.h>
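
The 64-bit pmap below replaces the two-level pdir/ptbl scheme with a three-level tree rooted at pm_pp2d and indexed by PP2D_IDX/PDIR_IDX/PTBL_IDX. A hedged sketch of the decomposition; the shift and width constants here are illustrative only, the real ones live in the machine headers:

    #include <stdint.h>

    #define PTBL_SHIFT 12   /* assumed 4 KB pages */
    #define PTBL_BITS  9    /* assumed index widths, for illustration */
    #define PDIR_BITS  9
    #define PP2D_BITS  20

    #define PTBL_IDX(va) (((va) >> PTBL_SHIFT) & ((1UL << PTBL_BITS) - 1))
    #define PDIR_IDX(va) (((va) >> (PTBL_SHIFT + PTBL_BITS)) & ((1UL << PDIR_BITS) - 1))
    #define PP2D_IDX(va) (((va) >> (PTBL_SHIFT + PTBL_BITS + PDIR_BITS)) & ((1UL << PP2D_BITS) - 1))

    /* pte = pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)] */
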
@@ -83,6 +107,7 @@
#include <vm/vm_pager.h>
#include <vm/uma.h>
+#include <machine/_inttypes.h>
#include <machine/cpu.h>
#include <machine/pcb.h>
#include <machine/platform.h>
@@ -103,6 +128,12 @@
#define debugf(fmt, args...)
#endif
+#ifdef __powerpc64__
+#define PRI0ptrX "016lx"
+#else
+#define PRI0ptrX "08x"
+#endif
+
#define TODO panic("%s: not implemented", __func__);
extern unsigned char _etext[];
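
The PRI0ptrX macro gives debugf a zero-padded, pointer-width hex format so one format string serves both ABIs. A minimal usage sketch (using __LP64__ as a stand-in test so the demo compiles anywhere):

    #include <stdint.h>
    #include <stdio.h>

    #ifdef __LP64__                 /* stand-in for __powerpc64__ in this demo */
    #define PRI0ptrX "016lx"
    #else
    #define PRI0ptrX "08x"
    #endif

    int main(void)
    {
        uintptr_t p = (uintptr_t)&p;

        printf("p at 0x%" PRI0ptrX "\n", p);  /* 16 digits on LP64, 8 on ILP32 */
        return (0);
    }

(The "08x" branch assumes uintptr_t is unsigned int, as on 32-bit FreeBSD/powerpc.)
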
@@ -144,6 +175,9 @@
unsigned int kptbl_min; /* Index of the first kernel ptbl. */
unsigned int kernel_ptbls; /* Number of KVA ptbls. */
+#ifdef __powerpc64__
+unsigned int kernel_pdirs;
+#endif
/*
* If user pmap is processed with mmu_booke_remove and the resident count
@@ -152,7 +186,9 @@
#define PMAP_REMOVE_DONE(pmap) \
((pmap) != kernel_pmap && (pmap)->pm_stats.resident_count == 0)
+#if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
extern int elf32_nxstack;
+#endif
/**************************************************************************/
/* TLB and TID handling */
@@ -175,14 +211,17 @@
#define TLB0_ENTRIES_PER_WAY (tlb0_entries_per_way)
#define TLB1_ENTRIES (tlb1_entries)
-#define TLB1_MAXENTRIES 64
static vm_offset_t tlb1_map_base = VM_MAXUSER_ADDRESS + PAGE_SIZE;
static tlbtid_t tid_alloc(struct pmap *);
static void tid_flush(tlbtid_t tid);
+#ifdef __powerpc64__
+static void tlb_print_entry(int, uint32_t, uint64_t, uint32_t, uint32_t);
+#else
static void tlb_print_entry(int, uint32_t, uint32_t, uint32_t, uint32_t);
+#endif
static void tlb1_read_entry(tlb_entry_t *, unsigned int);
static void tlb1_write_entry(tlb_entry_t *, unsigned int);
@@ -219,17 +258,24 @@
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
+#ifdef __powerpc64__
+static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **,
+ unsigned int, boolean_t);
+static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int);
+static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int);
+static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t);
+#else
static pte_t *ptbl_alloc(mmu_t, pmap_t, unsigned int, boolean_t);
static void ptbl_free(mmu_t, pmap_t, unsigned int);
static void ptbl_hold(mmu_t, pmap_t, unsigned int);
static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
+#endif
static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
-static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
-static void kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr,
- vm_offset_t pdir);
+static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
+static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);
static pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
@@ -239,7 +285,11 @@
static void booke_pmap_init_qpages(void);
/* Number of kva ptbl buffers, each covering one ptbl (PTBL_PAGES). */
+#ifdef __powerpc64__
+#define PTBL_BUFS (16UL * 16 * 16)
+#else
#define PTBL_BUFS (128 * 16)
+#endif
struct ptbl_buf {
TAILQ_ENTRY(ptbl_buf) link; /* list link */
@@ -503,6 +553,364 @@
/* Page table related */
/**************************************************************************/
+#ifdef __powerpc64__
+/* Initialize pool of kva ptbl buffers. */
+static void
+ptbl_init(void)
+{
+ int i;
+
+ mtx_init(&ptbl_buf_freelist_lock, "ptbl bufs lock", NULL, MTX_DEF);
+ TAILQ_INIT(&ptbl_buf_freelist);
+
+ for (i = 0; i < PTBL_BUFS; i++) {
+ ptbl_bufs[i].kva = ptbl_buf_pool_vabase +
+ i * MAX(PTBL_PAGES,PDIR_PAGES) * PAGE_SIZE;
+ TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
+ }
+}
+
+/* Get a ptbl_buf from the freelist. */
+static struct ptbl_buf *
+ptbl_buf_alloc(void)
+{
+ struct ptbl_buf *buf;
+
+ mtx_lock(&ptbl_buf_freelist_lock);
+ buf = TAILQ_FIRST(&ptbl_buf_freelist);
+ if (buf != NULL)
+ TAILQ_REMOVE(&ptbl_buf_freelist, buf, link);
+ mtx_unlock(&ptbl_buf_freelist_lock);
+
+ return (buf);
+}
+
+/* Return a ptbl_buf to the free pool. */
+static void
+ptbl_buf_free(struct ptbl_buf *buf)
+{
+ mtx_lock(&ptbl_buf_freelist_lock);
+ TAILQ_INSERT_TAIL(&ptbl_buf_freelist, buf, link);
+ mtx_unlock(&ptbl_buf_freelist_lock);
+}
+
+/*
+ * Search the pmap's list of ptbl bufs and free the one that maps the given ptbl.
+ */
+static void
+ptbl_free_pmap_ptbl(pmap_t pmap, pte_t * ptbl)
+{
+ struct ptbl_buf *pbuf;
+
+ TAILQ_FOREACH(pbuf, &pmap->pm_ptbl_list, link) {
+ if (pbuf->kva == (vm_offset_t) ptbl) {
+ /* Remove from pmap ptbl buf list. */
+ TAILQ_REMOVE(&pmap->pm_ptbl_list, pbuf, link);
+
+ /* Free corresponding ptbl buf. */
+ ptbl_buf_free(pbuf);
+
+ break;
+ }
+ }
+}
+
+/* Get a pointer to a PTE in a page table. */
+static __inline pte_t *
+pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+{
+ pte_t **pdir;
+ pte_t *ptbl;
+
+ KASSERT((pmap != NULL), ("pte_find: invalid pmap"));
+
+ pdir = pmap->pm_pp2d[PP2D_IDX(va)];
+ if (!pdir)
+ return NULL;
+ ptbl = pdir[PDIR_IDX(va)];
+ return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL);
+}
+
+/*
+ * Search the pmap's list of pdir bufs and free the one that maps the given pdir.
+ */
+static void
+ptbl_free_pmap_pdir(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
+{
+ struct ptbl_buf *pbuf;
+
+ TAILQ_FOREACH(pbuf, &pmap->pm_pdir_list, link) {
+ if (pbuf->kva == (vm_offset_t) pdir) {
+ /* Remove from pmap ptbl buf list. */
+ TAILQ_REMOVE(&pmap->pm_pdir_list, pbuf, link);
+
+ /* Free corresponding pdir buf. */
+ ptbl_buf_free(pbuf);
+
+ break;
+ }
+ }
+}
+
+/* Free pdir pages and invalidate pdir entry. */
+static void
+pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx)
+{
+ pte_t **pdir;
+ vm_paddr_t pa;
+ vm_offset_t va;
+ vm_page_t m;
+ int i;
+
+ pdir = pmap->pm_pp2d[pp2d_idx];
+
+ KASSERT((pdir != NULL), ("pdir_free: null pdir"));
+
+ pmap->pm_pp2d[pp2d_idx] = NULL;
+
+ for (i = 0; i < PDIR_PAGES; i++) {
+ va = ((vm_offset_t) pdir + (i * PAGE_SIZE));
+ pa = pte_vatopa(mmu, kernel_pmap, va);
+ m = PHYS_TO_VM_PAGE(pa);
+ vm_page_free_zero(m);
+ atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+ pmap_kremove(va);
+ }
+
+ ptbl_free_pmap_pdir(mmu, pmap, pdir);
+}
+
+/*
+ * Decrement the hold count of the pdir pages and attempt to free them.
+ * Called when removing a directory entry from the pdir.
+ *
+ * Return 1 if pdir pages were freed.
+ */
+static int
+pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
+{
+ pte_t **pdir;
+ vm_paddr_t pa;
+ vm_page_t m;
+ int i;
+
+ KASSERT((pmap != kernel_pmap),
+ ("pdir_unhold: unholding kernel pdir!"));
+
+ pdir = pmap->pm_pp2d[pp2d_idx];
+
+ KASSERT(((vm_offset_t) pdir >= VM_MIN_KERNEL_ADDRESS),
+ ("pdir_unhold: non kva pdir"));
+
+ /* decrement hold count */
+ for (i = 0; i < PDIR_PAGES; i++) {
+ pa = pte_vatopa(mmu, kernel_pmap,
+ (vm_offset_t) pdir + (i * PAGE_SIZE));
+ m = PHYS_TO_VM_PAGE(pa);
+ m->wire_count--;
+ }
+
+ /*
+ * Free pdir pages if there are no dir entries in this pdir.
+ * wire_count has the same value for all ptbl pages, so check the
+ * last page.
+ */
+ if (m->wire_count == 0) {
+ pdir_free(mmu, pmap, pp2d_idx);
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * Increment the hold count for pdir pages. This routine is used when a
+ * new ptbl entry is being inserted into the pdir.
+ */
+static void
+pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
+{
+ vm_paddr_t pa;
+ vm_page_t m;
+ int i;
+
+ KASSERT((pmap != kernel_pmap),
+ ("pdir_hold: holding kernel pdir!"));
+
+ KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
+
+ for (i = 0; i < PDIR_PAGES; i++) {
+ pa = pte_vatopa(mmu, kernel_pmap,
+ (vm_offset_t) pdir + (i * PAGE_SIZE));
+ m = PHYS_TO_VM_PAGE(pa);
+ m->wire_count++;
+ }
+}
+
+/* Allocate page table. */
+static pte_t *
+ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
+ boolean_t nosleep)
+{
+ vm_page_t mtbl [PTBL_PAGES];
+ vm_page_t m;
+ struct ptbl_buf *pbuf;
+ unsigned int pidx;
+ pte_t *ptbl;
+ int i, j;
+ int req;
+
+ KASSERT((pdir[pdir_idx] == NULL),
+ ("%s: valid ptbl entry exists!", __func__));
+
+ pbuf = ptbl_buf_alloc();
+ if (pbuf == NULL)
+ panic("%s: couldn't alloc kernel virtual memory", __func__);
+
+ ptbl = (pte_t *) pbuf->kva;
+
+ for (i = 0; i < PTBL_PAGES; i++) {
+ pidx = (PTBL_PAGES * pdir_idx) + i;
+ req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
+ while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
+ PMAP_UNLOCK(pmap);
+ rw_wunlock(&pvh_global_lock);
+ if (nosleep) {
+ ptbl_free_pmap_ptbl(pmap, ptbl);
+ for (j = 0; j < i; j++)
+ vm_page_free(mtbl[j]);
+ atomic_subtract_int(&vm_cnt.v_wire_count, i);
+ return (NULL);
+ }
+ VM_WAIT;
+ rw_wlock(&pvh_global_lock);
+ PMAP_LOCK(pmap);
+ }
+ mtbl[i] = m;
+ }
+
+ /* Map the allocated pages into kernel_pmap. */
+ mmu_booke_qenter(mmu, (vm_offset_t) ptbl, mtbl, PTBL_PAGES);
+ /* Zero whole ptbl. */
+ bzero((caddr_t) ptbl, PTBL_PAGES * PAGE_SIZE);
+
+ /* Add pbuf to the pmap ptbl bufs list. */
+ TAILQ_INSERT_TAIL(&pmap->pm_ptbl_list, pbuf, link);
+
+ return (ptbl);
+}
+
+/* Free ptbl pages and invalidate pdir entry. */
+static void
+ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
+{
+ pte_t *ptbl;
+ vm_paddr_t pa;
+ vm_offset_t va;
+ vm_page_t m;
+ int i;
+
+ ptbl = pdir[pdir_idx];
+
+ KASSERT((ptbl != NULL), ("ptbl_free: null ptbl"));
+
+ pdir[pdir_idx] = NULL;
+
+ for (i = 0; i < PTBL_PAGES; i++) {
+ va = ((vm_offset_t) ptbl + (i * PAGE_SIZE));
+ pa = pte_vatopa(mmu, kernel_pmap, va);
+ m = PHYS_TO_VM_PAGE(pa);
+ vm_page_free_zero(m);
+ atomic_subtract_int(&vm_cnt.v_wire_count, 1);
+ pmap_kremove(va);
+ }
+
+ ptbl_free_pmap_ptbl(pmap, ptbl);
+}
+
+/*
+ * Decrement the hold count of the ptbl pages and attempt to free them.
+ * Called when removing a pte entry from the ptbl.
+ *
+ * Return 1 if ptbl pages were freed.
+ */
+static int
+ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+{
+ pte_t *ptbl;
+ vm_paddr_t pa;
+ vm_page_t m;
+ u_int pp2d_idx;
+ pte_t **pdir;
+ u_int pdir_idx;
+ int i;
+
+ pp2d_idx = PP2D_IDX(va);
+ pdir_idx = PDIR_IDX(va);
+
+ KASSERT((pmap != kernel_pmap),
+ ("ptbl_unhold: unholding kernel ptbl!"));
+
+ pdir = pmap->pm_pp2d[pp2d_idx];
+ ptbl = pdir[pdir_idx];
+
+ KASSERT(((vm_offset_t) ptbl >= VM_MIN_KERNEL_ADDRESS),
+ ("ptbl_unhold: non kva ptbl"));
+
+ /* decrement hold count */
+ for (i = 0; i < PTBL_PAGES; i++) {
+ pa = pte_vatopa(mmu, kernel_pmap,
+ (vm_offset_t) ptbl + (i * PAGE_SIZE));
+ m = PHYS_TO_VM_PAGE(pa);
+ m->wire_count--;
+ }
+
+ /*
+ * Free ptbl pages if there are no pte entries in this ptbl.
+ * wire_count has the same value for all ptbl pages, so check the
+ * last page.
+ */
+ if (m->wire_count == 0) {
+ /* A pair of indirect entries might point to this ptbl page */
+#if 0
+ tlb_flush_entry(pmap, va & ~((2UL * PAGE_SIZE_1M) - 1),
+ TLB_SIZE_1M, MAS6_SIND);
+ tlb_flush_entry(pmap, (va & ~((2UL * PAGE_SIZE_1M) - 1)) | PAGE_SIZE_1M,
+ TLB_SIZE_1M, MAS6_SIND);
+#endif
+ ptbl_free(mmu, pmap, pdir, pdir_idx);
+ pdir_unhold(mmu, pmap, pp2d_idx);
+ return (1);
+ }
+ return (0);
+}
+
+/*
+ * Increment the hold count for ptbl pages. This routine is used when a
+ * new pte entry is being inserted into the ptbl.
+ */
+static void
+ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx)
+{
+ vm_paddr_t pa;
+ pte_t *ptbl;
+ vm_page_t m;
+ int i;
+
+ KASSERT((pmap != kernel_pmap),
+ ("ptbl_hold: holding kernel ptbl!"));
+
+ ptbl = pdir[pdir_idx];
+
+ KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
+
+ for (i = 0; i < PTBL_PAGES; i++) {
+ pa = pte_vatopa(mmu, kernel_pmap,
+ (vm_offset_t) ptbl + (i * PAGE_SIZE));
+ m = PHYS_TO_VM_PAGE(pa);
+ m->wire_count++;
+ }
+}
+#else
+
/* Initialize pool of kva ptbl buffers. */
static void
ptbl_init(void)
@@ -518,7 +926,8 @@
TAILQ_INIT(&ptbl_buf_freelist);
for (i = 0; i < PTBL_BUFS; i++) {
- ptbl_bufs[i].kva = ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
+ ptbl_bufs[i].kva =
+ ptbl_buf_pool_vabase + i * PTBL_PAGES * PAGE_SIZE;
TAILQ_INSERT_TAIL(&ptbl_buf_freelist, &ptbl_bufs[i], link);
}
}
@@ -602,7 +1011,6 @@
CTR2(KTR_PMAP, "%s: ptbl kva = %p", __func__, ptbl);
- /* Allocate ptbl pages, this will sleep! */
for (i = 0; i < PTBL_PAGES; i++) {
pidx = (PTBL_PAGES * pdir_idx) + i;
while ((m = vm_page_alloc(NULL, pidx,
@@ -763,6 +1171,7 @@
m->wire_count++;
}
}
+#endif
/* Allocate pv_entry structure. */
pv_entry_t
@@ -843,6 +1252,235 @@
//debugf("pv_remove: e\n");
}
+#ifdef __powerpc64__
+/*
+ * Clean pte entry, try to free page table page if requested.
+ *
+ * Return 1 if ptbl pages were freed, otherwise return 0.
+ */
+static int
+pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
+{
+ vm_page_t m;
+ pte_t *pte;
+
+ pte = pte_find(mmu, pmap, va);
+ KASSERT(pte != NULL, ("%s: NULL pte", __func__));
+
+ if (!PTE_ISVALID(pte))
+ return (0);
+
+ /* Get vm_page_t for mapped pte. */
+ m = PHYS_TO_VM_PAGE(PTE_PA(pte));
+
+ if (PTE_ISWIRED(pte))
+ pmap->pm_stats.wired_count--;
+
+ /* Handle managed entry. */
+ if (PTE_ISMANAGED(pte)) {
+
+ /* Handle modified pages. */
+ if (PTE_ISMODIFIED(pte))
+ vm_page_dirty(m);
+
+ /* Referenced pages. */
+ if (PTE_ISREFERENCED(pte))
+ vm_page_aflag_set(m, PGA_REFERENCED);
+
+ /* Remove pv_entry from pv_list. */
+ pv_remove(pmap, va, m);
+ }
+ mtx_lock_spin(&tlbivax_mutex);
+ tlb_miss_lock();
+
+ tlb0_flush_entry(va);
+ *pte = 0;
+
+ tlb_miss_unlock();
+ mtx_unlock_spin(&tlbivax_mutex);
+
+ pmap->pm_stats.resident_count--;
+
+ if (flags & PTBL_UNHOLD) {
+ return (ptbl_unhold(mmu, pmap, va));
+ }
+ return (0);
+}
+
+/*
+ * allocate a page of pointers to page directories, do not preallocate the
+ * page tables
+ */
+static pte_t **
+pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep)
+{
+ vm_page_t mtbl [PDIR_PAGES];
+ vm_page_t m;
+ struct ptbl_buf *pbuf;
+ pte_t **pdir;
+ unsigned int pidx;
+ int i;
+ int req;
+
+ pbuf = ptbl_buf_alloc();
+
+ if (pbuf == NULL)
+ panic("%s: couldn't alloc kernel virtual memory", __func__);
+
+ /* Allocate pdir pages, this will sleep! */
+ for (i = 0; i < PDIR_PAGES; i++) {
+ pidx = (PDIR_PAGES * pp2d_idx) + i;
+ req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
+ while ((m = vm_page_alloc(NULL, pidx, req)) == NULL) {
+ PMAP_UNLOCK(pmap);
+ VM_WAIT;
+ PMAP_LOCK(pmap);
+ }
+ mtbl[i] = m;
+ }
+
+ /* Map the allocated pages into kernel_pmap. */
+ pdir = (pte_t **) pbuf->kva;
+ pmap_qenter((vm_offset_t) pdir, mtbl, PDIR_PAGES);
+
+ /* Zero whole pdir. */
+ bzero((caddr_t) pdir, PDIR_PAGES * PAGE_SIZE);
+
+ /* Add pdir to the pmap pdir bufs list. */
+ TAILQ_INSERT_TAIL(&pmap->pm_pdir_list, pbuf, link);
+
+ return pdir;
+}
+
+/*
+ * Insert PTE for a given page and virtual address.
+ */
+static int
+pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags,
+ boolean_t nosleep)
+{
+ unsigned int pp2d_idx = PP2D_IDX(va);
+ unsigned int pdir_idx = PDIR_IDX(va);
+ unsigned int ptbl_idx = PTBL_IDX(va);
+ pte_t *ptbl, *pte;
+ pte_t **pdir;
+
+ /* Get the page directory pointer. */
+ pdir = pmap->pm_pp2d[pp2d_idx];
+ if (pdir == NULL)
+ pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep);
+
+ /* Get the page table pointer. */
+ ptbl = pdir[pdir_idx];
+
+ if (ptbl == NULL) {
+ /* Allocate page table pages. */
+ ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep);
+ if (ptbl == NULL) {
+ KASSERT(nosleep, ("nosleep and NULL ptbl"));
+ return (ENOMEM);
+ }
+ } else {
+ /*
+ * Check if there is a valid mapping for the requested va;
+ * if there is, remove it.
+ */
+ pte = &pdir[pdir_idx][ptbl_idx];
+ if (PTE_ISVALID(pte)) {
+ pte_remove(mmu, pmap, va, PTBL_HOLD);
+ } else {
+ /*
+ * pte is not used, increment hold count for ptbl
+ * pages.
+ */
+ if (pmap != kernel_pmap)
+ ptbl_hold(mmu, pmap, pdir, pdir_idx);
+ }
+ }
+
+ if (pdir[pdir_idx] == NULL) {
+ if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
+ pdir_hold(mmu, pmap, pdir);
+ pdir[pdir_idx] = ptbl;
+ }
+ if (pmap->pm_pp2d[pp2d_idx] == NULL)
+ pmap->pm_pp2d[pp2d_idx] = pdir;
+
+ /*
+ * Insert pv_entry into pv_list for mapped page if part of managed
+ * memory.
+ */
+ if ((m->oflags & VPO_UNMANAGED) == 0) {
+ flags |= PTE_MANAGED;
+
+ /* Create and insert pv entry. */
+ pv_insert(pmap, va, m);
+ }
+
+ mtx_lock_spin(&tlbivax_mutex);
+ tlb_miss_lock();
+
+ tlb0_flush_entry(va);
+ pmap->pm_stats.resident_count++;
+ pte = &pdir[pdir_idx][ptbl_idx];
+ *pte = PTE_RPN_FROM_PA(VM_PAGE_TO_PHYS(m));
+ *pte |= (PTE_VALID | flags);
+
+ tlb_miss_unlock();
+ mtx_unlock_spin(&tlbivax_mutex);
+
+ return (0);
+}
+
+/* Return the pa for the given pmap/va. */
+static vm_paddr_t
+pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
+{
+ vm_paddr_t pa = 0;
+ pte_t *pte;
+
+ pte = pte_find(mmu, pmap, va);
+ if ((pte != NULL) && PTE_ISVALID(pte))
+ pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
+ return (pa);
+}
+
+
+/* Set up kernel page tables covering the range from addr up to data_end. */
+static void
+kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
+{
+ int i, j;
+ vm_offset_t va;
+ pte_t *pte;
+
+ va = addr;
+ /* Initialize kernel pdir */
+ for (i = 0; i < kernel_pdirs; i++) {
+ kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
+ (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
+ for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
+ j < PDIR_NENTRIES; j++) {
+ kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
+ (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE * PDIR_PAGES) +
+ (((i * PDIR_NENTRIES) + j) * PAGE_SIZE * PTBL_PAGES));
+ }
+ }
+
+ /*
+ * Fill in PTEs covering kernel code and data. They are not required
+ * for address translation, as this area is covered by static TLB1
+ * entries, but they are needed for pte_vatopa() to work correctly
+ * with kernel-area addresses.
+ */
+ for (va = addr; va < data_end; va += PAGE_SIZE) {
+ pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
+ *pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
+ *pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
+ PTE_VALID | PTE_PS_4KB;
+ }
+}
+#else
/*
* Clean pte entry, try to free page table page if requested.
*
@@ -1045,6 +1683,7 @@
PTE_VALID | PTE_PS_4KB;
}
}
+#endif
/**************************************************************************/
/* PMAP related */
@@ -1071,7 +1710,9 @@
/* Set interesting system properties */
hw_direct_map = 0;
+#if defined(COMPAT_FREEBSD32) || !defined(__powerpc64__)
elf32_nxstack = 1;
+#endif
/* Initialize invalidation mutex */
mtx_init(&tlbivax_mutex, "tlbivax", NULL, MTX_SPIN);
@@ -1102,16 +1743,16 @@
/* Allocate space for the message buffer. */
msgbufp = (struct msgbuf *)data_end;
data_end += msgbufsize;
- debugf(" msgbufp at 0x%08x end = 0x%08x\n", (uint32_t)msgbufp,
- data_end);
+ debugf(" msgbufp at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
+ (uintptr_t)msgbufp, data_end);
data_end = round_page(data_end);
/* Allocate space for ptbl_bufs. */
ptbl_bufs = (struct ptbl_buf *)data_end;
data_end += sizeof(struct ptbl_buf) * PTBL_BUFS;
- debugf(" ptbl_bufs at 0x%08x end = 0x%08x\n", (uint32_t)ptbl_bufs,
- data_end);
+ debugf(" ptbl_bufs at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
+ (uintptr_t)ptbl_bufs, data_end);
data_end = round_page(data_end);
@@ -1119,17 +1760,22 @@
kernel_pdir = data_end;
kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
PDIR_SIZE);
+#ifdef __powerpc64__
+ kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
+ data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
+#endif
data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
debugf(" kernel ptbls: %d\n", kernel_ptbls);
- debugf(" kernel pdir at 0x%08x end = 0x%08x\n", kernel_pdir, data_end);
+ debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
+ kernel_pdir, data_end);
- debugf(" data_end: 0x%08x\n", data_end);
+ debugf(" data_end: 0x%"PRI0ptrX"\n", data_end);
if (data_end - kernstart > kernsize) {
kernsize += tlb1_mapin_region(kernstart + kernsize,
kernload + kernsize, (data_end - kernstart) - kernsize);
}
data_end = kernstart + kernsize;
- debugf(" updated data_end: 0x%08x\n", data_end);
+ debugf(" updated data_end: 0x%"PRI0ptrX"\n", data_end);
/*
* Clear the structures - note we can only do it safely after the
@@ -1138,7 +1784,13 @@
*/
dpcpu_init(dpcpu, 0);
memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE);
+#ifdef __powerpc64__
+ memset((void *)kernel_pdir, 0,
+ kernel_pdirs * PDIR_PAGES * PAGE_SIZE +
+ kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
+#else
memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE);
+#endif
/*******************************************************/
/* Set the start and end of kva. */
@@ -1308,14 +1960,12 @@
/* Initialize (statically allocated) kernel pmap. */
/*******************************************************/
PMAP_LOCK_INIT(kernel_pmap);
+#ifndef __powerpc64__
kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE;
+#endif
- debugf("kernel_pmap = 0x%08x\n", (uint32_t)kernel_pmap);
- debugf("kptbl_min = %d, kernel_ptbls = %d\n", kptbl_min, kernel_ptbls);
- debugf("kernel pdir range: 0x%08x - 0x%08x\n",
- kptbl_min * PDIR_SIZE, (kptbl_min + kernel_ptbls) * PDIR_SIZE - 1);
-
- kernel_pte_alloc(data_end, kernstart, kernel_pdir);
+ debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap);
+ kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir);
for (i = 0; i < MAXCPU; i++) {
kernel_pmap->pm_tid[i] = TID_KERNEL;
@@ -1343,7 +1993,8 @@
debugf("kstack_sz = 0x%08x\n", kstack0_sz);
debugf("kstack0_phys at 0x%09llx - 0x%09llx\n",
kstack0_phys, kstack0_phys + kstack0_sz);
- debugf("kstack0 at 0x%08x - 0x%08x\n", kstack0, kstack0 + kstack0_sz);
+ debugf("kstack0 at 0x%"PRI0ptrX" - 0x%"PRI0ptrX"\n",
+ kstack0, kstack0 + kstack0_sz);
virtual_avail += KSTACK_GUARD_PAGES * PAGE_SIZE + kstack0_sz;
for (i = 0; i < kstack_pages; i++) {
@@ -1354,8 +2005,8 @@
pmap_bootstrapped = 1;
- debugf("virtual_avail = %08x\n", virtual_avail);
- debugf("virtual_end = %08x\n", virtual_end);
+ debugf("virtual_avail = %"PRI0ptrX"\n", virtual_avail);
+ debugf("virtual_end = %"PRI0ptrX"\n", virtual_end);
debugf("mmu_booke_bootstrap: exit\n");
}
@@ -1543,6 +2194,7 @@
flags |= PTE_PS_4KB;
pte = pte_find(mmu, kernel_pmap, va);
+ KASSERT((pte != NULL), ("mmu_booke_kenter: invalid va. NULL PTE"));
mtx_lock_spin(&tlbivax_mutex);
tlb_miss_lock();
@@ -1633,7 +2285,12 @@
pmap->pm_tid[i] = TID_NONE;
CPU_ZERO(&kernel_pmap->pm_active);
bzero(&pmap->pm_stats, sizeof(pmap->pm_stats));
+#ifdef __powerpc64__
+ bzero(&pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES);
+ TAILQ_INIT(&pmap->pm_pdir_list);
+#else
bzero(&pmap->pm_pdir, sizeof(pte_t *) * PDIR_NENTRIES);
+#endif
TAILQ_INIT(&pmap->pm_ptbl_list);
}
@@ -1665,8 +2322,8 @@
rw_wlock(&pvh_global_lock);
PMAP_LOCK(pmap);
error = mmu_booke_enter_locked(mmu, pmap, va, m, prot, flags, psind);
- rw_wunlock(&pvh_global_lock);
PMAP_UNLOCK(pmap);
+ rw_wunlock(&pvh_global_lock);
return (error);
}
@@ -2841,10 +3498,18 @@
do {
tmpva = tlb1_map_base;
va = roundup(tlb1_map_base, 1 << flsl(size));
+#ifdef __powerpc64__
+ } while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size));
+#else
} while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
+#endif
+#else
+#ifdef __powerpc64__
+ va = atomic_fetchadd_long(&tlb1_map_base, size);
#else
va = atomic_fetchadd_int(&tlb1_map_base, size);
#endif
+#endif
res = (void *)va;
do {
@@ -2855,7 +3520,7 @@
} while (va % sz != 0);
}
if (bootverbose)
- printf("Wiring VA=%x to PA=%jx (size=%x)\n",
+ printf("Wiring VA=%lx to PA=%jx (size=%lx)\n",
va, (uintmax_t)pa, sz);
tlb1_set_entry(va, pa, sz,
_TLB_ENTRY_SHARED | tlb_calc_wimg(pa, ma));
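
The #ifdef ladder above only swaps atomic_cmpset_int for atomic_cmpset_long so the compare-and-set matches the width of tlb1_map_base. The underlying pattern is a lock-free bump allocation; a minimal sketch in portable C11 (the kernel uses machine/atomic.h instead, and the base address here is hypothetical):

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uintptr_t map_base = 0xfef00000;  /* hypothetical */

    static uintptr_t
    reserve_va(uintptr_t size, uintptr_t align)
    {
        uintptr_t old, va;

        do {
            old = atomic_load(&map_base);
            va = (old + align - 1) & ~(align - 1);   /* round up */
        } while (!atomic_compare_exchange_weak(&map_base, &old, va + size));
        return (va);
    }
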
@@ -3027,7 +3692,11 @@
/**************************************************************************/
static void
+#ifdef __powerpc64__
+tlb_print_entry(int i, uint32_t mas1, uint64_t mas2, uint32_t mas3,
+#else
tlb_print_entry(int i, uint32_t mas1, uint32_t mas2, uint32_t mas3,
+#endif
uint32_t mas7)
{
int as;
@@ -3057,7 +3726,7 @@
debugf("%3d: (%s) [AS=%d] "
"sz = 0x%08x tsz = %d tid = %d mas1 = 0x%08x "
- "mas2(va) = 0x%08x mas3(pa) = 0x%08x mas7 = 0x%08x\n",
+ "mas2(va) = 0x%"PRI0ptrX" mas3(pa) = 0x%08x mas7 = 0x%08x\n",
i, desc, as, size, tsize, tid, mas1, mas2, mas3, mas7);
}
@@ -3094,7 +3763,12 @@
void
tlb0_print_tlbentries(void)
{
- uint32_t mas0, mas1, mas2, mas3, mas7;
+ uint32_t mas0, mas1, mas3, mas7;
+#ifdef __powerpc64__
+ uint64_t mas2;
+#else
+ uint32_t mas2;
+#endif
int entryidx, way, idx;
debugf("TLB0 entries:\n");
@@ -3367,11 +4041,7 @@
}
mapped = (va - base);
-#ifdef __powerpc64__
- printf("mapped size 0x%016lx (wasted space 0x%16lx)\n",
-#else
- printf("mapped size 0x%08x (wasted space 0x%08x)\n",
-#endif
+ printf("mapped size 0x%"PRI0ptrX" (wasted space 0x%"PRIxPTR")\n",
mapped, mapped - size);
return (mapped);
}
@@ -3528,7 +4198,12 @@
void
tlb1_print_tlbentries(void)
{
- uint32_t mas0, mas1, mas2, mas3, mas7;
+ uint32_t mas0, mas1, mas3, mas7;
+#ifdef __powerpc64__
+ uint64_t mas2;
+#else
+ uint32_t mas2;
+#endif
int i;
debugf("TLB1 entries:\n");
Index: head/sys/powerpc/booke/trap_subr.S
===================================================================
--- head/sys/powerpc/booke/trap_subr.S
+++ head/sys/powerpc/booke/trap_subr.S
@@ -84,7 +84,11 @@
#define RES_GRANULE 32
#define RES_LOCK 0 /* offset to the 'lock' word */
+#ifdef __powerpc64__
+#define RES_RECURSE 8 /* offset to the 'recurse' word */
+#else
#define RES_RECURSE 4 /* offset to the 'recurse' word */
+#endif
/*
* Standard interrupt prolog
@@ -114,16 +118,16 @@
#define STANDARD_PROLOG(sprg_sp, savearea, isrr0, isrr1) \
mtspr sprg_sp, %r1; /* Save SP */ \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
- stw %r30, (savearea+CPUSAVE_R30)(%r1); \
- stw %r31, (savearea+CPUSAVE_R31)(%r1); \
+ STORE %r30, (savearea+CPUSAVE_R30)(%r1); \
+ STORE %r31, (savearea+CPUSAVE_R31)(%r1); \
mfdear %r30; \
mfesr %r31; \
- stw %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \
- stw %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \
+ STORE %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \
+ STORE %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \
mfspr %r30, isrr0; \
mfspr %r31, isrr1; /* MSR at interrupt time */ \
- stw %r30, (savearea+CPUSAVE_SRR0)(%r1); \
- stw %r31, (savearea+CPUSAVE_SRR1)(%r1); \
+ STORE %r30, (savearea+CPUSAVE_SRR0)(%r1); \
+ STORE %r31, (savearea+CPUSAVE_SRR1)(%r1); \
isync; \
mfspr %r1, sprg_sp; /* Restore SP */ \
mfcr %r30; /* Save CR */ \
@@ -131,26 +135,26 @@
mtcr %r31; /* MSR at interrupt time */ \
bf 17, 1f; \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
- lwz %r1, PC_CURPCB(%r1); /* Per-thread kernel stack */ \
+ LOAD %r1, PC_CURPCB(%r1); /* Per-thread kernel stack */ \
1:
#define STANDARD_CRIT_PROLOG(sprg_sp, savearea, isrr0, isrr1) \
mtspr sprg_sp, %r1; /* Save SP */ \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
- stw %r30, (savearea+CPUSAVE_R30)(%r1); \
- stw %r31, (savearea+CPUSAVE_R31)(%r1); \
+ STORE %r30, (savearea+CPUSAVE_R30)(%r1); \
+ STORE %r31, (savearea+CPUSAVE_R31)(%r1); \
mfdear %r30; \
mfesr %r31; \
- stw %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \
- stw %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \
+ STORE %r30, (savearea+CPUSAVE_BOOKE_DEAR)(%r1); \
+ STORE %r31, (savearea+CPUSAVE_BOOKE_ESR)(%r1); \
mfspr %r30, isrr0; \
mfspr %r31, isrr1; /* MSR at interrupt time */ \
- stw %r30, (savearea+CPUSAVE_SRR0)(%r1); \
- stw %r31, (savearea+CPUSAVE_SRR1)(%r1); \
+ STORE %r30, (savearea+CPUSAVE_SRR0)(%r1); \
+ STORE %r31, (savearea+CPUSAVE_SRR1)(%r1); \
mfspr %r30, SPR_SRR0; \
mfspr %r31, SPR_SRR1; /* MSR at interrupt time */ \
- stw %r30, (savearea+CPUSAVE_SRR0+8)(%r1); \
- stw %r31, (savearea+CPUSAVE_SRR1+8)(%r1); \
+ STORE %r30, (savearea+BOOKE_CRITSAVE_SRR0)(%r1); \
+ STORE %r31, (savearea+BOOKE_CRITSAVE_SRR1)(%r1); \
isync; \
mfspr %r1, sprg_sp; /* Restore SP */ \
mfcr %r30; /* Save CR */ \
@@ -158,7 +162,7 @@
mtcr %r31; /* MSR at interrupt time */ \
bf 17, 1f; \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
- lwz %r1, PC_CURPCB(%r1); /* Per-thread kernel stack */ \
+ LOAD %r1, PC_CURPCB(%r1); /* Per-thread kernel stack */ \
1:
/*
@@ -185,42 +189,109 @@
* enough i.e. when kstack crosses page boundary and both pages are
* untranslated)
*/
+#ifdef __powerpc64__
+#define SAVE_REGS(r) \
+ std %r3, FRAME_3+CALLSIZE(r); \
+ std %r4, FRAME_4+CALLSIZE(r); \
+ std %r5, FRAME_5+CALLSIZE(r); \
+ std %r6, FRAME_6+CALLSIZE(r); \
+ std %r7, FRAME_7+CALLSIZE(r); \
+ std %r8, FRAME_8+CALLSIZE(r); \
+ std %r9, FRAME_9+CALLSIZE(r); \
+ std %r10, FRAME_10+CALLSIZE(r); \
+ std %r11, FRAME_11+CALLSIZE(r); \
+ std %r12, FRAME_12+CALLSIZE(r); \
+ std %r13, FRAME_13+CALLSIZE(r); \
+ std %r14, FRAME_14+CALLSIZE(r); \
+ std %r15, FRAME_15+CALLSIZE(r); \
+ std %r16, FRAME_16+CALLSIZE(r); \
+ std %r17, FRAME_17+CALLSIZE(r); \
+ std %r18, FRAME_18+CALLSIZE(r); \
+ std %r19, FRAME_19+CALLSIZE(r); \
+ std %r20, FRAME_20+CALLSIZE(r); \
+ std %r21, FRAME_21+CALLSIZE(r); \
+ std %r22, FRAME_22+CALLSIZE(r); \
+ std %r23, FRAME_23+CALLSIZE(r); \
+ std %r24, FRAME_24+CALLSIZE(r); \
+ std %r25, FRAME_25+CALLSIZE(r); \
+ std %r26, FRAME_26+CALLSIZE(r); \
+ std %r27, FRAME_27+CALLSIZE(r); \
+ std %r28, FRAME_28+CALLSIZE(r); \
+ std %r29, FRAME_29+CALLSIZE(r); \
+ std %r30, FRAME_30+CALLSIZE(r); \
+ std %r31, FRAME_31+CALLSIZE(r)
+#define LD_REGS(r) \
+ ld %r3, FRAME_3+CALLSIZE(r); \
+ ld %r4, FRAME_4+CALLSIZE(r); \
+ ld %r5, FRAME_5+CALLSIZE(r); \
+ ld %r6, FRAME_6+CALLSIZE(r); \
+ ld %r7, FRAME_7+CALLSIZE(r); \
+ ld %r8, FRAME_8+CALLSIZE(r); \
+ ld %r9, FRAME_9+CALLSIZE(r); \
+ ld %r10, FRAME_10+CALLSIZE(r); \
+ ld %r11, FRAME_11+CALLSIZE(r); \
+ ld %r12, FRAME_12+CALLSIZE(r); \
+ ld %r13, FRAME_13+CALLSIZE(r); \
+ ld %r14, FRAME_14+CALLSIZE(r); \
+ ld %r15, FRAME_15+CALLSIZE(r); \
+ ld %r16, FRAME_16+CALLSIZE(r); \
+ ld %r17, FRAME_17+CALLSIZE(r); \
+ ld %r18, FRAME_18+CALLSIZE(r); \
+ ld %r19, FRAME_19+CALLSIZE(r); \
+ ld %r20, FRAME_20+CALLSIZE(r); \
+ ld %r21, FRAME_21+CALLSIZE(r); \
+ ld %r22, FRAME_22+CALLSIZE(r); \
+ ld %r23, FRAME_23+CALLSIZE(r); \
+ ld %r24, FRAME_24+CALLSIZE(r); \
+ ld %r25, FRAME_25+CALLSIZE(r); \
+ ld %r26, FRAME_26+CALLSIZE(r); \
+ ld %r27, FRAME_27+CALLSIZE(r); \
+ ld %r28, FRAME_28+CALLSIZE(r); \
+ ld %r29, FRAME_29+CALLSIZE(r); \
+ ld %r30, FRAME_30+CALLSIZE(r); \
+ ld %r31, FRAME_31+CALLSIZE(r)
+#else
+#define SAVE_REGS(r) \
+ stmw %r3, FRAME_3+CALLSIZE(r)
+#define LD_REGS(r) \
+ lmw %r3, FRAME_3+CALLSIZE(r)
+#endif
#define FRAME_SETUP(sprg_sp, savearea, exc) \
mfspr %r31, sprg_sp; /* get saved SP */ \
/* establish a new stack frame and put everything on it */ \
- stwu %r31, -FRAMELEN(%r1); \
- stw %r0, FRAME_0+8(%r1); /* save r0 in the trapframe */ \
- stw %r31, FRAME_1+8(%r1); /* save SP " " */ \
- stw %r2, FRAME_2+8(%r1); /* save r2 " " */ \
+ STU %r31, -(FRAMELEN+REDZONE)(%r1); \
+ STORE %r0, FRAME_0+CALLSIZE(%r1); /* save r0 in the trapframe */ \
+ STORE %r31, FRAME_1+CALLSIZE(%r1); /* save SP " " */ \
+ STORE %r2, FRAME_2+CALLSIZE(%r1); /* save r2 " " */ \
mflr %r31; \
- stw %r31, FRAME_LR+8(%r1); /* save LR " " */ \
- stw %r30, FRAME_CR+8(%r1); /* save CR " " */ \
+ STORE %r31, FRAME_LR+CALLSIZE(%r1); /* save LR " " */ \
+ STORE %r30, FRAME_CR+CALLSIZE(%r1); /* save CR " " */ \
GET_CPUINFO(%r2); \
- lwz %r30, (savearea+CPUSAVE_R30)(%r2); /* get saved r30 */ \
- lwz %r31, (savearea+CPUSAVE_R31)(%r2); /* get saved r31 */ \
+ LOAD %r30, (savearea+CPUSAVE_R30)(%r2); /* get saved r30 */ \
+ LOAD %r31, (savearea+CPUSAVE_R31)(%r2); /* get saved r31 */ \
/* save R3-31 */ \
- stmw %r3, FRAME_3+8(%r1) ; \
+ SAVE_REGS(%r1); \
/* save DEAR, ESR */ \
- lwz %r28, (savearea+CPUSAVE_BOOKE_DEAR)(%r2); \
- lwz %r29, (savearea+CPUSAVE_BOOKE_ESR)(%r2); \
- stw %r28, FRAME_BOOKE_DEAR+8(%r1); \
- stw %r29, FRAME_BOOKE_ESR+8(%r1); \
+ LOAD %r28, (savearea+CPUSAVE_BOOKE_DEAR)(%r2); \
+ LOAD %r29, (savearea+CPUSAVE_BOOKE_ESR)(%r2); \
+ STORE %r28, FRAME_BOOKE_DEAR+CALLSIZE(%r1); \
+ STORE %r29, FRAME_BOOKE_ESR+CALLSIZE(%r1); \
/* save XER, CTR, exc number */ \
mfxer %r3; \
mfctr %r4; \
- stw %r3, FRAME_XER+8(%r1); \
- stw %r4, FRAME_CTR+8(%r1); \
+ STORE %r3, FRAME_XER+CALLSIZE(%r1); \
+ STORE %r4, FRAME_CTR+CALLSIZE(%r1); \
li %r5, exc; \
- stw %r5, FRAME_EXC+8(%r1); \
+ STORE %r5, FRAME_EXC+CALLSIZE(%r1); \
/* save DBCR0 */ \
mfspr %r3, SPR_DBCR0; \
- stw %r3, FRAME_BOOKE_DBCR0+8(%r1); \
+ STORE %r3, FRAME_BOOKE_DBCR0+CALLSIZE(%r1); \
/* save xSSR0-1 */ \
- lwz %r30, (savearea+CPUSAVE_SRR0)(%r2); \
- lwz %r31, (savearea+CPUSAVE_SRR1)(%r2); \
- stw %r30, FRAME_SRR0+8(%r1); \
- stw %r31, FRAME_SRR1+8(%r1); \
- lwz %r2,PC_CURTHREAD(%r2) /* set curthread pointer */
+ LOAD %r30, (savearea+CPUSAVE_SRR0)(%r2); \
+ LOAD %r31, (savearea+CPUSAVE_SRR1)(%r2); \
+ STORE %r30, FRAME_SRR0+CALLSIZE(%r1); \
+ STORE %r31, FRAME_SRR1+CALLSIZE(%r1); \
+ LOAD THREAD_REG, PC_CURTHREAD(%r2); \
/*
*
@@ -231,27 +302,29 @@
* - potential TLB miss: YES. The deref'd kstack may be not covered
*/
#define FRAME_LEAVE(isrr0, isrr1) \
+ wrteei 0; \
/* restore CTR, XER, LR, CR */ \
- lwz %r4, FRAME_CTR+8(%r1); \
- lwz %r5, FRAME_XER+8(%r1); \
- lwz %r6, FRAME_LR+8(%r1); \
- lwz %r7, FRAME_CR+8(%r1); \
+ LOAD %r4, FRAME_CTR+CALLSIZE(%r1); \
+ LOAD %r5, FRAME_XER+CALLSIZE(%r1); \
+ LOAD %r6, FRAME_LR+CALLSIZE(%r1); \
+ LOAD %r7, FRAME_CR+CALLSIZE(%r1); \
mtctr %r4; \
mtxer %r5; \
mtlr %r6; \
mtcr %r7; \
/* restore DBCR0 */ \
- lwz %r4, FRAME_BOOKE_DBCR0+8(%r1); \
+ LOAD %r4, FRAME_BOOKE_DBCR0+CALLSIZE(%r1); \
mtspr SPR_DBCR0, %r4; \
/* restore xSRR0-1 */ \
- lwz %r30, FRAME_SRR0+8(%r1); \
- lwz %r31, FRAME_SRR1+8(%r1); \
+ LOAD %r30, FRAME_SRR0+CALLSIZE(%r1); \
+ LOAD %r31, FRAME_SRR1+CALLSIZE(%r1); \
mtspr isrr0, %r30; \
mtspr isrr1, %r31; \
/* restore R2-31, SP */ \
- lmw %r2, FRAME_2+8(%r1) ; \
- lwz %r0, FRAME_0+8(%r1); \
- lwz %r1, FRAME_1+8(%r1); \
+ LD_REGS(%r1); \
+ LOAD %r2, FRAME_2+CALLSIZE(%r1); \
+ LOAD %r0, FRAME_0+CALLSIZE(%r1); \
+ LOAD %r1, FRAME_1+CALLSIZE(%r1); \
isync
/*
@@ -264,33 +337,70 @@
* miss within the TLB prolog itself!
* - TLBSAVE is always translated
*/
+#ifdef __powerpc64__
+#define TLB_SAVE_REGS(br) \
+ std %r20, (TLBSAVE_BOOKE_R20)(br); \
+ std %r21, (TLBSAVE_BOOKE_R21)(br); \
+ std %r22, (TLBSAVE_BOOKE_R22)(br); \
+ std %r23, (TLBSAVE_BOOKE_R23)(br); \
+ std %r24, (TLBSAVE_BOOKE_R24)(br); \
+ std %r25, (TLBSAVE_BOOKE_R25)(br); \
+ std %r26, (TLBSAVE_BOOKE_R26)(br); \
+ std %r27, (TLBSAVE_BOOKE_R27)(br); \
+ std %r28, (TLBSAVE_BOOKE_R28)(br); \
+ std %r29, (TLBSAVE_BOOKE_R29)(br); \
+ std %r30, (TLBSAVE_BOOKE_R30)(br); \
+ std %r31, (TLBSAVE_BOOKE_R31)(br);
+#define TLB_RESTORE_REGS(br) \
+ ld %r20, (TLBSAVE_BOOKE_R20)(br); \
+ ld %r21, (TLBSAVE_BOOKE_R21)(br); \
+ ld %r22, (TLBSAVE_BOOKE_R22)(br); \
+ ld %r23, (TLBSAVE_BOOKE_R23)(br); \
+ ld %r24, (TLBSAVE_BOOKE_R24)(br); \
+ ld %r25, (TLBSAVE_BOOKE_R25)(br); \
+ ld %r26, (TLBSAVE_BOOKE_R26)(br); \
+ ld %r27, (TLBSAVE_BOOKE_R27)(br); \
+ ld %r28, (TLBSAVE_BOOKE_R28)(br); \
+ ld %r29, (TLBSAVE_BOOKE_R29)(br); \
+ ld %r30, (TLBSAVE_BOOKE_R30)(br); \
+ ld %r31, (TLBSAVE_BOOKE_R31)(br);
+#define TLB_NEST(outr,inr) \
+ rlwinm outr, inr, 7, 23, 24; /* 8 x TLBSAVE_LEN */
+#else
+#define TLB_SAVE_REGS(br) \
+ stmw %r20, TLBSAVE_BOOKE_R20(br)
+#define TLB_RESTORE_REGS(br) \
+ lmw %r20, TLBSAVE_BOOKE_R20(br)
+#define TLB_NEST(outr,inr) \
+ rlwinm outr, inr, 6, 23, 25; /* 4 x TLBSAVE_LEN */
+#endif
#define TLB_PROLOG \
mtsprg4 %r1; /* Save SP */ \
mtsprg5 %r28; \
mtsprg6 %r29; \
/* calculate TLB nesting level and TLBSAVE instance address */ \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
- lwz %r28, PC_BOOKE_TLB_LEVEL(%r1); \
- rlwinm %r29, %r28, 6, 23, 25; /* 4 x TLBSAVE_LEN */ \
+ LOAD %r28, PC_BOOKE_TLB_LEVEL(%r1); \
+ TLB_NEST(%r29,%r28); \
addi %r28, %r28, 1; \
- stw %r28, PC_BOOKE_TLB_LEVEL(%r1); \
+ STORE %r28, PC_BOOKE_TLB_LEVEL(%r1); \
addi %r29, %r29, PC_BOOKE_TLBSAVE@l; \
add %r1, %r1, %r29; /* current TLBSAVE ptr */ \
\
/* save R20-31 */ \
mfsprg5 %r28; \
mfsprg6 %r29; \
- stmw %r20, (TLBSAVE_BOOKE_R20)(%r1); \
+ TLB_SAVE_REGS(%r1); \
/* save LR, CR */ \
mflr %r30; \
mfcr %r31; \
- stw %r30, (TLBSAVE_BOOKE_LR)(%r1); \
- stw %r31, (TLBSAVE_BOOKE_CR)(%r1); \
+ STORE %r30, (TLBSAVE_BOOKE_LR)(%r1); \
+ STORE %r31, (TLBSAVE_BOOKE_CR)(%r1); \
/* save SRR0-1 */ \
mfsrr0 %r30; /* execution addr at interrupt time */ \
mfsrr1 %r31; /* MSR at interrupt time*/ \
- stw %r30, (TLBSAVE_BOOKE_SRR0)(%r1); /* save SRR0 */ \
- stw %r31, (TLBSAVE_BOOKE_SRR1)(%r1); /* save SRR1 */ \
+ STORE %r30, (TLBSAVE_BOOKE_SRR0)(%r1); /* save SRR0 */ \
+ STORE %r31, (TLBSAVE_BOOKE_SRR1)(%r1); /* save SRR1 */ \
isync; \
mfsprg4 %r1
@@ -303,43 +413,43 @@
mtsprg4 %r1; /* Save SP */ \
GET_CPUINFO(%r1); /* Per-cpu structure */ \
/* calculate TLB nesting level and TLBSAVE instance addr */ \
- lwz %r28, PC_BOOKE_TLB_LEVEL(%r1); \
+ LOAD %r28, PC_BOOKE_TLB_LEVEL(%r1); \
subi %r28, %r28, 1; \
- stw %r28, PC_BOOKE_TLB_LEVEL(%r1); \
- rlwinm %r29, %r28, 6, 23, 25; /* 4 x TLBSAVE_LEN */ \
+ STORE %r28, PC_BOOKE_TLB_LEVEL(%r1); \
+ TLB_NEST(%r29,%r28); \
addi %r29, %r29, PC_BOOKE_TLBSAVE@l; \
add %r1, %r1, %r29; \
\
/* restore LR, CR */ \
- lwz %r30, (TLBSAVE_BOOKE_LR)(%r1); \
- lwz %r31, (TLBSAVE_BOOKE_CR)(%r1); \
+ LOAD %r30, (TLBSAVE_BOOKE_LR)(%r1); \
+ LOAD %r31, (TLBSAVE_BOOKE_CR)(%r1); \
mtlr %r30; \
mtcr %r31; \
/* restore SRR0-1 */ \
- lwz %r30, (TLBSAVE_BOOKE_SRR0)(%r1); \
- lwz %r31, (TLBSAVE_BOOKE_SRR1)(%r1); \
+ LOAD %r30, (TLBSAVE_BOOKE_SRR0)(%r1); \
+ LOAD %r31, (TLBSAVE_BOOKE_SRR1)(%r1); \
mtsrr0 %r30; \
mtsrr1 %r31; \
/* restore R20-31 */ \
- lmw %r20, (TLBSAVE_BOOKE_R20)(%r1); \
+ TLB_RESTORE_REGS(%r1); \
mfsprg4 %r1
#ifdef SMP
#define TLB_LOCK \
GET_CPUINFO(%r20); \
- lwz %r21, PC_CURTHREAD(%r20); \
- lwz %r22, PC_BOOKE_TLB_LOCK(%r20); \
+ LOAD %r21, PC_CURTHREAD(%r20); \
+ LOAD %r22, PC_BOOKE_TLB_LOCK(%r20); \
\
-1: lwarx %r23, 0, %r22; \
- cmpwi %r23, TLB_UNLOCKED; \
+1: LOADX %r23, 0, %r22; \
+ CMPI %r23, TLB_UNLOCKED; \
beq 2f; \
\
/* check if this is recursion */ \
- cmplw cr0, %r21, %r23; \
+ CMPL cr0, %r21, %r23; \
bne- 1b; \
\
2: /* try to acquire lock */ \
- stwcx. %r21, 0, %r22; \
+ STOREX %r21, 0, %r22; \
bne- 1b; \
\
/* got it, update recursion counter */ \
@@ -351,22 +461,22 @@
#define TLB_UNLOCK \
GET_CPUINFO(%r20); \
- lwz %r21, PC_CURTHREAD(%r20); \
- lwz %r22, PC_BOOKE_TLB_LOCK(%r20); \
+ LOAD %r21, PC_CURTHREAD(%r20); \
+ LOAD %r22, PC_BOOKE_TLB_LOCK(%r20); \
\
/* update recursion counter */ \
lwz %r23, RES_RECURSE(%r22); \
subi %r23, %r23, 1; \
stw %r23, RES_RECURSE(%r22); \
\
- cmpwi %r23, 0; \
+ cmplwi %r23, 0; \
bne 1f; \
isync; \
msync; \
\
/* release the lock */ \
li %r23, TLB_UNLOCKED; \
- stw %r23, 0(%r22); \
+ STORE %r23, 0(%r22); \
1: isync; \
msync
#else
@@ -407,8 +517,10 @@
INTERRUPT(int_critical_input)
STANDARD_CRIT_PROLOG(SPR_SPRG2, PC_BOOKE_CRITSAVE, SPR_CSRR0, SPR_CSRR1)
FRAME_SETUP(SPR_SPRG2, PC_BOOKE_CRITSAVE, EXC_CRIT)
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(powerpc_interrupt)
+ TOC_RESTORE
FRAME_LEAVE(SPR_CSRR0, SPR_CSRR1)
rfci
@@ -419,8 +531,10 @@
INTERRUPT(int_machine_check)
STANDARD_PROLOG(SPR_SPRG3, PC_BOOKE_MCHKSAVE, SPR_MCSRR0, SPR_MCSRR1)
FRAME_SETUP(SPR_SPRG3, PC_BOOKE_MCHKSAVE, EXC_MCHK)
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(powerpc_interrupt)
+ TOC_RESTORE
FRAME_LEAVE(SPR_MCSRR0, SPR_MCSRR1)
rfmci
@@ -449,8 +563,10 @@
INTERRUPT(int_external_input)
STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1)
FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_EXI)
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(powerpc_interrupt)
+ TOC_RESTORE
b clear_we
@@ -487,8 +603,10 @@
INTERRUPT(int_decrementer)
STANDARD_PROLOG(SPR_SPRG1, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1)
FRAME_SETUP(SPR_SPRG1, PC_TEMPSAVE, EXC_DECR)
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(powerpc_interrupt)
+ TOC_RESTORE
b clear_we
@@ -535,8 +653,10 @@
INTERRUPT(int_performance_counter)
STANDARD_PROLOG(SPR_SPRG3, PC_TEMPSAVE, SPR_SRR0, SPR_SRR1)
FRAME_SETUP(SPR_SPRG3, PC_TEMPSAVE, EXC_PERF)
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(powerpc_interrupt)
+ TOC_RESTORE
b trapexit
#endif
@@ -574,9 +694,8 @@
mfspr %r27, SPR_MAS2
/* Check faulting address. */
- lis %r21, VM_MAXUSER_ADDRESS@h
- ori %r21, %r21, VM_MAXUSER_ADDRESS@l
- cmplw cr0, %r31, %r21
+ LOAD_ADDR(%r21, VM_MAXUSER_ADDRESS)
+ CMPL cr0, %r31, %r21
blt search_user_pmap
/* If it's kernel address, allow only supervisor mode misses. */
@@ -587,9 +706,13 @@
search_kernel_pmap:
/* Load r26 with kernel_pmap address */
bl 1f
+#ifdef __powerpc64__
+ .llong kernel_pmap_store-.
+#else
.long kernel_pmap_store-.
+#endif
1: mflr %r21
- lwz %r26, 0(%r21)
+ LOAD %r26, 0(%r21)
add %r26, %r21, %r26 /* kernel_pmap_store in r26 */
/* Force kernel tid, set TID to 0 in MAS1. */
@@ -600,7 +723,7 @@
/* This may result in nested tlb miss. */
bl pte_lookup /* returns PTE address in R25 */
- cmpwi %r25, 0 /* pte found? */
+ CMPI %r25, 0 /* pte found? */
beq search_failed
/* Finish up, write TLB entry. */
@@ -614,7 +737,7 @@
search_user_pmap:
/* Load r26 with current user space process pmap */
GET_CPUINFO(%r26)
- lwz %r26, PC_CURPMAP(%r26)
+ LOAD %r26, PC_CURPMAP(%r26)
b tlb_miss_handle
@@ -657,9 +780,35 @@
*
****************************************************************************/
pte_lookup:
- cmpwi %r26, 0
+ CMPI %r26, 0
beq 1f /* fail quickly if pmap is invalid */
+#ifdef __powerpc64__
+ rldicl %r21, %r31, (64 - PP2D_L_L), (64 - PP2D_L_NUM) /* pp2d offset */
+ rldicl %r25, %r31, (64 - PP2D_H_L), (64 - PP2D_H_NUM)
+ rldimi %r21, %r25, PP2D_L_NUM, (64 - (PP2D_L_NUM + PP2D_H_NUM))
+ slwi %r21, %r21, PP2D_ENTRY_SHIFT /* multiply by pp2d entry size */
+ addi %r25, %r26, PM_PP2D /* pmap pm_pp2d[] address */
+ add %r25, %r25, %r21 /* offset within pm_pp2d[] table */
+ ld %r25, 0(%r25) /* get pdir address, i.e. pmap->pm_pp2d[pp2d_idx] */
+
+ cmpdi %r25, 0
+ beq 1f
+
+#if PAGE_SIZE < 65536
+ rldicl %r21, %r31, (64 - PDIR_L), (64 - PDIR_NUM) /* pdir offset */
+ slwi %r21, %r21, PDIR_ENTRY_SHIFT /* multiply by pdir entry size */
+ add %r25, %r25, %r21 /* offset within pdir table */
+ ld %r25, 0(%r25) /* get ptbl address, i.e. pmap->pm_pp2d[pp2d_idx][pdir_idx] */
+
+ cmpdi %r25, 0
+ beq 1f
+#endif
+
+ rldicl %r21, %r31, (64 - PTBL_L), (64 - PTBL_NUM) /* ptbl offset */
+ slwi %r21, %r21, PTBL_ENTRY_SHIFT /* multiply by pte entry size */
+
+#else
srwi %r21, %r31, PDIR_SHIFT /* pdir offset */
slwi %r21, %r21, PDIR_ENTRY_SHIFT /* multiply by pdir entry size */
@@ -669,8 +818,8 @@
* Get ptbl address, i.e. pmap->pm_pdir[pdir_idx]
* This load may cause a Data TLB miss for non-kernel pmap!
*/
- lwz %r25, 0(%r25)
- cmpwi %r25, 0
+ LOAD %r25, 0(%r25)
+ CMPI %r25, 0
beq 2f
lis %r21, PTBL_MASK@h
@@ -679,6 +828,7 @@
/* ptbl offset, multiply by ptbl entry size */
srwi %r21, %r21, (PTBL_SHIFT - PTBL_ENTRY_SHIFT)
+#endif
add %r25, %r25, %r21 /* address of pte entry */
/*
@@ -730,12 +880,19 @@
rlwimi %r27, %r21, 13, 27, 30 /* insert WIMG bits from pte */
/* Setup MAS3 value in r23. */
- lwz %r23, PTE_RPN(%r25) /* get pte->rpn */
+ LOAD %r23, PTE_RPN(%r25) /* get pte->rpn */
+#ifdef __powerpc64__
+ rldicr %r22, %r23, 52, 51 /* extract MAS3 portion of RPN */
+ rldicl %r23, %r23, 20, 54 /* extract MAS7 portion of RPN */
+
+ rlwimi %r22, %r21, 30, 26, 31 /* insert protection bits from pte */
+#else
rlwinm %r22, %r23, 20, 0, 11 /* extract MAS3 portion of RPN */
rlwimi %r22, %r21, 30, 26, 31 /* insert protection bits from pte */
rlwimi %r22, %r21, 20, 12, 19 /* insert lower 8 RPN bits to MAS3 */
rlwinm %r23, %r23, 20, 24, 31 /* MAS7 portion of RPN */
+#endif
/* Load MAS registers. */
mtspr SPR_MAS0, %r29
@@ -811,35 +968,42 @@
int_debug_int:
mflr %r14
GET_CPUINFO(%r3)
- lwz %r3, (PC_BOOKE_CRITSAVE+CPUSAVE_SRR0)(%r3)
+ LOAD %r3, (PC_BOOKE_CRITSAVE+CPUSAVE_SRR0)(%r3)
bl 0f
+#ifdef __powerpc64__
+ .llong interrupt_vector_base-.
+ .llong interrupt_vector_top-.
+#else
.long interrupt_vector_base-.
.long interrupt_vector_top-.
+#endif
0: mflr %r5
- lwz %r4,0(%r5) /* interrupt_vector_base in r4 */
+ LOAD %r4,0(%r5) /* interrupt_vector_base in r4 */
add %r4,%r4,%r5
- cmplw cr0, %r3, %r4
+ CMPL cr0, %r3, %r4
blt 1f
- lwz %r4,4(%r5) /* interrupt_vector_top in r4 */
+ LOAD %r4,4(%r5) /* interrupt_vector_top in r4 */
add %r4,%r4,%r5
addi %r4,%r4,4
- cmplw cr0, %r3, %r4
+ CMPL cr0, %r3, %r4
bge 1f
/* Disable single-stepping for the interrupt handlers. */
- lwz %r3, FRAME_SRR1+8(%r1);
+ LOAD %r3, FRAME_SRR1+CALLSIZE(%r1);
rlwinm %r3, %r3, 0, 23, 21
- stw %r3, FRAME_SRR1+8(%r1);
+ STORE %r3, FRAME_SRR1+CALLSIZE(%r1);
/* Restore srr0 and srr1 as they could have been clobbered. */
GET_CPUINFO(%r4)
- lwz %r3, (PC_BOOKE_CRITSAVE+CPUSAVE_SRR0+8)(%r4);
+ LOAD %r3, (PC_BOOKE_CRITSAVE+BOOKE_CRITSAVE_SRR0)(%r4);
mtspr SPR_SRR0, %r3
- lwz %r4, (PC_BOOKE_CRITSAVE+CPUSAVE_SRR1+8)(%r4);
+ LOAD %r4, (PC_BOOKE_CRITSAVE+BOOKE_CRITSAVE_SRR1)(%r4);
mtspr SPR_SRR1, %r4
mtlr %r14
blr
1:
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(trap)
+ TOC_RESTORE
/*
* Handle ASTs, needed for proper support of single-stepping.
* We actually need to return to the process with an rfi.
@@ -851,8 +1015,10 @@
****************************************************************************/
trap_common:
/* Call C trap dispatcher */
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(trap)
+ TOC_RESTORE
.globl CNAME(trapexit) /* exported for db_backtrace use */
CNAME(trapexit):
@@ -860,12 +1026,12 @@
wrteei 0
/* Test AST pending - makes sense for user process only */
- lwz %r5, FRAME_SRR1+8(%r1)
+ LOAD %r5, FRAME_SRR1+CALLSIZE(%r1)
mtcr %r5
bf 17, 1f
GET_CPUINFO(%r3)
- lwz %r4, PC_CURTHREAD(%r3)
+ LOAD %r4, PC_CURTHREAD(%r3)
lwz %r4, TD_FLAGS(%r4)
lis %r5, (TDF_ASTPENDING | TDF_NEEDRESCHED)@h
ori %r5, %r5, (TDF_ASTPENDING | TDF_NEEDRESCHED)@l
@@ -875,8 +1041,9 @@
/* re-enable interrupts before calling ast() */
wrteei 1
- addi %r3, %r1, 8
+ addi %r3, %r1, CALLSIZE
bl CNAME(ast)
+ TOC_RESTORE
.globl CNAME(asttrapexit) /* db_backtrace code sentinel #2 */
CNAME(asttrapexit):
b trapexit /* test ast ret value ? */
@@ -889,30 +1056,32 @@
/*
* Deliberate entry to dbtrap
*/
- .globl CNAME(breakpoint)
-CNAME(breakpoint):
+ /* .globl CNAME(breakpoint)*/
+ASENTRY_NOPROF(breakpoint)
mtsprg1 %r1
mfmsr %r3
mtsrr1 %r3
- andi. %r3, %r3, ~(PSL_EE | PSL_ME)@l
+ li %r4, ~(PSL_EE | PSL_ME)@l
+ oris %r4, %r4, ~(PSL_EE | PSL_ME)@h
+ and %r3, %r3, %r4
mtmsr %r3 /* disable interrupts */
isync
GET_CPUINFO(%r3)
- stw %r30, (PC_DBSAVE+CPUSAVE_R30)(%r3)
- stw %r31, (PC_DBSAVE+CPUSAVE_R31)(%r3)
+ STORE %r30, (PC_DBSAVE+CPUSAVE_R30)(%r3)
+ STORE %r31, (PC_DBSAVE+CPUSAVE_R31)(%r3)
mflr %r31
mtsrr0 %r31
mfdear %r30
mfesr %r31
- stw %r30, (PC_DBSAVE+CPUSAVE_BOOKE_DEAR)(%r3)
- stw %r31, (PC_DBSAVE+CPUSAVE_BOOKE_ESR)(%r3)
+ STORE %r30, (PC_DBSAVE+CPUSAVE_BOOKE_DEAR)(%r3)
+ STORE %r31, (PC_DBSAVE+CPUSAVE_BOOKE_ESR)(%r3)
mfsrr0 %r30
mfsrr1 %r31
- stw %r30, (PC_DBSAVE+CPUSAVE_SRR0)(%r3)
- stw %r31, (PC_DBSAVE+CPUSAVE_SRR1)(%r3)
+ STORE %r30, (PC_DBSAVE+CPUSAVE_SRR0)(%r3)
+ STORE %r31, (PC_DBSAVE+CPUSAVE_SRR1)(%r3)
isync
mfcr %r30
@@ -923,8 +1092,10 @@
dbtrap:
FRAME_SETUP(SPR_SPRG1, PC_DBSAVE, EXC_DEBUG)
/* Call C trap code: */
- addi %r3, %r1, 8
+ GET_TOCBASE(%r2)
+ addi %r3, %r1, CALLSIZE
bl CNAME(db_trap_glue)
+ TOC_RESTORE
or. %r3, %r3, %r3
bne dbleave
/* This wasn't for KDB, so switch to real trap: */
@@ -936,19 +1107,19 @@
#endif /* KDB */
clear_we:
- lwz %r3, (FRAME_SRR1+8)(%r1)
+ LOAD %r3, (FRAME_SRR1+CALLSIZE)(%r1)
rlwinm %r3, %r3, 0, 14, 12
- stw %r3, (FRAME_SRR1+8)(%r1)
+ STORE %r3, (FRAME_SRR1+CALLSIZE)(%r1)
b trapexit
#ifdef SMP
ENTRY(tlb_lock)
GET_CPUINFO(%r5)
- lwz %r5, PC_CURTHREAD(%r5)
-1: lwarx %r4, 0, %r3
- cmpwi %r4, TLB_UNLOCKED
+ LOAD %r5, PC_CURTHREAD(%r5)
+1: LOADX %r4, 0, %r3
+ CMPI %r4, TLB_UNLOCKED
bne 1b
- stwcx. %r5, 0, %r3
+ STOREX %r5, 0, %r3
bne- 1b
isync
msync
@@ -958,7 +1129,7 @@
isync
msync
li %r4, TLB_UNLOCKED
- stw %r4, 0(%r3)
+ STORE %r4, 0(%r3)
isync
msync
blr
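tlb_lock spins with a load-reserve/store-conditional pair (lwarx/stwcx., or ldarx/stdcx. through the LOADX/STOREX macros) until the lock word reads TLB_UNLOCKED, then claims it with the curthread pointer; tlb_unlock stores TLB_UNLOCKED back between barriers. A minimal C11 sketch of the equivalent logic, assuming TLB_UNLOCKED is 0 as in machine/tlb.h (the real routines must stay in assembly because they run from the TLB-miss path):

#include <stdatomic.h>
#include <stdint.h>

#define TLB_UNLOCKED	0	/* assumed to match machine/tlb.h */

static void
tlb_lock_sketch(_Atomic uintptr_t *lock, uintptr_t curthread)
{
	uintptr_t old;

	do {
		old = TLB_UNLOCKED;
		/* LOADX/CMPI/STOREX loop: retry while held or when the
		 * reservation is lost */
	} while (!atomic_compare_exchange_weak_explicit(lock, &old,
	    curthread, memory_order_acquire, memory_order_relaxed));
}

static void
tlb_unlock_sketch(_Atomic uintptr_t *lock)
{
	/* the isync/msync pair serves as the release barrier */
	atomic_store_explicit(lock, TLB_UNLOCKED, memory_order_release);
}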
Index: head/sys/powerpc/conf/QORIQ64
===================================================================
--- head/sys/powerpc/conf/QORIQ64
+++ head/sys/powerpc/conf/QORIQ64
@@ -0,0 +1,103 @@
+#
+# Custom kernel for Freescale QorIQ (P5xxx, Txxxx) based boards, like
+# AmigaOne X5000
+#
+# $FreeBSD$
+#
+
+cpu BOOKE
+cpu BOOKE_E500
+ident MPC85XX
+
+machine powerpc powerpc64
+
+makeoptions DEBUG="-Wa,-me500 -g"
+makeoptions WERROR="-Werror -Wno-format -Wno-redundant-decls"
+makeoptions NO_MODULES=yes
+
+#options EARLY_PRINTF
+
+options FPU_EMU
+
+options BOOTVERBOSE=1
+options _KPOSIX_PRIORITY_SCHEDULING
+options ALT_BREAK_TO_DEBUGGER
+options BREAK_TO_DEBUGGER
+options BOOTP
+options BOOTP_NFSROOT
+#options BOOTP_NFSV3
+options CD9660
+#options COMPAT_43
+options COMPAT_FREEBSD32 #Compatible with FreeBSD/powerpc binaries
+options DDB
+#options DEADLKRES
+options DEVICE_POLLING
+#options DIAGNOSTIC
+options FDT
+#makeoptions FDT_DTS_FILE=mpc8555cds.dts
+options FFS
+options GDB
+options GEOM_PART_GPT
+options INET
+options INET6
+options TCP_HHOOK # hhook(9) framework for TCP
+options INVARIANTS
+options INVARIANT_SUPPORT
+options KDB
+options KTRACE
+options MD_ROOT
+options MPC85XX
+options MSDOSFS
+options NFS_ROOT
+options NFSCL
+options NFSLOCKD
+options PRINTF_BUFR_SIZE=128 # Prevent printf output being interspersed.
+options PROCFS
+options PSEUDOFS
+options SCHED_ULE
+options CAPABILITIES
+options CAPABILITY_MODE
+options SMP
+options SYSVMSG
+options SYSVSEM
+options SYSVSHM
+options WITNESS
+options WITNESS_SKIPSPIN
+
+device ata
+device bpf
+device cfi
+device crypto
+device cryptodev
+device da
+device ds1553
+device em
+device alc
+device ether
+device fxp
+device gpio
+device gpiopower
+device iic
+device iicbus
+#device isa
+device loop
+device md
+device miibus
+device mmc
+device mmcsd
+device pass
+device pci
+device random
+#device rl
+device scbus
+device scc
+device sdhci
+device sec
+device tun
+device uart
+options USB_DEBUG # enable debug msgs
+#device uhci
+device ehci
+device umass
+device usb
+device vlan
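This config file is consumed by the stock cross-build workflow rather than anything added by this diff, e.g. make TARGET=powerpc TARGET_ARCH=powerpc64 KERNCONF=QORIQ64 buildkernel from the top of the source tree.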
Index: head/sys/powerpc/include/asm.h
===================================================================
--- head/sys/powerpc/include/asm.h
+++ head/sys/powerpc/include/asm.h
@@ -128,6 +128,13 @@
.long 0; \
.byte 0,0,0,0,0,0,0,0; \
END_SIZE(name)
+
+#define LOAD_ADDR(reg, var) \
+ lis reg, var@highest; \
+ ori reg, reg, var@higher; \
+ rldicr reg, reg, 32, 31; \
+ oris reg, reg, var@h; \
+ ori reg, reg, var@l;
#else /* !__powerpc64__ */
#define _ENTRY(name) \
.text; \
@@ -136,6 +143,10 @@
.type name,@function; \
name:
#define _END(name)
+
+#define LOAD_ADDR(reg, var) \
+ lis reg, var@h; \
+ ori reg, reg, var@l;
#endif /* __powerpc64__ */
#if defined(PROF) || (defined(_KERNEL) && defined(GPROF))
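LOAD_ADDR materializes a full-width address as an immediate. The @highest/@higher/@h/@l operators select the four 16-bit quarters of the value; the 64-bit sequence assembles the top half, shifts it up, then ORs in the bottom half. A C sketch of what the five instructions compute:

#include <stdint.h>

static uint64_t
load_addr_sketch(uint64_t var)
{
	uint64_t r;

	r  = ((var >> 48) & 0xffff) << 16;	/* lis    reg, var@highest */
	r |=  (var >> 32) & 0xffff;		/* ori    reg, reg, var@higher */
	r <<= 32;				/* rldicr reg, reg, 32, 31 */
	r |= ((var >> 16) & 0xffff) << 16;	/* oris   reg, reg, var@h */
	r |=   var        & 0xffff;		/* ori    reg, reg, var@l */
	return (r);
}

Note that @h is the plain high half while @ha is the add-adjusted form meant to pair with a sign-extending addi; since the 32-bit variant ORs the halves together, it wants @h.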
Index: head/sys/powerpc/include/pcpu.h
===================================================================
--- head/sys/powerpc/include/pcpu.h
+++ head/sys/powerpc/include/pcpu.h
@@ -80,15 +80,20 @@
#define BOOKE_TLB_SAVELEN 16
#define BOOKE_TLBSAVE_LEN (BOOKE_TLB_SAVELEN * BOOKE_TLB_MAXNEST)
+#ifdef __powerpc64__
+#define BOOKE_PCPU_PAD 773
+#else
+#define BOOKE_PCPU_PAD 173
+#endif
#define PCPU_MD_BOOKE_FIELDS \
register_t pc_booke_critsave[BOOKE_CRITSAVE_LEN]; \
register_t pc_booke_mchksave[CPUSAVE_LEN]; \
register_t pc_booke_tlbsave[BOOKE_TLBSAVE_LEN]; \
register_t pc_booke_tlb_level; \
vm_offset_t pc_qmap_addr; \
- uint32_t *pc_booke_tlb_lock; \
+ uintptr_t *pc_booke_tlb_lock; \
int pc_tid_next; \
- char __pad[173]
+ char __pad[BOOKE_PCPU_PAD]
/* Definitions for register offsets within the exception tmp save areas */
#define CPUSAVE_R27 0 /* where r27 gets saved */
@@ -102,6 +107,8 @@
#define CPUSAVE_BOOKE_ESR 6 /* where SPR_ESR gets saved */
#define CPUSAVE_SRR0 7 /* where SRR0 gets saved */
#define CPUSAVE_SRR1 8 /* where SRR1 gets saved */
+#define BOOKE_CRITSAVE_SRR0 9 /* where real SRR0 gets saved (critical) */
+#define BOOKE_CRITSAVE_SRR1 10 /* where real SRR1 gets saved (critical) */
/* Book-E TLBSAVE is more elaborate */
#define TLBSAVE_BOOKE_LR 0
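The CPUSAVE_*/BOOKE_CRITSAVE_* constants are register_t-sized slot indices into the per-CPU save areas; genassym.c (below) multiplies them by sizeof(register_t) so the .S files can use them as byte offsets. A standalone sketch of that arithmetic:

#include <stdio.h>

typedef unsigned long register_t;	/* assumed 8 bytes on powerpc64 */

#define BOOKE_CRITSAVE_SRR0	9
#define BOOKE_CRITSAVE_SRR1	10

int
main(void)
{
	/* genassym emits index * sizeof(register_t) for the assembly */
	printf("BOOKE_CRITSAVE_SRR0 byte offset: %zu\n",
	    BOOKE_CRITSAVE_SRR0 * sizeof(register_t));
	printf("BOOKE_CRITSAVE_SRR1 byte offset: %zu\n",
	    BOOKE_CRITSAVE_SRR1 * sizeof(register_t));
	return (0);
}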
Index: head/sys/powerpc/include/pmap.h
===================================================================
--- head/sys/powerpc/include/pmap.h
+++ head/sys/powerpc/include/pmap.h
@@ -188,8 +188,16 @@
tlbtid_t pm_tid[MAXCPU]; /* TID to identify this pmap entries in TLB */
cpuset_t pm_active; /* active on cpus */
+#ifdef __powerpc64__
+ /* Page table directory, array of pointers to page directories. */
+ pte_t **pm_pp2d[PP2D_NENTRIES];
+
+ /* List of allocated pdir bufs (pdir kva regions). */
+ TAILQ_HEAD(, ptbl_buf) pm_pdir_list;
+#else
/* Page table directory, array of pointers to page tables. */
pte_t *pm_pdir[PDIR_NENTRIES];
+#endif
/* List of allocated ptbl bufs (ptbl kva regions). */
TAILQ_HEAD(, ptbl_buf) pm_ptbl_list;
Index: head/sys/powerpc/include/psl.h
===================================================================
--- head/sys/powerpc/include/psl.h
+++ head/sys/powerpc/include/psl.h
@@ -50,6 +50,10 @@
#define PSL_PMM 0x00000004UL /* performance monitor mark */
/* Machine State Register - Book-E cores */
+#ifdef __powerpc64__
+#define PSL_CM 0x80000000UL /* Computation Mode (64-bit) */
+#endif
+
#define PSL_UCLE 0x04000000UL /* User mode cache lock enable */
#define PSL_WE 0x00040000UL /* Wait state enable */
#define PSL_CE 0x00020000UL /* Critical interrupt enable */
@@ -86,7 +90,11 @@
#if defined(BOOKE_E500)
/* Initial kernel MSR, use IS=1 and DS=1. */
#define PSL_KERNSET_INIT (PSL_IS | PSL_DS)
+#ifdef __powerpc64__
+#define PSL_KERNSET (PSL_CM | PSL_CE | PSL_ME | PSL_EE)
+#else
#define PSL_KERNSET (PSL_CE | PSL_ME | PSL_EE)
+#endif
#define PSL_SRR1_MASK 0x00000000UL /* No mask on Book-E */
#elif defined(BOOKE_PPC4XX)
#define PSL_KERNSET (PSL_CE | PSL_ME | PSL_EE | PSL_FP)
Index: head/sys/powerpc/include/pte.h
===================================================================
--- head/sys/powerpc/include/pte.h
+++ head/sys/powerpc/include/pte.h
@@ -162,6 +162,83 @@
#include <machine/tlb.h>
+#ifdef __powerpc64__
+
+#include <machine/tlb.h>
+
+/*
+ * The virtual address is:
+ *
+ * 4K page size
+ * +-----+-----+-----+-------+-------------+-------------+----------------+
+ * | - |p2d#h| - | p2d#l | dir# | pte# | off in 4K page |
+ * +-----+-----+-----+-------+-------------+-------------+----------------+
+ * 63 62 61 60 59 40 39 30 29 ^ 21 20 ^ 12 11 0
+ * | |
+ * index in 1 page of pointers
+ *
+ * 1st level - pointers to page table directory (pp2d)
+ *
+ * pp2d consists of PP2D_NENTRIES entries, each being a pointer to
+ * second level entity, i.e. the page table directory (pdir).
+ */
+#define HARDWARE_WALKER
+#define PP2D_H_H 61
+#define PP2D_H_L 60
+#define PP2D_L_H 39
+#define PP2D_L_L 30 /* >30 would work with no page table pool */
+#ifndef LOCORE
+#define PP2D_SIZE (1UL << PP2D_L_L) /* va range mapped by pp2d */
+#else
+#define PP2D_SIZE (1 << PP2D_L_L) /* va range mapped by pp2d */
+#endif
+#define PP2D_L_SHIFT PP2D_L_L
+#define PP2D_L_NUM (PP2D_L_H-PP2D_L_L+1)
+#define PP2D_L_MASK ((1<<PP2D_L_NUM)-1)
+#define PP2D_H_SHIFT (PP2D_H_L-PP2D_L_NUM)
+#define PP2D_H_NUM (PP2D_H_H-PP2D_H_L+1)
+#define PP2D_H_MASK (((1<<PP2D_H_NUM)-1)<<PP2D_L_NUM)
+#define PP2D_IDX(va) (((va >> PP2D_H_SHIFT) & PP2D_H_MASK) | ((va >> PP2D_L_SHIFT) & PP2D_L_MASK))
+#define PP2D_NENTRIES (1<<(PP2D_L_NUM+PP2D_H_NUM))
+#define PP2D_ENTRY_SHIFT 3 /* log2 (sizeof(struct pte_entry **)) */
+
+/*
+ * 2nd level - page table directory (pdir)
+ *
+ * pdir consists of PDIR_NENTRIES entries, each being a pointer to
+ * second level entity, i.e. the actual page table (ptbl).
+ */
+#define PDIR_H (PP2D_L_L-1)
+#define PDIR_L 21
+#define PDIR_NUM (PDIR_H-PDIR_L+1)
+#define PDIR_SIZE (1 << PDIR_L) /* va range mapped by pdir */
+#define PDIR_MASK ((1<<PDIR_NUM)-1)
+#define PDIR_SHIFT PDIR_L
+#define PDIR_NENTRIES (1<<PDIR_NUM)
+#define PDIR_IDX(va) (((va) >> PDIR_SHIFT) & PDIR_MASK)
+#define PDIR_ENTRY_SHIFT 3 /* log2 (sizeof(struct pte_entry *)) */
+#define PDIR_PAGES ((PDIR_NENTRIES * (1<<PDIR_ENTRY_SHIFT)) / PAGE_SIZE)
+
+/*
+ * 3rd level - page table (ptbl)
+ *
+ * Page table covers PTBL_NENTRIES page table entries. Page
+ * table entry (pte) is 64 bit wide and defines mapping
+ * for a single page.
+ */
+#define PTBL_H (PDIR_L-1)
+#define PTBL_L PAGE_SHIFT
+#define PTBL_NUM (PTBL_H-PTBL_L+1)
+#define PTBL_MASK ((1<<PTBL_NUM)-1)
+#define PTBL_SHIFT PTBL_L
+#define PTBL_SIZE PAGE_SIZE /* va range mapped by ptbl entry */
+#define PTBL_NENTRIES (1<<PTBL_NUM)
+#define PTBL_IDX(va) ((va >> PTBL_SHIFT) & PTBL_MASK)
+#define PTBL_ENTRY_SHIFT 3 /* log2 (sizeof (struct pte_entry)) */
+#define PTBL_PAGES ((PTBL_NENTRIES * (1<<PTBL_ENTRY_SHIFT)) / PAGE_SIZE)
+
+#define KERNEL_LINEAR_MAX 0xc000000040000000
+#else
/*
* 1st level - page table directory (pdir)
*
@@ -197,6 +274,8 @@
#define PTBL_PAGES 2
#define PTBL_ENTRY_SHIFT 3 /* entry size is 2^3 = 8 bytes */
+#endif
+
/*
* Flags for pte_remove() routine.
*/
@@ -268,6 +347,29 @@
#define PTE_MANAGED 0x00000002 /* Managed */
#define PTE_REFERENCED 0x00040000 /* Referenced */
+/*
+ * Page Table Entry definitions and macros.
+ *
+ * We use the hardware page table entry format:
+ *
+ * 63 24 23 19 18 17 14 13 12 11 8 7 6 5 4 3 2 1 0
+ * ---------------------------------------------------------------
+ * ARPN(12:51) WIMGE R U0:U3 SW0 C PSIZE UX SX UW SW UR SR SW1 V
+ * ---------------------------------------------------------------
+ */
+
+/* PTE fields. */
+#define PTE_TSIZE_SHIFT (63-54)
+#define PTE_TSIZE_MASK 0x7
+#define PTE_TSIZE_SHIFT_DIRECT (63-55)
+#define PTE_TSIZE_MASK_DIRECT 0xf
+#define PTE_PS_DIRECT(ps) ((ps)<<PTE_TSIZE_SHIFT_DIRECT) /* Direct Entry Page Size */
+#define PTE_PS(ps) ((ps)<<PTE_TSIZE_SHIFT) /* Page Size */
+
+/* Macro argument must be of pte_t type. */
+#define PTE_TSIZE(pte) (int)((*pte >> PTE_TSIZE_SHIFT) & PTE_TSIZE_MASK)
+#define PTE_TSIZE_DIRECT(pte) (int)((*pte >> PTE_TSIZE_SHIFT_DIRECT) & PTE_TSIZE_MASK_DIRECT)
+
/* Macro argument must be of pte_t type. */
#define PTE_ARPN_SHIFT 12
#define PTE_FLAGS_MASK 0x00ffffff
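Putting the pp2d/pdir/ptbl constants together: a Book-E 64-bit virtual address carries a split pp2d index (bits 61:60 and 39:30), a 9-bit pdir index (29:21), a 9-bit ptbl index (20:12), and a 12-bit page offset. A self-contained sketch mirroring PP2D_IDX/PDIR_IDX/PTBL_IDX:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t va = 0xc000000000100000UL;	/* arbitrary example VA */

	/* PP2D_IDX: bits 61:60 land in index bits 11:10 (shift by
	 * PP2D_H_SHIFT = 50), bits 39:30 in index bits 9:0 */
	unsigned pp2d = ((va >> 50) & 0xc00) | ((va >> 30) & 0x3ff);
	unsigned dir  = (va >> 21) & 0x1ff;	/* PDIR_IDX */
	unsigned pte  = (va >> 12) & 0x1ff;	/* PTBL_IDX */
	unsigned off  = va & 0xfff;		/* offset in 4K page */

	printf("pp2d=%#x dir=%#x pte=%#x off=%#x\n", pp2d, dir, pte, off);
	return (0);
}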
Index: head/sys/powerpc/include/spr.h
===================================================================
--- head/sys/powerpc/include/spr.h
+++ head/sys/powerpc/include/spr.h
@@ -192,6 +192,18 @@
#define FSL_E5500 0x8024
#define FSL_E6500 0x8040
+#define SPR_EPCR 0x133
+#define EPCR_EXTGS 0x80000000
+#define EPCR_DTLBGS 0x40000000
+#define EPCR_ITLBGS 0x20000000
+#define EPCR_DSIGS 0x10000000
+#define EPCR_ISIGS 0x08000000
+#define EPCR_DUVGS 0x04000000
+#define EPCR_ICM 0x02000000
+#define EPCR_GICMGS 0x01000000
+#define EPCR_DGTMI 0x00800000
+#define EPCR_DMIUH 0x00400000
+#define EPCR_PMGS 0x00200000
#define SPR_SPEFSCR 0x200 /* ..8 Signal Processing Engine FSCR. */
#define SPR_IBAT0U 0x210 /* .68 Instruction BAT Reg 0 Upper */
#define SPR_IBAT0U 0x210 /* .6. Instruction BAT Reg 0 Upper */
@@ -259,6 +271,7 @@
#define SPR_DBAT6L 0x23d /* .6. Data BAT Reg 6 Lower */
#define SPR_DBAT7U 0x23e /* .6. Data BAT Reg 7 Upper */
#define SPR_DBAT7L 0x23f /* .6. Data BAT Reg 7 Lower */
+#define SPR_SPRG8 0x25c /* ..8 SPR General 8 */
#define SPR_MI_CTR 0x310 /* ..8 IMMU control */
#define Mx_CTR_GPM 0x80000000 /* Group Protection Mode */
#define Mx_CTR_PPM 0x40000000 /* Page Protection Mode */
Index: head/sys/powerpc/include/tlb.h
===================================================================
--- head/sys/powerpc/include/tlb.h
+++ head/sys/powerpc/include/tlb.h
@@ -65,7 +65,11 @@
#define TLB_SIZE_1G 10
#define TLB_SIZE_4G 11
+#ifdef __powerpc64__
+#define MAS2_EPN_MASK 0xFFFFFFFFFFFFF000UL
+#else
#define MAS2_EPN_MASK 0xFFFFF000
+#endif
#define MAS2_EPN_SHIFT 12
#define MAS2_X0 0x00000040
#define MAS2_X1 0x00000020
@@ -137,7 +141,11 @@
vm_offset_t virt;
vm_size_t size;
uint32_t mas1;
+#ifdef __powerpc64__
+ uint64_t mas2;
+#else
uint32_t mas2;
+#endif
uint32_t mas3;
uint32_t mas7;
} tlb_entry_t;
@@ -217,8 +225,8 @@
struct pmap;
-void tlb_lock(uint32_t *);
-void tlb_unlock(uint32_t *);
+void tlb_lock(uintptr_t *);
+void tlb_unlock(uintptr_t *);
void tlb1_ap_prep(void);
int tlb1_set_entry(vm_offset_t, vm_paddr_t, vm_size_t, uint32_t);
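With mas2 widened to 64 bits, MAS2_EPN_MASK keeps a full 64-bit effective page number while the WIMGE attribute bits stay in the low 12 bits. A hypothetical helper showing the combination (MAS2_M's value assumed from the surrounding header):

#include <stdint.h>

#define MAS2_EPN_MASK	0xFFFFFFFFFFFFF000UL
#define MAS2_M		0x00000004UL	/* assumed: memory-coherence bit */

static inline uint64_t
mas2_make(uint64_t va, uint64_t wimge)
{
	/* EPN above bit 12, attribute bits below */
	return ((va & MAS2_EPN_MASK) | (wimge & ~MAS2_EPN_MASK));
}

A coherent kernel mapping would pass MAS2_M for wimge.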
Index: head/sys/powerpc/include/vmparam.h
===================================================================
--- head/sys/powerpc/include/vmparam.h
+++ head/sys/powerpc/include/vmparam.h
@@ -69,7 +69,11 @@
#if !defined(LOCORE)
#ifdef __powerpc64__
#define VM_MIN_ADDRESS (0x0000000000000000UL)
+#ifdef AIM
#define VM_MAXUSER_ADDRESS (0xfffffffffffff000UL)
+#else
+#define VM_MAXUSER_ADDRESS (0x7ffffffffffff000UL)
+#endif
#define VM_MAX_ADDRESS (0xffffffffffffffffUL)
#else
#define VM_MIN_ADDRESS ((vm_offset_t)0)
@@ -78,23 +82,29 @@
#endif
#define SHAREDPAGE (VM_MAXUSER_ADDRESS - PAGE_SIZE)
#else /* LOCORE */
-#if !defined(__powerpc64__) && defined(BOOKE)
+#ifdef BOOKE
#define VM_MIN_ADDRESS 0
+#ifdef __powerpc64__
+#define VM_MAXUSER_ADDRESS 0x7ffffffffffff000
+#else
#define VM_MAXUSER_ADDRESS 0x7ffff000
#endif
+#endif
#endif /* LOCORE */
#define FREEBSD32_SHAREDPAGE (VM_MAXUSER_ADDRESS32 - PAGE_SIZE)
#define FREEBSD32_USRSTACK FREEBSD32_SHAREDPAGE
-#ifdef AIM
-#define KERNBASE 0x00100000UL /* start of kernel virtual */
-
#ifdef __powerpc64__
#define VM_MIN_KERNEL_ADDRESS 0xc000000000000000UL
#define VM_MAX_KERNEL_ADDRESS 0xc0000001c7ffffffUL
#define VM_MAX_SAFE_KERNEL_ADDRESS VM_MAX_KERNEL_ADDRESS
-#else
+#endif
+
+#ifdef AIM
+#define KERNBASE 0x00100000UL /* start of kernel virtual */
+
+#ifndef __powerpc64__
#define VM_MIN_KERNEL_ADDRESS ((vm_offset_t)KERNEL_SR << ADDR_SR_SHFT)
#define VM_MAX_SAFE_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 2*SEGMENT_LENGTH -1)
#define VM_MAX_KERNEL_ADDRESS (VM_MIN_KERNEL_ADDRESS + 3*SEGMENT_LENGTH - 1)
@@ -108,11 +118,19 @@
#else /* Book-E */
+#ifdef __powerpc64__
+#ifndef LOCORE
+#define KERNBASE 0xc000000000000000UL /* start of kernel virtual */
+#else
+#define KERNBASE 0xc000000000000000 /* start of kernel virtual */
+#endif
+#else
#define KERNBASE 0xc0000000 /* start of kernel virtual */
#define VM_MIN_KERNEL_ADDRESS KERNBASE
#define VM_MAX_KERNEL_ADDRESS 0xffffefff
#define VM_MAX_SAFE_KERNEL_ADDRESS VM_MAX_KERNEL_ADDRESS
+#endif
#endif /* AIM/E500 */
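Under these Book-E 64-bit constants the user half ends at 0x7ffffffffffff000 and kernel virtual addresses start at 0xc000000000000000, so classifying an address reduces to a bound check, sketched here:

#include <stdbool.h>
#include <stdint.h>

#define VM_MAXUSER_ADDRESS	0x7ffffffffffff000UL
#define KERNBASE		0xc000000000000000UL

static bool
va_is_user(uint64_t va)
{
	return (va < VM_MAXUSER_ADDRESS);
}

static bool
va_is_kernel(uint64_t va)
{
	return (va >= KERNBASE);
}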
Index: head/sys/powerpc/powerpc/db_interface.c
===================================================================
--- head/sys/powerpc/powerpc/db_interface.c
+++ head/sys/powerpc/powerpc/db_interface.c
@@ -40,6 +40,8 @@
if (ret == 0) {
src = (char *)addr;
+ if (size == 8)
+ *((uint64_t*)data) = *((uint64_t*)src);
- if (size == 4)
+ else if (size == 4)
*((int *)data) = *((int *)src);
else if (size == 2)
@@ -67,10 +69,12 @@
dst = (char *)addr;
cnt = size;
- if (size == 4 && (addr & 3) == 0 && ((uintptr_t)data & 3) == 0)
+ if (size == 8)
+ *((uint64_t*)dst) = *((uint64_t*)data);
+ else if (size == 4)
*((int*)dst) = *((int*)data);
else
- if (size == 2 && (addr & 1) == 0 && ((uintptr_t)data & 1) == 0)
+ if (size == 2)
*((short*)dst) = *((short*)data);
else
while (cnt-- > 0)
Index: head/sys/powerpc/powerpc/exec_machdep.c
===================================================================
--- head/sys/powerpc/powerpc/exec_machdep.c
+++ head/sys/powerpc/powerpc/exec_machdep.c
@@ -546,9 +546,13 @@
tf->srr0 = imgp->entry_addr;
#ifdef __powerpc64__
tf->fixreg[12] = imgp->entry_addr;
+ #ifdef AIM
tf->srr1 = PSL_SF | PSL_USERSET | PSL_FE_DFLT;
if (mfmsr() & PSL_HV)
tf->srr1 |= PSL_HV;
+ #elif defined(BOOKE)
+ tf->srr1 = PSL_CM | PSL_USERSET | PSL_FE_DFLT;
+ #endif
#else
tf->srr1 = PSL_USERSET | PSL_FE_DFLT;
#endif
@@ -579,9 +583,13 @@
tf->srr0 = imgp->entry_addr;
tf->srr1 = PSL_USERSET | PSL_FE_DFLT;
+#ifdef AIM
tf->srr1 &= ~PSL_SF;
if (mfmsr() & PSL_HV)
tf->srr1 |= PSL_HV;
+#elif defined(BOOKE)
+ tf->srr1 &= ~PSL_CM;
+#endif
td->td_pcb->pcb_flags = 0;
}
#endif
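Both hunks draw the same line: AIM marks a 64-bit user context through MSR[SF] (plus PSL_HV when entered hypervisor-privileged), Book-E through MSR[CM], and 32-bit images clear the respective bit. Condensed into a sketch, with the PSL_USERSET base value assumed from psl.h:

#include <stdbool.h>
#include <stdint.h>

#define PSL_SF 0x8000000000000000UL	/* AIM: sixty-four-bit mode */
#define PSL_CM 0x0000000080000000UL	/* Book-E: computation mode */

static uint64_t
user_srr1(uint64_t psl_userset, bool is64, bool booke)
{
	uint64_t mode = booke ? PSL_CM : PSL_SF;

	return (is64 ? (psl_userset | mode) : (psl_userset & ~mode));
}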
Index: head/sys/powerpc/powerpc/genassym.c
===================================================================
--- head/sys/powerpc/powerpc/genassym.c
+++ head/sys/powerpc/powerpc/genassym.c
@@ -82,6 +82,8 @@
ASSYM(CPUSAVE_AIM_DSISR, CPUSAVE_AIM_DSISR*sizeof(register_t));
ASSYM(CPUSAVE_BOOKE_DEAR, CPUSAVE_BOOKE_DEAR*sizeof(register_t));
ASSYM(CPUSAVE_BOOKE_ESR, CPUSAVE_BOOKE_ESR*sizeof(register_t));
+ASSYM(BOOKE_CRITSAVE_SRR0, BOOKE_CRITSAVE_SRR0*sizeof(register_t));
+ASSYM(BOOKE_CRITSAVE_SRR1, BOOKE_CRITSAVE_SRR1*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_LR, TLBSAVE_BOOKE_LR*sizeof(register_t));
ASSYM(TLBSAVE_BOOKE_CR, TLBSAVE_BOOKE_CR*sizeof(register_t));
@@ -117,7 +119,11 @@
ASSYM(USER_SR, USER_SR);
#endif
#elif defined(BOOKE)
+#ifdef __powerpc64__
+ASSYM(PM_PP2D, offsetof(struct pmap, pm_pp2d));
+#else
ASSYM(PM_PDIR, offsetof(struct pmap, pm_pdir));
+#endif
/*
* With pte_t being a bitfield struct, these fields cannot be addressed via
* offsetof().
@@ -216,6 +222,9 @@
ASSYM(KERNBASE, KERNBASE);
ASSYM(MAXCOMLEN, MAXCOMLEN);
+#ifdef __powerpc64__
+ASSYM(PSL_CM, PSL_CM);
+#endif
ASSYM(PSL_DE, PSL_DE);
ASSYM(PSL_DS, PSL_DS);
ASSYM(PSL_IS, PSL_IS);
