Index: sys/riscv/include/pmap.h
===================================================================
--- sys/riscv/include/pmap.h
+++ sys/riscv/include/pmap.h
@@ -153,6 +153,8 @@
#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list))
+int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t);
+
#endif /* _KERNEL */
#endif /* !LOCORE */
Index: sys/riscv/include/pte.h
===================================================================
--- sys/riscv/include/pte.h
+++ sys/riscv/include/pte.h
@@ -65,7 +65,7 @@
#define Ln_ENTRIES (1 << 9)
#define Ln_ADDR_MASK (Ln_ENTRIES - 1)
-/* Bits 9:7 are reserved for software */
+/* Bits 9:8 are reserved for software */
#define PTE_SW_MANAGED (1 << 9)
#define PTE_SW_WIRED (1 << 8)
#define PTE_D (1 << 7) /* Dirty */
@@ -78,6 +78,7 @@
#define PTE_V (1 << 0) /* Valid */
#define PTE_RWX (PTE_R | PTE_W | PTE_X)
#define PTE_RX (PTE_R | PTE_X)
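+/*
+ * Attributes for a kernel mapping: valid, readable/writable/executable,
+ * with PTE_A and PTE_D preset so that CPUs which fault instead of
+ * updating A/D in hardware never trap on kernel mappings.
+ */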
+#define PTE_KERN (PTE_V | PTE_RWX | PTE_A | PTE_D)
#define PTE_PPN0_S 10
#define PTE_PPN1_S 19
Index: sys/riscv/riscv/locore.S
===================================================================
--- sys/riscv/riscv/locore.S
+++ sys/riscv/riscv/locore.S
@@ -94,7 +94,7 @@
add t3, t4, t2
li t5, 0
2:
- li t0, (PTE_V | PTE_RWX | PTE_D)
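+ /* Superpage PTE: PTE_KERN attributes | (PPN << PTE_PPN1_S) */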
+ li t0, PTE_KERN
slli t2, t4, PTE_PPN1_S /* << PTE_PPN1_S */
or t5, t0, t2
sd t5, (s1) /* Store PTE entry to position */
@@ -126,7 +126,7 @@
mv s2, s11
srli s2, s2, PAGE_SHIFT
- li t0, (PTE_V | PTE_RWX | PTE_D)
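+ /* Page PTE: PTE_KERN attributes | (PPN << PTE_PPN0_S) */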
+ li t0, PTE_KERN
slli t2, s2, PTE_PPN0_S /* << PTE_PPN0_S */
or t0, t0, t2
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -15,7 +15,7 @@
* All rights reserved.
* Copyright (c) 2014 The FreeBSD Foundation
* All rights reserved.
- * Copyright (c) 2015-2017 Ruslan Bukin
+ * Copyright (c) 2015-2018 Ruslan Bukin
* All rights reserved.
*
* This code is derived from software contributed to Berkeley by
@@ -505,7 +505,7 @@
/* superpages */
pn = (pa / PAGE_SIZE);
- entry = (PTE_V | PTE_RWX);
+ entry = PTE_KERN;
entry |= (pn << PTE_PPN0_S);
pmap_load_store(&l1[l1_slot], entry);
}
@@ -933,7 +933,7 @@
KASSERT(l3 != NULL, ("Invalid page table, va: 0x%lx", va));
pn = (pa / PAGE_SIZE);
- entry = (PTE_V | PTE_RWX);
+ entry = PTE_KERN;
entry |= (pn << PTE_PPN0_S);
pmap_load_store(l3, entry);
@@ -1035,7 +1035,7 @@
pn = (pa / PAGE_SIZE);
l3 = pmap_l3(kernel_pmap, va);
- entry = (PTE_V | PTE_RWX);
+ entry = PTE_KERN;
entry |= (pn << PTE_PPN0_S);
pmap_load_store(l3, entry);
@@ -1450,7 +1450,8 @@
continue; /* try again */
}
l2 = pmap_l1_to_l2(l1, kernel_vm_end);
- if ((pmap_load(l2) & PTE_A) != 0) {
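+ /*
+ * A valid L2 entry with RWX clear points to an L3 page table,
+ * so this range of KVA is already provisioned.
+ */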
+ if ((pmap_load(l2) & PTE_V) != 0 &&
+ (pmap_load(l2) & PTE_RWX) == 0) {
kernel_vm_end = (kernel_vm_end + L2_SIZE) & ~L2_OFFSET;
if (kernel_vm_end - 1 >= vm_map_max(kernel_map)) {
kernel_vm_end = vm_map_max(kernel_map);
@@ -2003,6 +2004,36 @@
PMAP_UNLOCK(pmap);
}
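+/*
+ * Software emulation of the hardware-managed accessed (A) and dirty (D)
+ * PTE bits.  If the faulting access is otherwise permitted by the PTE and
+ * only the A or D bit is missing, set the bit, flush the stale TLB entry
+ * and return 1 to report the fault as handled; return 0 to fall back to
+ * vm_fault().
+ */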
+int
+pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t prot)
+{
+ pt_entry_t orig_l3;
+ pt_entry_t new_l3;
+ pt_entry_t *l3;
+
+ l3 = pmap_l3(pmap, va);
+ if (l3 == NULL)
+ return (0);
+
+ orig_l3 = pmap_load(l3);
+ new_l3 = orig_l3;
+ /* Write to a writable, not-yet-dirty page: set the dirty bit. */
+ if (((orig_l3 & (PTE_D | PTE_W)) == PTE_W) &&
+ ((prot & VM_PROT_WRITE) != 0))
+ new_l3 |= PTE_D;
+
+ /* Read of a readable, not-yet-accessed page: set the accessed bit. */
+ if (((orig_l3 & (PTE_A | PTE_R)) == PTE_R) &&
+ ((prot & VM_PROT_READ) != 0))
+ new_l3 |= PTE_A;
+
+ if (orig_l3 != new_l3) {
+ pmap_load_store(l3, new_l3);
+ pmap_invalidate_page(pmap, va);
+ return (1);
+ }
+
+ return (0);
+}
+
/*
* Insert the given physical page (p) at
* the specified virtual address (v) in the
@@ -2043,6 +2074,8 @@
new_l3 |= PTE_W;
if ((va >> 63) == 0)
new_l3 |= PTE_U;
+ else
+ new_l3 |= PTE_A; /* Kernel mapping: preset PTE_A to avoid a fixup fault. */
new_l3 |= (pn << PTE_PPN0_S);
if ((flags & PMAP_ENTER_WIRED) != 0)
@@ -2414,8 +2447,7 @@
pa = VM_PAGE_TO_PHYS(m);
pn = (pa / PAGE_SIZE);
- /* RISCVTODO: check permissions */
- entry = (PTE_V | PTE_RWX);
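+ /*
+ * Map read-only with PTE_A clear: the first access faults and
+ * pmap_fault_fixup() marks the page accessed.
+ */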
+ entry = (PTE_V | PTE_R);
entry |= (pn << PTE_PPN0_S);
/*
Index: sys/riscv/riscv/trap.c
===================================================================
--- sys/riscv/riscv/trap.c
+++ sys/riscv/riscv/trap.c
@@ -212,6 +212,9 @@
ftype = (VM_PROT_READ);
}
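+ /*
+ * Before taking the full vm_fault() path, see whether the fault is
+ * just a missing accessed/dirty bit that pmap_fault_fixup() can set.
+ */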
+ if (pmap_fault_fixup(map->pmap, va, ftype))
+ goto done;
+
if (map != kernel_map) {
/*
* Keep swapout from messing with us during this
@@ -256,6 +259,7 @@
}
}
+done:
if (lower)
userret(td, frame);
}