Index: sys/arm64/arm64/machdep.c
===================================================================
--- sys/arm64/arm64/machdep.c
+++ sys/arm64/arm64/machdep.c
@@ -377,7 +377,10 @@
 		addr = regs->db_breakregs[i].dbr_addr;
 		ctrl = regs->db_breakregs[i].dbr_ctrl;
 
-		/* Don't let the user set a breakpoint on a kernel address. */
+		/*
+		 * Don't let the user set a breakpoint on a kernel or
+		 * non-canonical user address.
+		 */
 		if (addr >= VM_MAXUSER_ADDRESS)
 			return (EINVAL);
 
@@ -412,7 +415,10 @@
 		addr = regs->db_watchregs[i].dbw_addr;
 		ctrl = regs->db_watchregs[i].dbw_ctrl;
 
-		/* Don't let the user set a watchpoint on a kernel address. */
+		/*
+		 * Don't let the user set a watchpoint on a kernel or
+		 * non-canonical user address.
+		 */
 		if (addr >= VM_MAXUSER_ADDRESS)
 			return (EINVAL);
 
Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -470,7 +470,9 @@
 	 * The valid bit may be clear if pmap_update_entry() is concurrently
 	 * modifying the entry, so for KVA only the entry type may be checked.
 	 */
-	KASSERT(va >= VM_MAX_USER_ADDRESS || (l1 & ATTR_DESCR_VALID) != 0,
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
+	KASSERT(ADDR_IS_KERNEL(va) || (l1 & ATTR_DESCR_VALID) != 0,
 	    ("%s: L1 entry %#lx for %#lx is invalid", __func__, l1, va));
 	KASSERT((l1 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
 	    ("%s: L1 entry %#lx for %#lx is a leaf", __func__, l1, va));
@@ -502,7 +504,9 @@
 	 * The valid bit may be clear if pmap_update_entry() is concurrently
 	 * modifying the entry, so for KVA only the entry type may be checked.
 	 */
-	KASSERT(va >= VM_MAX_USER_ADDRESS || (l2 & ATTR_DESCR_VALID) != 0,
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
+	KASSERT(ADDR_IS_KERNEL(va) || (l2 & ATTR_DESCR_VALID) != 0,
 	    ("%s: L2 entry %#lx for %#lx is invalid", __func__, l2, va));
 	KASSERT((l2 & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_TABLE,
 	    ("%s: L2 entry %#lx for %#lx is a leaf", __func__, l2, va));
@@ -1580,7 +1584,9 @@
 	vm_offset_t va;
 	int lvl;
 
-	KASSERT(sva >= VM_MIN_KERNEL_ADDRESS, ("usermode va %lx", sva));
+	KASSERT(ADDR_IS_CANONICAL(sva),
+	    ("%s: Address not in canonical form: %lx", __func__, sva));
+	KASSERT(ADDR_IS_KERNEL(sva), ("usermode va %lx", sva));
 
 	va = sva;
 	while (count-- > 0) {
@@ -1700,7 +1706,9 @@
 {
 	vm_page_t mpte;
 
-	if (va >= VM_MAXUSER_ADDRESS)
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
+	if (ADDR_IS_KERNEL(va))
 		return (0);
 	KASSERT(ptepde != 0, ("pmap_unuse_pt: ptepde != 0"));
 	mpte = PHYS_TO_VM_PAGE(ptepde & ~ATTR_MASK);
@@ -1954,17 +1962,20 @@
 	vm_page_t l2pg;
 	vm_pindex_t l2pindex;
 
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
+
 retry:
 	l1 = pmap_l1(pmap, va);
 	if (l1 != NULL && (pmap_load(l1) & ATTR_DESCR_MASK) == L1_TABLE) {
 		l2 = pmap_l1_to_l2(l1, va);
-		if (va < VM_MAXUSER_ADDRESS) {
+		if (!ADDR_IS_KERNEL(va)) {
 			/* Add a reference to the L2 page. */
 			l2pg = PHYS_TO_VM_PAGE(pmap_load(l1) & ~ATTR_MASK);
 			l2pg->ref_count++;
 		} else
 			l2pg = NULL;
-	} else if (va < VM_MAXUSER_ADDRESS) {
+	} else if (!ADDR_IS_KERNEL(va)) {
 		/* Allocate a L2 page. */
 		l2pindex = pmap_l2_pindex(va) >> Ln_ENTRIES_SHIFT;
 		l2pg = _pmap_alloc_l3(pmap, NUL2E + l2pindex, lockp);
@@ -2930,11 +2941,15 @@
 	vm_offset_t va;
 	vm_page_t l3pg, m;
 
+	KASSERT(ADDR_IS_CANONICAL(sva),
+	    ("%s: Start address not in canonical form: %lx", __func__, sva));
+	KASSERT(ADDR_IS_CANONICAL(eva) || eva == VM_MAX_USER_ADDRESS,
+	    ("%s: End address not in canonical form: %lx", __func__, eva));
+
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	KASSERT(rounddown2(sva, L2_SIZE) + L2_SIZE == roundup2(eva, L2_SIZE),
 	    ("pmap_remove_l3_range: range crosses an L3 page table boundary"));
-	l3pg = sva < VM_MAXUSER_ADDRESS ? PHYS_TO_VM_PAGE(l2e & ~ATTR_MASK) :
-	    NULL;
+	l3pg = !ADDR_IS_KERNEL(sva) ? PHYS_TO_VM_PAGE(l2e & ~ATTR_MASK) : NULL;
 	va = eva;
 	for (l3 = pmap_l2_to_l3(&l2e, sva); sva != eva; l3++, sva += L3_SIZE) {
 		if (!pmap_l3_valid(pmap_load(l3))) {
@@ -3695,6 +3710,9 @@
 	boolean_t nosleep;
 	int lvl, rv;
 
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
+
 	va = trunc_page(va);
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		VM_PAGE_OBJECT_BUSY_ASSERT(m);
@@ -3706,7 +3724,7 @@
 	if ((flags & PMAP_ENTER_WIRED) != 0)
 		new_l3 |= ATTR_SW_WIRED;
 	if (pmap->pm_stage == PM_STAGE1) {
-		if (va < VM_MAXUSER_ADDRESS)
+		if (!ADDR_IS_KERNEL(va))
 			new_l3 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
 		else
 			new_l3 |= ATTR_S1_UXN;
@@ -3773,7 +3791,7 @@
 	pde = pmap_pde(pmap, va, &lvl);
 	if (pde != NULL && lvl == 2) {
 		l3 = pmap_l2_to_l3(pde, va);
-		if (va < VM_MAXUSER_ADDRESS && mpte == NULL) {
+		if (!ADDR_IS_KERNEL(va) && mpte == NULL) {
 			mpte = PHYS_TO_VM_PAGE(pmap_load(pde) & ~ATTR_MASK);
 			mpte->ref_count++;
 		}
@@ -3783,7 +3801,7 @@
 		if ((pmap_load(l2) & ATTR_DESCR_MASK) == L2_BLOCK &&
 		    (l3 = pmap_demote_l2_locked(pmap, l2, va, &lock)) != NULL) {
 			l3 = &l3[pmap_l3_index(va)];
-			if (va < VM_MAXUSER_ADDRESS) {
+			if (!ADDR_IS_KERNEL(va)) {
 				mpte = PHYS_TO_VM_PAGE(
 				    pmap_load(l2) & ~ATTR_MASK);
 				mpte->ref_count++;
@@ -3792,7 +3810,7 @@
 		}
 		/* We need to allocate an L3 table. */
 	}
-	if (va < VM_MAXUSER_ADDRESS) {
+	if (!ADDR_IS_KERNEL(va)) {
 		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
 
 		/*
@@ -4017,6 +4035,8 @@
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PMAP_ASSERT_STAGE1(pmap);
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
 
 	new_l2 = (pd_entry_t)(VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT |
 	    ATTR_S1_IDX(m->md.pv_memattr) | ATTR_S1_AP(ATTR_S1_AP_RO) |
@@ -4028,7 +4048,7 @@
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
 		new_l2 |= ATTR_S1_XN;
-	if (va < VM_MAXUSER_ADDRESS)
+	if (!ADDR_IS_KERNEL(va))
 		new_l2 |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
 	else
 		new_l2 |= ATTR_S1_UXN;
@@ -4077,6 +4097,8 @@
 	vm_page_t l2pg, mt;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
 
 	if ((l2 = pmap_alloc_l2(pmap, va, &l2pg, (flags &
 	    PMAP_ENTER_NOSLEEP) != 0 ? NULL : lockp)) == NULL) {
@@ -4091,9 +4113,10 @@
 	if ((old_l2 = pmap_load(l2)) != 0) {
 		KASSERT(l2pg == NULL || l2pg->ref_count > 1,
 		    ("pmap_enter_l2: l2pg's ref count is too low"));
-		if ((flags & PMAP_ENTER_NOREPLACE) != 0 && (va <
-		    VM_MAXUSER_ADDRESS || (old_l2 & ATTR_DESCR_MASK) ==
-		    L2_BLOCK || !pmap_every_pte_zero(old_l2 & ~ATTR_MASK))) {
+		if ((flags & PMAP_ENTER_NOREPLACE) != 0 &&
+		    (!ADDR_IS_KERNEL(va) ||
+		    (old_l2 & ATTR_DESCR_MASK) == L2_BLOCK ||
+		    !pmap_every_pte_zero(old_l2 & ~ATTR_MASK))) {
 			if (l2pg != NULL)
 				l2pg->ref_count--;
 			CTR2(KTR_PMAP, "pmap_enter_l2: failure for va %#lx"
@@ -4107,7 +4130,7 @@
 		else
 			pmap_remove_l3_range(pmap, old_l2, va, va + L2_SIZE,
 			    &free, lockp);
-		if (va < VM_MAXUSER_ADDRESS) {
+		if (!ADDR_IS_KERNEL(va)) {
 			vm_page_free_pages_toq(&free, true);
 			KASSERT(pmap_load(l2) == 0,
 			    ("pmap_enter_l2: non-zero L2 entry %p", l2));
@@ -4246,13 +4269,15 @@
 	    ("pmap_enter_quick_locked: managed mapping within the clean submap"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PMAP_ASSERT_STAGE1(pmap);
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
 	CTR2(KTR_PMAP, "pmap_enter_quick_locked: %p %lx", pmap, va);
 
 	/*
 	 * In the case that a page table page is not
 	 * resident, we are creating it here.
 	 */
-	if (va < VM_MAXUSER_ADDRESS) {
+	if (!ADDR_IS_KERNEL(va)) {
 		vm_pindex_t l2pindex;
 
 		/*
@@ -4336,7 +4361,7 @@
 	if ((prot & VM_PROT_EXECUTE) == 0 ||
 	    m->md.pv_memattr == VM_MEMATTR_DEVICE)
 		l3_val |= ATTR_S1_XN;
-	if (va < VM_MAXUSER_ADDRESS)
+	if (!ADDR_IS_KERNEL(va))
 		l3_val |= ATTR_S1_AP(ATTR_S1_AP_USER) | ATTR_S1_PXN;
 	else
 		l3_val |= ATTR_S1_UXN;
@@ -6082,6 +6107,9 @@
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
 	PMAP_ASSERT_STAGE1(pmap);
 
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
+
 	l3 = NULL;
 	oldl2 = pmap_load(l2);
 	KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
@@ -6121,7 +6149,7 @@
 		 * so the direct map region is the only part of the
 		 * kernel address space that must be handled here.
 		 */
-		KASSERT(va < VM_MAXUSER_ADDRESS || VIRT_IN_DMAP(va),
+		KASSERT(!ADDR_IS_KERNEL(va) || VIRT_IN_DMAP(va),
 		    ("pmap_demote_l2: No saved mpte for va %#lx", va));
 
 		/*
@@ -6146,7 +6174,7 @@
 			goto fail;
 		}
 
-		if (va < VM_MAXUSER_ADDRESS) {
+		if (!ADDR_IS_KERNEL(va)) {
 			ml3->ref_count = NL3PG;
 			pmap_resident_count_inc(pmap, 1);
 		}
@@ -6509,7 +6537,10 @@
 {
 	PMAP_ASSERT_STAGE1(pmap);
 
-	if (va >= VM_MIN_KERNEL_ADDRESS) {
+	KASSERT(ADDR_IS_CANONICAL(va),
+	    ("%s: Address not in canonical form: %lx", __func__, va));
+
+	if (ADDR_IS_KERNEL(va)) {
 		cpu_icache_sync_range(va, sz);
 	} else {
 		u_int len, offset;
Index: sys/arm64/arm64/trap.c
===================================================================
--- sys/arm64/arm64/trap.c
+++ sys/arm64/arm64/trap.c
@@ -261,8 +261,12 @@
 	else {
 		intr_enable();
 
+		/* We received a TBI/PAC/etc. fault from the kernel */
+		if (!ADDR_IS_CANONICAL(far))
+			goto bad_far;
+
 		/* The top bit tells us which range to use */
-		if (far >= VM_MAXUSER_ADDRESS) {
+		if (ADDR_IS_KERNEL(far)) {
 			map = kernel_map;
 		} else {
 			map = &p->p_vmspace->vm_map;
@@ -306,6 +310,7 @@
 	/* Fault in the page. */
 	error = vm_fault_trap(map, far, ftype, VM_FAULT_NORMAL, &sig, &ucode);
 	if (error != KERN_SUCCESS) {
+bad_far:
 		if (lower) {
 			call_trapsignal(td, sig, ucode, (void *)far,
 			    ESR_ELx_EXCEPTION(esr));
Index: sys/arm64/include/vmparam.h
===================================================================
--- sys/arm64/include/vmparam.h
+++ sys/arm64/include/vmparam.h
@@ -156,6 +156,13 @@
 #define	VM_MIN_KERNEL_ADDRESS	(0xffff000000000000UL)
 #define	VM_MAX_KERNEL_ADDRESS	(0xffff008000000000UL)
 
+/* If true, addr is in the kernel address space */
+#define	ADDR_IS_KERNEL(addr)	(((addr) & (1ul << 55)) == (1ul << 55))
+/* If true, addr is in its canonical form (i.e. no TBI, PAC, etc.) */
+#define	ADDR_IS_CANONICAL(addr)	\
+    (((addr) & 0xffff000000000000UL) == 0 || \
+     ((addr) & 0xffff000000000000UL) == 0xffff000000000000UL)
+
 /* 95 TiB maximum for the direct map region */
 #define	DMAP_MIN_ADDRESS	(0xffffa00000000000UL)
 #define	DMAP_MAX_ADDRESS	(0xffffff0000000000UL)
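
Note (not part of the patch): the two new vmparam.h macros encode the address-space split the rest of the change relies on. Bit 55 selects the kernel half of the arm64 address space, and an address is canonical only when its top 16 bits are all zeros or all ones, i.e. it carries no TBI tag or PAC signature. The short standalone C program below is purely illustrative: it copies the macro definitions added above and classifies a few made-up sample addresses; it is not FreeBSD code and none of the addresses are meaningful.

#include <stddef.h>
#include <stdio.h>

/*
 * Standalone copies of the vmparam.h definitions above so the example
 * builds on its own.
 */
#define	ADDR_IS_KERNEL(addr)	(((addr) & (1ul << 55)) == (1ul << 55))
#define	ADDR_IS_CANONICAL(addr)	\
    (((addr) & 0xffff000000000000UL) == 0 || \
     ((addr) & 0xffff000000000000UL) == 0xffff000000000000UL)

int
main(void)
{
	/* Made-up example addresses: user, kernel, and a TBI-tagged VA. */
	unsigned long addrs[] = {
		0x0000000012345000UL,	/* canonical user address */
		0xffff000000100000UL,	/* canonical kernel address */
		0x3a00000012345000UL,	/* tagged, non-canonical address */
	};

	for (size_t i = 0; i < sizeof(addrs) / sizeof(addrs[0]); i++)
		printf("%#018lx canonical=%d kernel=%d\n", addrs[i],
		    ADDR_IS_CANONICAL(addrs[i]) ? 1 : 0,
		    ADDR_IS_KERNEL(addrs[i]) ? 1 : 0);
	return (0);
}

On these inputs the first two addresses report canonical=1 (with kernel=1 only for the 0xffff... value), while the tagged address reports canonical=0 and kernel=0, which is the case the new trap.c check routes to bad_far.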