Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -1365,16 +1365,42 @@
 	return (m);
 }
 
-vm_paddr_t
-pmap_kextract(vm_offset_t va)
+/*
+ * Walks the page tables to translate a kernel virtual address to a
+ * physical address. Returns true if the kva is valid and stores the
+ * physical address in pa if it is not NULL.
+ */
+bool
+pmap_klookup(vm_offset_t va, vm_paddr_t *pa)
 {
 	pt_entry_t *pte, tpte;
+	register_t intr;
+	uint64_t par;
+
+	/*
+	 * Disable interrupts so we don't get interrupted between asking
+	 * for address translation, and getting the result back.
+	 */
+	intr = intr_disable();
+	par = arm64_address_translate_s1e1r(va);
+	intr_restore(intr);
+
+	if (PAR_SUCCESS(par)) {
+		if (pa != NULL)
+			*pa = (par & PAR_PA_MASK) | (va & PAR_LOW_MASK);
+		return (true);
+	}
+
+	/*
+	 * Fall back to walking the page table. The address translation
+	 * instruction may fail when the page is in a break-before-make
+	 * sequence. As we only clear the valid bit in said sequence we
+	 * can walk the page table to find the physical address.
+	 */
 
-	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
-		return (DMAP_TO_PHYS(va));
 	pte = pmap_l1(kernel_pmap, va);
 	if (pte == NULL)
-		return (0);
+		return (false);
 
 	/*
 	 * A concurrent pmap_update_entry() will clear the entry's valid bit
@@ -1384,20 +1410,41 @@
 	 */
 	tpte = pmap_load(pte);
 	if (tpte == 0)
-		return (0);
-	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK)
-		return ((tpte & ~ATTR_MASK) | (va & L1_OFFSET));
+		return (false);
+	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
+		if (pa != NULL)
+			*pa = (tpte & ~ATTR_MASK) | (va & L1_OFFSET);
+		return (true);
+	}
 	pte = pmap_l1_to_l2(&tpte, va);
 	tpte = pmap_load(pte);
 	if (tpte == 0)
-		return (0);
-	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK)
-		return ((tpte & ~ATTR_MASK) | (va & L2_OFFSET));
+		return (false);
+	if ((tpte & ATTR_DESCR_TYPE_MASK) == ATTR_DESCR_TYPE_BLOCK) {
+		if (pa != NULL)
+			*pa = (tpte & ~ATTR_MASK) | (va & L2_OFFSET);
+		return (true);
+	}
 	pte = pmap_l2_to_l3(&tpte, va);
 	tpte = pmap_load(pte);
 	if (tpte == 0)
+		return (false);
+	if (pa != NULL)
+		*pa = (tpte & ~ATTR_MASK) | (va & L3_OFFSET);
+	return (true);
+}
+
+vm_paddr_t
+pmap_kextract(vm_offset_t va)
+{
+	vm_paddr_t pa;
+
+	if (va >= DMAP_MIN_ADDRESS && va < DMAP_MAX_ADDRESS)
+		return (DMAP_TO_PHYS(va));
+
+	if (pmap_klookup(va, &pa) == false)
 		return (0);
-	return ((tpte & ~ATTR_MASK) | (va & L3_OFFSET));
+	return (pa);
 }
 
 /***************************************************
@@ -6833,7 +6880,7 @@
 			 * critical section. Therefore, we must check the
 			 * address without acquiring the kernel pmap's lock.
 			 */
-			if (pmap_kextract(far) != 0)
+			if (pmap_klookup(far, NULL))
 				rv = KERN_SUCCESS;
 		} else {
 			PMAP_LOCK(pmap);
Index: sys/arm64/include/armreg.h
===================================================================
--- sys/arm64/include/armreg.h
+++ sys/arm64/include/armreg.h
@@ -743,6 +743,7 @@
 #define	PAR_F			(0x1 << PAR_F_SHIFT)
 #define	PAR_SUCCESS(x)		(((x) & PAR_F) == 0)
 /* When PAR_F == 0 (success) */
+#define	PAR_LOW_MASK		0xfff
 #define	PAR_SH_SHIFT		7
 #define	PAR_SH_MASK		(0x3 << PAR_SH_SHIFT)
 #define	PAR_NS_SHIFT		9
Index: sys/arm64/include/pmap.h
===================================================================
--- sys/arm64/include/pmap.h
+++ sys/arm64/include/pmap.h
@@ -167,6 +167,7 @@
 int	pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
 void	pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode);
 void	pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
+bool	pmap_klookup(vm_offset_t va, vm_paddr_t *pa);
 vm_paddr_t pmap_kextract(vm_offset_t va);
 void	pmap_kremove(vm_offset_t);
 void	pmap_kremove_device(vm_offset_t, vm_size_t);
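
Reviewer note: the core of the change is that a successful AT S1E1R translation lets pmap_klookup() build the physical address from PAR_EL1 instead of walking the page tables. The sketch below is a minimal, standalone illustration of that address composition, not kernel code: PAR_LOW_MASK matches the armreg.h hunk above, while the PAR_PA_MASK value (the PA field is assumed to sit at bits 47:12) and the sample PAR/VA values are assumptions made only for the example.

/*
 * Standalone sketch (not part of the patch) of the PAR_EL1 -> physical
 * address composition performed on the PAR_SUCCESS() path above.
 * PAR_LOW_MASK is the value added in armreg.h; PAR_PA_MASK here is an
 * assumption that the PA field occupies bits 47:12, and the par/va
 * values in main() are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define	PAR_LOW_MASK	0xfffULL		/* page offset kept from the VA */
#define	PAR_PA_MASK	(0xfffffffffULL << 12)	/* assumed PA field, bits 47:12 */

static uint64_t
compose_pa(uint64_t par, uint64_t va)
{
	/* Physical frame from PAR_EL1, byte offset from the virtual address. */
	return ((par & PAR_PA_MASK) | (va & PAR_LOW_MASK));
}

int
main(void)
{
	uint64_t par = 0x0000000087654000ULL;	/* example PAR_EL1, F bit clear */
	uint64_t va = 0xffff000000abc123ULL;	/* example kernel VA */

	/* Prints pa = 0x87654123: frame 0x87654000 plus offset 0x123. */
	printf("pa = 0x%jx\n", (uintmax_t)compose_pa(par, va));
	return (0);
}

A side effect visible in the last pmap.c hunk: because pmap_klookup() reports success as a bool and takes an optional output pointer, the fault handler can probe whether a kernel address is mapped by passing NULL for pa, and a mapping whose physical address happens to be 0 is no longer misread as a failure (pmap_kextract() returns 0 in both cases).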