Index: sys/arm64/arm64/pmap.c
===================================================================
--- sys/arm64/arm64/pmap.c
+++ sys/arm64/arm64/pmap.c
@@ -1452,7 +1452,7 @@
  ***************************************************/
 
 void
-pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
+pmap_kenter_arm64(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
 {
 	pd_entry_t *pde;
 	pt_entry_t *pte, attr;
@@ -1460,11 +1460,11 @@
 	int lvl;
 
 	KASSERT((pa & L3_OFFSET) == 0,
-	    ("pmap_kenter: Invalid physical address"));
+	    ("%s: Invalid physical address", __func__));
 	KASSERT((sva & L3_OFFSET) == 0,
-	    ("pmap_kenter: Invalid virtual address"));
+	    ("%s: Invalid virtual address", __func__));
 	KASSERT((size & PAGE_MASK) == 0,
-	    ("pmap_kenter: Mapping is not page-sized"));
+	    ("%s: Mapping is not page-sized", __func__));
 
 	attr = ATTR_DEFAULT | ATTR_S1_AP(ATTR_S1_AP_RW) | ATTR_S1_XN |
 	    ATTR_S1_IDX(mode) | L3_PAGE;
@@ -1472,8 +1472,8 @@
 	while (size != 0) {
 		pde = pmap_pde(kernel_pmap, va, &lvl);
 		KASSERT(pde != NULL,
-		    ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
-		KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));
+		    ("%s: Invalid page entry, va: 0x%lx", __func__, va));
+		KASSERT(lvl == 2, ("%s: Invalid level %d", __func__, lvl));
 
 		pte = pmap_l2_to_l3(pde, va);
 		pmap_load_store(pte, (pa & ~L3_OFFSET) | attr);
@@ -1489,7 +1489,7 @@
 pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
 {
 
-	pmap_kenter(sva, size, pa, VM_MEMATTR_DEVICE);
+	pmap_kenter_arm64(sva, size, pa, VM_MEMATTR_DEVICE);
 }
 
 /*
@@ -5969,7 +5969,7 @@
 		/* L3 table is linked */
 		va = trunc_page(va);
 		pa = trunc_page(pa);
-		pmap_kenter(va, size, pa, memory_mapping_mode(pa));
+		pmap_kenter_arm64(va, size, pa, memory_mapping_mode(pa));
 	}
 	return ((void *)(va + offset));
 }
@@ -6230,7 +6230,7 @@
 	    (oldl1 & ~ATTR_DESCR_MASK) | L2_BLOCK));
 
 	if (tmpl1 != 0) {
-		pmap_kenter(tmpl1, PAGE_SIZE,
+		pmap_kenter_arm64(tmpl1, PAGE_SIZE,
 		    DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET,
 		    VM_MEMATTR_WRITE_BACK);
 		l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
@@ -6374,7 +6374,7 @@
 	 * Map the temporary page so we don't lose access to the l2 table.
 	 */
 	if (tmpl2 != 0) {
-		pmap_kenter(tmpl2, PAGE_SIZE,
+		pmap_kenter_arm64(tmpl2, PAGE_SIZE,
 		    DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET,
 		    VM_MEMATTR_WRITE_BACK);
 		l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
Index: sys/arm64/include/pmap.h
===================================================================
--- sys/arm64/include/pmap.h
+++ sys/arm64/include/pmap.h
@@ -165,7 +165,8 @@
 void	pmap_activate_vm(pmap_t);
 void	pmap_bootstrap(vm_offset_t, vm_offset_t, vm_paddr_t, vm_size_t);
 int	pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
-void	pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode);
+void	pmap_kenter_arm64(vm_offset_t sva, vm_size_t size, vm_paddr_t pa,
+	    int mode);
 void	pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t);
 bool	pmap_klookup(vm_offset_t va, vm_paddr_t *pa);
 vm_paddr_t pmap_kextract(vm_offset_t va);
Index: sys/dev/pci/controller/pci_n1sdp.c
===================================================================
--- sys/dev/pci/controller/pci_n1sdp.c
+++ sys/dev/pci/controller/pci_n1sdp.c
@@ -95,7 +95,7 @@
 		printf("%s: Can't allocate KVA memory.", __func__);
 		return (ENXIO);
 	}
-	pmap_kenter(vaddr, (vm_size_t)BDF_TABLE_SIZE, paddr,
+	pmap_kenter_arm64(vaddr, (vm_size_t)BDF_TABLE_SIZE, paddr,
 	    VM_MEMATTR_UNCACHEABLE);
 
 	shared_data = (struct pcie_discovery_data *)vaddr;
Index: sys/kern/subr_devmap.c
===================================================================
--- sys/kern/subr_devmap.c
+++ sys/kern/subr_devmap.c
@@ -315,7 +315,7 @@
 	if (!va)
 		panic("pmap_mapdev: Couldn't alloc kernel virtual memory");
 
-	pmap_kenter(va, size, pa, ma);
+	pmap_kenter_arm64(va, size, pa, ma);
 	return ((void *)(va + offset));
 }