Index: head/sys/powerpc/booke/pmap.c
===================================================================
--- head/sys/powerpc/booke/pmap.c
+++ head/sys/powerpc/booke/pmap.c
@@ -340,6 +340,8 @@
 static void		mmu_booke_scan_init(mmu_t);
 static vm_offset_t	mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m);
 static void		mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr);
+static int		mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr,
+    vm_size_t sz, vm_memattr_t mode);
 
 static mmu_method_t mmu_booke_methods[] = {
 	/* pmap dispatcher interface */
@@ -392,6 +394,7 @@
 	MMUMETHOD(mmu_kextract,		mmu_booke_kextract),
 /*	MMUMETHOD(mmu_kremove,		mmu_booke_kremove),	*/
 	MMUMETHOD(mmu_unmapdev,		mmu_booke_unmapdev),
+	MMUMETHOD(mmu_change_attr,	mmu_booke_change_attr),
 
 	/* dumpsys() support */
 	MMUMETHOD(mmu_dumpsys_map,	mmu_booke_dumpsys_map),
@@ -419,6 +422,8 @@
 		return (MAS2_I);
 	case VM_MEMATTR_WRITE_THROUGH:
 		return (MAS2_W | MAS2_M);
+	case VM_MEMATTR_CACHEABLE:
+		return (MAS2_M);
 	}
 }
 
@@ -2900,6 +2905,63 @@
 	return (0);
 }
 
+/*
+ * Change the memory attribute (WIMGE bits) of the kernel mapping covering
+ * [addr, addr + sz).  Handles both TLB1 (large static) mappings and regular
+ * page-table mappings of the kernel pmap.  Returns 0 on success or EINVAL
+ * if the range is not fully and exactly mapped.
+ */
+static int
+mmu_booke_change_attr(mmu_t mmu, vm_offset_t addr, vm_size_t sz,
+    vm_memattr_t mode)
+{
+	vm_offset_t va;
+	pte_t *pte;
+	int i, j;
+
+	/* Check TLB1 mappings */
+	for (i = 0; i < tlb1_idx; i++) {
+		if (!(tlb1[i].mas1 & MAS1_VALID))
+			continue;
+		if (addr >= tlb1[i].virt && addr < tlb1[i].virt + tlb1[i].size)
+			break;
+	}
+	if (i < tlb1_idx) {
+		/* Only allow full mappings to be modified for now. */
+		/* Validate the range. */
+		for (j = i, va = addr; va < addr + sz; va += tlb1[j].size, j++) {
+			if (va != tlb1[j].virt || (sz - (va - addr) < tlb1[j].size))
+				return (EINVAL);
+		}
+		for (va = addr; va < addr + sz; va += tlb1[i].size, i++) {
+			tlb1[i].mas2 &= ~MAS2_WIMGE_MASK;
+			tlb1[i].mas2 |= tlb_calc_wimg(tlb1[i].phys, mode);
+			/*
+			 * Write it out to the TLB.  Should really re-sync with other
+			 * cores.
+			 */
+			tlb1_write_entry(i);
+		}
+		return (0);
+	}
+
+	/* Not in TLB1, try through pmap */
+	/* First validate the range. */
+	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
+		pte = pte_find(mmu, kernel_pmap, va);
+		if (pte == NULL || !PTE_ISVALID(pte))
+			return (EINVAL);
+	}
+
+	mtx_lock_spin(&tlbivax_mutex);
+	tlb_miss_lock();
+	for (va = addr; va < addr + sz; va += PAGE_SIZE) {
+		pte = pte_find(mmu, kernel_pmap, va);
+		*pte &= ~(PTE_MAS2_MASK << PTE_MAS2_SHIFT);
+		/* Shift the computed WIMG bits into place; shifting 'mode'
+		 * itself (the old code) produced a bogus memattr lookup. */
+		*pte |= tlb_calc_wimg(PTE_PA(pte), mode) << PTE_MAS2_SHIFT;
+		tlb0_flush_entry(va);
+	}
+	tlb_miss_unlock();
+	mtx_unlock_spin(&tlbivax_mutex);
+
+	return (0);
+}
+
 /**************************************************************************/
 /* TID handling */
 /**************************************************************************/
Index: head/sys/powerpc/include/pmap.h
===================================================================
--- head/sys/powerpc/include/pmap.h
+++ head/sys/powerpc/include/pmap.h
@@ -238,6 +238,7 @@
 void		*pmap_mapdev_attr(vm_paddr_t, vm_size_t, vm_memattr_t);
 void		pmap_unmapdev(vm_offset_t, vm_size_t);
 void		pmap_page_set_memattr(vm_page_t, vm_memattr_t);
+int		pmap_change_attr(vm_offset_t, vm_size_t, vm_memattr_t);
 void		pmap_deactivate(struct thread *);
 vm_paddr_t	pmap_kextract(vm_offset_t);
 int		pmap_dev_direct_mapped(vm_paddr_t, vm_size_t);
Index: head/sys/powerpc/include/tlb.h
===================================================================
--- head/sys/powerpc/include/tlb.h
+++ head/sys/powerpc/include/tlb.h
@@ -74,6 +74,7 @@
 #define	MAS2_M			0x00000004
 #define	MAS2_G			0x00000002
 #define	MAS2_E			0x00000001
+#define	MAS2_WIMGE_MASK		0x0000001F
 
 #define	MAS3_RPN		0xFFFFF000
 #define	MAS3_RPN_SHIFT		12
Index: head/sys/powerpc/powerpc/mmu_if.m
===================================================================
--- head/sys/powerpc/powerpc/mmu_if.m
+++ head/sys/powerpc/powerpc/mmu_if.m
@@ -124,6 +124,12 @@
 	{
 		return;
 	}
+
+	static int mmu_null_change_attr(mmu_t mmu, vm_offset_t va,
+	    vm_size_t sz, vm_memattr_t mode)
+	{
+		return (0);
+	}
 };
 
@@ -956,3 +962,20 @@
 	vm_offset_t _va;
 };
 
+/**
+ * @brief Change the specified virtual address range's memory type.
+ *
+ * @param _va The virtual base address of the range to change
+ *
+ * @param _sz Size of the region to change, in bytes
+ *
+ * @param _mode New memory attribute (VM_MEMATTR_*) to apply to the range
+ *
+ * @retval error	0 on success, EINVAL or ENOMEM on error.
+ */
+METHOD int change_attr {
+	mmu_t		_mmu;
+	vm_offset_t	_va;
+	vm_size_t	_sz;
+	vm_memattr_t	_mode;
+} DEFAULT mmu_null_change_attr;
Index: head/sys/powerpc/powerpc/pmap_dispatch.c
===================================================================
--- head/sys/powerpc/powerpc/pmap_dispatch.c
+++ head/sys/powerpc/powerpc/pmap_dispatch.c
@@ -564,6 +564,13 @@
 	MMU_QUICK_REMOVE_PAGE(mmu_obj, addr);
 }
 
+/* Dispatch pmap_change_attr(9) to the active MMU implementation. */
+int
+pmap_change_attr(vm_offset_t addr, vm_size_t size, vm_memattr_t mode)
+{
+	CTR4(KTR_PMAP, "%s(%#x, %#zx, %d)", __func__, addr, size, mode); /* NOTE(review): %#x truncates addr if vm_offset_t is 64-bit — confirm target ABI */
+	return (MMU_CHANGE_ATTR(mmu_obj, addr, size, mode));
+}
+
 /*
  * MMU install routines.  Highest priority wins, equal priority also
  * overrides allowing last-set to win.