diff --git a/sys/amd64/amd64/efirt_machdep.c b/sys/amd64/amd64/efirt_machdep.c
--- a/sys/amd64/amd64/efirt_machdep.c
+++ b/sys/amd64/amd64/efirt_machdep.c
@@ -235,7 +235,7 @@
 			    "attributes unsupported\n", i);
 			mode = VM_MEMATTR_UNCACHEABLE;
 		}
-		bits = pmap_cache_bits(kernel_pmap, mode, FALSE) | X86_PG_RW |
+		bits = pmap_cache_bits(kernel_pmap, mode, false) | X86_PG_RW |
 		    X86_PG_V;
 		VM_OBJECT_WLOCK(obj_1t1_pt);
 		for (va = p->md_phys, idx = 0; idx < p->md_pages; idx++,
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -1766,7 +1766,7 @@
 
 	pte = vtopte(va);
 	pte_store(pte, pa | PG_RW | PG_V | pmap_cache_bits(kernel_pmap,
-	    mode, 0));
+	    mode, false));
 }
 
 /*
@@ -1862,7 +1862,7 @@
 	while (pte < endpte) {
 		m = *ma++;
 		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(kernel_pmap,
-		    m->md.pat_mode, 0);
+		    m->md.pat_mode, false);
 		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
 			oldpte |= *pte;
 			pte_store(pte, pa | pg_nx | PG_RW | PG_V);
@@ -3918,8 +3918,8 @@
 	pd_entry_t newpde;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) |
-	    PG_PS | PG_V;
+	newpde = VM_PAGE_TO_PHYS(m) |
+	    pmap_cache_bits(pmap, m->md.pat_mode, true) | PG_PS | PG_V;
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		newpde |= PG_MANAGED;
 #ifdef PMAP_PAE_COMP
@@ -4233,7 +4233,7 @@
 	pmap->pm_stats.resident_count++;
 
 	newpte = VM_PAGE_TO_PHYS(m) | PG_V |
-	    pmap_cache_bits(pmap, m->md.pat_mode, 0);
+	    pmap_cache_bits(pmap, m->md.pat_mode, false);
 	if ((m->oflags & VPO_UNMANAGED) == 0)
 		newpte |= PG_MANAGED;
 #ifdef PMAP_PAE_COMP
@@ -4339,7 +4339,7 @@
 		 * "pa" will not affect the termination of this loop.
 		 */
 		PMAP_LOCK(pmap);
-		for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1);
+		for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, true);
 		    pa < ptepa + size; pa += NBPDR) {
 			pde = pmap_pde(pmap, addr);
 			if (*pde == 0) {
@@ -4596,7 +4596,7 @@
 	if (*cmap_pte2)
 		panic("pmap_zero_page: CMAP2 busy");
 	*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
-	    pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
+	    pmap_cache_bits(kernel_pmap, m->md.pat_mode, false);
 	invlcaddr(pc->pc_cmap_addr2);
 	pagezero(pc->pc_cmap_addr2);
 	*cmap_pte2 = 0;
@@ -4627,7 +4627,7 @@
 	if (*cmap_pte2)
 		panic("pmap_zero_page_area: CMAP2 busy");
 	*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
-	    pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0);
+	    pmap_cache_bits(kernel_pmap, m->md.pat_mode, false);
 	invlcaddr(pc->pc_cmap_addr2);
 	if (off == 0 && size == PAGE_SIZE)
 		pagezero(pc->pc_cmap_addr2);
@@ -4657,10 +4657,10 @@
 	if (*cmap_pte2)
 		panic("pmap_copy_page: CMAP2 busy");
 	*cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
-	    pmap_cache_bits(kernel_pmap, src->md.pat_mode, 0);
+	    pmap_cache_bits(kernel_pmap, src->md.pat_mode, false);
 	invlcaddr(pc->pc_cmap_addr1);
 	*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
-	    pmap_cache_bits(kernel_pmap, dst->md.pat_mode, 0);
+	    pmap_cache_bits(kernel_pmap, dst->md.pat_mode, false);
 	invlcaddr(pc->pc_cmap_addr2);
 	bcopy(pc->pc_cmap_addr1, pc->pc_cmap_addr2, PAGE_SIZE);
 	*cmap_pte1 = 0;
@@ -4697,10 +4697,11 @@
 		b_pg_offset = b_offset & PAGE_MASK;
 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
 		*cmap_pte1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A |
-		    pmap_cache_bits(kernel_pmap, a_pg->md.pat_mode, 0);
+		    pmap_cache_bits(kernel_pmap, a_pg->md.pat_mode, false);
 		invlcaddr(pc->pc_cmap_addr1);
 		*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A |
-		    PG_M | pmap_cache_bits(kernel_pmap, b_pg->md.pat_mode, 0);
+		    PG_M | pmap_cache_bits(kernel_pmap, b_pg->md.pat_mode,
+		    false);
 		invlcaddr(pc->pc_cmap_addr2);
 		a_cp = pc->pc_cmap_addr1 + a_pg_offset;
 		b_cp = pc->pc_cmap_addr2 + b_pg_offset;
@@ -5663,7 +5664,7 @@
 			panic("pmap_flush_page: CMAP2 busy");
 		*cmap_pte2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
 		    pmap_cache_bits(kernel_pmap, m->md.pat_mode,
-		    0);
+		    false);
 		invlcaddr(pc->pc_cmap_addr2);
 		sva = (vm_offset_t)pc->pc_cmap_addr2;
 		eva = sva + PAGE_SIZE;
@@ -5724,8 +5725,8 @@
 	if (base < VM_MIN_KERNEL_ADDRESS)
 		return (EINVAL);
 
-	cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1);
-	cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0);
+	cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, true);
+	cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, false);
 	changed = false;
 
 	/*
@@ -5939,7 +5940,7 @@
 	KASSERT(*pte == 0,
 	    ("pmap_quick_enter_page: PTE busy %#jx", (uintmax_t)*pte));
 	*pte = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
-	    pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(m), 0);
+	    pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(m), false);
 	invlpg(qaddr);
 
 	return (qaddr);
@@ -6085,7 +6086,7 @@
 	ptep = vtopte(sf->kva);
 	opte = *ptep;
 	*ptep = VM_PAGE_TO_PHYS(sf->m) | PG_RW | PG_V |
-	    pmap_cache_bits(kernel_pmap, sf->m->md.pat_mode, 0);
+	    pmap_cache_bits(kernel_pmap, sf->m->md.pat_mode, false);
 
 	/*
 	 * Avoid unnecessary TLB invalidations: If the sf_buf's old
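Note on the pattern these hunks follow: the third argument of pmap_cache_bits() is a boolean selecting whether the returned PAT cache bits are laid out for a PDE (superpage) or a PTE (4 KB page) mapping, so the callers now spell it as C99 false/true instead of 0/1 or the legacy FALSE macro. A minimal sketch of the calling convention; the parameter name "is_pde" is assumed here for illustration and is not taken from this diff:

	/* Assumed prototype; "is_pde" is an illustrative name only. */
	int	pmap_cache_bits(pmap_t pmap, int mode, bool is_pde);

	/* 4 KB leaf mapping: PTE-format PAT bits. */
	pte_bits = pmap_cache_bits(kernel_pmap, mode, false);
	/* Superpage mapping: PDE-format PAT bits (PAT bit position differs). */
	pde_bits = pmap_cache_bits(kernel_pmap, mode, true);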