diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -178,14 +178,14 @@ #define PMAP_MEMDOM 1 #endif -static __inline boolean_t +static __inline bool pmap_type_guest(pmap_t pmap) { return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI)); } -static __inline boolean_t +static __inline bool pmap_emulate_ad_bits(pmap_t pmap) { @@ -1259,10 +1259,10 @@ static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte); static int pmap_change_props_locked(vm_offset_t va, vm_size_t size, vm_prot_t prot, int mode, int flags); -static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); -static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, +static bool pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); +static bool pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, struct rwlock **lockp); -static boolean_t pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, +static bool pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va); static int pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, struct rwlock **lockp); @@ -1285,7 +1285,7 @@ static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, vm_page_t mpte, struct rwlock **lockp); #endif -static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, +static bool pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot); static void pmap_pte_props(pt_entry_t *pte, u_long bits, u_long mask); static void pmap_pti_add_kva_locked(vm_offset_t sva, vm_offset_t eva, @@ -1300,10 +1300,10 @@ static vm_page_t pmap_remove_pt_page(pmap_t pmap, vm_offset_t va); static void pmap_remove_page(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, struct spglist *free); -static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, +static bool pmap_remove_ptes(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, pd_entry_t *pde, struct spglist *free, struct rwlock **lockp); -static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, +static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp); static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde); @@ -2636,7 +2636,7 @@ return (entry); } -boolean_t +bool pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode) { @@ -2649,7 +2649,7 @@ * caching mode. 
*/ int -pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde) +pmap_cache_bits(pmap_t pmap, int mode, bool is_pde) { int cache_bits, pat_flag, pat_idx; @@ -2687,7 +2687,7 @@ } static int -pmap_cache_mask(pmap_t pmap, boolean_t is_pde) +pmap_cache_mask(pmap_t pmap, bool is_pde) { int mask; @@ -3767,7 +3767,7 @@ spa = dmaplimit; } - pte_bits = pmap_cache_bits(kernel_pmap, mattr, 0) | X86_PG_RW | + pte_bits = pmap_cache_bits(kernel_pmap, mattr, false) | X86_PG_RW | X86_PG_V; error = vmem_alloc(kernel_arena, PAGE_SIZE, M_BESTFIT | M_WAITOK, &vaddr); @@ -3932,7 +3932,7 @@ int cache_bits; pte = vtopte(va); - cache_bits = pmap_cache_bits(kernel_pmap, mode, 0); + cache_bits = pmap_cache_bits(kernel_pmap, mode, false); pte_store(pte, pa | pg_g | pg_nx | X86_PG_A | X86_PG_M | X86_PG_RW | X86_PG_V | cache_bits); } @@ -3989,7 +3989,7 @@ endpte = pte + count; while (pte < endpte) { m = *ma++; - cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0); + cache_bits = pmap_cache_bits(kernel_pmap, m->md.pat_mode, false); pa = VM_PAGE_TO_PHYS(m) | cache_bits; if ((*pte & (PG_FRAME | X86_PG_PTE_CACHE)) != pa) { oldpte |= *pte; @@ -4031,8 +4031,7 @@ * physical memory manager after the TLB has been updated. */ static __inline void -pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, - boolean_t set_PG_ZERO) +pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO) { if (set_PG_ZERO) @@ -4076,19 +4075,19 @@ /* * Decrements a page table page's reference count, which is used to record the * number of valid page table entries within the page. If the reference count - * drops to zero, then the page table page is unmapped. Returns TRUE if the - * page table page was unmapped and FALSE otherwise. + * drops to zero, then the page table page is unmapped. Returns true if the + * page table page was unmapped and false otherwise. */ -static inline boolean_t +static inline bool pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { --m->ref_count; if (m->ref_count == 0) { _pmap_unwire_ptp(pmap, va, m, free); - return (TRUE); + return (true); } else - return (FALSE); + return (false); } static void @@ -4152,7 +4151,7 @@ * Put page on a list so that it is released after * *ALL* TLB shootdown is done */ - pmap_add_delayed_free_list(m, free, TRUE); + pmap_add_delayed_free_list(m, free, true); } /* @@ -4288,14 +4287,14 @@ */ pm_pml5[pmap_pml5e_index(UPT_MAX_ADDRESS)] = KPML4phys | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M | pg_g | - pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE); + pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, false); /* * Install self-referential address mapping entry. */ pm_pml5[PML5PML5I] = VM_PAGE_TO_PHYS(pml5pg) | X86_PG_RW | X86_PG_V | X86_PG_M | X86_PG_A | - pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE); + pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, false); } static void @@ -4325,7 +4324,7 @@ pm_pml5u[pmap_pml5e_index(UPT_MAX_ADDRESS)] = pmap_kextract((vm_offset_t)pti_pml4) | X86_PG_V | X86_PG_RW | X86_PG_A | X86_PG_M | pg_g | - pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE); + pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, false); } /* Allocate a page table page and do related bookkeeping */ @@ -5816,7 +5815,7 @@ * Conditionally create the PV entry for a 4KB page mapping if the required * memory can be allocated without resorting to reclamation. 
*/ -static boolean_t +static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp) { @@ -5829,9 +5828,9 @@ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; - return (TRUE); + return (true); } else - return (FALSE); + return (false); } /* @@ -5879,11 +5878,11 @@ * Tries to demote a 2MB page mapping. If demotion fails, the 2MB page * mapping is invalidated. */ -static boolean_t +static bool pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) { struct rwlock *lock; - boolean_t rv; + bool rv; lock = NULL; rv = pmap_demote_pde_locked(pmap, pde, va, &lock); @@ -5936,7 +5935,7 @@ va, pmap); } -static boolean_t +static bool pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va, struct rwlock **lockp) { @@ -5953,7 +5952,7 @@ PG_M = pmap_modified_bit(pmap); PG_RW = pmap_rw_bit(pmap); PG_V = pmap_valid_bit(pmap); - PG_PTE_CACHE = pmap_cache_mask(pmap, 0); + PG_PTE_CACHE = pmap_cache_mask(pmap, false); PG_PKU_MASK = pmap_pku_mask_bit(pmap); PMAP_LOCK_ASSERT(pmap, MA_OWNED); @@ -5970,7 +5969,7 @@ KASSERT((oldpde & PG_W) == 0, ("pmap_demote_pde: a wired mapping is missing PG_A")); pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp); - return (FALSE); + return (false); } mpte = pmap_remove_pt_page(pmap, va); @@ -6007,7 +6006,7 @@ */ if (mpte == NULL) { pmap_demote_pde_abort(pmap, va, pde, oldpde, lockp); - return (FALSE); + return (false); } if (!in_kernel) @@ -6075,7 +6074,7 @@ counter_u64_add(pmap_pde_demotions, 1); CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#lx in pmap %p", va, pmap); - return (TRUE); + return (true); } /* @@ -6173,7 +6172,7 @@ KASSERT(mpte->ref_count == NPTEPG, ("pmap_remove_pde: pte page ref count error")); mpte->ref_count = 0; - pmap_add_delayed_free_list(mpte, free, FALSE); + pmap_add_delayed_free_list(mpte, free, false); } } return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free)); @@ -6559,12 +6558,12 @@ /* * pmap_protect_pde: do the things to protect a 2mpage in a process */ -static boolean_t +static bool pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) { pd_entry_t newpde, oldpde; vm_page_t m, mt; - boolean_t anychanged; + bool anychanged; pt_entry_t PG_G, PG_M, PG_RW; PG_G = pmap_global_bit(pmap); @@ -6574,7 +6573,7 @@ PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT((sva & PDRMASK) == 0, ("pmap_protect_pde: sva is not 2mpage aligned")); - anychanged = FALSE; + anychanged = false; retry: oldpde = newpde = *pde; if ((prot & VM_PROT_WRITE) == 0) { @@ -6599,7 +6598,7 @@ if ((oldpde & PG_G) != 0) pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); else - anychanged = TRUE; + anychanged = true; } return (anychanged); } @@ -6618,7 +6617,7 @@ pd_entry_t ptpaddr, *pde; pt_entry_t *pte, PG_G, PG_M, PG_RW, PG_V; pt_entry_t obits, pbits; - boolean_t anychanged; + bool anychanged; KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); if (prot == VM_PROT_NONE) { @@ -6634,7 +6633,7 @@ PG_M = pmap_modified_bit(pmap); PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); - anychanged = FALSE; + anychanged = false; /* * Although this function delays and batches the invalidation @@ -6690,7 +6689,7 @@ if (!atomic_cmpset_long(pdpe, obits, pbits)) /* PG_PS cannot be cleared under us, */ goto retry_pdpe; - anychanged = TRUE; + anychanged = true; } continue; } @@ -6722,7 +6721,7 @@ * invalidated by pmap_protect_pde(). 
*/ if (pmap_protect_pde(pmap, pde, sva, prot)) - anychanged = TRUE; + anychanged = true; continue; } else if (!pmap_demote_pde(pmap, pde, sva)) { /* @@ -6759,7 +6758,7 @@ if (obits & PG_G) pmap_invalidate_page(pmap, sva); else - anychanged = TRUE; + anychanged = true; } } } @@ -6800,7 +6799,7 @@ PG_V = pmap_valid_bit(pmap); PG_RW = pmap_rw_bit(pmap); PG_PKU_MASK = pmap_pku_mask_bit(pmap); - PG_PTE_CACHE = pmap_cache_mask(pmap, 0); + PG_PTE_CACHE = pmap_cache_mask(pmap, false); PMAP_LOCK_ASSERT(pmap, MA_OWNED); @@ -7058,7 +7057,7 @@ vm_paddr_t opa, pa; vm_page_t mpte, om; int rv; - boolean_t nosleep; + bool nosleep; PG_A = pmap_accessed_bit(pmap); PG_G = pmap_global_bit(pmap); @@ -7335,8 +7334,8 @@ PMAP_LOCK_ASSERT(pmap, MA_OWNED); PG_V = pmap_valid_bit(pmap); - newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(pmap, m->md.pat_mode, 1) | - PG_PS | PG_V; + newpde = VM_PAGE_TO_PHYS(m) | + pmap_cache_bits(pmap, m->md.pat_mode, true) | PG_PS | PG_V; if ((m->oflags & VPO_UNMANAGED) == 0) newpde |= PG_MANAGED; if ((prot & VM_PROT_EXECUTE) == 0) @@ -7681,7 +7680,7 @@ pmap_resident_count_adj(pmap, 1); newpte = VM_PAGE_TO_PHYS(m) | PG_V | - pmap_cache_bits(pmap, m->md.pat_mode, 0); + pmap_cache_bits(pmap, m->md.pat_mode, false); if ((m->oflags & VPO_UNMANAGED) == 0) newpte |= PG_MANAGED; if ((prot & VM_PROT_EXECUTE) == 0) @@ -7770,7 +7769,7 @@ * will not affect the termination of this loop. */ PMAP_LOCK(pmap); - for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, 1); + for (pa = ptepa | pmap_cache_bits(pmap, pat_mode, true); pa < ptepa + size; pa += NBPDR) { pde = pmap_alloc_pde(pmap, addr, &pdpg, NULL); if (pde == NULL) { @@ -8171,7 +8170,7 @@ vm_page_t pages[2]; vm_offset_t vaddr[2], a_pg_offset, b_pg_offset; int cnt; - boolean_t mapped; + bool mapped; while (xfersize > 0) { a_pg_offset = a_offset & PAGE_MASK; @@ -8180,12 +8179,12 @@ pages[1] = mb[b_offset >> PAGE_SHIFT]; cnt = min(xfersize, PAGE_SIZE - a_pg_offset); cnt = min(cnt, PAGE_SIZE - b_pg_offset); - mapped = pmap_map_io_transient(pages, vaddr, 2, FALSE); + mapped = pmap_map_io_transient(pages, vaddr, 2, false); a_cp = (char *)vaddr[0] + a_pg_offset; b_cp = (char *)vaddr[1] + b_pg_offset; bcopy(a_cp, b_cp, cnt); if (__predict_false(mapped)) - pmap_unmap_io_transient(pages, vaddr, 2, FALSE); + pmap_unmap_io_transient(pages, vaddr, 2, false); a_offset += cnt; b_offset += cnt; xfersize -= cnt; @@ -8199,23 +8198,23 @@ * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ -boolean_t +bool pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { struct md_page *pvh; struct rwlock *lock; pv_entry_t pv; int loops = 0; - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_page_exists_quick: page %p is not managed", m)); - rv = FALSE; + rv = false; lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { - rv = TRUE; + rv = true; break; } loops++; @@ -8226,7 +8225,7 @@ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { - rv = TRUE; + rv = true; break; } loops++; @@ -8304,17 +8303,17 @@ } /* - * Returns TRUE if the given page is mapped individually or as part of - * a 2mpage. Otherwise, returns FALSE. + * Returns true if the given page is mapped individually or as part of + * a 2mpage. Otherwise, returns false. 
*/ -boolean_t +bool pmap_page_is_mapped(vm_page_t m) { struct rwlock *lock; - boolean_t rv; + bool rv; if ((m->oflags & VPO_UNMANAGED) != 0) - return (FALSE); + return (false); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); rv = !TAILQ_EMPTY(&m->md.pv_list) || @@ -8368,7 +8367,7 @@ #ifdef PV_STATS int freed; #endif - boolean_t superpage; + bool superpage; vm_paddr_t pa; /* @@ -8418,7 +8417,7 @@ pte = pmap_pdpe_to_pde(pte, pv->pv_va); tpte = *pte; if ((tpte & (PG_PS | PG_V)) == PG_V) { - superpage = FALSE; + superpage = false; ptepde = tpte; pte = (pt_entry_t *)PHYS_TO_DMAP(tpte & PG_FRAME); @@ -8435,7 +8434,7 @@ * regular page could be mistaken for * a superpage. */ - superpage = TRUE; + superpage = true; } if ((tpte & PG_V) == 0) { @@ -8509,7 +8508,7 @@ KASSERT(mpte->ref_count == NPTEPG, ("pmap_remove_pages: pte page reference count error")); mpte->ref_count = 0; - pmap_add_delayed_free_list(mpte, &free, FALSE); + pmap_add_delayed_free_list(mpte, &free, false); } } else { pmap_resident_count_adj(pmap, -1); @@ -8546,8 +8545,8 @@ vm_page_free_pages_toq(&free, true); } -static boolean_t -pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified) +static bool +pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified) { struct rwlock *lock; pv_entry_t pv; @@ -8556,9 +8555,9 @@ pt_entry_t PG_A, PG_M, PG_RW, PG_V; pmap_t pmap; int md_gen, pvh_gen; - boolean_t rv; + bool rv; - rv = FALSE; + rv = false; lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); restart: @@ -8636,7 +8635,7 @@ * Return whether or not the specified physical page was modified * in any physical maps. */ -boolean_t +bool pmap_is_modified(vm_page_t m) { @@ -8647,8 +8646,8 @@ * If the page is not busied then this check is racy. */ if (!pmap_page_is_write_mapped(m)) - return (FALSE); - return (pmap_page_test_mappings(m, FALSE, TRUE)); + return (false); + return (pmap_page_test_mappings(m, false, true)); } /* @@ -8657,20 +8656,20 @@ * Return whether or not the specified virtual address is eligible * for prefault. */ -boolean_t +bool pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pd_entry_t *pde; pt_entry_t *pte, PG_V; - boolean_t rv; + bool rv; PG_V = pmap_valid_bit(pmap); /* - * Return TRUE if and only if the PTE for the specified virtual + * Return true if and only if the PTE for the specified virtual * address is allocated but invalid. */ - rv = FALSE; + rv = false; PMAP_LOCK(pmap); pde = pmap_pde(pmap, addr); if (pde != NULL && (*pde & (PG_PS | PG_V)) == PG_V) { @@ -8687,13 +8686,13 @@ * Return whether or not the specified physical page was referenced * in any physical maps. */ -boolean_t +bool pmap_is_referenced(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_referenced: page %p is not managed", m)); - return (pmap_page_test_mappings(m, TRUE, FALSE)); + return (pmap_page_test_mappings(m, true, false)); } /* @@ -8782,12 +8781,12 @@ pmap_delayed_invl_wait(m); } -static __inline boolean_t +static __inline bool safe_to_clear_referenced(pmap_t pmap, pt_entry_t pte) { if (!pmap_emulate_ad_bits(pmap)) - return (TRUE); + return (true); KASSERT(pmap->pm_type == PT_EPT, ("invalid pm_type %d", pmap->pm_type)); @@ -8797,16 +8796,16 @@ * if the EPT_PG_WRITE bit is set. */ if ((pte & EPT_PG_WRITE) != 0) - return (FALSE); + return (false); /* * XWR = 100 is allowed only if the PMAP_SUPPORTS_EXEC_ONLY is set. 
*/ if ((pte & EPT_PG_EXECUTE) == 0 || ((pmap->pm_flags & PMAP_SUPPORTS_EXEC_ONLY) != 0)) - return (TRUE); + return (true); else - return (FALSE); + return (false); } /* @@ -8842,7 +8841,7 @@ vm_paddr_t pa; int cleared, md_gen, not_cleared, pvh_gen; struct spglist free; - boolean_t demoted; + bool demoted; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_ts_referenced: page %p is not managed", m)); @@ -8910,7 +8909,7 @@ if (safe_to_clear_referenced(pmap, oldpde)) { atomic_clear_long(pde, PG_A); pmap_invalidate_page(pmap, pv->pv_va); - demoted = FALSE; + demoted = false; } else if (pmap_demote_pde_locked(pmap, pde, pv->pv_va, &lock)) { /* @@ -8921,7 +8920,7 @@ * this removal never frees a page * table page. */ - demoted = TRUE; + demoted = true; va += VM_PAGE_TO_PHYS(m) - (oldpde & PG_PS_FRAME); pte = pmap_pde_to_pte(pde, va); @@ -8929,7 +8928,7 @@ NULL, &lock); pmap_invalidate_page(pmap, va); } else - demoted = TRUE; + demoted = true; if (demoted) { /* @@ -9420,7 +9419,7 @@ /* * Tries to demote a 1GB page mapping. */ -static boolean_t +static bool pmap_demote_pdpe(pmap_t pmap, pdp_entry_t *pdpe, vm_offset_t va) { pdp_entry_t newpdpe, oldpdpe; @@ -9443,7 +9442,7 @@ if (pdpg == NULL) { CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx" " in pmap %p", va, pmap); - return (FALSE); + return (false); } pdpgpa = VM_PAGE_TO_PHYS(pdpg); firstpde = (pd_entry_t *)PHYS_TO_DMAP(pdpgpa); @@ -9475,7 +9474,7 @@ counter_u64_add(pmap_pdpe_demotions, 1); CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx" " in pmap %p", va, pmap); - return (TRUE); + return (true); } /* @@ -9818,12 +9817,12 @@ * is not mandatory. The caller may, however, request a TLB invalidation. */ void -pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate) +pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, bool invalidate) { pdp_entry_t *pdpe; pd_entry_t *pde; vm_offset_t va; - boolean_t changed; + bool changed; if (len == 0) return; @@ -9832,7 +9831,7 @@ ("pmap_demote_DMAP: base is not a multiple of len")); if (len < NBPDP && base < dmaplimit) { va = PHYS_TO_DMAP(base); - changed = FALSE; + changed = false; PMAP_LOCK(kernel_pmap); pdpe = pmap_pdpe(kernel_pmap, va); if ((*pdpe & X86_PG_V) == 0) @@ -9840,7 +9839,7 @@ if ((*pdpe & PG_PS) != 0) { if (!pmap_demote_pdpe(kernel_pmap, pdpe, va)) panic("pmap_demote_DMAP: PDPE failed"); - changed = TRUE; + changed = true; } if (len < NBPDR) { pde = pmap_pdpe_to_pde(pdpe, va); @@ -9849,7 +9848,7 @@ if ((*pde & PG_PS) != 0) { if (!pmap_demote_pde(kernel_pmap, pde, va)) panic("pmap_demote_DMAP: PDE failed"); - changed = TRUE; + changed = true; } } if (changed && invalidate) @@ -10451,7 +10450,7 @@ invlpg(qframe); pte_store(vtopte(qframe), paddr | X86_PG_RW | X86_PG_V | X86_PG_A | - X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, 0)); + X86_PG_M | pmap_cache_bits(kernel_pmap, m->md.pat_mode, false)); return (qframe); } @@ -10664,7 +10663,7 @@ MPASS(*pdpe == 0); *pdpe = pa | pg_g | X86_PG_PS | X86_PG_RW | X86_PG_V | X86_PG_A | pg_nx | - pmap_cache_bits(kernel_pmap, mattr, TRUE); + pmap_cache_bits(kernel_pmap, mattr, true); inc = NBPDP; } else if (len >= NBPDR && (pa & PDRMASK) == 0 && (va & PDRMASK) == 0) { @@ -10672,7 +10671,7 @@ MPASS(*pde == 0); *pde = pa | pg_g | X86_PG_PS | X86_PG_RW | X86_PG_V | X86_PG_A | pg_nx | - pmap_cache_bits(kernel_pmap, mattr, TRUE); + pmap_cache_bits(kernel_pmap, mattr, true); PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pde))-> ref_count++; inc = NBPDR; @@ -10681,7 +10680,7 @@ MPASS(*pte == 0); *pte = pa | pg_g | X86_PG_RW | X86_PG_V | X86_PG_A 
| pg_nx | pmap_cache_bits(kernel_pmap, - mattr, FALSE); + mattr, false); PHYS_TO_VM_PAGE(DMAP_TO_PHYS((uintptr_t)pte))-> ref_count++; inc = PAGE_SIZE; @@ -11195,7 +11194,7 @@ pa = pmap_kextract(sva); ptev = pa | X86_PG_RW | X86_PG_V | X86_PG_A | X86_PG_G | (exec ? 0 : pg_nx) | pmap_cache_bits(kernel_pmap, - VM_MEMATTR_DEFAULT, FALSE); + VM_MEMATTR_DEFAULT, false); if (*pte == 0) { pte_store(pte, ptev); pmap_pti_wire_pte(pte); diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h --- a/sys/amd64/include/pmap.h +++ b/sys/amd64/include/pmap.h @@ -450,10 +450,10 @@ void pmap_activate_sw(struct thread *); void pmap_allow_2m_x_ept_recalculate(void); void pmap_bootstrap(vm_paddr_t *); -int pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde); +int pmap_cache_bits(pmap_t pmap, int mode, bool is_pde); int pmap_change_attr(vm_offset_t, vm_size_t, int); int pmap_change_prot(vm_offset_t, vm_size_t, vm_prot_t); -void pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate); +void pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, bool invalidate); void pmap_flush_cache_range(vm_offset_t, vm_offset_t); void pmap_flush_cache_phys_range(vm_paddr_t, vm_paddr_t, vm_memattr_t); void pmap_init_pat(void); @@ -469,7 +469,7 @@ void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int); void *pmap_mapdev_pciecfg(vm_paddr_t pa, vm_size_t size); bool pmap_not_in_di(void); -boolean_t pmap_page_is_mapped(vm_page_t m); +bool pmap_page_is_mapped(vm_page_t m); void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma); void pmap_page_set_memattr_noflush(vm_page_t m, vm_memattr_t ma); void pmap_pinit_pml4(vm_page_t); diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c --- a/sys/arm/arm/pmap-v6.c +++ b/sys/arm/arm/pmap-v6.c @@ -328,7 +328,7 @@ #define PMAP_ENTER_NOREPLACE 0x2000000 /* Don't replace mappings. */ static __inline void pt2_wirecount_init(vm_page_t m); -static boolean_t pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, +static bool pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va); static int pmap_enter_pte1(pmap_t pmap, vm_offset_t va, pt1_entry_t pte1, u_int flags, vm_page_t m); @@ -401,7 +401,7 @@ CTASSERT(VM_MEMATTR_WRITE_THROUGH == 4); #define VM_MEMATTR_END (VM_MEMATTR_WRITE_THROUGH + 1) -boolean_t +bool pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode) { @@ -2298,15 +2298,15 @@ } #ifdef INVARIANTS -static boolean_t +static bool pt2tab_user_is_empty(pt2_entry_t *tab) { u_int i, end; end = pt2tab_index(VM_MAXUSER_ADDRESS); for (i = 0; i < end; i++) - if (tab[i] != 0) return (FALSE); - return (TRUE); + if (tab[i] != 0) return (false); + return (true); } #endif /* @@ -2441,14 +2441,14 @@ return (m->md.pt2_wirecount[pte1_idx & PT2PG_MASK]); } -static __inline boolean_t +static __inline bool pt2_is_empty(vm_page_t m, vm_offset_t va) { return (m->md.pt2_wirecount[pte1_index(va) & PT2PG_MASK] == 0); } -static __inline boolean_t +static __inline bool pt2_is_full(vm_page_t m, vm_offset_t va) { @@ -2456,7 +2456,7 @@ NPTE2_IN_PT2); } -static __inline boolean_t +static __inline bool pt2pg_is_empty(vm_page_t m) { @@ -2649,10 +2649,10 @@ /* * Decrements a L2 page table page's wire count, which is used to record the * number of valid page table entries within the page. If the wire count - * drops to zero, then the page table page is unmapped. Returns TRUE if the - * page table page was unmapped and FALSE otherwise. + * drops to zero, then the page table page is unmapped. Returns true if the + * page table page was unmapped and false otherwise. 
*/ -static __inline boolean_t +static __inline bool pmap_unwire_pt2(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { pt2_wirecount_dec(m, pte1_index(va)); @@ -2665,9 +2665,9 @@ */ pmap_unwire_pt2pg(pmap, va, m); pmap_add_delayed_free_list(m, free); - return (TRUE); + return (true); } else - return (FALSE); + return (false); } /* @@ -2720,14 +2720,14 @@ * After removing a L2 page table entry, this routine is used to * conditionally free the page, and manage the hold/wire counts. */ -static boolean_t +static bool pmap_unuse_pt2(pmap_t pmap, vm_offset_t va, struct spglist *free) { pt1_entry_t pte1; vm_page_t mpte; if (va >= VM_MAXUSER_ADDRESS) - return (FALSE); + return (false); pte1 = pte1_load(pmap_pte1(pmap, va)); mpte = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); return (pmap_unwire_pt2(pmap, va, mpte, free)); @@ -2997,7 +2997,7 @@ * when needed. */ static pv_entry_t -get_pv_entry(pmap_t pmap, boolean_t try) +get_pv_entry(pmap_t pmap, bool try) { static const struct timeval printinterval = { 60, 0 }; static struct timeval lastprint; @@ -3081,7 +3081,7 @@ rw_assert(&pvh_global_lock, RA_WLOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); - pv = get_pv_entry(pmap, FALSE); + pv = get_pv_entry(pmap, false); pv->pv_va = va; TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); } @@ -3197,7 +3197,7 @@ /* * Conditionally create a pv entry. */ -static boolean_t +static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; @@ -3205,12 +3205,12 @@ rw_assert(&pvh_global_lock, RA_WLOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); if (pv_entry_count < pv_entry_high_water && - (pv = get_pv_entry(pmap, TRUE)) != NULL) { + (pv = get_pv_entry(pmap, true)) != NULL) { pv->pv_va = va; TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); - return (TRUE); + return (true); } else - return (FALSE); + return (false); } /* @@ -3666,7 +3666,7 @@ * Tries to demote a 1MB page mapping. If demotion fails, the * 1MB page mapping is invalidated. 
*/ -static boolean_t +static bool pmap_demote_pte1(pmap_t pmap, pt1_entry_t *pte1p, vm_offset_t va) { pt1_entry_t opte1, npte1; @@ -3700,7 +3700,7 @@ vm_page_free_pages_toq(&free, false); CTR3(KTR_PMAP, "%s: failure for va %#x in pmap %p", __func__, va, pmap); - return (FALSE); + return (false); } m->pindex = pte1_index(va) & ~PT2PG_MASK; if (va < VM_MAXUSER_ADDRESS) @@ -3823,7 +3823,7 @@ PDEBUG(6, printf("%s(%p): success for va %#x pte1 %#x(%#x) at %p\n", __func__, pmap, va, npte1, pte1_load(pte1p), pte1p)); - return (TRUE); + return (true); } /* @@ -3979,7 +3979,7 @@ */ if ((m->oflags & VPO_UNMANAGED) == 0) { if (pv == NULL) { - pv = get_pv_entry(pmap, FALSE); + pv = get_pv_entry(pmap, false); pv->pv_va = va; } TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); @@ -4403,7 +4403,7 @@ int field, idx; int32_t bit; uint32_t inuse, bitmask; - boolean_t allfree; + bool allfree; /* * Assert that the given pmap is only active on the current @@ -4431,7 +4431,7 @@ TAILQ_FOREACH_SAFE(pc, &pmap->pm_pvchunk, pc_list, npc) { KASSERT(pc->pc_pmap == pmap, ("%s: wrong pmap %p %p", __func__, pmap, pc->pc_pmap)); - allfree = TRUE; + allfree = true; for (field = 0; field < _NPCM; field++) { inuse = (~(pc->pc_map[field])) & pc_freemask[field]; while (inuse != 0) { @@ -4449,7 +4449,7 @@ pte1 = pte1_load(pte1p); if (pte1_is_section(pte1)) { if (pte1_is_wired(pte1)) { - allfree = FALSE; + allfree = false; continue; } pte1_clear(pte1p); @@ -4468,7 +4468,7 @@ } if (pte2_is_wired(pte2)) { - allfree = FALSE; + allfree = false; continue; } pte2_clear(pte2p); @@ -4936,7 +4936,7 @@ void pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) { - boolean_t pv_lists_locked; + bool pv_lists_locked; vm_offset_t nextva; pt1_entry_t *pte1p, pte1; pt2_entry_t *pte2p, opte2, npte2; @@ -4952,9 +4952,9 @@ return; if (pmap_is_current(pmap)) - pv_lists_locked = FALSE; + pv_lists_locked = false; else { - pv_lists_locked = TRUE; + pv_lists_locked = true; resume: rw_wlock(&pvh_global_lock); sched_pin(); @@ -4989,7 +4989,7 @@ continue; } else { if (!pv_lists_locked) { - pv_lists_locked = TRUE; + pv_lists_locked = true; if (!rw_try_wlock(&pvh_global_lock)) { PMAP_UNLOCK(pmap); goto resume; @@ -5122,21 +5122,21 @@ } /* - * Returns TRUE if any of the given mappings were used to modify - * physical memory. Otherwise, returns FALSE. Both page and 1mpage + * Returns true if any of the given mappings were used to modify + * physical memory. Otherwise, returns false. Both page and 1mpage * mappings are supported. */ -static boolean_t +static bool pmap_is_modified_pvh(struct md_page *pvh) { pv_entry_t pv; pt1_entry_t pte1; pt2_entry_t pte2; pmap_t pmap; - boolean_t rv; + bool rv; rw_assert(&pvh_global_lock, RA_WLOCKED); - rv = FALSE; + rv = false; sched_pin(); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); @@ -5164,10 +5164,10 @@ * Return whether or not the specified physical page was modified * in any physical maps. */ -boolean_t +bool pmap_is_modified(vm_page_t m) { - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("%s: page %p is not managed", __func__, m)); @@ -5176,7 +5176,7 @@ * If the page is not busied then this check is racy. */ if (!pmap_page_is_write_mapped(m)) - return (FALSE); + return (false); rw_wlock(&pvh_global_lock); rv = pmap_is_modified_pvh(&m->md) || ((m->flags & PG_FICTITIOUS) == 0 && @@ -5191,14 +5191,14 @@ * Return whether or not the specified virtual address is eligible * for prefault. 
*/ -boolean_t +bool pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pt1_entry_t pte1; pt2_entry_t pte2; - boolean_t rv; + bool rv; - rv = FALSE; + rv = false; PMAP_LOCK(pmap); pte1 = pte1_load(pmap_pte1(pmap, addr)); if (pte1_is_link(pte1)) { @@ -5210,10 +5210,10 @@ } /* - * Returns TRUE if any of the given mappings were referenced and FALSE + * Returns true if any of the given mappings were referenced and false * otherwise. Both page and 1mpage mappings are supported. */ -static boolean_t +static bool pmap_is_referenced_pvh(struct md_page *pvh) { @@ -5221,10 +5221,10 @@ pt1_entry_t pte1; pt2_entry_t pte2; pmap_t pmap; - boolean_t rv; + bool rv; rw_assert(&pvh_global_lock, RA_WLOCKED); - rv = FALSE; + rv = false; sched_pin(); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); @@ -5250,10 +5250,10 @@ * Return whether or not the specified physical page was referenced * in any physical maps. */ -boolean_t +bool pmap_is_referenced(vm_page_t m) { - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("%s: page %p is not managed", __func__, m)); @@ -5400,12 +5400,12 @@ vm_offset_t nextva; pt1_entry_t *pte1p, pte1; pt2_entry_t *pte2p, pte2; - boolean_t pv_lists_locked; + bool pv_lists_locked; if (pmap_is_current(pmap)) - pv_lists_locked = FALSE; + pv_lists_locked = false; else { - pv_lists_locked = TRUE; + pv_lists_locked = true; resume: rw_wlock(&pvh_global_lock); sched_pin(); @@ -5441,7 +5441,7 @@ continue; } else { if (!pv_lists_locked) { - pv_lists_locked = TRUE; + pv_lists_locked = true; if (!rw_try_wlock(&pvh_global_lock)) { PMAP_UNLOCK(pmap); /* Repeat sva. */ @@ -5563,14 +5563,14 @@ pt2_entry_t *pte2p, pte2; vm_offset_t pdnxt; vm_page_t m; - boolean_t pv_lists_locked; + bool pv_lists_locked; if (advice != MADV_DONTNEED && advice != MADV_FREE) return; if (pmap_is_current(pmap)) - pv_lists_locked = FALSE; + pv_lists_locked = false; else { - pv_lists_locked = TRUE; + pv_lists_locked = true; resume: rw_wlock(&pvh_global_lock); sched_pin(); @@ -5588,7 +5588,7 @@ if (!pte1_is_managed(opte1)) continue; if (!pv_lists_locked) { - pv_lists_locked = TRUE; + pv_lists_locked = true; if (!rw_try_wlock(&pvh_global_lock)) { PMAP_UNLOCK(pmap); goto resume; @@ -5773,16 +5773,16 @@ */ /* - * Returns TRUE if the given page is mapped individually or as part of - * a 1mpage. Otherwise, returns FALSE. + * Returns true if the given page is mapped individually or as part of + * a 1mpage. Otherwise, returns false. */ -boolean_t +bool pmap_page_is_mapped(vm_page_t m) { - boolean_t rv; + bool rv; if ((m->oflags & VPO_UNMANAGED) != 0) - return (FALSE); + return (false); rw_wlock(&pvh_global_lock); rv = !TAILQ_EMPTY(&m->md.pv_list) || ((m->flags & PG_FICTITIOUS) == 0 && @@ -5798,21 +5798,21 @@ * is only necessary that true be returned for a small * subset of pmaps for proper page aging. 
*/ -boolean_t +bool pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { struct md_page *pvh; pv_entry_t pv; int loops = 0; - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("%s: page %p is not managed", __func__, m)); - rv = FALSE; + rv = false; rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { - rv = TRUE; + rv = true; break; } loops++; @@ -5823,7 +5823,7 @@ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { - rv = TRUE; + rv = true; break; } loops++; @@ -6754,7 +6754,7 @@ } static void -dump_link(pmap_t pmap, uint32_t pte1_idx, boolean_t invalid_ok) +dump_link(pmap_t pmap, uint32_t pte1_idx, bool invalid_ok) { uint32_t i; vm_offset_t va; @@ -6786,14 +6786,14 @@ } } -static __inline boolean_t +static __inline bool is_pv_chunk_space(vm_offset_t va) { if ((((vm_offset_t)pv_chunkbase) <= va) && (va < ((vm_offset_t)pv_chunkbase + PAGE_SIZE * pv_maxchunks))) - return (TRUE); - return (FALSE); + return (true); + return (false); } DB_SHOW_COMMAND(pmap, pmap_pmap_print) @@ -6805,7 +6805,7 @@ vm_offset_t va, eva; vm_page_t m; uint32_t i; - boolean_t invalid_ok, dump_link_ok, dump_pv_chunk; + bool invalid_ok, dump_link_ok, dump_pv_chunk; if (have_addr) { pmap_t pm; @@ -6820,7 +6820,7 @@ pmap = PCPU_GET(curpmap); eva = (modif[0] == 'u') ? VM_MAXUSER_ADDRESS : 0xFFFFFFFF; - dump_pv_chunk = FALSE; /* XXX evaluate from modif[] */ + dump_pv_chunk = false; /* XXX evaluate from modif[] */ printf("pmap: 0x%08X\n", (uint32_t)pmap); printf("PT2MAP: 0x%08X\n", (uint32_t)PT2MAP); @@ -6839,8 +6839,8 @@ !!(pte1 & PTE1_S), !(pte1 & PTE1_NG)); dump_section(pmap, i); } else if (pte1_is_link(pte1)) { - dump_link_ok = TRUE; - invalid_ok = FALSE; + dump_link_ok = true; + invalid_ok = false; pte2 = pte2_load(pmap_pt2tab_entry(pmap, va)); m = PHYS_TO_VM_PAGE(pte1_link_pa(pte1)); printf("0x%08X: Link 0x%08X, pt2tab: 0x%08X m: %p", @@ -6848,9 +6848,9 @@ if (is_pv_chunk_space(va)) { printf(" - pv_chunk space"); if (dump_pv_chunk) - invalid_ok = TRUE; + invalid_ok = true; else - dump_link_ok = FALSE; + dump_link_ok = false; } else if (m != NULL) printf(" w:%d w2:%u", m->ref_count, diff --git a/sys/arm/include/pmap-v6.h b/sys/arm/include/pmap-v6.h --- a/sys/arm/include/pmap-v6.h +++ b/sys/arm/include/pmap-v6.h @@ -139,7 +139,7 @@ void pmap_bootstrap(vm_offset_t); void pmap_kenter(vm_offset_t, vm_paddr_t); void pmap_kremove(vm_offset_t); -boolean_t pmap_page_is_mapped(vm_page_t); +bool pmap_page_is_mapped(vm_page_t); bool pmap_ps_enabled(pmap_t pmap); void pmap_tlb_flush(pmap_t, vm_offset_t); diff --git a/sys/arm/include/pmap_var.h b/sys/arm/include/pmap_var.h --- a/sys/arm/include/pmap_var.h +++ b/sys/arm/include/pmap_var.h @@ -163,7 +163,7 @@ pte1_sync(pte1p); } -static __inline boolean_t +static __inline bool pte1_is_link(pt1_entry_t pte1) { @@ -177,21 +177,21 @@ return ((pte1 & L1_TYPE_MASK) == L1_TYPE_S); } -static __inline boolean_t +static __inline bool pte1_is_dirty(pt1_entry_t pte1) { return ((pte1 & (PTE1_NM | PTE1_RO)) == 0); } -static __inline boolean_t +static __inline bool pte1_is_global(pt1_entry_t pte1) { return ((pte1 & PTE1_NG) == 0); } -static __inline boolean_t +static __inline bool pte1_is_valid(pt1_entry_t pte1) { int l1_type; @@ -200,7 +200,7 @@ return ((l1_type == L1_TYPE_C) || (l1_type == L1_TYPE_S)); } -static __inline boolean_t +static __inline bool pte1_is_wired(pt1_entry_t pte1) { @@ -303,28 +303,28 @@ pte2_sync(pte2p); } -static __inline boolean_t +static __inline bool 
pte2_is_dirty(pt2_entry_t pte2) { return ((pte2 & (PTE2_NM | PTE2_RO)) == 0); } -static __inline boolean_t +static __inline bool pte2_is_global(pt2_entry_t pte2) { return ((pte2 & PTE2_NG) == 0); } -static __inline boolean_t +static __inline bool pte2_is_valid(pt2_entry_t pte2) { return (pte2 & PTE2_V); } -static __inline boolean_t +static __inline bool pte2_is_wired(pt2_entry_t pte2) { @@ -360,7 +360,7 @@ } static __inline void -pte2_set_wired(pt2_entry_t *pte2p, boolean_t wired) +pte2_set_wired(pt2_entry_t *pte2p, bool wired) { /* diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c --- a/sys/arm64/arm64/pmap.c +++ b/sys/arm64/arm64/pmap.c @@ -472,7 +472,7 @@ static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva, pd_entry_t l2e, struct spglist *free, struct rwlock **lockp); static void pmap_reset_asid_set(pmap_t pmap); -static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, +static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp); static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, @@ -2140,8 +2140,7 @@ * physical memory manager after the TLB has been updated. */ static __inline void -pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, - boolean_t set_PG_ZERO) +pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO) { if (set_PG_ZERO) @@ -2154,19 +2153,19 @@ /* * Decrements a page table page's reference count, which is used to record the * number of valid page table entries within the page. If the reference count - * drops to zero, then the page table page is unmapped. Returns TRUE if the - * page table page was unmapped and FALSE otherwise. + * drops to zero, then the page table page is unmapped. Returns true if the + * page table page was unmapped and false otherwise. */ -static inline boolean_t +static inline bool pmap_unwire_l3(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { --m->ref_count; if (m->ref_count == 0) { _pmap_unwire_l3(pmap, va, m, free); - return (TRUE); + return (true); } else - return (FALSE); + return (false); } static void @@ -2222,7 +2221,7 @@ * Put page on a list so that it is released after * *ALL* TLB shootdown is done */ - pmap_add_delayed_free_list(m, free, TRUE); + pmap_add_delayed_free_list(m, free, true); } /* @@ -2602,7 +2601,7 @@ void pmap_release(pmap_t pmap) { - boolean_t rv __diagused; + bool rv __diagused; struct spglist free; struct asid_set *set; vm_page_t m; @@ -2621,7 +2620,7 @@ PMAP_LOCK(pmap); rv = pmap_unwire_l3(pmap, 0, m, &free); PMAP_UNLOCK(pmap); - MPASS(rv == TRUE); + MPASS(rv == true); vm_page_free_pages_toq(&free, true); } @@ -3320,7 +3319,7 @@ * Conditionally create the PV entry for a 4KB page mapping if the required * memory can be allocated without resorting to reclamation. 
*/ -static boolean_t +static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp) { @@ -3333,9 +3332,9 @@ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; - return (TRUE); + return (true); } else - return (FALSE); + return (false); } /* @@ -3451,7 +3450,7 @@ KASSERT(ml3->ref_count == NL3PG, ("pmap_remove_l2: l3 page ref count error")); ml3->ref_count = 0; - pmap_add_delayed_free_list(ml3, free, FALSE); + pmap_add_delayed_free_list(ml3, free, false); } } return (pmap_unuse_pt(pmap, sva, l1e, free)); @@ -4370,7 +4369,7 @@ pv_entry_t pv; vm_paddr_t opa, pa; vm_page_t mpte, om; - boolean_t nosleep; + bool nosleep; int lvl, rv; KASSERT(ADDR_IS_CANONICAL(va), @@ -5463,23 +5462,23 @@ * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ -boolean_t +bool pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { struct md_page *pvh; struct rwlock *lock; pv_entry_t pv; int loops = 0; - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_page_exists_quick: page %p is not managed", m)); - rv = FALSE; + rv = false; lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { - rv = TRUE; + rv = true; break; } loops++; @@ -5490,7 +5489,7 @@ pvh = page_to_pvh(m); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { - rv = TRUE; + rv = true; break; } loops++; @@ -5738,7 +5737,7 @@ ("pmap_remove_pages: l3 page ref count error")); ml3->ref_count = 0; pmap_add_delayed_free_list(ml3, - &free, FALSE); + &free, false); } break; case 2: @@ -5781,8 +5780,8 @@ /* * This is used to check if a page has been accessed or modified. */ -static boolean_t -pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified) +static bool +pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified) { struct rwlock *lock; pv_entry_t pv; @@ -5790,9 +5789,9 @@ pt_entry_t *pte, mask, value; pmap_t pmap; int md_gen, pvh_gen; - boolean_t rv; + bool rv; - rv = FALSE; + rv = false; lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); restart: @@ -5870,7 +5869,7 @@ * Return whether or not the specified physical page was modified * in any physical maps. */ -boolean_t +bool pmap_is_modified(vm_page_t m) { @@ -5881,8 +5880,8 @@ * If the page is not busied then this check is racy. */ if (!pmap_page_is_write_mapped(m)) - return (FALSE); - return (pmap_page_test_mappings(m, FALSE, TRUE)); + return (false); + return (pmap_page_test_mappings(m, false, true)); } /* @@ -5891,19 +5890,19 @@ * Return whether or not the specified virtual address is eligible * for prefault. */ -boolean_t +bool pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pd_entry_t *pde; pt_entry_t *pte; - boolean_t rv; + bool rv; int lvl; /* - * Return TRUE if and only if the L3 entry for the specified virtual + * Return true if and only if the L3 entry for the specified virtual * address is allocated but invalid. */ - rv = FALSE; + rv = false; PMAP_LOCK(pmap); pde = pmap_pde(pmap, addr, &lvl); if (pde != NULL && lvl == 2) { @@ -5920,13 +5919,13 @@ * Return whether or not the specified physical page was referenced * in any physical maps. 
*/ -boolean_t +bool pmap_is_referenced(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_referenced: page %p is not managed", m)); - return (pmap_page_test_mappings(m, TRUE, FALSE)); + return (pmap_page_test_mappings(m, true, false)); } /* @@ -7685,7 +7684,7 @@ } } -boolean_t +bool pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode) { diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c --- a/sys/i386/i386/pmap.c +++ b/sys/i386/i386/pmap.c @@ -299,7 +299,7 @@ static void free_pv_chunk(struct pv_chunk *pc); static void free_pv_entry(pmap_t pmap, pv_entry_t pv); -static pv_entry_t get_pv_entry(pmap_t pmap, boolean_t try); +static pv_entry_t get_pv_entry(pmap_t pmap, bool try); static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa); static bool pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, pd_entry_t pde, u_int flags); @@ -312,7 +312,7 @@ static int pmap_pvh_wired_mappings(struct md_page *pvh, int count); static void pmap_abort_ptp(pmap_t pmap, vm_offset_t va, vm_page_t mpte); -static boolean_t pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); +static bool pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); static bool pmap_enter_4mpage(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot); static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, pd_entry_t newpde, @@ -323,15 +323,15 @@ static void pmap_invalidate_pde_page(pmap_t pmap, vm_offset_t va, pd_entry_t pde); static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte); -static boolean_t pmap_is_modified_pvh(struct md_page *pvh); -static boolean_t pmap_is_referenced_pvh(struct md_page *pvh); +static bool pmap_is_modified_pvh(struct md_page *pvh); +static bool pmap_is_referenced_pvh(struct md_page *pvh); static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode); static void pmap_kenter_pde(vm_offset_t va, pd_entry_t newpde); static void pmap_pde_attr(pd_entry_t *pde, int cache_bits); #if VM_NRESERVLEVEL > 0 static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va); #endif -static boolean_t pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, +static bool pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot); static void pmap_pte_attr(pt_entry_t *pte, int cache_bits); static void pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, @@ -344,7 +344,7 @@ struct spglist *free); static void pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va); static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); -static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, +static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m); static void pmap_update_pde(pmap_t pmap, vm_offset_t va, pd_entry_t *pde, pd_entry_t newpde); @@ -1090,7 +1090,7 @@ * Low level helper routines..... ***************************************************/ -static boolean_t +static bool __CONCAT(PMTYPE, is_valid_memattr)(pmap_t pmap __unused, vm_memattr_t mode) { @@ -1103,7 +1103,7 @@ * caching mode. */ static int -__CONCAT(PMTYPE, cache_bits)(pmap_t pmap, int mode, boolean_t is_pde) +__CONCAT(PMTYPE, cache_bits)(pmap_t pmap, int mode, bool is_pde) { int cache_bits, pat_flag, pat_idx; @@ -1912,8 +1912,7 @@ * physical memory manager after the TLB has been updated. 
*/ static __inline void -pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, - boolean_t set_PG_ZERO) +pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO) { if (set_PG_ZERO) @@ -1957,19 +1956,19 @@ /* * Decrements a page table page's reference count, which is used to record the * number of valid page table entries within the page. If the reference count - * drops to zero, then the page table page is unmapped. Returns TRUE if the - * page table page was unmapped and FALSE otherwise. + * drops to zero, then the page table page is unmapped. Returns true if the + * page table page was unmapped and false otherwise. */ -static inline boolean_t +static inline bool pmap_unwire_ptp(pmap_t pmap, vm_page_t m, struct spglist *free) { --m->ref_count; if (m->ref_count == 0) { _pmap_unwire_ptp(pmap, m, free); - return (TRUE); + return (true); } else - return (FALSE); + return (false); } static void @@ -1990,7 +1989,7 @@ * shootdown is done. */ MPASS(pmap != kernel_pmap); - pmap_add_delayed_free_list(m, free, TRUE); + pmap_add_delayed_free_list(m, free, true); } /* @@ -2499,7 +2498,7 @@ * when needed. */ static pv_entry_t -get_pv_entry(pmap_t pmap, boolean_t try) +get_pv_entry(pmap_t pmap, bool try) { static const struct timeval printinterval = { 60, 0 }; static struct timeval lastprint; @@ -2691,7 +2690,7 @@ rw_assert(&pvh_global_lock, RA_WLOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); - pv = get_pv_entry(pmap, FALSE); + pv = get_pv_entry(pmap, false); pv->pv_va = va; TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); } @@ -2699,7 +2698,7 @@ /* * Conditionally create a pv entry. */ -static boolean_t +static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m) { pv_entry_t pv; @@ -2707,12 +2706,12 @@ rw_assert(&pvh_global_lock, RA_WLOCKED); PMAP_LOCK_ASSERT(pmap, MA_OWNED); if (pv_entry_count < pv_entry_high_water && - (pv = get_pv_entry(pmap, TRUE)) != NULL) { + (pv = get_pv_entry(pmap, true)) != NULL) { pv->pv_va = va; TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); - return (TRUE); + return (true); } else - return (FALSE); + return (false); } /* @@ -2754,7 +2753,7 @@ * Tries to demote a 2- or 4MB page mapping. If demotion fails, the * 2- or 4MB page mapping is invalidated. 
*/ -static boolean_t +static bool pmap_demote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va) { pd_entry_t newpde, oldpde; @@ -2789,7 +2788,7 @@ vm_page_free_pages_toq(&free, true); CTR2(KTR_PMAP, "pmap_demote_pde: failure for va %#x" " in pmap %p", va, pmap); - return (FALSE); + return (false); } mpte->pindex = va >> PDRSHIFT; if (pmap != kernel_pmap) { @@ -2897,7 +2896,7 @@ pmap_pde_demotions++; CTR2(KTR_PMAP, "pmap_demote_pde: success for va %#x" " in pmap %p", va, pmap); - return (TRUE); + return (true); } /* @@ -2992,7 +2991,7 @@ KASSERT(mpte->ref_count == NPTEPG, ("pmap_remove_pde: pte page ref count error")); mpte->ref_count = 0; - pmap_add_delayed_free_list(mpte, free, FALSE); + pmap_add_delayed_free_list(mpte, free, false); } } } @@ -3263,17 +3262,17 @@ /* * pmap_protect_pde: do the things to protect a 4mpage in a process */ -static boolean_t +static bool pmap_protect_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t sva, vm_prot_t prot) { pd_entry_t newpde, oldpde; vm_page_t m, mt; - boolean_t anychanged; + bool anychanged; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT((sva & PDRMASK) == 0, ("pmap_protect_pde: sva is not 4mpage aligned")); - anychanged = FALSE; + anychanged = false; retry: oldpde = newpde = *pde; if ((prot & VM_PROT_WRITE) == 0) { @@ -3300,7 +3299,7 @@ if ((oldpde & PG_G) != 0) pmap_invalidate_pde_page(kernel_pmap, sva, oldpde); else - anychanged = TRUE; + anychanged = true; } return (anychanged); } @@ -3316,7 +3315,7 @@ vm_offset_t pdnxt; pd_entry_t ptpaddr; pt_entry_t *pte; - boolean_t anychanged, pv_lists_locked; + bool anychanged, pv_lists_locked; KASSERT((prot & ~VM_PROT_ALL) == 0, ("invalid prot %x", prot)); if (prot == VM_PROT_NONE) { @@ -3334,14 +3333,14 @@ #endif if (pmap_is_current(pmap)) - pv_lists_locked = FALSE; + pv_lists_locked = false; else { - pv_lists_locked = TRUE; + pv_lists_locked = true; resume: rw_wlock(&pvh_global_lock); sched_pin(); } - anychanged = FALSE; + anychanged = false; PMAP_LOCK(pmap); for (; sva < eva; sva = pdnxt) { @@ -3377,11 +3376,11 @@ */ if (pmap_protect_pde(pmap, &pmap->pm_pdir[pdirindex], sva, prot)) - anychanged = TRUE; + anychanged = true; continue; } else { if (!pv_lists_locked) { - pv_lists_locked = TRUE; + pv_lists_locked = true; if (!rw_try_wlock(&pvh_global_lock)) { if (anychanged) pmap_invalidate_all_int( @@ -3444,7 +3443,7 @@ if (obits & PG_G) pmap_invalidate_page_int(pmap, sva); else - anychanged = TRUE; + anychanged = true; } } } @@ -3812,7 +3811,7 @@ */ if ((newpte & PG_MANAGED) != 0) { if (pv == NULL) { - pv = get_pv_entry(pmap, FALSE); + pv = get_pv_entry(pmap, false); pv->pv_va = va; } TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); @@ -4275,12 +4274,12 @@ vm_offset_t pdnxt; pd_entry_t *pde; pt_entry_t *pte; - boolean_t pv_lists_locked; + bool pv_lists_locked; if (pmap_is_current(pmap)) - pv_lists_locked = FALSE; + pv_lists_locked = false; else { - pv_lists_locked = TRUE; + pv_lists_locked = true; resume: rw_wlock(&pvh_global_lock); sched_pin(); @@ -4314,7 +4313,7 @@ continue; } else { if (!pv_lists_locked) { - pv_lists_locked = TRUE; + pv_lists_locked = true; if (!rw_try_wlock(&pvh_global_lock)) { PMAP_UNLOCK(pmap); /* Repeat sva. */ @@ -4626,21 +4625,21 @@ * is only necessary that true be returned for a small * subset of pmaps for proper page aging. 
*/ -static boolean_t +static bool __CONCAT(PMTYPE, page_exists_quick)(pmap_t pmap, vm_page_t m) { struct md_page *pvh; pv_entry_t pv; int loops = 0; - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_page_exists_quick: page %p is not managed", m)); - rv = FALSE; + rv = false; rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { - rv = TRUE; + rv = true; break; } loops++; @@ -4651,7 +4650,7 @@ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { - rv = TRUE; + rv = true; break; } loops++; @@ -4714,16 +4713,16 @@ } /* - * Returns TRUE if the given page is mapped individually or as part of - * a 4mpage. Otherwise, returns FALSE. + * Returns true if the given page is mapped individually or as part of + * a 4mpage. Otherwise, returns false. */ -static boolean_t +static bool __CONCAT(PMTYPE, page_is_mapped)(vm_page_t m) { - boolean_t rv; + bool rv; if ((m->oflags & VPO_UNMANAGED) != 0) - return (FALSE); + return (false); rw_wlock(&pvh_global_lock); rv = !TAILQ_EMPTY(&m->md.pv_list) || ((m->flags & PG_FICTITIOUS) == 0 && @@ -4843,7 +4842,7 @@ KASSERT(mpte->ref_count == NPTEPG, ("pmap_remove_pages: pte page ref count error")); mpte->ref_count = 0; - pmap_add_delayed_free_list(mpte, &free, FALSE); + pmap_add_delayed_free_list(mpte, &free, false); } } else { pmap->pm_stats.resident_count--; @@ -4876,10 +4875,10 @@ * Return whether or not the specified physical page was modified * in any physical maps. */ -static boolean_t +static bool __CONCAT(PMTYPE, is_modified)(vm_page_t m) { - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_modified: page %p is not managed", m)); @@ -4888,7 +4887,7 @@ * If the page is not busied then this check is racy. */ if (!pmap_page_is_write_mapped(m)) - return (FALSE); + return (false); rw_wlock(&pvh_global_lock); rv = pmap_is_modified_pvh(&m->md) || ((m->flags & PG_FICTITIOUS) == 0 && @@ -4898,20 +4897,20 @@ } /* - * Returns TRUE if any of the given mappings were used to modify - * physical memory. Otherwise, returns FALSE. Both page and 2mpage + * Returns true if any of the given mappings were used to modify + * physical memory. Otherwise, returns false. Both page and 2mpage * mappings are supported. */ -static boolean_t +static bool pmap_is_modified_pvh(struct md_page *pvh) { pv_entry_t pv; pt_entry_t *pte; pmap_t pmap; - boolean_t rv; + bool rv; rw_assert(&pvh_global_lock, RA_WLOCKED); - rv = FALSE; + rv = false; sched_pin(); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); @@ -4932,13 +4931,13 @@ * Return whether or not the specified virtual address is elgible * for prefault. */ -static boolean_t +static bool __CONCAT(PMTYPE, is_prefaultable)(pmap_t pmap, vm_offset_t addr) { pd_entry_t pde; - boolean_t rv; + bool rv; - rv = FALSE; + rv = false; PMAP_LOCK(pmap); pde = *pmap_pde(pmap, addr); if (pde != 0 && (pde & PG_PS) == 0) @@ -4953,10 +4952,10 @@ * Return whether or not the specified physical page was referenced * in any physical maps. */ -static boolean_t +static bool __CONCAT(PMTYPE, is_referenced)(vm_page_t m) { - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_referenced: page %p is not managed", m)); @@ -4969,19 +4968,19 @@ } /* - * Returns TRUE if any of the given mappings were referenced and FALSE + * Returns true if any of the given mappings were referenced and false * otherwise. Both page and 4mpage mappings are supported. 
*/ -static boolean_t +static bool pmap_is_referenced_pvh(struct md_page *pvh) { pv_entry_t pv; pt_entry_t *pte; pmap_t pmap; - boolean_t rv; + bool rv; rw_assert(&pvh_global_lock, RA_WLOCKED); - rv = FALSE; + rv = false; sched_pin(); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { pmap = PV_PMAP(pv); @@ -5616,7 +5615,7 @@ pd_entry_t *pde; pt_entry_t *pte; int cache_bits_pte, cache_bits_pde; - boolean_t changed; + bool changed; base = trunc_page(va); offset = va & PAGE_MASK; @@ -5630,7 +5629,7 @@ cache_bits_pde = pmap_cache_bits(kernel_pmap, mode, 1); cache_bits_pte = pmap_cache_bits(kernel_pmap, mode, 0); - changed = FALSE; + changed = false; /* * Pages that aren't mapped aren't supported. Also break down @@ -5689,14 +5688,14 @@ if (*pde & PG_PS) { if ((*pde & PG_PDE_CACHE) != cache_bits_pde) { pmap_pde_attr(pde, cache_bits_pde); - changed = TRUE; + changed = true; } tmpva = trunc_4mpage(tmpva) + NBPDR; } else { pte = vtopte(tmpva); if ((*pte & PG_PTE_CACHE) != cache_bits_pte) { pmap_pte_attr(pte, cache_bits_pte); - changed = TRUE; + changed = true; } tmpva += PAGE_SIZE; } @@ -5893,7 +5892,7 @@ m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK); pte_store(&trm_pte[atop(af - prev_addr)], VM_PAGE_TO_PHYS(m) | PG_M | PG_A | PG_RW | PG_V | pgeflag | - pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, FALSE)); + pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, false)); } *addrp = prev_addr; return (0); @@ -5912,7 +5911,7 @@ pd_m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_WAITOK | VM_ALLOC_ZERO); PTD[TRPTDI] = VM_PAGE_TO_PHYS(pd_m) | PG_M | PG_A | PG_RW | PG_V | - pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, TRUE); + pmap_cache_bits(kernel_pmap, VM_MEMATTR_DEFAULT, true); } static void * @@ -6015,7 +6014,7 @@ for (i = 0, pte = vtopte(kaddr); i < plen; i++, pte++) { *pte = PG_V | PG_RW | PG_A | PG_M | VM_PAGE_TO_PHYS(ma[i]) | pmap_cache_bits(kernel_pmap, pmap_page_get_memattr(ma[i]), - FALSE); + false); invlpg(kaddr + ptoa(i)); } } diff --git a/sys/i386/i386/pmap_base.c b/sys/i386/i386/pmap_base.c --- a/sys/i386/i386/pmap_base.c +++ b/sys/i386/i386/pmap_base.c @@ -561,7 +561,7 @@ pmap_methods_ptr->pm_bootstrap(firstaddr); } -boolean_t +bool pmap_is_valid_memattr(pmap_t pmap, vm_memattr_t mode) { @@ -569,7 +569,7 @@ } int -pmap_cache_bits(pmap_t pmap, int mode, boolean_t is_pde) +pmap_cache_bits(pmap_t pmap, int mode, bool is_pde) { return (pmap_methods_ptr->pm_cache_bits(pmap, mode, is_pde)); @@ -720,7 +720,7 @@ pmap_methods_ptr->pm_unwire(pmap, sva, eva); } -boolean_t +bool pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { @@ -734,7 +734,7 @@ return (pmap_methods_ptr->pm_page_wired_mappings(m)); } -boolean_t +bool pmap_page_is_mapped(vm_page_t m) { @@ -748,21 +748,21 @@ pmap_methods_ptr->pm_remove_pages(pmap); } -boolean_t +bool pmap_is_modified(vm_page_t m) { return (pmap_methods_ptr->pm_is_modified(m)); } -boolean_t +bool pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { return (pmap_methods_ptr->pm_is_prefaultable(pmap, addr)); } -boolean_t +bool pmap_is_referenced(vm_page_t m) { diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h --- a/sys/i386/include/pmap.h +++ b/sys/i386/include/pmap.h @@ -222,7 +222,7 @@ void *pmap_bios16_enter(void); void pmap_bios16_leave(void *handle); void pmap_bootstrap(vm_paddr_t); -int pmap_cache_bits(pmap_t, int mode, boolean_t is_pde); +int pmap_cache_bits(pmap_t, int mode, bool is_pde); int pmap_change_attr(vm_offset_t, vm_size_t, int); caddr_t pmap_cmap3(vm_paddr_t pa, u_int pte_bits); void pmap_cp_slow0_map(vm_offset_t kaddr, int 
plen, vm_page_t *ma); @@ -240,7 +240,7 @@ void *pmap_mapbios(vm_paddr_t, vm_size_t); void *pmap_mapdev(vm_paddr_t, vm_size_t); void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int); -boolean_t pmap_page_is_mapped(vm_page_t m); +bool pmap_page_is_mapped(vm_page_t m); void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma); vm_paddr_t pmap_pg_frame(vm_paddr_t pa); bool pmap_ps_enabled(pmap_t pmap); diff --git a/sys/i386/include/pmap_base.h b/sys/i386/include/pmap_base.h --- a/sys/i386/include/pmap_base.h +++ b/sys/i386/include/pmap_base.h @@ -60,8 +60,8 @@ void *(*pm_bios16_enter)(void); void (*pm_bios16_leave)(void *handle); void (*pm_bootstrap)(vm_paddr_t firstaddr); - boolean_t (*pm_is_valid_memattr)(pmap_t, vm_memattr_t); - int (*pm_cache_bits)(pmap_t, int, boolean_t); + bool (*pm_is_valid_memattr)(pmap_t, vm_memattr_t); + int (*pm_cache_bits)(pmap_t, int, bool); bool (*pm_ps_enabled)(pmap_t); void (*pm_pinit0)(pmap_t); int (*pm_pinit)(pmap_t); @@ -86,13 +86,13 @@ void (*pm_object_init_pt)(pmap_t, vm_offset_t, vm_object_t, vm_pindex_t, vm_size_t); void (*pm_unwire)(pmap_t, vm_offset_t, vm_offset_t); - boolean_t (*pm_page_exists_quick)(pmap_t, vm_page_t); + bool (*pm_page_exists_quick)(pmap_t, vm_page_t); int (*pm_page_wired_mappings)(vm_page_t); - boolean_t (*pm_page_is_mapped)(vm_page_t); + bool (*pm_page_is_mapped)(vm_page_t); void (*pm_remove_pages)(pmap_t); - boolean_t (*pm_is_modified)(vm_page_t); - boolean_t (*pm_is_prefaultable)(pmap_t, vm_offset_t); - boolean_t (*pm_is_referenced)(vm_page_t); + bool (*pm_is_modified)(vm_page_t); + bool (*pm_is_prefaultable)(pmap_t, vm_offset_t); + bool (*pm_is_referenced)(vm_page_t); void (*pm_remove_write)(vm_page_t); int (*pm_ts_referenced)(vm_page_t); void *(*pm_mapdev_attr)(vm_paddr_t, vm_size_t, int, int); diff --git a/sys/powerpc/aim/mmu_oea.c b/sys/powerpc/aim/mmu_oea.c --- a/sys/powerpc/aim/mmu_oea.c +++ b/sys/powerpc/aim/mmu_oea.c @@ -216,7 +216,7 @@ #define VSID_NBPW (sizeof(u_int32_t) * 8) static u_int moea_vsid_bitmap[NPMAPS / VSID_NBPW]; -static boolean_t moea_initialized = FALSE; +static bool moea_initialized = false; /* * Statistics. 
@@ -268,7 +268,7 @@ static int moea_enter_locked(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, u_int, int8_t); static void moea_syncicache(vm_paddr_t, vm_size_t); -static boolean_t moea_query_bit(vm_page_t, int); +static bool moea_query_bit(vm_page_t, int); static u_int moea_clear_bit(vm_page_t, int); static void moea_kremove(vm_offset_t); int moea_pte_spill(vm_offset_t); @@ -288,13 +288,13 @@ vm_paddr_t moea_extract(pmap_t, vm_offset_t); vm_page_t moea_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t); void moea_init(void); -boolean_t moea_is_modified(vm_page_t); -boolean_t moea_is_prefaultable(pmap_t, vm_offset_t); -boolean_t moea_is_referenced(vm_page_t); +bool moea_is_modified(vm_page_t); +bool moea_is_prefaultable(pmap_t, vm_offset_t); +bool moea_is_referenced(vm_page_t); int moea_ts_referenced(vm_page_t); vm_offset_t moea_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); static int moea_mincore(pmap_t, vm_offset_t, vm_paddr_t *); -boolean_t moea_page_exists_quick(pmap_t, vm_page_t); +bool moea_page_exists_quick(pmap_t, vm_page_t); void moea_page_init(vm_page_t); int moea_page_wired_mappings(vm_page_t); int moea_pinit(pmap_t); @@ -326,7 +326,7 @@ void moea_scan_init(void); vm_offset_t moea_quick_enter_page(vm_page_t m); void moea_quick_remove_page(vm_offset_t addr); -boolean_t moea_page_is_mapped(vm_page_t m); +bool moea_page_is_mapped(vm_page_t m); bool moea_ps_enabled(pmap_t pmap); static int moea_map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); @@ -1118,7 +1118,7 @@ { } -boolean_t +bool moea_page_is_mapped(vm_page_t m) { return (!LIST_EMPTY(&(m)->md.mdpg_pvoh)); @@ -1320,13 +1320,13 @@ moea_mpvo_zone = uma_zcreate("MPVO entry", sizeof(struct pvo_entry), NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); - moea_initialized = TRUE; + moea_initialized = true; } -boolean_t +bool moea_is_referenced(vm_page_t m) { - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea_is_referenced: page %p is not managed", m)); @@ -1336,10 +1336,10 @@ return (rv); } -boolean_t +bool moea_is_modified(vm_page_t m) { - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea_is_modified: page %p is not managed", m)); @@ -1348,7 +1348,7 @@ * If the page is not busied then this check is racy. */ if (!pmap_page_is_write_mapped(m)) - return (FALSE); + return (false); rw_wlock(&pvh_global_lock); rv = moea_query_bit(m, PTE_CHG); @@ -1356,11 +1356,11 @@ return (rv); } -boolean_t +bool moea_is_prefaultable(pmap_t pmap, vm_offset_t va) { struct pvo_entry *pvo; - boolean_t rv; + bool rv; PMAP_LOCK(pmap); pvo = moea_pvo_find_va(pmap, va & ~ADDR_POFF, NULL); @@ -1659,21 +1659,21 @@ * is only necessary that true be returned for a small * subset of pmaps for proper page aging. 
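moea_page_exists_quick(), whose body follows, is deliberately approximate: as the comment above says, it scans only the first 16 entries of the page's pvo list and may return false even though a mapping by the given pmap exists further down, which is acceptable because the result only feeds page aging. A self-contained sketch of that bounded scan, using a hypothetical singly linked mapping list rather than the kernel's pvo/pv structures:

    #include <stdbool.h>
    #include <stddef.h>

    struct mapping_sketch {            /* hypothetical stand-in for a pvo entry */
        struct mapping_sketch *next;
        const void *owner;             /* the pmap that created the mapping */
    };

    /*
     * Look at no more than 16 entries: a false result may therefore be a
     * false negative, which the (page-aging) caller tolerates.
     */
    static bool
    page_exists_quick_sketch(const struct mapping_sketch *head, const void *pmap)
    {
        const struct mapping_sketch *m;
        bool rv = false;
        int loops = 0;

        for (m = head; m != NULL; m = m->next) {
            if (m->owner == pmap) {
                rv = true;
                break;
            }
            if (++loops >= 16)
                break;
        }
        return (rv);
    }

    int
    main(void)
    {
        const char pm[1] = { 0 };
        struct mapping_sketch only = { NULL, pm };

        return (page_exists_quick_sketch(&only, pm) ? 0 : 1);
    }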
*/ -boolean_t +bool moea_page_exists_quick(pmap_t pmap, vm_page_t m) { int loops; struct pvo_entry *pvo; - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea_page_exists_quick: page %p is not managed", m)); loops = 0; - rv = FALSE; + rv = false; rw_wlock(&pvh_global_lock); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { if (pvo->pvo_pmap == pmap) { - rv = TRUE; + rv = true; break; } if (++loops >= 16) @@ -2527,7 +2527,7 @@ return (victim_idx & 7); } -static boolean_t +static bool moea_query_bit(vm_page_t m, int ptebit) { struct pvo_entry *pvo; @@ -2535,7 +2535,7 @@ rw_assert(&pvh_global_lock, RA_WLOCKED); if (moea_attr_fetch(m) & ptebit) - return (TRUE); + return (true); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { /* @@ -2544,7 +2544,7 @@ */ if (pvo->pvo_pte.pte.pte_lo & ptebit) { moea_attr_save(m, ptebit); - return (TRUE); + return (true); } } @@ -2566,12 +2566,12 @@ mtx_unlock(&moea_table_mutex); if (pvo->pvo_pte.pte.pte_lo & ptebit) { moea_attr_save(m, ptebit); - return (TRUE); + return (true); } } } - return (FALSE); + return (false); } static u_int diff --git a/sys/powerpc/aim/mmu_oea64.c b/sys/powerpc/aim/mmu_oea64.c --- a/sys/powerpc/aim/mmu_oea64.c +++ b/sys/powerpc/aim/mmu_oea64.c @@ -244,7 +244,7 @@ #endif static u_int moea64_vsid_bitmap[NVSIDS / VSID_NBPW]; -static boolean_t moea64_initialized = FALSE; +static bool moea64_initialized = false; #ifdef MOEA64_STATS /* @@ -290,7 +290,7 @@ /* * Utility routines. */ -static boolean_t moea64_query_bit(vm_page_t, uint64_t); +static bool moea64_query_bit(vm_page_t, uint64_t); static u_int moea64_clear_bit(vm_page_t, uint64_t); static void moea64_kremove(vm_offset_t); static void moea64_syncicache(pmap_t pmap, vm_offset_t va, @@ -399,12 +399,12 @@ vm_paddr_t moea64_extract(pmap_t, vm_offset_t); vm_page_t moea64_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t); void moea64_init(void); -boolean_t moea64_is_modified(vm_page_t); -boolean_t moea64_is_prefaultable(pmap_t, vm_offset_t); -boolean_t moea64_is_referenced(vm_page_t); +bool moea64_is_modified(vm_page_t); +bool moea64_is_prefaultable(pmap_t, vm_offset_t); +bool moea64_is_referenced(vm_page_t); int moea64_ts_referenced(vm_page_t); vm_offset_t moea64_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); -boolean_t moea64_page_exists_quick(pmap_t, vm_page_t); +bool moea64_page_exists_quick(pmap_t, vm_page_t); void moea64_page_init(vm_page_t); int moea64_page_wired_mappings(vm_page_t); int moea64_pinit(pmap_t); @@ -438,7 +438,7 @@ vm_offset_t moea64_quick_enter_page(vm_page_t m); vm_offset_t moea64_quick_enter_page_dmap(vm_page_t m); void moea64_quick_remove_page(vm_offset_t addr); -boolean_t moea64_page_is_mapped(vm_page_t m); +bool moea64_page_is_mapped(vm_page_t m); static int moea64_map_user_ptr(pmap_t pm, volatile const void *uaddr, void **kaddr, size_t ulen, size_t *klen); static int moea64_decode_kernel_ptr(vm_offset_t addr, @@ -1631,7 +1631,7 @@ sched_unpin(); } -boolean_t +bool moea64_page_is_mapped(vm_page_t m) { return (!LIST_EMPTY(&(m)->md.mdpg_pvoh)); @@ -1986,10 +1986,10 @@ elf32_nxstack = 1; #endif - moea64_initialized = TRUE; + moea64_initialized = true; } -boolean_t +bool moea64_is_referenced(vm_page_t m) { @@ -1999,7 +1999,7 @@ return (moea64_query_bit(m, LPTE_REF)); } -boolean_t +bool moea64_is_modified(vm_page_t m) { @@ -2010,21 +2010,21 @@ * If the page is not busied then this check is racy. 
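moea64_is_modified() continues below with the same guard the i386 and moea versions above use: a page that has no writable mappings cannot have been dirtied through the pmap, so false is returned before any PTE or CHG-bit lookup, and, as the comment notes, the check is racy unless the page is busied. A reduced sketch of that short-circuit, with a hypothetical page flag standing in for pmap_page_is_write_mapped():

    #include <stdbool.h>

    struct page_sketch {
        unsigned int flags;
    };
    #define PG_WRITEMAPPED_SKETCH 0x1u    /* hypothetical "has a writable mapping" flag */

    /*
     * Cheap pre-check used by the is_modified() implementations: without a
     * writable mapping the MMU can never have set a dirty/changed bit, so
     * the expensive per-mapping scan can be skipped entirely.
     */
    static bool
    is_modified_sketch(const struct page_sketch *m,
        bool (*scan_changed_bit)(const struct page_sketch *))
    {
        if ((m->flags & PG_WRITEMAPPED_SKETCH) == 0)
            return (false);
        return (scan_changed_bit(m));
    }

    static bool
    always_clean(const struct page_sketch *m)
    {
        (void)m;
        return (false);
    }

    int
    main(void)
    {
        struct page_sketch pg = { 0 };

        return (is_modified_sketch(&pg, always_clean) ? 1 : 0);
    }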
*/ if (!pmap_page_is_write_mapped(m)) - return (FALSE); + return (false); return (moea64_query_bit(m, LPTE_CHG)); } -boolean_t +bool moea64_is_prefaultable(pmap_t pmap, vm_offset_t va) { struct pvo_entry *pvo; - boolean_t rv = TRUE; + bool rv = true; PMAP_LOCK(pmap); pvo = moea64_pvo_find_va(pmap, va & ~ADDR_POFF); if (pvo != NULL) - rv = FALSE; + rv = false; PMAP_UNLOCK(pmap); return (rv); } @@ -2378,21 +2378,21 @@ * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ -boolean_t +bool moea64_page_exists_quick(pmap_t pmap, vm_page_t m) { int loops; struct pvo_entry *pvo; - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("moea64_page_exists_quick: page %p is not managed", m)); loops = 0; - rv = FALSE; + rv = false; PV_PAGE_LOCK(m); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { if (!(pvo->pvo_vaddr & PVO_DEAD) && pvo->pvo_pmap == pmap) { - rv = TRUE; + rv = true; break; } if (++loops >= 16) @@ -3033,12 +3033,12 @@ return (RB_FIND(pvo_tree, &pm->pmap_pvo, &key)); } -static boolean_t +static bool moea64_query_bit(vm_page_t m, uint64_t ptebit) { struct pvo_entry *pvo; int64_t ret; - boolean_t rv; + bool rv; vm_page_t sp; /* @@ -3050,13 +3050,13 @@ ((sp = PHYS_TO_VM_PAGE(VM_PAGE_TO_PHYS(m) & ~HPT_SP_MASK)) != NULL && (sp->md.mdpg_attrs & (ptebit | MDPG_ATTR_SP)) == (ptebit | MDPG_ATTR_SP))) - return (TRUE); + return (true); /* * Examine each PTE. Sync so that any pending REF/CHG bits are * flushed to the PTEs. */ - rv = FALSE; + rv = false; powerpc_sync(); PV_PAGE_LOCK(m); LIST_FOREACH(pvo, vm_page_to_pvoh(m), pvo_vlink) { @@ -3067,7 +3067,7 @@ */ if (ret != -1) { if ((ret & ptebit) != 0) { - rv = TRUE; + rv = true; break; } continue; @@ -3091,7 +3091,7 @@ atomic_set_32(&m->md.mdpg_attrs, ret & (LPTE_CHG | LPTE_REF)); if (ret & ptebit) { - rv = TRUE; + rv = true; break; } } diff --git a/sys/powerpc/aim/mmu_radix.c b/sys/powerpc/aim/mmu_radix.c --- a/sys/powerpc/aim/mmu_radix.c +++ b/sys/powerpc/aim/mmu_radix.c @@ -441,14 +441,14 @@ void mmu_radix_kenter(vm_offset_t, vm_paddr_t); vm_paddr_t mmu_radix_kextract(vm_offset_t); void mmu_radix_kremove(vm_offset_t); -boolean_t mmu_radix_is_modified(vm_page_t); -boolean_t mmu_radix_is_prefaultable(pmap_t, vm_offset_t); -boolean_t mmu_radix_is_referenced(vm_page_t); +bool mmu_radix_is_modified(vm_page_t); +bool mmu_radix_is_prefaultable(pmap_t, vm_offset_t); +bool mmu_radix_is_referenced(vm_page_t); void mmu_radix_object_init_pt(pmap_t, vm_offset_t, vm_object_t, vm_pindex_t, vm_size_t); -boolean_t mmu_radix_page_exists_quick(pmap_t, vm_page_t); +bool mmu_radix_page_exists_quick(pmap_t, vm_page_t); void mmu_radix_page_init(vm_page_t); -boolean_t mmu_radix_page_is_mapped(vm_page_t m); +bool mmu_radix_page_is_mapped(vm_page_t m); void mmu_radix_page_set_memattr(vm_page_t, vm_memattr_t); int mmu_radix_page_wired_mappings(vm_page_t); int mmu_radix_pinit(pmap_t); @@ -565,9 +565,9 @@ MMU_DEF(mmu_radix, MMU_TYPE_RADIX, mmu_radix_methods); -static boolean_t pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va, +static bool pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va, struct rwlock **lockp); -static boolean_t pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va); +static bool pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va); static int pmap_unuse_pt(pmap_t, vm_offset_t, pml3_entry_t, struct spglist *); static int pmap_remove_l3e(pmap_t pmap, pml3_entry_t *pdq, vm_offset_t sva, struct spglist *free, struct rwlock **lockp); @@ 
-604,7 +604,7 @@ struct rwlock **lockp); static void _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free); -static boolean_t pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free); +static bool pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free); static void pmap_invalidate_page(pmap_t pmap, vm_offset_t start); static void pmap_invalidate_all(pmap_t pmap); @@ -1029,17 +1029,17 @@ } /* - * Returns TRUE if the given page is mapped individually or as part of - * a 2mpage. Otherwise, returns FALSE. + * Returns true if the given page is mapped individually or as part of + * a 2mpage. Otherwise, returns false. */ -boolean_t +bool mmu_radix_page_is_mapped(vm_page_t m) { struct rwlock *lock; - boolean_t rv; + bool rv; if ((m->oflags & VPO_UNMANAGED) != 0) - return (FALSE); + return (false); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); rv = !TAILQ_EMPTY(&m->md.pv_list) || @@ -1728,7 +1728,7 @@ * Conditionally create the PV entry for a 4KB page mapping if the required * memory can be allocated without resorting to reclamation. */ -static boolean_t +static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp) { @@ -1741,9 +1741,9 @@ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_link); m->md.pv_gen++; - return (TRUE); + return (true); } else - return (FALSE); + return (false); } vm_paddr_t phys_avail_debug[2 * VM_PHYSSEG_MAX]; @@ -2834,7 +2834,7 @@ vm_paddr_t opa, pa; vm_page_t mpte, om; int rv, retrycount; - boolean_t nosleep, invalidate_all, invalidate_page; + bool nosleep, invalidate_all, invalidate_page; va = trunc_page(va); retrycount = 0; @@ -3704,8 +3704,8 @@ 1, 1, M_WAITOK); } -static boolean_t -pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified) +static bool +pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified) { struct rwlock *lock; pv_entry_t pv; @@ -3713,9 +3713,9 @@ pt_entry_t *pte, mask; pmap_t pmap; int md_gen, pvh_gen; - boolean_t rv; + bool rv; - rv = FALSE; + rv = false; lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); restart: @@ -3781,7 +3781,7 @@ * Return whether or not the specified physical page was modified * in any physical maps. */ -boolean_t +bool mmu_radix_is_modified(vm_page_t m) { @@ -3793,19 +3793,19 @@ * If the page is not busied then this check is racy. 
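mmu_radix_is_modified() continues just below, and like the riscv pmap later in the patch it funnels into pmap_page_test_mappings(m, accessed, modified), which is why the old TRUE/FALSE arguments become the bool pairs (false, true) for dirty and (true, false) for referenced. A simplified sketch of one helper serving both queries, with hypothetical PTE bits and a flat array in place of the real pv-list walk:

    #include <stdbool.h>
    #include <stdint.h>

    #define PTE_A_SKETCH 0x40u             /* hypothetical accessed bit */
    #define PTE_D_SKETCH 0x80u             /* hypothetical dirty bit */

    /*
     * Build the mask from the two bool arguments, then scan every mapping
     * once: is_referenced() passes (true, false), is_modified() (false, true).
     */
    static bool
    page_test_mappings_sketch(const uint32_t *ptes, int npte,
        bool accessed, bool modified)
    {
        uint32_t mask = 0;
        int i;

        if (accessed)
            mask |= PTE_A_SKETCH;
        if (modified)
            mask |= PTE_D_SKETCH;
        for (i = 0; i < npte; i++) {
            if ((ptes[i] & mask) != 0)
                return (true);
        }
        return (false);
    }

    int
    main(void)
    {
        uint32_t ptes[2] = { 0, PTE_D_SKETCH };

        return (page_test_mappings_sketch(ptes, 2, false, true) ? 0 : 1);
    }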
*/ if (!pmap_page_is_write_mapped(m)) - return (FALSE); - return (pmap_page_test_mappings(m, FALSE, TRUE)); + return (false); + return (pmap_page_test_mappings(m, false, true)); } -boolean_t +bool mmu_radix_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pml3_entry_t *l3e; pt_entry_t *pte; - boolean_t rv; + bool rv; CTR3(KTR_PMAP, "%s(%p, %#x)", __func__, pmap, addr); - rv = FALSE; + rv = false; PMAP_LOCK(pmap); l3e = pmap_pml3e(pmap, addr); if (l3e != NULL && (be64toh(*l3e) & (RPTE_LEAF | PG_V)) == PG_V) { @@ -3816,13 +3816,13 @@ return (rv); } -boolean_t +bool mmu_radix_is_referenced(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_referenced: page %p is not managed", m)); CTR2(KTR_PMAP, "%s(%p)", __func__, m); - return (pmap_page_test_mappings(m, TRUE, FALSE)); + return (pmap_page_test_mappings(m, true, false)); } /* @@ -4078,24 +4078,24 @@ } } -boolean_t +bool mmu_radix_page_exists_quick(pmap_t pmap, vm_page_t m) { struct md_page *pvh; struct rwlock *lock; pv_entry_t pv; int loops = 0; - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_page_exists_quick: page %p is not managed", m)); CTR3(KTR_PMAP, "%s(%p, %p)", __func__, pmap, m); - rv = FALSE; + rv = false; lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { if (PV_PMAP(pv) == pmap) { - rv = TRUE; + rv = true; break; } loops++; @@ -4106,7 +4106,7 @@ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH(pv, &pvh->pv_list, pv_link) { if (PV_PMAP(pv) == pmap) { - rv = TRUE; + rv = true; break; } loops++; @@ -4448,18 +4448,18 @@ /* * pmap_protect_l3e: do the things to protect a 2mpage in a process */ -static boolean_t +static bool pmap_protect_l3e(pmap_t pmap, pt_entry_t *l3e, vm_offset_t sva, vm_prot_t prot) { pt_entry_t newpde, oldpde; vm_offset_t eva, va; vm_page_t m; - boolean_t anychanged; + bool anychanged; PMAP_LOCK_ASSERT(pmap, MA_OWNED); KASSERT((sva & L3_PAGE_MASK) == 0, ("pmap_protect_l3e: sva is not 2mpage aligned")); - anychanged = FALSE; + anychanged = false; retry: oldpde = newpde = be64toh(*l3e); if ((oldpde & (PG_MANAGED | PG_M | PG_RW)) == @@ -4483,7 +4483,7 @@ */ if (!atomic_cmpset_long(l3e, htobe64(oldpde), htobe64(newpde & ~PG_PROMOTED))) goto retry; - anychanged = TRUE; + anychanged = true; } return (anychanged); } @@ -4497,7 +4497,7 @@ pml2_entry_t *l2e; pml3_entry_t ptpaddr, *l3e; pt_entry_t *pte; - boolean_t anychanged; + bool anychanged; CTR5(KTR_PMAP, "%s(%p, %#x, %#x, %#x)", __func__, pmap, sva, eva, prot); @@ -4517,7 +4517,7 @@ printf("pmap_protect(%p, %#lx, %#lx, %x) - asid: %lu\n", pmap, sva, eva, prot, pmap->pm_pid); #endif - anychanged = FALSE; + anychanged = false; PMAP_LOCK(pmap); for (; sva < eva; sva = va_next) { @@ -4560,7 +4560,7 @@ */ if (sva + L3_PAGE_SIZE == va_next && eva >= va_next) { if (pmap_protect_l3e(pmap, l3e, sva, prot)) - anychanged = TRUE; + anychanged = true; continue; } else if (!pmap_demote_l3e(pmap, l3e, sva)) { /* @@ -4600,7 +4600,7 @@ if (!atomic_cmpset_long(pte, htobe64(obits), htobe64(pbits))) goto retry; if (obits & (PG_A|PG_M)) { - anychanged = TRUE; + anychanged = true; #ifdef INVARIANTS if (VERBOSE_PROTECT || pmap_logging) printf("%#lx %#lx -> %#lx\n", @@ -4687,8 +4687,7 @@ * physical memory manager after the TLB has been updated. 
*/ static __inline void -pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, - boolean_t set_PG_ZERO) +pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO) { if (set_PG_ZERO) @@ -4729,19 +4728,19 @@ /* * Decrements a page table page's wire count, which is used to record the * number of valid page table entries within the page. If the wire count - * drops to zero, then the page table page is unmapped. Returns TRUE if the - * page table page was unmapped and FALSE otherwise. + * drops to zero, then the page table page is unmapped. Returns true if the + * page table page was unmapped and false otherwise. */ -static inline boolean_t +static inline bool pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { --m->ref_count; if (m->ref_count == 0) { _pmap_unwire_ptp(pmap, va, m, free); - return (TRUE); + return (true); } else - return (FALSE); + return (false); } static void @@ -4788,7 +4787,7 @@ * Put page on a list so that it is released after * *ALL* TLB shootdown is done */ - pmap_add_delayed_free_list(m, free, TRUE); + pmap_add_delayed_free_list(m, free, true); } /* @@ -4866,11 +4865,11 @@ } } -static boolean_t +static bool pmap_demote_l3e(pmap_t pmap, pml3_entry_t *pde, vm_offset_t va) { struct rwlock *lock; - boolean_t rv; + bool rv; lock = NULL; rv = pmap_demote_l3e_locked(pmap, pde, va, &lock); @@ -4879,7 +4878,7 @@ return (rv); } -static boolean_t +static bool pmap_demote_l3e_locked(pmap_t pmap, pml3_entry_t *l3e, vm_offset_t va, struct rwlock **lockp) { @@ -4923,7 +4922,7 @@ vm_page_free_pages_toq(&free, true); CTR2(KTR_PMAP, "pmap_demote_l3e: failure for va %#lx" " in pmap %p", va, pmap); - return (FALSE); + return (false); } mpte->pindex = pmap_l3e_pindex(va); if (va < VM_MAXUSER_ADDRESS) @@ -4984,7 +4983,7 @@ counter_u64_add(pmap_l3e_demotions, 1); CTR2(KTR_PMAP, "pmap_demote_l3e: success for va %#lx" " in pmap %p", va, pmap); - return (TRUE); + return (true); } /* @@ -5060,7 +5059,7 @@ KASSERT(mpte->ref_count == NPTEPG, ("pmap_remove_l3e: pte page wire count error")); mpte->ref_count = 0; - pmap_add_delayed_free_list(mpte, free, FALSE); + pmap_add_delayed_free_list(mpte, free, false); } } return (pmap_unuse_pt(pmap, sva, be64toh(*pmap_pml2e(pmap, sva)), free)); @@ -5412,7 +5411,7 @@ #ifdef PV_STATS int freed; #endif - boolean_t superpage; + bool superpage; vm_paddr_t pa; /* @@ -5447,7 +5446,7 @@ pte = pmap_l2e_to_l3e(pte, pv->pv_va); tpte = be64toh(*pte); if ((tpte & (RPTE_LEAF | PG_V)) == PG_V) { - superpage = FALSE; + superpage = false; ptel3e = tpte; pte = (pt_entry_t *)PHYS_TO_DMAP(tpte & PG_FRAME); @@ -5464,7 +5463,7 @@ * regular page could be mistaken for * a superpage. */ - superpage = TRUE; + superpage = true; } if ((tpte & PG_V) == 0) { @@ -5530,7 +5529,7 @@ KASSERT(mpte->ref_count == NPTEPG, ("pmap_remove_pages: pte page wire count error")); mpte->ref_count = 0; - pmap_add_delayed_free_list(mpte, &free, FALSE); + pmap_add_delayed_free_list(mpte, &free, false); } } else { pmap_resident_count_dec(pmap, 1); @@ -5964,7 +5963,7 @@ /* * Tries to demote a 1GB page mapping. 
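pmap_protect_l3e() and the 4KB loop in pmap_protect() earlier in these mmu_radix hunks show the protect pattern the anychanged bool supports: recompute the entry's protection bits, retry an atomic compare-and-swap until it lands, and record whether any live entry actually changed. A reduced sketch of that retry-and-record shape, using C11 atomics in place of the kernel's atomic_cmpset_long and a hypothetical writable bit:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PG_RW_SKETCH 0x2u              /* hypothetical "writable" PTE bit */

    /*
     * Clear the writable bit in one entry.  Returns true only if the stored
     * value really changed, so the caller can skip TLB work otherwise.
     */
    static bool
    protect_entry_sketch(_Atomic uint32_t *pte)
    {
        uint32_t oldv, newv;

        do {
            oldv = atomic_load(pte);
            newv = oldv & ~PG_RW_SKETCH;
            if (newv == oldv)
                return (false);            /* already read-only */
        } while (!atomic_compare_exchange_weak(pte, &oldv, newv));
        return (true);
    }

    int
    main(void)
    {
        _Atomic uint32_t pte = PG_RW_SKETCH;
        bool anychanged = false;

        anychanged |= protect_entry_sketch(&pte);   /* accumulate over a range */
        return (anychanged ? 0 : 1);
    }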
*/ -static boolean_t +static bool pmap_demote_l2e(pmap_t pmap, pml2_entry_t *l2e, vm_offset_t va) { pml2_entry_t oldpdpe; @@ -5980,7 +5979,7 @@ if (pdpg == NULL) { CTR2(KTR_PMAP, "pmap_demote_pdpe: failure for va %#lx" " in pmap %p", va, pmap); - return (FALSE); + return (false); } pdpg->pindex = va >> L2_PAGE_SIZE_SHIFT; pdpgpa = VM_PAGE_TO_PHYS(pdpg); @@ -6012,7 +6011,7 @@ counter_u64_add(pmap_l2e_demotions, 1); CTR2(KTR_PMAP, "pmap_demote_pdpe: success for va %#lx" " in pmap %p", va, pmap); - return (TRUE); + return (true); } vm_paddr_t @@ -6171,7 +6170,7 @@ pml3_entry_t *l3e; pt_entry_t *pte; int cache_bits, error; - boolean_t changed; + bool changed; PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED); base = trunc_page(va); @@ -6186,7 +6185,7 @@ return (EINVAL); cache_bits = pmap_cache_bits(mode); - changed = FALSE; + changed = false; /* * Pages that aren't mapped aren't supported. Also break down 2MB pages @@ -6267,7 +6266,7 @@ if ((be64toh(*l2e) & RPTE_ATTR_MASK) != cache_bits) { pmap_pte_attr(l2e, cache_bits, RPTE_ATTR_MASK); - changed = TRUE; + changed = true; } if (tmpva >= VM_MIN_KERNEL_ADDRESS && (*l2e & PG_PS_FRAME) < dmaplimit) { @@ -6297,7 +6296,7 @@ if ((be64toh(*l3e) & RPTE_ATTR_MASK) != cache_bits) { pmap_pte_attr(l3e, cache_bits, RPTE_ATTR_MASK); - changed = TRUE; + changed = true; } if (tmpva >= VM_MIN_KERNEL_ADDRESS && (be64toh(*l3e) & PG_PS_FRAME) < dmaplimit) { @@ -6325,7 +6324,7 @@ if ((be64toh(*pte) & RPTE_ATTR_MASK) != cache_bits) { pmap_pte_attr(pte, cache_bits, RPTE_ATTR_MASK); - changed = TRUE; + changed = true; } if (tmpva >= VM_MIN_KERNEL_ADDRESS && (be64toh(*pte) & PG_FRAME) < dmaplimit) { diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c --- a/sys/powerpc/booke/pmap.c +++ b/sys/powerpc/booke/pmap.c @@ -263,7 +263,7 @@ #endif static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t); -static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t); +static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, bool); static int pte_remove(pmap_t, vm_offset_t, uint8_t); static pte_t *pte_find(pmap_t, vm_offset_t); static void kernel_pte_alloc(vm_offset_t, vm_offset_t); @@ -302,9 +302,9 @@ static vm_page_t mmu_booke_extract_and_hold(pmap_t, vm_offset_t, vm_prot_t); static void mmu_booke_init(void); -static boolean_t mmu_booke_is_modified(vm_page_t); -static boolean_t mmu_booke_is_prefaultable(pmap_t, vm_offset_t); -static boolean_t mmu_booke_is_referenced(vm_page_t); +static bool mmu_booke_is_modified(vm_page_t); +static bool mmu_booke_is_prefaultable(pmap_t, vm_offset_t); +static bool mmu_booke_is_referenced(vm_page_t); static int mmu_booke_ts_referenced(vm_page_t); static vm_offset_t mmu_booke_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); @@ -312,7 +312,7 @@ vm_paddr_t *); static void mmu_booke_object_init_pt(pmap_t, vm_offset_t, vm_object_t, vm_pindex_t, vm_size_t); -static boolean_t mmu_booke_page_exists_quick(pmap_t, vm_page_t); +static bool mmu_booke_page_exists_quick(pmap_t, vm_page_t); static void mmu_booke_page_init(vm_page_t); static int mmu_booke_page_wired_mappings(vm_page_t); static int mmu_booke_pinit(pmap_t); @@ -353,7 +353,7 @@ static int mmu_booke_decode_kernel_ptr(vm_offset_t addr, int *is_user, vm_offset_t *decoded_addr); static void mmu_booke_page_array_startup(long); -static boolean_t mmu_booke_page_is_mapped(vm_page_t m); +static bool mmu_booke_page_is_mapped(vm_page_t m); static bool mmu_booke_ps_enabled(pmap_t pmap); static struct pmap_funcs mmu_booke_methods = { @@ -1221,7 +1221,7 @@ return (0); } -static boolean_t +static 
bool mmu_booke_page_is_mapped(vm_page_t m) { @@ -1783,22 +1783,22 @@ * Return whether or not the specified physical page was modified * in any of physical maps. */ -static boolean_t +static bool mmu_booke_is_modified(vm_page_t m) { pte_t *pte; pv_entry_t pv; - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("mmu_booke_is_modified: page %p is not managed", m)); - rv = FALSE; + rv = false; /* * If the page is not busied then this check is racy. */ if (!pmap_page_is_write_mapped(m)) - return (FALSE); + return (false); rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { @@ -1806,7 +1806,7 @@ if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL && PTE_ISVALID(pte)) { if (PTE_ISMODIFIED(pte)) - rv = TRUE; + rv = true; } PMAP_UNLOCK(pv->pv_pmap); if (rv) @@ -1820,34 +1820,34 @@ * Return whether or not the specified virtual address is eligible * for prefault. */ -static boolean_t +static bool mmu_booke_is_prefaultable(pmap_t pmap, vm_offset_t addr) { - return (FALSE); + return (false); } /* * Return whether or not the specified physical page was referenced * in any physical maps. */ -static boolean_t +static bool mmu_booke_is_referenced(vm_page_t m) { pte_t *pte; pv_entry_t pv; - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("mmu_booke_is_referenced: page %p is not managed", m)); - rv = FALSE; + rv = false; rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { PMAP_LOCK(pv->pv_pmap); if ((pte = pte_find(pv->pv_pmap, pv->pv_va)) != NULL && PTE_ISVALID(pte)) { if (PTE_ISREFERENCED(pte)) - rv = TRUE; + rv = true; } PMAP_UNLOCK(pv->pv_pmap); if (rv) @@ -1984,21 +1984,21 @@ * only necessary that true be returned for a small subset of pmaps for proper * page aging. */ -static boolean_t +static bool mmu_booke_page_exists_quick(pmap_t pmap, vm_page_t m) { pv_entry_t pv; int loops; - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("mmu_booke_page_exists_quick: page %p is not managed", m)); loops = 0; - rv = FALSE; + rv = false; rw_wlock(&pvh_global_lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_link) { if (pv->pv_pmap == pmap) { - rv = TRUE; + rv = true; break; } if (++loops >= 16) diff --git a/sys/powerpc/booke/pmap_32.c b/sys/powerpc/booke/pmap_32.c --- a/sys/powerpc/booke/pmap_32.c +++ b/sys/powerpc/booke/pmap_32.c @@ -130,13 +130,13 @@ static void ptbl_buf_free(struct ptbl_buf *); static void ptbl_free_pmap_ptbl(pmap_t, pte_t *); -static pte_t *ptbl_alloc(pmap_t, unsigned int, boolean_t); +static pte_t *ptbl_alloc(pmap_t, unsigned int, bool); static void ptbl_free(pmap_t, unsigned int); static void ptbl_hold(pmap_t, unsigned int); static int ptbl_unhold(pmap_t, unsigned int); static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t); -static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t); +static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, bool); static int pte_remove(pmap_t, vm_offset_t, uint8_t); static pte_t *pte_find(pmap_t, vm_offset_t); @@ -237,7 +237,7 @@ /* Allocate page table. 
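ptbl_alloc(), whose definition starts right after this comment, and pte_enter() both thread a nosleep flag through the Book-E page-table code; only its type changes here, from boolean_t to bool. The flag conventionally means: if a page-table page cannot be obtained, report failure to the caller rather than sleeping for memory. A rough sketch of that contract, with plain malloc() standing in for the kernel page allocator and every name hypothetical:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdlib.h>

    /*
     * Allocate one hypothetical page-table page.  With nosleep the caller
     * gets NULL back on failure instead of this function waiting/retrying.
     */
    static void *
    ptbl_alloc_sketch(bool nosleep)
    {
        void *p = malloc(4096);

        if (p == NULL && !nosleep) {
            /* a kernel would wait for free pages here, then retry */
            p = malloc(4096);
        }
        return (p);
    }

    /* Enter a mapping; allocation failure is only expected with nosleep. */
    static int
    pte_enter_sketch(bool nosleep)
    {
        void *ptbl = ptbl_alloc_sketch(nosleep);

        if (ptbl == NULL)
            return (12);                   /* ENOMEM */
        free(ptbl);
        return (0);
    }

    int
    main(void)
    {
        return (pte_enter_sketch(true));
    }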
*/ static pte_t * -ptbl_alloc(pmap_t pmap, unsigned int pdir_idx, boolean_t nosleep) +ptbl_alloc(pmap_t pmap, unsigned int pdir_idx, bool nosleep) { vm_page_t mtbl[PTBL_PAGES]; vm_page_t m; @@ -500,7 +500,7 @@ */ static int pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags, - boolean_t nosleep) + bool nosleep) { unsigned int pdir_idx = PDIR_IDX(va); unsigned int ptbl_idx = PTBL_IDX(va); @@ -762,7 +762,7 @@ m = PHYS_TO_VM_PAGE(pa); PMAP_LOCK(pmap); pte_enter(pmap, m, addr, - PTE_SR | PTE_VALID, FALSE); + PTE_SR | PTE_VALID, false); __syncicache((void *)(addr + (va & PAGE_MASK)), sync_sz); pte_remove(pmap, addr, PTBL_UNHOLD); diff --git a/sys/powerpc/booke/pmap_64.c b/sys/powerpc/booke/pmap_64.c --- a/sys/powerpc/booke/pmap_64.c +++ b/sys/powerpc/booke/pmap_64.c @@ -140,7 +140,7 @@ static int ptbl_unhold(pmap_t, vm_offset_t); static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t); -static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t); +static int pte_enter(pmap_t, vm_page_t, vm_offset_t, uint32_t, bool); static int pte_remove(pmap_t, vm_offset_t, uint8_t); static pte_t *pte_find(pmap_t, vm_offset_t); static pte_t *pte_find_next(pmap_t, vm_offset_t *); @@ -442,7 +442,7 @@ */ static int pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags, - boolean_t nosleep) + bool nosleep) { unsigned int ptbl_idx = PTBL_IDX(va); pte_t *ptbl, *pte, pte_tmp; diff --git a/sys/powerpc/include/mmuvar.h b/sys/powerpc/include/mmuvar.h --- a/sys/powerpc/include/mmuvar.h +++ b/sys/powerpc/include/mmuvar.h @@ -64,15 +64,15 @@ typedef vm_page_t (*pmap_extract_and_hold_t)(pmap_t, vm_offset_t, vm_prot_t); typedef void (*pmap_growkernel_t)(vm_offset_t); typedef void (*pmap_init_t)(void); -typedef boolean_t (*pmap_is_modified_t)(vm_page_t); -typedef boolean_t (*pmap_is_prefaultable_t)(pmap_t, vm_offset_t); -typedef boolean_t (*pmap_is_referenced_t)(vm_page_t); +typedef bool (*pmap_is_modified_t)(vm_page_t); +typedef bool (*pmap_is_prefaultable_t)(pmap_t, vm_offset_t); +typedef bool (*pmap_is_referenced_t)(vm_page_t); typedef int (*pmap_ts_referenced_t)(vm_page_t); typedef vm_offset_t (*pmap_map_t)(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); typedef void (*pmap_object_init_pt_t)(pmap_t, vm_offset_t, vm_object_t, vm_pindex_t, vm_size_t); -typedef boolean_t (*pmap_page_exists_quick_t)(pmap_t, vm_page_t); -typedef boolean_t (*pmap_page_is_mapped_t)(vm_page_t); +typedef bool (*pmap_page_exists_quick_t)(pmap_t, vm_page_t); +typedef bool (*pmap_page_is_mapped_t)(vm_page_t); typedef void (*pmap_page_init_t)(vm_page_t); typedef int (*pmap_page_wired_mappings_t)(vm_page_t); typedef void (*pmap_pinit0_t)(pmap_t); diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h --- a/sys/powerpc/include/pmap.h +++ b/sys/powerpc/include/pmap.h @@ -320,12 +320,12 @@ void pmap_deactivate(struct thread *); vm_paddr_t pmap_kextract(vm_offset_t); int pmap_dev_direct_mapped(vm_paddr_t, vm_size_t); -boolean_t pmap_mmu_install(char *name, int prio); +bool pmap_mmu_install(char *name, int prio); void pmap_mmu_init(void); const char *pmap_mmu_name(void); bool pmap_ps_enabled(pmap_t pmap); int pmap_nofault(pmap_t pmap, vm_offset_t va, vm_prot_t flags); -boolean_t pmap_page_is_mapped(vm_page_t m); +bool pmap_page_is_mapped(vm_page_t m); #define pmap_map_delete(pmap, sva, eva) pmap_remove(pmap, sva, eva) void pmap_page_array_startup(long count); diff --git a/sys/powerpc/powerpc/pmap_dispatch.c b/sys/powerpc/powerpc/pmap_dispatch.c --- a/sys/powerpc/powerpc/pmap_dispatch.c +++ 
b/sys/powerpc/powerpc/pmap_dispatch.c @@ -137,12 +137,12 @@ DEFINE_PMAP_IFUNC(void, kremove, (vm_offset_t)); DEFINE_PMAP_IFUNC(void, object_init_pt, (pmap_t, vm_offset_t, vm_object_t, vm_pindex_t, vm_size_t)); -DEFINE_PMAP_IFUNC(boolean_t, is_modified, (vm_page_t)); -DEFINE_PMAP_IFUNC(boolean_t, is_prefaultable, (pmap_t, vm_offset_t)); -DEFINE_PMAP_IFUNC(boolean_t, is_referenced, (vm_page_t)); -DEFINE_PMAP_IFUNC(boolean_t, page_exists_quick, (pmap_t, vm_page_t)); +DEFINE_PMAP_IFUNC(bool, is_modified, (vm_page_t)); +DEFINE_PMAP_IFUNC(bool, is_prefaultable, (pmap_t, vm_offset_t)); +DEFINE_PMAP_IFUNC(bool, is_referenced, (vm_page_t)); +DEFINE_PMAP_IFUNC(bool, page_exists_quick, (pmap_t, vm_page_t)); DEFINE_PMAP_IFUNC(void, page_init, (vm_page_t)); -DEFINE_PMAP_IFUNC(boolean_t, page_is_mapped, (vm_page_t)); +DEFINE_PMAP_IFUNC(bool, page_is_mapped, (vm_page_t)); DEFINE_PMAP_IFUNC(int, page_wired_mappings, (vm_page_t)); DEFINE_PMAP_IFUNC(void, protect, (pmap_t, vm_offset_t, vm_offset_t, vm_prot_t)); DEFINE_PMAP_IFUNC(bool, ps_enabled, (pmap_t)); @@ -198,7 +198,7 @@ */ SET_DECLARE(mmu_set, struct mmu_kobj); -boolean_t +bool pmap_mmu_install(char *name, int prio) { mmu_t *mmupp, mmup; @@ -215,11 +215,11 @@ (prio >= curr_prio || mmu_obj == NULL)) { curr_prio = prio; mmu_obj = mmup; - return (TRUE); + return (true); } } - return (FALSE); + return (false); } /* MMU "pre-bootstrap" init, used to install extra resolvers, etc. */ @@ -238,7 +238,7 @@ int unmapped_buf_allowed; -boolean_t +bool pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode) { @@ -250,8 +250,8 @@ case VM_MEMATTR_WRITE_BACK: case VM_MEMATTR_WRITE_THROUGH: case VM_MEMATTR_PREFETCHABLE: - return (TRUE); + return (true); default: - return (FALSE); + return (false); } } diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c --- a/sys/riscv/riscv/pmap.c +++ b/sys/riscv/riscv/pmap.c @@ -318,7 +318,7 @@ vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp); static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva, pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp); -static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, +static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp); static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex, @@ -1170,8 +1170,7 @@ * physical memory manager after the TLB has been updated. */ static __inline void -pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, - boolean_t set_PG_ZERO) +pmap_add_delayed_free_list(vm_page_t m, struct spglist *free, bool set_PG_ZERO) { if (set_PG_ZERO) @@ -1215,10 +1214,10 @@ /* * Decrements a page table page's reference count, which is used to record the * number of valid page table entries within the page. If the reference count - * drops to zero, then the page table page is unmapped. Returns TRUE if the - * page table page was unmapped and FALSE otherwise. + * drops to zero, then the page table page is unmapped. Returns true if the + * page table page was unmapped and false otherwise. 
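The comment above describes pmap_unwire_ptp() in the riscv pmap, and the same pattern appears in the mmu_radix hunks earlier: drop one reference on a page-table page and, when the count reaches zero, unmap it and queue it on a delayed-free list so the physical page is only released after the TLB shootdown completes. A simplified sketch of that refcount-plus-queue shape, with hypothetical structures in place of vm_page_t and struct spglist:

    #include <stdbool.h>
    #include <stddef.h>

    struct ptpage_sketch {
        int ref_count;                     /* number of valid PTEs in this PT page */
        struct ptpage_sketch *freelink;    /* delayed-free list linkage */
    };

    /* Queue the page; it is freed only after the TLB shootdown is done. */
    static void
    add_delayed_free_sketch(struct ptpage_sketch *m, struct ptpage_sketch **freeq)
    {
        m->freelink = *freeq;
        *freeq = m;
    }

    /*
     * Drop one reference; on the last one the page-table page is "unmapped"
     * and queued, and true tells the caller a higher-level entry went away.
     */
    static bool
    unwire_ptp_sketch(struct ptpage_sketch *m, struct ptpage_sketch **freeq)
    {
        if (--m->ref_count == 0) {
            add_delayed_free_sketch(m, freeq);
            return (true);
        }
        return (false);
    }

    int
    main(void)
    {
        struct ptpage_sketch pt = { 1, NULL };
        struct ptpage_sketch *freeq = NULL;

        return (unwire_ptp_sketch(&pt, &freeq) && freeq == &pt ? 0 : 1);
    }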
*/ -static inline boolean_t +static inline bool pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, struct spglist *free) { KASSERT(m->ref_count > 0, @@ -1227,9 +1226,9 @@ --m->ref_count; if (m->ref_count == 0) { _pmap_unwire_ptp(pmap, va, m, free); - return (TRUE); + return (true); } else { - return (FALSE); + return (false); } } @@ -1280,7 +1279,7 @@ * Put page on a list so that it is released after * *ALL* TLB shootdown is done */ - pmap_add_delayed_free_list(m, free, TRUE); + pmap_add_delayed_free_list(m, free, true); } /* @@ -1994,7 +1993,7 @@ * Conditionally create the PV entry for a 4KB page mapping if the required * memory can be allocated without resorting to reclamation. */ -static boolean_t +static bool pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m, struct rwlock **lockp) { @@ -2008,9 +2007,9 @@ CHANGE_PV_LIST_LOCK_TO_VM_PAGE(lockp, m); TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next); m->md.pv_gen++; - return (TRUE); + return (true); } else - return (FALSE); + return (false); } /* @@ -2227,7 +2226,7 @@ ("pmap_remove_l2: l3 page ref count error")); ml3->ref_count = 1; vm_page_unwire_noq(ml3); - pmap_add_delayed_free_list(ml3, free, FALSE); + pmap_add_delayed_free_list(ml3, free, false); } } return (pmap_unuse_pt(pmap, sva, l1e, free)); @@ -3684,24 +3683,24 @@ * is only necessary that true be returned for a small * subset of pmaps for proper page aging. */ -boolean_t +bool pmap_page_exists_quick(pmap_t pmap, vm_page_t m) { struct md_page *pvh; struct rwlock *lock; pv_entry_t pv; int loops = 0; - boolean_t rv; + bool rv; KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_page_exists_quick: page %p is not managed", m)); - rv = FALSE; + rv = false; rw_rlock(&pvh_global_lock); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { - rv = TRUE; + rv = true; break; } loops++; @@ -3712,7 +3711,7 @@ pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m)); TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) { if (PV_PMAP(pv) == pmap) { - rv = TRUE; + rv = true; break; } loops++; @@ -3843,7 +3842,7 @@ KASSERT(mpte->ref_count == Ln_ENTRIES, ("pmap_remove_pages: pte page ref count error")); mpte->ref_count = 0; - pmap_add_delayed_free_list(mpte, free, FALSE); + pmap_add_delayed_free_list(mpte, free, false); } } else { pmap_resident_count_dec(pmap, 1); @@ -3981,7 +3980,7 @@ } static bool -pmap_page_test_mappings(vm_page_t m, boolean_t accessed, boolean_t modified) +pmap_page_test_mappings(vm_page_t m, bool accessed, bool modified) { struct md_page *pvh; struct rwlock *lock; @@ -3998,7 +3997,7 @@ if (accessed) mask |= PTE_A; - rv = FALSE; + rv = false; rw_rlock(&pvh_global_lock); lock = VM_PAGE_TO_PV_LIST_LOCK(m); rw_rlock(lock); @@ -4059,7 +4058,7 @@ * Return whether or not the specified physical page was modified * in any physical maps. */ -boolean_t +bool pmap_is_modified(vm_page_t m) { @@ -4070,8 +4069,8 @@ * If the page is not busied then this check is racy. */ if (!pmap_page_is_write_mapped(m)) - return (FALSE); - return (pmap_page_test_mappings(m, FALSE, TRUE)); + return (false); + return (pmap_page_test_mappings(m, false, true)); } /* @@ -4080,21 +4079,21 @@ * Return whether or not the specified virtual address is eligible * for prefault. 
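pmap_is_prefaultable(), whose riscv body follows, answers true only when the last-level page-table slot for the address is already allocated but still empty, meaning a speculative mapping could be entered without allocating any page-table pages. A tiny sketch of that test, assuming a hypothetical flat leaf table rather than the real multi-level walk:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define NL3_SKETCH 512                  /* hypothetical leaf-table size */

    struct l3_table_sketch {
        uint64_t pte[NL3_SKETCH];           /* 0 means "no mapping here yet" */
    };

    /*
     * Prefaultable: the leaf table exists, but the slot for this address
     * is empty, so a mapping can be installed cheaply on a prefault.
     */
    static bool
    is_prefaultable_sketch(const struct l3_table_sketch *l3, unsigned int slot)
    {
        if (l3 == NULL)
            return (false);                 /* would first need PT-page allocation */
        return (l3->pte[slot % NL3_SKETCH] == 0);
    }

    int
    main(void)
    {
        static struct l3_table_sketch t;    /* zero-initialized: all slots empty */

        return (is_prefaultable_sketch(&t, 7) ? 0 : 1);
    }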
*/ -boolean_t +bool pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) { pt_entry_t *l3; - boolean_t rv; + bool rv; /* - * Return TRUE if and only if the L3 entry for the specified virtual + * Return true if and only if the L3 entry for the specified virtual * address is allocated but invalid. */ - rv = FALSE; + rv = false; PMAP_LOCK(pmap); l3 = pmap_l3(pmap, addr); if (l3 != NULL && pmap_load(l3) == 0) { - rv = TRUE; + rv = true; } PMAP_UNLOCK(pmap); return (rv); @@ -4106,13 +4105,13 @@ * Return whether or not the specified physical page was referenced * in any physical maps. */ -boolean_t +bool pmap_is_referenced(vm_page_t m) { KASSERT((m->oflags & VPO_UNMANAGED) == 0, ("pmap_is_referenced: page %p is not managed", m)); - return (pmap_page_test_mappings(m, TRUE, FALSE)); + return (pmap_page_test_mappings(m, true, false)); } /* @@ -4802,7 +4801,7 @@ } } -boolean_t +bool pmap_is_valid_memattr(pmap_t pmap __unused, vm_memattr_t mode) { diff --git a/sys/vm/pmap.h b/sys/vm/pmap.h --- a/sys/vm/pmap.h +++ b/sys/vm/pmap.h @@ -85,8 +85,8 @@ * Each machine-dependent implementation is required to provide: * * vm_memattr_t pmap_page_get_memattr(vm_page_t); - * boolean_t pmap_page_is_mapped(vm_page_t); - * boolean_t pmap_page_is_write_mapped(vm_page_t); + * bool pmap_page_is_mapped(vm_page_t); + * bool pmap_page_is_write_mapped(vm_page_t); * void pmap_page_set_memattr(vm_page_t, vm_memattr_t); */ #include @@ -140,15 +140,15 @@ vm_prot_t prot); void pmap_growkernel(vm_offset_t); void pmap_init(void); -boolean_t pmap_is_modified(vm_page_t m); -boolean_t pmap_is_prefaultable(pmap_t pmap, vm_offset_t va); -boolean_t pmap_is_referenced(vm_page_t m); -boolean_t pmap_is_valid_memattr(pmap_t, vm_memattr_t); +bool pmap_is_modified(vm_page_t m); +bool pmap_is_prefaultable(pmap_t pmap, vm_offset_t va); +bool pmap_is_referenced(vm_page_t m); +bool pmap_is_valid_memattr(pmap_t, vm_memattr_t); vm_offset_t pmap_map(vm_offset_t *, vm_paddr_t, vm_paddr_t, int); int pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *pap); void pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_size_t size); -boolean_t pmap_page_exists_quick(pmap_t pmap, vm_page_t m); +bool pmap_page_exists_quick(pmap_t pmap, vm_page_t m); void pmap_page_init(vm_page_t m); int pmap_page_wired_mappings(vm_page_t m); int pmap_pinit(pmap_t);
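The leaf pmaps are not the only thing converted: the i386 pmap_methods function-pointer table (pmap_base.h and its pmap_base.c trampolines), the powerpc mmuvar.h typedefs behind DEFINE_PMAP_IFUNC(), and the machine-independent prototypes in vm/pmap.h above all switch to bool in the same sweep, so the dispatch glue and the implementations keep identical signatures. A compact sketch of why the table, the wrapper, and the backend have to change together; every name here is hypothetical:

    #include <stdbool.h>
    #include <stddef.h>

    /* Hypothetical dispatch table: one slot per per-MMU operation. */
    struct pmap_methods_sketch {
        bool (*pm_page_is_mapped)(const void *page);
    };

    /* One backend implementation; its signature must match the slot. */
    static bool
    backend_page_is_mapped(const void *page)
    {
        return (page != NULL);             /* placeholder logic */
    }

    static const struct pmap_methods_sketch methods_sketch = {
        .pm_page_is_mapped = backend_page_is_mapped,
    };

    /* Machine-independent wrapper, in the style of a pmap_base.c trampoline. */
    static bool
    page_is_mapped_sketch(const void *page)
    {
        return (methods_sketch.pm_page_is_mapped(page));
    }

    int
    main(void)
    {
        int anything;

        return (page_is_mapped_sketch(&anything) ? 0 : 1);
    }

Converting the table types, the typedefs, and the implementations in one commit avoids the incompatible function-pointer assignments that a piecemeal bool/boolean_t mix would otherwise introduce at these boundaries.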