diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -5092,24 +5092,10 @@
  * page management routines.
  ***************************************************/
 
-CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
-CTASSERT(_NPCM == 3);
-CTASSERT(_NPCPV == 168);
-
-static __inline struct pv_chunk *
-pv_to_chunk(pv_entry_t pv)
-{
-
-	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
-}
-
-#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
-
-#define	PC_FREE0	0xfffffffffffffffful
-#define	PC_FREE1	0xfffffffffffffffful
-#define	PC_FREE2	((1ul << (_NPCPV % 64)) - 1)
-
-static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
+static const uint64_t pc_freemask[_NPCM] = {
+	[0 ... _NPCM - 2] = PC_FREEN,
+	[_NPCM - 1] = PC_FREEL
+};
 
 #ifdef PV_STATS
@@ -5321,8 +5307,7 @@
 	PV_STAT(counter_u64_add(pv_entry_spare, freed));
 	PV_STAT(counter_u64_add(pv_entry_count, -freed));
 	TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
-	if (pc->pc_map[0] == PC_FREE0 && pc->pc_map[1] == PC_FREE1 &&
-	    pc->pc_map[2] == PC_FREE2) {
+	if (pc_is_free(pc)) {
 		PV_STAT(counter_u64_add(pv_entry_spare, -_NPCPV));
 		PV_STAT(counter_u64_add(pc_chunk_count, -1));
 		PV_STAT(counter_u64_add(pc_chunk_frees, 1));
@@ -5406,8 +5391,7 @@
 	field = idx / 64;
 	bit = idx % 64;
 	pc->pc_map[field] |= 1ul << bit;
-	if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
-	    pc->pc_map[2] != PC_FREE2) {
+	if (!pc_is_free(pc)) {
 		/* 98% of the time, pc is already at the head of the list. */
 		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
@@ -5532,9 +5516,9 @@
 	dump_add_page(m->phys_addr);
 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 	pc->pc_pmap = pmap;
-	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
-	pc->pc_map[1] = PC_FREE1;
-	pc->pc_map[2] = PC_FREE2;
+	pc->pc_map[0] = PC_FREEN & ~1ul;	/* preallocated bit 0 */
+	pc->pc_map[1] = PC_FREEN;
+	pc->pc_map[2] = PC_FREEL;
 	pvc = &pv_chunks[vm_page_domain(m)];
 	mtx_lock(&pvc->pvc_lock);
 	TAILQ_INSERT_TAIL(&pvc->pvc_list, pc, pc_lru);
@@ -5632,9 +5616,9 @@
 	dump_add_page(m->phys_addr);
 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 	pc->pc_pmap = pmap;
-	pc->pc_map[0] = PC_FREE0;
-	pc->pc_map[1] = PC_FREE1;
-	pc->pc_map[2] = PC_FREE2;
+	pc->pc_map[0] = PC_FREEN;
+	pc->pc_map[1] = PC_FREEN;
+	pc->pc_map[2] = PC_FREEL;
 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 	TAILQ_INSERT_TAIL(&new_tail[vm_page_domain(m)], pc, pc_lru);
 	PV_STAT(counter_u64_add(pv_entry_spare, _NPCPV));
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -291,6 +291,7 @@
 #include <sys/_cpuset.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
+#include <sys/_pv_entry.h>
 #include <sys/_rangeset.h>
 #include <sys/_smr.h>
 
@@ -353,8 +354,6 @@
 /*
  * Pmap stuff
  */
-struct pv_entry;
-struct pv_chunk;
 
 /*
  * Locks
  */
@@ -424,40 +423,6 @@
 int	pmap_pinit_type(pmap_t pmap, enum pmap_type pm_type, int flags);
 int	pmap_emulate_accessed_dirty(pmap_t pmap, vm_offset_t va, int ftype);
-#endif
-
-/*
- * For each vm_page_t, there is a list of all currently valid virtual
- * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
- */
-typedef struct pv_entry {
-	vm_offset_t	pv_va;		/* virtual address for mapping */
-	TAILQ_ENTRY(pv_entry)	pv_next;
-} *pv_entry_t;
-
-/*
- * pv_entries are allocated in chunks per-process.  This avoids the
- * need to track per-pmap assignments.
- */
-#define	_NPCPV	168
-#define	_NPCM	howmany(_NPCPV, 64)
-
-#define	PV_CHUNK_HEADER						\
-	pmap_t			pc_pmap;			\
-	TAILQ_ENTRY(pv_chunk)	pc_list;			\
-	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */	\
-	TAILQ_ENTRY(pv_chunk)	pc_lru;
-
-struct pv_chunk_header {
-	PV_CHUNK_HEADER
-};
-
-struct pv_chunk {
-	PV_CHUNK_HEADER
-	struct pv_entry		pc_pventry[_NPCPV];
-};
-
-#ifdef _KERNEL
 extern caddr_t CADDR1;
 extern pt_entry_t *CMAP1;
diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c
--- a/sys/arm/arm/pmap-v6.c
+++ b/sys/arm/arm/pmap-v6.c
@@ -2739,27 +2739,9 @@
  *
  *************************************/
 
-CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
-CTASSERT(_NPCM == 11);
-CTASSERT(_NPCPV == 336);
-
-static __inline struct pv_chunk *
-pv_to_chunk(pv_entry_t pv)
-{
-
-	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
-}
-
-#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
-
-#define	PC_FREE0_9	0xfffffffful	/* Free values for index 0 through 9 */
-#define	PC_FREE10	((1ul << (_NPCPV % 32)) - 1)	/* Free values for index 10 */
-
 static const uint32_t pc_freemask[_NPCM] = {
-	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
-	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
-	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
-	PC_FREE0_9, PC_FREE10
+	[0 ... _NPCM - 2] = PC_FREEN,
+	[_NPCM - 1] = PC_FREEL
 };
 
 SYSCTL_INT(_vm_pmap, OID_AUTO, pv_entry_count, CTLFLAG_RD, &pv_entry_count, 0,
diff --git a/sys/arm/include/pmap-v6.h b/sys/arm/include/pmap-v6.h
--- a/sys/arm/include/pmap-v6.h
+++ b/sys/arm/include/pmap-v6.h
@@ -52,6 +52,7 @@
 #include <sys/_cpuset.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
+#include <sys/_pv_entry.h>
 
 typedef uint32_t	pt1_entry_t;	/* L1 table entry */
 typedef uint32_t	pt2_entry_t;	/* L2 table entry */
@@ -93,9 +94,6 @@
 /*
  * Pmap stuff
  */
-struct pv_entry;
-struct pv_chunk;
-
 struct md_page {
 	TAILQ_HEAD(,pv_entry) pv_list;
 	uint16_t		pt2_wirecount[4];
@@ -128,33 +126,7 @@
 #define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
 #define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
 #define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
-#endif
 
-/*
- * For each vm_page_t, there is a list of all currently valid virtual
- * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
- */
-typedef struct pv_entry {
-	vm_offset_t	pv_va;		/* virtual address for mapping */
-	TAILQ_ENTRY(pv_entry)	pv_next;
-} *pv_entry_t;
-
-/*
- * pv_entries are allocated in chunks per-process.  This avoids the
- * need to track per-pmap assignments.
- */
-#define	_NPCPV	336
-#define	_NPCM	howmany(_NPCPV, 32)
-
-struct pv_chunk {
-	pmap_t			pc_pmap;
-	TAILQ_ENTRY(pv_chunk)	pc_list;
-	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
-	TAILQ_ENTRY(pv_chunk)	pc_lru;
-	struct pv_entry		pc_pventry[_NPCPV];
-};
-
-#ifdef _KERNEL
 extern ttb_entry_t pmap_kern_ttb;	/* TTB for kernel pmap */
 
 #define	pmap_page_get_memattr(m)	((m)->md.pat_mode)
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -2501,43 +2501,11 @@
  * page management routines.
  ***************************************************/
 
-CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
-
-static __inline struct pv_chunk *
-pv_to_chunk(pv_entry_t pv)
-{
-
-	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
-}
-
-#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
-
-#define	PC_FREEN	0xfffffffffffffffful
-#define	PC_FREEL	((1ul << (_NPCPV % 64)) - 1)
-
 static const uint64_t pc_freemask[_NPCM] = { [0 ... _NPCM - 2] = PC_FREEN,
     [_NPCM - 1] = PC_FREEL };
 
-static __inline bool
-pc_is_full(struct pv_chunk *pc)
-{
-	for (u_int i = 0; i < _NPCM; i++)
-		if (pc->pc_map[i] != 0)
-			return (false);
-	return (true);
-}
-
-static __inline bool
-pc_is_free(struct pv_chunk *pc)
-{
-	for (u_int i = 0; i < _NPCM - 1; i++)
-		if (pc->pc_map[i] != PC_FREEN)
-			return (false);
-	return (pc->pc_map[_NPCM - 1] == PC_FREEL);
-}
-
 #ifdef PV_STATS
 static int pc_chunk_count, pc_chunk_allocs, pc_chunk_frees, pc_chunk_tryfail;
diff --git a/sys/arm64/include/pmap.h b/sys/arm64/include/pmap.h
--- a/sys/arm64/include/pmap.h
+++ b/sys/arm64/include/pmap.h
@@ -43,6 +43,7 @@
 #include <sys/_cpuset.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
+#include <sys/_pv_entry.h>
 
 #include <vm/_vm_radix.h>
 
@@ -97,42 +98,6 @@
 };
 typedef struct pmap *pmap_t;
 
-typedef struct pv_entry {
-	vm_offset_t	pv_va;		/* virtual address for mapping */
-	TAILQ_ENTRY(pv_entry)	pv_next;
-} *pv_entry_t;
-
-/*
- * pv_entries are allocated in chunks per-process.  This avoids the
- * need to track per-pmap assignments.
- */
-#if PAGE_SIZE == PAGE_SIZE_4K
-#define	_NPCPV	168
-#define	_NPAD	0
-#elif PAGE_SIZE == PAGE_SIZE_16K
-#define	_NPCPV	677
-#define	_NPAD	1
-#else
-#error Unsupported page size
-#endif
-#define	_NPCM	howmany(_NPCPV, 64)
-
-#define	PV_CHUNK_HEADER						\
-	pmap_t			pc_pmap;			\
-	TAILQ_ENTRY(pv_chunk)	pc_list;			\
-	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */	\
-	TAILQ_ENTRY(pv_chunk)	pc_lru;
-
-struct pv_chunk_header {
-	PV_CHUNK_HEADER
-};
-
-struct pv_chunk {
-	PV_CHUNK_HEADER
-	struct pv_entry		pc_pventry[_NPCPV];
-	uint64_t		pc_pad[_NPAD];
-};
-
 struct thread;
 
 #ifdef _KERNEL
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2287,27 +2287,9 @@
  * page management routines.
  ***************************************************/
 
-CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
-CTASSERT(_NPCM == 11);
-CTASSERT(_NPCPV == 336);
-
-static __inline struct pv_chunk *
-pv_to_chunk(pv_entry_t pv)
-{
-
-	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
-}
-
-#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
-
-#define	PC_FREE0_9	0xfffffffful	/* Free values for index 0 through 9 */
-#define	PC_FREE10	((1ul << (_NPCPV % 32)) - 1)	/* Free values for index 10 */
-
 static const uint32_t pc_freemask[_NPCM] = {
-	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
-	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
-	PC_FREE0_9, PC_FREE0_9, PC_FREE0_9,
-	PC_FREE0_9, PC_FREE10
+	[0 ... _NPCM - 2] = PC_FREEN,
+	[_NPCM - 1] = PC_FREEL
 };
 
 #ifdef PV_STATS
diff --git a/sys/i386/include/pmap.h b/sys/i386/include/pmap.h
--- a/sys/i386/include/pmap.h
+++ b/sys/i386/include/pmap.h
@@ -133,6 +133,7 @@
 #include <sys/_cpuset.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
+#include <sys/_pv_entry.h>
 
 #include <vm/_vm_radix.h>
 
@@ -157,9 +158,6 @@
 /*
  * Pmap stuff
  */
-struct pv_entry;
-struct pv_chunk;
-
 struct md_page {
 	TAILQ_HEAD(,pv_entry) pv_list;
 	int			pat_mode;
@@ -194,33 +192,6 @@
 #define	PMAP_MTX(pmap)		(&(pmap)->pm_mtx)
 #define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
 #define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
-#endif
-
-/*
- * For each vm_page_t, there is a list of all currently valid virtual
- * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
- */
-typedef struct pv_entry {
-	vm_offset_t	pv_va;		/* virtual address for mapping */
-	TAILQ_ENTRY(pv_entry)	pv_next;
-} *pv_entry_t;
-
-/*
- * pv_entries are allocated in chunks per-process.  This avoids the
- * need to track per-pmap assignments.
- */
-#define	_NPCPV	336
-#define	_NPCM	howmany(_NPCPV, 32)
-
-struct pv_chunk {
-	pmap_t			pc_pmap;
-	TAILQ_ENTRY(pv_chunk)	pc_list;
-	uint32_t		pc_map[_NPCM];	/* bitmap; 1 = free */
-	TAILQ_ENTRY(pv_chunk)	pc_lru;
-	struct pv_entry		pc_pventry[_NPCPV];
-};
-
-#ifdef _KERNEL
 extern char *ptvmmap;		/* poor name! */
 extern vm_offset_t virtual_avail;
diff --git a/sys/riscv/include/pmap.h b/sys/riscv/include/pmap.h
--- a/sys/riscv/include/pmap.h
+++ b/sys/riscv/include/pmap.h
@@ -44,6 +44,7 @@
 #include <sys/_cpuset.h>
 #include <sys/_lock.h>
 #include <sys/_mutex.h>
+#include <sys/_pv_entry.h>
 
 #include <vm/_vm_radix.h>
 
@@ -88,26 +89,6 @@
 	struct vm_radix		pm_root;
 };
 
-typedef struct pv_entry {
-	vm_offset_t	pv_va;		/* virtual address for mapping */
-	TAILQ_ENTRY(pv_entry)	pv_next;
-} *pv_entry_t;
-
-/*
- * pv_entries are allocated in chunks per-process.  This avoids the
- * need to track per-pmap assignments.
- */
-#define	_NPCPV	168
-#define	_NPCM	howmany(_NPCPV, 64)
-
-struct pv_chunk {
-	struct pmap *		pc_pmap;
-	TAILQ_ENTRY(pv_chunk)	pc_list;
-	uint64_t		pc_map[_NPCM];	/* bitmap; 1 = free */
-	TAILQ_ENTRY(pv_chunk)	pc_lru;
-	struct pv_entry		pc_pventry[_NPCPV];
-};
-
 typedef struct pmap *pmap_t;
 
 #ifdef _KERNEL
diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c
--- a/sys/riscv/riscv/pmap.c
+++ b/sys/riscv/riscv/pmap.c
@@ -1710,24 +1710,10 @@
  * page management routines.
  ***************************************************/
 
-CTASSERT(sizeof(struct pv_chunk) == PAGE_SIZE);
-CTASSERT(_NPCM == 3);
-CTASSERT(_NPCPV == 168);
-
-static __inline struct pv_chunk *
-pv_to_chunk(pv_entry_t pv)
-{
-
-	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
-}
-
-#define PV_PMAP(pv) (pv_to_chunk(pv)->pc_pmap)
-
-#define	PC_FREE0	0xfffffffffffffffful
-#define	PC_FREE1	0xfffffffffffffffful
-#define	PC_FREE2	((1ul << (_NPCPV % 64)) - 1)
-
-static const uint64_t pc_freemask[_NPCM] = { PC_FREE0, PC_FREE1, PC_FREE2 };
+static const uint64_t pc_freemask[_NPCM] = {
+	[0 ... _NPCM - 2] = PC_FREEN,
+	[_NPCM - 1] = PC_FREEL
+};
 
 #if 0
 #ifdef PV_STATS
@@ -1793,8 +1779,7 @@
 	field = idx / 64;
 	bit = idx % 64;
 	pc->pc_map[field] |= 1ul << bit;
-	if (pc->pc_map[0] != PC_FREE0 || pc->pc_map[1] != PC_FREE1 ||
-	    pc->pc_map[2] != PC_FREE2) {
+	if (!pc_is_free(pc)) {
 		/* 98% of the time, pc is already at the head of the list. */
 		if (__predict_false(pc != TAILQ_FIRST(&pmap->pm_pvchunk))) {
 			TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
@@ -1856,8 +1841,7 @@
 	pv = &pc->pc_pventry[field * 64 + bit];
 	pc->pc_map[field] &= ~(1ul << bit);
 	/* If this was the last item, move it to tail */
-	if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 &&
-	    pc->pc_map[2] == 0) {
+	if (pc_is_full(pc)) {
 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
@@ -1883,9 +1867,9 @@
 	dump_add_page(m->phys_addr);
 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 	pc->pc_pmap = pmap;
-	pc->pc_map[0] = PC_FREE0 & ~1ul;	/* preallocated bit 0 */
-	pc->pc_map[1] = PC_FREE1;
-	pc->pc_map[2] = PC_FREE2;
+	pc->pc_map[0] = PC_FREEN & ~1ul;	/* preallocated bit 0 */
+	pc->pc_map[1] = PC_FREEN;
+	pc->pc_map[2] = PC_FREEL;
 	mtx_lock(&pv_chunks_mutex);
 	TAILQ_INSERT_TAIL(&pv_chunks, pc, pc_lru);
 	mtx_unlock(&pv_chunks_mutex);
@@ -1947,9 +1931,9 @@
 #endif
 	pc = (void *)PHYS_TO_DMAP(m->phys_addr);
 	pc->pc_pmap = pmap;
-	pc->pc_map[0] = PC_FREE0;
-	pc->pc_map[1] = PC_FREE1;
-	pc->pc_map[2] = PC_FREE2;
+	pc->pc_map[0] = PC_FREEN;
+	pc->pc_map[1] = PC_FREEN;
+	pc->pc_map[2] = PC_FREEL;
 	TAILQ_INSERT_HEAD(&pmap->pm_pvchunk, pc, pc_list);
 	TAILQ_INSERT_TAIL(&new_tail, pc, pc_lru);
@@ -2065,8 +2049,7 @@
 	va_last = va + L2_SIZE - PAGE_SIZE;
 	for (;;) {
 		pc = TAILQ_FIRST(&pmap->pm_pvchunk);
-		KASSERT(pc->pc_map[0] != 0 || pc->pc_map[1] != 0 ||
-		    pc->pc_map[2] != 0, ("pmap_pv_demote_l2: missing spare"));
+		KASSERT(!pc_is_full(pc), ("pmap_pv_demote_l2: missing spare"));
 		for (field = 0; field < _NPCM; field++) {
 			while (pc->pc_map[field] != 0) {
 				bit = ffsl(pc->pc_map[field]) - 1;
@@ -2087,7 +2070,7 @@
 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
 	}
 out:
-	if (pc->pc_map[0] == 0 && pc->pc_map[1] == 0 && pc->pc_map[2] == 0) {
+	if (pc_is_full(pc)) {
 		TAILQ_REMOVE(&pmap->pm_pvchunk, pc, pc_list);
 		TAILQ_INSERT_TAIL(&pmap->pm_pvchunk, pc, pc_list);
 	}
diff --git a/sys/sys/_pv_entry.h b/sys/sys/_pv_entry.h
new file mode 100644
--- /dev/null
+++ b/sys/sys/_pv_entry.h
@@ -0,0 +1,134 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 2003 Peter Wemm.
+ * Copyright (c) 1991 Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * the Systems Programming Group of the University of Utah Computer
+ * Science Department and William Jolitz of UUNET Technologies Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef __SYS__PV_ENTRY_H__
+#define	__SYS__PV_ENTRY_H__
+
+struct pmap;
+
+/*
+ * For each vm_page_t, there is a list of all currently valid virtual
+ * mappings of that page.  An entry is a pv_entry_t, the list is pv_list.
+ */
+typedef struct pv_entry {
+	vm_offset_t	pv_va;		/* virtual address for mapping */
+	TAILQ_ENTRY(pv_entry)	pv_next;
+} *pv_entry_t;
+
+/*
+ * pv_entries are allocated in chunks per-process.  This avoids the
+ * need to track per-pmap assignments.  Each chunk is the size of a
+ * single page.
+ *
+ * Chunks store a bitmap in pc_map[] to track which entries in the
+ * chunk are free (1) or used (0).  PC_FREEL is the value of the last
+ * entry in the pc_map[] array when a chunk is completely free.  PC_FREEN
+ * is the value of all the other entries in the pc_map[] array when a
+ * chunk is completely free.
+ */
+#if PAGE_SIZE == 4 * 1024
+#ifdef __LP64__
+#define	_NPCPV	168
+#define	_NPAD	0
+#else
+#define	_NPCPV	336
+#define	_NPAD	0
+#endif
+#elif PAGE_SIZE == 16 * 1024
+#ifdef __LP64__
+#define	_NPCPV	677
+#define	_NPAD	1
+#endif
+#endif
+
+#ifndef _NPCPV
+#error Unsupported page size
+#endif
+
+#define	_NPCM	howmany(_NPCPV, __LONG_WIDTH__)
+#define	PC_FREEN	~0ul
+#define	PC_FREEL	((1ul << (_NPCPV % __LONG_WIDTH__)) - 1)
+
+#define	PV_CHUNK_HEADER						\
+	struct pmap		*pc_pmap;			\
+	TAILQ_ENTRY(pv_chunk)	pc_list;			\
+	unsigned long		pc_map[_NPCM];	/* bitmap; 1 = free */	\
+	TAILQ_ENTRY(pv_chunk)	pc_lru;
+
+struct pv_chunk_header {
+	PV_CHUNK_HEADER
+};
+
+struct pv_chunk {
+	PV_CHUNK_HEADER
+	struct pv_entry		pc_pventry[_NPCPV];
+	unsigned long		pc_pad[_NPAD];
+};
+
+_Static_assert(sizeof(struct pv_chunk) == PAGE_SIZE,
+    "PV entry chunk size mismatch");
+
+#ifdef _KERNEL
+static __inline bool
+pc_is_full(struct pv_chunk *pc)
+{
+	for (u_int i = 0; i < _NPCM; i++) {
+		if (pc->pc_map[i] != 0)
+			return (false);
+	}
+	return (true);
+}
+
+static __inline bool
+pc_is_free(struct pv_chunk *pc)
+{
+	for (u_int i = 0; i < _NPCM - 1; i++) {
+		if (pc->pc_map[i] != PC_FREEN)
+			return (false);
+	}
+	return (pc->pc_map[_NPCM - 1] == PC_FREEL);
+}
+
+static __inline struct pv_chunk *
+pv_to_chunk(pv_entry_t pv)
+{
+	return ((struct pv_chunk *)((uintptr_t)pv & ~(uintptr_t)PAGE_MASK));
+}
+
+#define	PV_PMAP(pv)	(pv_to_chunk(pv)->pc_pmap)
+#endif
+
+#endif /* !__SYS__PV_ENTRY_H__ */
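
Reviewer note: the standalone sketch below (not part of the patch) illustrates
the chunk accounting that <sys/_pv_entry.h> now centralizes, instantiated for
the LP64/4 KB-page case.  A chunk fills exactly one page: 168 pv_entries at
24 bytes each (4032 bytes) plus the 64-byte chunk header is 4096, which is
what the _Static_assert enforces; pc_map needs howmany(168, 64) = 3 words,
and the all-free value of the last word, PC_FREEL, keeps only the low
168 % 64 = 40 bits set.  The pc_alloc() helper and main() harness are
hypothetical test scaffolding added here for illustration only, and the
"[a ... b] =" range designator used by pc_freemask is a GNU C extension
(GCC/Clang).

/* pv_chunk bitmap demo: userspace, LP64 assumed; build with cc -Wall */
#include <stdbool.h>
#include <stdio.h>

#define	_NPCPV	168			/* pv_entries per 4 KB chunk (LP64) */
#define	_NPCM	((_NPCPV + 63) / 64)	/* howmany(_NPCPV, 64) == 3 */
#define	PC_FREEN	~0ul		/* word with all 64 entries free */
#define	PC_FREEL	((1ul << (_NPCPV % 64)) - 1)	/* low 40 bits */

struct pv_chunk {			/* only the bitmap matters here */
	unsigned long pc_map[_NPCM];	/* bitmap; 1 = free */
};

static const unsigned long pc_freemask[_NPCM] = {
	[0 ... _NPCM - 2] = PC_FREEN,	/* GNU range designator */
	[_NPCM - 1] = PC_FREEL
};

static bool
pc_is_full(const struct pv_chunk *pc)	/* no free entries remain */
{
	for (unsigned i = 0; i < _NPCM; i++) {
		if (pc->pc_map[i] != 0)
			return (false);
	}
	return (true);
}

static bool
pc_is_free(const struct pv_chunk *pc)	/* every entry is free */
{
	for (unsigned i = 0; i < _NPCM - 1; i++) {
		if (pc->pc_map[i] != PC_FREEN)
			return (false);
	}
	return (pc->pc_map[_NPCM - 1] == PC_FREEL);
}

/* Hypothetical helper: claim the lowest free entry, as the pmaps do. */
static int
pc_alloc(struct pv_chunk *pc)
{
	for (unsigned field = 0; field < _NPCM; field++) {
		if (pc->pc_map[field] != 0) {
			int bit = __builtin_ctzl(pc->pc_map[field]);

			pc->pc_map[field] &= ~(1ul << bit);	/* mark used */
			return (field * 64 + bit);
		}
	}
	return (-1);			/* chunk is full */
}

int
main(void)
{
	struct pv_chunk pc;
	int n = 0;

	/* A freshly allocated chunk starts as a copy of pc_freemask. */
	for (unsigned i = 0; i < _NPCM; i++)
		pc.pc_map[i] = pc_freemask[i];
	printf("new chunk:   is_free=%d is_full=%d\n",
	    pc_is_free(&pc), pc_is_full(&pc));

	while (pc_alloc(&pc) != -1)	/* drain all 168 entries */
		n++;
	printf("drained %d: is_free=%d is_full=%d\n",
	    n, pc_is_free(&pc), pc_is_full(&pc));
	return (0);
}

Running it prints "new chunk: is_free=1 is_full=0" followed by
"drained 168: is_free=0 is_full=1", matching the invariants the kernel's
pc_is_free()/pc_is_full() tests rely on above.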