D32006: Convert consumers to vm_page_alloc_noobj_contig()
D32006.id96189.diff (11 KB)
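The same mechanical conversion recurs at every call site in this diff: the NULL object/pindex pair and the VM_ALLOC_NOOBJ (and, where present, VM_ALLOC_NOBUSY) flags are dropped in favor of the dedicated noobj entry point, and callers that previously checked PG_ZERO and zeroed pages by hand now rely on VM_ALLOC_ZERO returning already-zeroed pages. A minimal before/after sketch follows; it is illustrative only, not part of the patch, and the flag combination and physical bounds are placeholders.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/pmap.h>

/*
 * Before: unmanaged runs came from vm_page_alloc_contig() with a NULL
 * object and VM_ALLOC_NOOBJ, and VM_ALLOC_ZERO was only a hint, so the
 * caller had to check PG_ZERO and zero the pages itself.
 */
static vm_page_t
alloc_run_old(u_long npages)
{
	vm_page_t m;
	u_long i;

	m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
	    VM_ALLOC_NOBUSY | VM_ALLOC_WIRED | VM_ALLOC_ZERO, npages,
	    0, ~(vm_paddr_t)0, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
	if (m != NULL) {
		for (i = 0; i < npages; i++) {
			if (((m + i)->flags & PG_ZERO) == 0)
				pmap_zero_page(m + i);
		}
	}
	return (m);
}

/*
 * After: the noobj entry point takes no object or pindex, NOOBJ/NOBUSY
 * are gone, and VM_ALLOC_ZERO guarantees the returned pages are zeroed.
 */
static vm_page_t
alloc_run_new(u_long npages)
{
	return (vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO,
	    npages, 0, ~(vm_paddr_t)0, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT));
}

The removal of VM_ALLOC_NOBUSY at these call sites reflects that the noobj allocator hands pages back unbusied, so the flag has nothing left to suppress.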
Index: sys/amd64/amd64/mp_machdep.c
===================================================================
--- sys/amd64/amd64/mp_machdep.c
+++ sys/amd64/amd64/mp_machdep.c
@@ -334,8 +334,7 @@
mtx_init(&ap_boot_mtx, "ap boot", NULL, MTX_SPIN);
MPASS(bootMP_size <= PAGE_SIZE);
- m_boottramp = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL |
- VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ, 1, 0,
+ m_boottramp = vm_page_alloc_noobj_contig(0, 1, 0,
(1ULL << 20), /* Trampoline should be below 1M for real mode */
PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
boot_address = VM_PAGE_TO_PHYS(m_boottramp);
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -2110,14 +2110,8 @@
vm_page_t
pmap_page_alloc_below_4g(bool zeroed)
{
- vm_page_t m;
-
- m = vm_page_alloc_contig(NULL, 0, (zeroed ? VM_ALLOC_ZERO : 0) |
- VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY | VM_ALLOC_NOOBJ,
- 1, 0, (1ULL << 32), PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
- if (m != NULL && zeroed && (m->flags & PG_ZERO) == 0)
- pmap_zero_page(m);
- return (m);
+ return (vm_page_alloc_noobj_contig((zeroed ? VM_ALLOC_ZERO : 0),
+ 1, 0, (1ULL << 32), PAGE_SIZE, 0, VM_MEMATTR_DEFAULT));
}
extern const char la57_trampoline[], la57_trampoline_gdt_desc[],
@@ -11393,13 +11387,8 @@
static vm_page_t
pmap_kasan_enter_alloc_2m(void)
{
- vm_page_t m;
-
- m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
- VM_ALLOC_WIRED, NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT);
- if (m != NULL)
- memset((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 0, NBPDR);
- return (m);
+ return (vm_page_alloc_noobj_contig(VM_ALLOC_WIRED | VM_ALLOC_ZERO,
+ NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT));
}
/*
@@ -11461,13 +11450,8 @@
static vm_page_t
pmap_kmsan_enter_alloc_2m(void)
{
- vm_page_t m;
-
- m = vm_page_alloc_contig(NULL, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ |
- VM_ALLOC_WIRED, NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT);
- if (m != NULL)
- memset((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), 0, NBPDR);
- return (m);
+ return (vm_page_alloc_noobj_contig(VM_ALLOC_ZERO | VM_ALLOC_WIRED,
+ NPTEPG, 0, ~0ul, NBPDR, 0, VM_MEMATTR_DEFAULT));
}
/*
Index: sys/arm/nvidia/drm2/tegra_bo.c
===================================================================
--- sys/arm/nvidia/drm2/tegra_bo.c
+++ sys/arm/nvidia/drm2/tegra_bo.c
@@ -103,11 +103,10 @@
low = 0;
high = -1UL;
boundary = 0;
- pflags = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_NOBUSY |
- VM_ALLOC_WIRED | VM_ALLOC_ZERO;
+ pflags = VM_ALLOC_WIRED | VM_ALLOC_ZERO;
tries = 0;
retry:
- m = vm_page_alloc_contig(NULL, 0, pflags, npages, low, high, alignment,
+ m = vm_page_alloc_noobj_contig(pflags, npages, low, high, alignment,
boundary, memattr);
if (m == NULL) {
if (tries < 3) {
@@ -121,8 +120,6 @@
}
for (i = 0; i < npages; i++, m++) {
- if ((m->flags & PG_ZERO) == 0)
- pmap_zero_page(m);
m->valid = VM_PAGE_BITS_ALL;
(*ret_page)[i] = m;
}
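Several of the converted call sites, tegra_bo above and the TTM and linuxkpi allocators below, wrap the contiguous allocation in a reclaim-and-retry loop; the request flags handed to vm_page_reclaim_contig() likewise no longer carry VM_ALLOC_NOOBJ. A minimal sketch of that idiom follows; it is illustrative only, not part of the patch, and the attempt limit, alignment, and bounds are placeholders.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>

/*
 * Allocate an unmanaged, physically contiguous run, attempting to reclaim
 * a suitable run and retrying a bounded number of times when the free
 * lists cannot satisfy the request.
 */
static vm_page_t
alloc_contig_retry(int req, u_long npages, vm_paddr_t low, vm_paddr_t high)
{
	vm_page_t m;
	int tries;

	for (tries = 0; tries < 3; tries++) {
		m = vm_page_alloc_noobj_contig(req, npages, low, high,
		    PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
		if (m != NULL)
			return (m);
		/* Try to carve out a suitable run, then retry. */
		if (!vm_page_reclaim_contig(req, npages, low, high,
		    PAGE_SIZE, 0))
			break;
	}
	return (NULL);
}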
Index: sys/compat/linuxkpi/common/src/linux_page.c
===================================================================
--- sys/compat/linuxkpi/common/src/linux_page.c
+++ sys/compat/linuxkpi/common/src/linux_page.c
@@ -92,7 +92,7 @@
if (PMAP_HAS_DMAP) {
unsigned long npages = 1UL << order;
- int req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_NORMAL;
+ int req = VM_ALLOC_WIRED;
if ((flags & M_ZERO) != 0)
req |= VM_ALLOC_ZERO;
@@ -104,9 +104,8 @@
vm_paddr_t pmax = (flags & GFP_DMA32) ?
BUS_SPACE_MAXADDR_32BIT : BUS_SPACE_MAXADDR;
retry:
- page = vm_page_alloc_contig(NULL, 0, req,
- npages, 0, pmax, PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
-
+ page = vm_page_alloc_noobj_contig(req, npages, 0, pmax,
+ PAGE_SIZE, 0, VM_MEMATTR_DEFAULT);
if (page == NULL) {
if (flags & M_WAITOK) {
if (!vm_page_reclaim_contig(req,
@@ -119,16 +118,6 @@
return (NULL);
}
}
- if (flags & M_ZERO) {
- unsigned long x;
-
- for (x = 0; x != npages; x++) {
- vm_page_t pgo = page + x;
-
- if ((pgo->flags & PG_ZERO) == 0)
- pmap_zero_page(pgo);
- }
- }
} else {
vm_offset_t vaddr;
Index: sys/dev/drm2/ttm/ttm_bo.c
===================================================================
--- sys/dev/drm2/ttm/ttm_bo.c
+++ sys/dev/drm2/ttm/ttm_bo.c
@@ -1488,21 +1488,20 @@
struct ttm_bo_global_ref *bo_ref =
container_of(ref, struct ttm_bo_global_ref, ref);
struct ttm_bo_global *glob = ref->object;
- int req, ret;
+ int ret;
int tries;
sx_init(&glob->device_list_mutex, "ttmdlm");
mtx_init(&glob->lru_lock, "ttmlru", NULL, MTX_DEF);
glob->mem_glob = bo_ref->mem_glob;
- req = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ;
tries = 0;
retry:
- glob->dummy_read_page = vm_page_alloc_contig(NULL, 0, req,
- 1, 0, VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
+ glob->dummy_read_page = vm_page_alloc_noobj_contig(0, 1, 0,
+ VM_MAX_ADDRESS, PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);
if (unlikely(glob->dummy_read_page == NULL)) {
- if (tries < 1 && vm_page_reclaim_contig(req, 1,
- 0, VM_MAX_ADDRESS, PAGE_SIZE, 0)) {
+ if (tries < 1 && vm_page_reclaim_contig(0, 1, 0,
+ VM_MAX_ADDRESS, PAGE_SIZE, 0)) {
tries++;
goto retry;
}
Index: sys/dev/drm2/ttm/ttm_page_alloc.c
===================================================================
--- sys/dev/drm2/ttm/ttm_page_alloc.c
+++ sys/dev/drm2/ttm/ttm_page_alloc.c
@@ -163,8 +163,8 @@
int tries;
for (tries = 0; ; tries++) {
- p = vm_page_alloc_contig(NULL, 0, req, 1, 0, 0xffffffff,
- PAGE_SIZE, 0, memattr);
+ p = vm_page_alloc_noobj_contig(req, 1, 0, 0xffffffff, PAGE_SIZE,
+ 0, memattr);
if (p != NULL || tries > 2)
return (p);
if (!vm_page_reclaim_contig(req, 1, 0, 0xffffffff,
@@ -191,7 +191,7 @@
int req;
memattr = ttm_caching_state_to_vm(cstate);
- req = VM_ALLOC_NORMAL | VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
+ req = VM_ALLOC_WIRED;
if ((flags & TTM_PAGE_FLAG_ZERO_ALLOC) != 0)
req |= VM_ALLOC_ZERO;
Index: sys/kern/uipc_ktls.c
===================================================================
--- sys/kern/uipc_ktls.c
+++ sys/kern/uipc_ktls.c
@@ -333,16 +333,15 @@
ktls_buffer_import(void *arg, void **store, int count, int domain, int flags)
{
vm_page_t m;
- int i;
+ int i, req;
KASSERT((ktls_maxlen & PAGE_MASK) == 0,
("%s: ktls max length %d is not page size-aligned",
__func__, ktls_maxlen));
+ req = VM_ALLOC_WIRED | VM_ALLOC_NODUMP | malloc2vm_flags(flags);
for (i = 0; i < count; i++) {
- m = vm_page_alloc_contig_domain(NULL, 0, domain,
- VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
- VM_ALLOC_NODUMP | malloc2vm_flags(flags),
+ m = vm_page_alloc_noobj_contig_domain(domain, req,
atop(ktls_maxlen), 0, ~0ul, PAGE_SIZE, 0,
VM_MEMATTR_DEFAULT);
if (m == NULL)
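uipc_ktls.c here, and the powerpc SLB and uma_core.c boot allocators further down, use the NUMA-aware variant, vm_page_alloc_noobj_contig_domain(), which takes the same arguments as vm_page_alloc_noobj_contig() with the domain prepended. A brief sketch of a per-domain import routine in that style follows; it is not part of the patch, and the function name, buffer length, and reliance on a direct map (PHYS_TO_DMAP) are assumptions.

#include <sys/param.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>

/*
 * Fill "store" with up to "count" wired, physically contiguous buffers
 * allocated from a specific NUMA domain and mapped through the direct
 * map.  Returns the number of buffers actually imported.
 */
static int
buffer_import_domain(void **store, int count, int domain, size_t buflen)
{
	vm_page_t m;
	int i, req;

	req = VM_ALLOC_WIRED | VM_ALLOC_NODUMP | VM_ALLOC_NOWAIT;
	for (i = 0; i < count; i++) {
		m = vm_page_alloc_noobj_contig_domain(domain, req,
		    atop(buflen), 0, ~(vm_paddr_t)0, PAGE_SIZE, 0,
		    VM_MEMATTR_DEFAULT);
		if (m == NULL)
			break;
		store[i] = (void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
	}
	return (i);
}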
Index: sys/powerpc/aim/mmu_radix.c
===================================================================
--- sys/powerpc/aim/mmu_radix.c
+++ sys/powerpc/aim/mmu_radix.c
@@ -3576,11 +3576,13 @@
radix_pgd_import(void *arg __unused, void **store, int count, int domain __unused,
int flags)
{
+ int req;
+ /* XXXMJ presumably WAITOK shouldn't be hard-coded */
+ req = VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_WAITOK;
for (int i = 0; i < count; i++) {
- vm_page_t m = vm_page_alloc_contig(NULL, 0,
- VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
- VM_ALLOC_ZERO | VM_ALLOC_WAITOK, RADIX_PGD_SIZE/PAGE_SIZE,
+ vm_page_t m = vm_page_alloc_noobj_contig(req,
+ RADIX_PGD_SIZE / PAGE_SIZE,
0, (vm_paddr_t)-1, RADIX_PGD_SIZE, L1_PAGE_SIZE,
VM_MEMATTR_DEFAULT);
/* XXX zero on alloc here so we don't have to later */
Index: sys/powerpc/aim/slb.c
===================================================================
--- sys/powerpc/aim/slb.c
+++ sys/powerpc/aim/slb.c
@@ -500,9 +500,9 @@
realmax = platform_real_maxaddr();
*flags = UMA_SLAB_PRIV;
- m = vm_page_alloc_contig_domain(NULL, 0, domain,
- malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED,
- 1, 0, realmax, PAGE_SIZE, PAGE_SIZE, VM_MEMATTR_DEFAULT);
+ m = vm_page_alloc_noobj_contig_domain(domain, malloc2vm_flags(wait) |
+ VM_ALLOC_WIRED, 1, 0, realmax, PAGE_SIZE, PAGE_SIZE,
+ VM_MEMATTR_DEFAULT);
if (m == NULL)
return (NULL);
@@ -513,9 +513,6 @@
pmap_kenter((vm_offset_t)va, VM_PAGE_TO_PHYS(m));
}
- if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
- bzero(va, PAGE_SIZE);
-
return (va);
}
Index: sys/riscv/riscv/pmap.c
===================================================================
--- sys/riscv/riscv/pmap.c
+++ sys/riscv/riscv/pmap.c
@@ -2747,8 +2747,6 @@
VM_ALLOC_ZERO);
if (l3_m == NULL)
panic("pmap_enter: l3 pte_m == NULL");
- if ((l3_m->flags & PG_ZERO) == 0)
- pmap_zero_page(l3_m);
l3_pa = VM_PAGE_TO_PHYS(l3_m);
l3_pn = (l3_pa / PAGE_SIZE);
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c
+++ sys/vm/uma_core.c
@@ -1885,17 +1885,15 @@
{
vm_paddr_t pa;
vm_page_t m;
- void *mem;
- int pages;
- int i;
+ int i, pages;
pages = howmany(bytes, PAGE_SIZE);
KASSERT(pages > 0, ("%s can't reserve 0 pages", __func__));
*pflag = UMA_SLAB_BOOT;
- m = vm_page_alloc_contig_domain(NULL, 0, domain,
- malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED, pages,
- (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0, VM_MEMATTR_DEFAULT);
+ m = vm_page_alloc_noobj_contig_domain(domain, malloc2vm_flags(wait) |
+ VM_ALLOC_WIRED, pages, (vm_paddr_t)0, ~(vm_paddr_t)0, 1, 0,
+ VM_MEMATTR_DEFAULT);
if (m == NULL)
return (NULL);
@@ -1907,13 +1905,10 @@
dump_add_page(pa);
#endif
}
- /* Allocate KVA and indirectly advance bootmem. */
- mem = (void *)pmap_map(&bootmem, m->phys_addr,
- m->phys_addr + (pages * PAGE_SIZE), VM_PROT_READ | VM_PROT_WRITE);
- if ((wait & M_ZERO) != 0)
- bzero(mem, pages * PAGE_SIZE);
- return (mem);
+ /* Allocate KVA and indirectly advance bootmem. */
+ return ((void *)pmap_map(&bootmem, m->phys_addr,
+ m->phys_addr + (pages * PAGE_SIZE), VM_PROT_READ | VM_PROT_WRITE));
}
static void
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c
+++ sys/vm/vm_page.c
@@ -2883,32 +2883,32 @@
* "m_run" and "high" only as a last
* resort.
*/
- req = req_class | VM_ALLOC_NOOBJ;
+ req = req_class;
if ((m->flags & PG_NODUMP) != 0)
req |= VM_ALLOC_NODUMP;
if (trunc_page(high) !=
~(vm_paddr_t)PAGE_MASK) {
- m_new = vm_page_alloc_contig(
- NULL, 0, req, 1,
- round_page(high),
- ~(vm_paddr_t)0,
- PAGE_SIZE, 0,
- VM_MEMATTR_DEFAULT);
+ m_new =
+ vm_page_alloc_noobj_contig(
+ req, 1, round_page(high),
+ ~(vm_paddr_t)0, PAGE_SIZE,
+ 0, VM_MEMATTR_DEFAULT);
} else
m_new = NULL;
if (m_new == NULL) {
pa = VM_PAGE_TO_PHYS(m_run);
- m_new = vm_page_alloc_contig(
- NULL, 0, req, 1,
- 0, pa - 1, PAGE_SIZE, 0,
+ m_new =
+ vm_page_alloc_noobj_contig(
+ req, 1, 0, pa - 1,
+ PAGE_SIZE, 0,
VM_MEMATTR_DEFAULT);
}
if (m_new == NULL) {
pa += ptoa(npages);
- m_new = vm_page_alloc_contig(
- NULL, 0, req, 1,
- pa, high, PAGE_SIZE, 0,
- VM_MEMATTR_DEFAULT);
+ m_new =
+ vm_page_alloc_noobj_contig(
+ req, 1, pa, high, PAGE_SIZE,
+ 0, VM_MEMATTR_DEFAULT);
}
if (m_new == NULL) {
vm_page_xunbusy(m);