Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -2261,6 +2261,8 @@
 		}
 	}
 	PMAP_UNLOCK(pmap);
+	if (pa == VM_PAGE_TO_PHYS(zero_page))
+		return (0);
 	return (pa);
 }
 
@@ -2304,7 +2306,10 @@
 				    &pa))
 					goto retry;
 				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
-				vm_page_hold(m);
+				if (m == zero_page)
+					m = NULL;
+				else
+					vm_page_hold(m);
 			}
 		}
 	}
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c
+++ sys/i386/i386/pmap.c
@@ -1502,6 +1502,8 @@
 		}
 	}
 	PMAP_UNLOCK(pmap);
+	if (rtval == VM_PAGE_TO_PHYS(zero_page))
+		return (0);
 	return (rtval);
 }
 
@@ -1545,7 +1547,10 @@
 				    &pa))
 					goto retry;
 				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
-				vm_page_hold(m);
+				if (m == zero_page)
+					m = NULL;
+				else
+					vm_page_hold(m);
 			}
 		}
 	}
Index: sys/vm/vm_fault.c
===================================================================
--- sys/vm/vm_fault.c
+++ sys/vm/vm_fault.c
@@ -730,6 +730,33 @@
 		 * page.  (Default objects are zero-fill, so there is no real
 		 * pager for them.)
 		 */
+
+		if (fs.object == fs.first_object && (fs.object->flags &
+		    (OBJ_ONEMAPPING | OBJ_NOSPLIT)) == OBJ_ONEMAPPING &&
+		    fs.object->shadow_count == 0 &&
+		    fs.object->backing_object == NULL &&
+		    fault_type == VM_PROT_READ && !wired && m_hold == NULL) {
+			/*
+			 * This is a lone nameless anonymous object
+			 * not participating in the shadow chains, and
+			 * the fault is for read.  Install transient
+			 * zero page mapping to avoid allocating real
+			 * physical memory until some user content is
+			 * written there.
+			 *
+			 * Write faults allocate and install the real
+			 * page.  Pmaps must not return zero_page from
+			 * pmap_extract() and pmap_extract_and_hold()
+			 * for this optimization to work.
+			 */
+			rv = pmap_enter(map->pmap, vaddr, zero_page,
+			    VM_PROT_READ, fault_type | PMAP_ENTER_NOSLEEP, 0);
+			if (rv == KERN_SUCCESS) {
+				unlock_and_deallocate(&fs);
+				return (rv);
+			}
+		}
+
 		if (fs.object->type != OBJT_DEFAULT ||
 		    fs.object == fs.first_object) {
 			if (fs.pindex >= fs.object->size) {
Index: sys/vm/vm_kern.c
===================================================================
--- sys/vm/vm_kern.c
+++ sys/vm/vm_kern.c
@@ -610,6 +610,8 @@
 	vm_map_unlock(map);
 }
 
+vm_page_t zero_page;
+
 void
 kmem_init_zero_region(void)
 {
@@ -622,7 +624,7 @@
 	 * zeros, while not using much more physical resources.
 	 */
 	addr = kva_alloc(ZERO_REGION_SIZE);
-	m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
+	zero_page = m = vm_page_alloc(NULL, 0, VM_ALLOC_NORMAL |
 	    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO);
 	if ((m->flags & PG_ZERO) == 0)
 		pmap_zero_page(m);
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h
+++ sys/vm/vm_page.h
@@ -370,6 +370,7 @@
 extern vm_page_t vm_page_array;		/* First resident page in table */
 extern long vm_page_array_size;		/* number of vm_page_t's */
 extern long first_page;			/* first physical page number */
+extern vm_page_t zero_page;
 
 #define VM_PAGE_TO_PHYS(entry)	((entry)->phys_addr)
 