sys/arm64/arm64/pmap.c
(239 lines not shown)
static void free_pv_chunk(struct pv_chunk *pc);
static void free_pv_entry(pmap_t pmap, pv_entry_t pv);
static pv_entry_t get_pv_entry(pmap_t pmap, struct rwlock **lockp);
static vm_page_t reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp);
static void pmap_pvh_free(struct md_page *pvh, pmap_t pmap, vm_offset_t va);
static pv_entry_t pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
    vm_offset_t va);
static int pmap_change_attr(vm_offset_t va, vm_size_t size, int mode);
static int pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode);
static pt_entry_t *pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va);
static pt_entry_t *pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va);
static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
    vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
static int pmap_remove_l3(pmap_t pmap, pt_entry_t *l3, vm_offset_t sva,
    pd_entry_t ptepde, struct spglist *free, struct rwlock **lockp);
static boolean_t pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va,
    vm_page_t m, struct rwlock **lockp);
static vm_page_t _pmap_alloc_l3(pmap_t pmap, vm_pindex_t ptepindex,
(585 lines not shown)
void
pmap_init(void)
{
    int i;

    /*
     * Initialize the pv chunk list mutex.
     */
    mtx_init(&pv_chunks_mutex, "pmap pv chunk list", NULL, MTX_DEF);
kib: On amd64/i386, the same knob is called vm.pmap.pg_ps_enabled. Not sure if it is required to have a consistent name there, but at least this is something to consider.
andrew: It looked like this was named after the flag in the pagetables. On armv6 it seems to be vm.pmap.sp_enabled. I don't have an opinion on the correct name.
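For reference, a minimal sketch of the amd64-compatible spelling of the knob (the node declaration, variable name, default value, and description string below are assumptions for illustration, not part of this patch):

    static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0, "VM/pmap parameters");

    static int pg_ps_enabled = 1;
    SYSCTL_INT(_vm_pmap, OID_AUTO, pg_ps_enabled, CTLFLAG_RDTUN,
        &pg_ps_enabled, 0, "Are large page mappings enabled?");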
    /*
     * Initialize the pool of pv list locks.
     */
    for (i = 0; i < NPV_LIST_LOCKS; i++)
        rw_init(&pv_list_locks[i], "pmap pv list");
}
(175 lines not shown; within pmap_kextract(vm_offset_t va))
    }
    return (pa);
}
/***************************************************
 * Low level mapping routines.....
 ***************************************************/
static void
pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_t pa, int mode)
{
    pd_entry_t *pde;
    pt_entry_t *pte;
    vm_offset_t va;
    int lvl;

    KASSERT((pa & L3_OFFSET) == 0,
        ("pmap_kenter: Invalid physical address"));
    KASSERT((sva & L3_OFFSET) == 0,
        ("pmap_kenter: Invalid virtual address"));
    KASSERT((size & PAGE_MASK) == 0,
        ("pmap_kenter: Mapping is not page-sized"));

    va = sva;
    while (size != 0) {
        pde = pmap_pde(kernel_pmap, va, &lvl);
        KASSERT(pde != NULL,
            ("pmap_kenter: Invalid page entry, va: 0x%lx", va));
        KASSERT(lvl == 2, ("pmap_kenter: Invalid level %d", lvl));

        pte = pmap_l2_to_l3(pde, va);
        pmap_load_store(pte, (pa & ~L3_OFFSET) | ATTR_DEFAULT |
            ATTR_IDX(mode) | L3_PAGE);
        PTE_SYNC(pte);

        va += PAGE_SIZE;
        pa += PAGE_SIZE;
        size -= PAGE_SIZE;
    }
    pmap_invalidate_range(kernel_pmap, sva, va);
}

void
pmap_kenter_device(vm_offset_t sva, vm_size_t size, vm_paddr_t pa)
{

    pmap_kenter(sva, size, pa, DEVICE_MEMORY);
}
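For illustration only (va and pa below are placeholders, not from the patch), the new mode argument selects the memory-attribute index programmed into the ATTR_IDX field, so the same helper now covers both normal and device mappings:

    pmap_kenter(va, PAGE_SIZE, pa, CACHED_MEMORY);   /* write-back cached memory */
    pmap_kenter_device(va, PAGE_SIZE, pa);           /* device memory */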
/*
 * Remove a page from the kernel pagetables.
 */
PMAP_INLINE void
pmap_kremove(vm_offset_t va)
{
    pt_entry_t *pte;
    int lvl;
(2,274 lines not shown)
 */
void
pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
{

    m->md.pv_memattr = ma;

    /*
     * If "m" is a normal page, update its direct mapping.  This update
     * can be relied upon to perform any cache operations that are
     * required for data coherence.
     */
    if ((m->flags & PG_FICTITIOUS) == 0 &&
        pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
        m->md.pv_memattr) != 0)
            panic("memory attribute change on the direct map failed");
}
mmel: The given page can be mapped multiple times, but the attributes are changed only in the DMAP. ARM doesn't allow multiple mappings with different page attributes.
andrew: How would this happen? My understanding is that the VM code will only call this with unmapped pages.
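One way to make that expectation explicit (a sketch only; this assertion is not part of the patch and assumes the page's pv list is empty whenever the VM calls pmap_page_set_memattr):

    KASSERT((m->flags & PG_FICTITIOUS) != 0 || TAILQ_EMPTY(&m->md.pv_list),
        ("pmap_page_set_memattr: page %p still has managed mappings", m));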
/*
 * Changes the specified virtual address range's memory type to that given by
 * the parameter "mode".  The specified virtual address range must be
 * completely contained within either the direct map or the kernel map.  If
 * the virtual address range is contained within the kernel map, then the
 * memory type for each of the corresponding ranges of the direct map is also
 * changed.  (The corresponding ranges of the direct map are those ranges that
 * map the same physical pages as the specified virtual address range.)  These
 * changes to the direct map are necessary because Intel describes the
 * behavior of their processors as "undefined" if two or more mappings to the
 * same physical page have different memory types.
 *
 * Returns zero if the change completed successfully, and either EINVAL or
 * ENOMEM if the change failed.  Specifically, EINVAL is returned if some part
 * of the virtual address range was not mapped, and ENOMEM is returned if
 * there was insufficient memory available to complete the change.  In the
 * latter case, the memory type may have been changed on some part of the
 * virtual address range or the direct map.
 */
static int
pmap_change_attr(vm_offset_t va, vm_size_t size, int mode)
{
    int error;

    PMAP_LOCK(kernel_pmap);
    error = pmap_change_attr_locked(va, size, mode);
    PMAP_UNLOCK(kernel_pmap);

    return (error);
}
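As a usage illustration only (hypothetical caller; "pa" and "error" are placeholders, and since the function is file-static any such caller would live in pmap.c), switching one direct-map page to uncacheable and checking the result might look like:

    error = pmap_change_attr(PHYS_TO_DMAP(pa), PAGE_SIZE,
        VM_MEMATTR_UNCACHEABLE);
    if (error != 0)
        printf("pmap_change_attr failed: %d\n", error);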
static int
pmap_change_attr_locked(vm_offset_t va, vm_size_t size, int mode)
{
    vm_offset_t base, offset, tmpva;
    pt_entry_t l3, *pte, *newpte;
    int lvl;

    PMAP_LOCK_ASSERT(kernel_pmap, MA_OWNED);
    base = trunc_page(va);
    offset = va & PAGE_MASK;
    size = round_page(offset + size);

    if (!VIRT_IN_DMAP(base))
        return (EINVAL);

    for (tmpva = base; tmpva < base + size; ) {
        pte = pmap_pte(kernel_pmap, tmpva, &lvl);
        if (pte == NULL)
            return (EINVAL);

        if ((pmap_load(pte) & ATTR_IDX_MASK) == ATTR_IDX(mode)) {
            /*
             * We already have the correct attribute,
             * ignore this entry.
             */
            switch (lvl) {
            default:
                panic("Invalid DMAP table level: %d\n", lvl);
            case 1:
                tmpva = (tmpva & ~L1_OFFSET) + L1_SIZE;
                break;
            case 2:
                tmpva = (tmpva & ~L2_OFFSET) + L2_SIZE;
                break;
            case 3:
                tmpva += PAGE_SIZE;
                break;
            }
        } else {
            /*
             * Split the entry to a level 3 table, then
             * set the new attribute.
             */
            switch (lvl) {
            default:
                panic("Invalid DMAP table level: %d\n", lvl);
            case 1:
                newpte = pmap_demote_l1(kernel_pmap, pte,
                    tmpva & ~L1_OFFSET);
                if (newpte == NULL)
                    return (EINVAL);
                pte = pmap_l1_to_l2(pte, tmpva);
                /* FALLTHROUGH */
            case 2:
                newpte = pmap_demote_l2(kernel_pmap, pte,
                    tmpva & ~L2_OFFSET);
                if (newpte == NULL)
                    return (EINVAL);
                pte = pmap_l2_to_l3(pte, tmpva);
                /* FALLTHROUGH */
            case 3:
                /* Update the entry */
                l3 = pmap_load(pte);
                l3 &= ~ATTR_IDX_MASK;
                l3 |= ATTR_IDX(mode);

                /*
                 * If moving to a non-cacheable entry flush
                 * the cache.
                 */
                if (mode == VM_MEMATTR_UNCACHEABLE)
                    cpu_idcache_wbinv_range(tmpva, L3_SIZE);
mmel: Why idcache?
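If only data-cache maintenance is intended, the narrower primitive would look like this (a sketch, not part of the patch; whether the instruction-cache invalidate is actually needed here is exactly the question above):

                if (mode == VM_MEMATTR_UNCACHEABLE)
                    cpu_dcache_wbinv_range(tmpva, L3_SIZE);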
                /* Clear the old mapping */
                pmap_load_clear(pte);
                PTE_SYNC(pte);
                pmap_invalidate_page(kernel_pmap, tmpva);
mmel: Isn't tmpva just unmapped?
andrew: No, it is the virtual address we are currently changing. It will have been mapped in pmap_demote_l2.
                /* Create the new mapping */
                pmap_load_store(pte, l3);
                PTE_SYNC(pte);
                pmap_invalidate_page(kernel_pmap, tmpva);
                break;
            }
            tmpva += PAGE_SIZE;
        }
    }

    return (0);
}
/*
 * Create an L2 table to map all addresses within an L1 mapping.
 */
static pt_entry_t *
pmap_demote_l1(pmap_t pmap, pt_entry_t *l1, vm_offset_t va)
{
    pt_entry_t *l2, newl2, oldl1;
    vm_offset_t tmpl1;
    vm_paddr_t l2phys, phys;
    vm_page_t ml2;
    int i;

    PMAP_LOCK_ASSERT(pmap, MA_OWNED);

    oldl1 = pmap_load(l1);
    KASSERT((oldl1 & ATTR_DESCR_MASK) == L1_BLOCK,
        ("pmap_demote_l1: Demoting a non-block entry"));
    KASSERT((va & L1_OFFSET) == 0,
        ("pmap_demote_l1: Invalid virtual address %#lx", va));

    tmpl1 = 0;
    if (va <= (vm_offset_t)l1 && va + L1_SIZE > (vm_offset_t)l1) {
        tmpl1 = kva_alloc(PAGE_SIZE);
        if (tmpl1 == 0)
            return (NULL);
    }

    if ((ml2 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
        VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
        CTR2(KTR_PMAP, "pmap_demote_l1: failure for va %#lx"
            " in pmap %p", va, pmap);
        return (NULL);
    }

    l2phys = VM_PAGE_TO_PHYS(ml2);
    l2 = (pt_entry_t *)PHYS_TO_DMAP(l2phys);

    /* Address the range points at */
    phys = oldl1 & ~ATTR_MASK;
    /* The attributes from the old l1 entry to be copied */
    newl2 = oldl1 & ATTR_MASK;
    /* Create the new entries */
    for (i = 0; i < Ln_ENTRIES; i++) {
        l2[i] = newl2 | phys;
mmel: New entries are not synced.
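One possible way to address this (a sketch only, not part of the patch, assuming PTE_SYNC() performs the data-cache write-back of the entry it is given) would be to sync each new entry as it is written:

        PTE_SYNC(&l2[i]);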
        phys += L2_SIZE;
    }

    if (tmpl1 != 0) {
        pmap_kenter(tmpl1, PAGE_SIZE,
            DMAP_TO_PHYS((vm_offset_t)l1) & ~L3_OFFSET, CACHED_MEMORY);
        l1 = (pt_entry_t *)(tmpl1 + ((vm_offset_t)l1 & PAGE_MASK));
    }

    /*
     * Ensure we don't get switched out with the page table
     * in an inconsistent state.
     */
    critical_enter();

    /* Clear the old mapping */
    pmap_load_clear(l1);
    PTE_SYNC(l1);
    pmap_invalidate_page(pmap, va);

    /* Create the new mapping */
    pmap_load_store(l1, l2phys | L1_TABLE);
    PTE_SYNC(l1);
    pmap_invalidate_page(pmap, va);
mmel: This invalidate is superfluous; unmapped TLB entries cannot be cached.
    critical_exit();

    if (tmpl1 != 0) {
        pmap_kremove(tmpl1);
        kva_free(tmpl1, PAGE_SIZE);
    }

    return (l2);
}
/*
 * Create an L3 table to map all addresses within an L2 mapping.
 */
static pt_entry_t *
pmap_demote_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t va)
{
    pt_entry_t *l3, newl3, oldl2;
    vm_offset_t tmpl2;
    vm_paddr_t l3phys, phys;
    vm_page_t ml3;
    int i;

    PMAP_LOCK_ASSERT(pmap, MA_OWNED);

    oldl2 = pmap_load(l2);
    KASSERT((oldl2 & ATTR_DESCR_MASK) == L2_BLOCK,
        ("pmap_demote_l2: Demoting a non-block entry"));
    KASSERT((va & L2_OFFSET) == 0,
        ("pmap_demote_l2: Invalid virtual address %#lx", va));

    tmpl2 = 0;
    if (va <= (vm_offset_t)l2 && va + L2_SIZE > (vm_offset_t)l2) {
        tmpl2 = kva_alloc(PAGE_SIZE);
        if (tmpl2 == 0)
            return (NULL);
    }

    if ((ml3 = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
        VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
        CTR2(KTR_PMAP, "pmap_demote_l2: failure for va %#lx"
            " in pmap %p", va, pmap);
        return (NULL);
    }

    l3phys = VM_PAGE_TO_PHYS(ml3);
    l3 = (pt_entry_t *)PHYS_TO_DMAP(l3phys);

    /* Address the range points at */
    phys = oldl2 & ~ATTR_MASK;
    /* The attributes from the old l2 entry to be copied */
    newl3 = (oldl2 & (ATTR_MASK & ~ATTR_DESCR_MASK)) | L3_PAGE;
    /* Create the new entries */
    for (i = 0; i < Ln_ENTRIES; i++) {
        l3[i] = newl3 | phys;
        phys += L3_SIZE;
    }

    if (tmpl2 != 0) {
        pmap_kenter(tmpl2, PAGE_SIZE,
            DMAP_TO_PHYS((vm_offset_t)l2) & ~L3_OFFSET, CACHED_MEMORY);
        l2 = (pt_entry_t *)(tmpl2 + ((vm_offset_t)l2 & PAGE_MASK));
    }

    /*
     * Ensure we don't get switched out with the page table
     * in an inconsistent state.
     */
    critical_enter();

    /* Clear the old mapping */
    pmap_load_clear(l2);
    PTE_SYNC(l2);
    pmap_invalidate_page(pmap, va);

    /* Create the new mapping */
    pmap_load_store(l2, l3phys | L2_TABLE);
    PTE_SYNC(l2);
    pmap_invalidate_page(pmap, va);
    critical_exit();

    if (tmpl2 != 0) {
        pmap_kremove(tmpl2);
        kva_free(tmpl2, PAGE_SIZE);
    }

    return (l3);
}
/*
 * perform the pmap work for mincore
 */
int
pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa)
{
(150 lines not shown)
 */
boolean_t
pmap_map_io_transient(vm_page_t page[], vm_offset_t vaddr[], int count,
    boolean_t can_fault)
{
    vm_paddr_t paddr;
    boolean_t needs_mapping;
    int error, i;
kib: I remember that I asked the same question when reviewing the similar arm changes (break-before-make). Could a transient page fault occur recursively? In other words, could you get the fault while the thread owns the pmap lock? E.g., might it happen that an interrupt handler accesses the DMAP mapping which is being promoted or demoted by the same CPU and interrupted?
andrew: I can change the critical section to a spin lock to disable interrupts. I don't expect any other exception to be triggered within the break-before-make section, and any that do would indicate a bug in the break-before-make section.
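A sketch of that alternative for the break-before-make sequence in pmap_demote_l1 (illustration only; the intr_disable()/intr_restore() pair shown here is one option, and a dedicated spin mutex would have the same effect of keeping interrupts off the CPU while the entry is transiently invalid):

    register_t intr;

    intr = intr_disable();
    /* Break-before-make: remove the old block entry... */
    pmap_load_clear(l1);
    PTE_SYNC(l1);
    pmap_invalidate_page(pmap, va);
    /* ...then install the replacement table entry. */
    pmap_load_store(l1, l2phys | L1_TABLE);
    PTE_SYNC(l1);
    intr_restore(intr);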
    /*
     * Allocate any KVA space that we need, this is done in a separate
     * loop to prevent calling vmem_alloc while pinned.
     */
    needs_mapping = FALSE;
    for (i = 0; i < count; i++) {
        paddr = VM_PAGE_TO_PHYS(page[i]);
        if (__predict_false(!PHYS_IN_DMAP(paddr))) {
(final 42 lines not shown)