Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c
+++ sys/amd64/amd64/pmap.c
@@ -114,6 +114,7 @@
 #include
 #include
 #include
+#include <sys/epoch.h>
 #include
 #include
 #include
@@ -349,6 +350,7 @@
 vm_paddr_t dmaplimit;
 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
 pt_entry_t pg_nx;
+static epoch_t pmap_epoch;
 
 static SYSCTL_NODE(_vm, OID_AUTO, pmap, CTLFLAG_RD, 0,
     "VM/pmap parameters");
@@ -438,14 +440,12 @@
     CTLFLAG_MPSAFE, NULL, 0, pmap_pcid_save_cnt_proc, "QU",
     "Count of saved TLB context on switch");
 
-static LIST_HEAD(, pmap_invl_gen) pmap_invl_gen_tracker =
-    LIST_HEAD_INITIALIZER(&pmap_invl_gen_tracker);
-static struct mtx invl_gen_mtx;
-static u_long pmap_invl_gen = 0;
-/* Fake lock object to satisfy turnstiles interface. */
-static struct lock_object invl_gen_ts = {
-        .lo_name = "invlts",
-};
+static void
+pmap_epoch_init(void *arg __unused)
+{
+        pmap_epoch = epoch_alloc(EPOCH_PREEMPT|EPOCH_LOCKED);
+}
+SYSINIT(epoch, SI_SUB_TASKQ + 1, SI_ORDER_ANY, pmap_epoch_init, NULL);
 
 static bool
 pmap_not_in_di(void)
@@ -468,19 +468,8 @@
 static void
 pmap_delayed_invl_started(void)
 {
-        struct pmap_invl_gen *invl_gen;
-        u_long currgen;
-
-        invl_gen = &curthread->td_md.md_invl_gen;
-        PMAP_ASSERT_NOT_IN_DI();
-        mtx_lock(&invl_gen_mtx);
-        if (LIST_EMPTY(&pmap_invl_gen_tracker))
-                currgen = pmap_invl_gen;
-        else
-                currgen = LIST_FIRST(&pmap_invl_gen_tracker)->gen;
-        invl_gen->gen = currgen + 1;
-        LIST_INSERT_HEAD(&pmap_invl_gen_tracker, invl_gen, link);
-        mtx_unlock(&invl_gen_mtx);
+        epoch_enter_preempt(pmap_epoch);
+        curthread->td_md.md_invl_gen.gen = 1;
 }
 
 /*
@@ -500,28 +489,8 @@
 static void
 pmap_delayed_invl_finished(void)
 {
-        struct pmap_invl_gen *invl_gen, *next;
-        struct turnstile *ts;
-
-        invl_gen = &curthread->td_md.md_invl_gen;
-        KASSERT(invl_gen->gen != 0, ("missed invl_started"));
-        mtx_lock(&invl_gen_mtx);
-        next = LIST_NEXT(invl_gen, link);
-        if (next == NULL) {
-                turnstile_chain_lock(&invl_gen_ts);
-                ts = turnstile_lookup(&invl_gen_ts);
-                pmap_invl_gen = invl_gen->gen;
-                if (ts != NULL) {
-                        turnstile_broadcast(ts, TS_SHARED_QUEUE);
-                        turnstile_unpend(ts);
-                }
-                turnstile_chain_unlock(&invl_gen_ts);
-        } else {
-                next->gen = invl_gen->gen;
-        }
-        LIST_REMOVE(invl_gen, link);
-        mtx_unlock(&invl_gen_mtx);
-        invl_gen->gen = 0;
+        curthread->td_md.md_invl_gen.gen = 0;
+        epoch_exit_preempt(pmap_epoch);
 }
 
 #ifdef PV_STATS
@@ -544,36 +513,14 @@
  * pmap_delayed_invl_wait(), upon its return we know that no CPU has a
  * valid mapping for the page m in either its page table or TLB.
  *
- * This function works by blocking until the global DI generation
- * number catches up with the generation number associated with the
- * given page m and its PV list.  Since this function's callers
- * typically own an object lock and sometimes own a page lock, it
- * cannot sleep.  Instead, it blocks on a turnstile to relinquish the
- * processor.
+ * This function works by waiting for a grace period to elapse, so
+ * that any callers that were within a DI (epoch) section when it was
+ * initially called are guaranteed to have exited that section.
  */
 static void
 pmap_delayed_invl_wait(vm_page_t m)
 {
-        struct turnstile *ts;
-        u_long *m_gen;
-#ifdef PV_STATS
-        bool accounted = false;
-#endif
-
-        m_gen = pmap_delayed_invl_genp(m);
-        while (*m_gen > pmap_invl_gen) {
-#ifdef PV_STATS
-                if (!accounted) {
-                        atomic_add_long(&invl_wait, 1);
-                        accounted = true;
-                }
-#endif
-                ts = turnstile_trywait(&invl_gen_ts);
-                if (*m_gen > pmap_invl_gen)
-                        turnstile_wait(ts, NULL, TS_SHARED_QUEUE);
-                else
-                        turnstile_cancel(ts);
-        }
+        epoch_wait_preempt(pmap_epoch);
 }
 
 /*
@@ -1130,11 +1077,6 @@
         TAILQ_INIT(&kernel_pmap->pm_pvchunk);
         kernel_pmap->pm_flags = pmap_flags;
 
-        /*
-         * Initialize the TLB invalidations generation number lock.
-         */
-        mtx_init(&invl_gen_mtx, "invlgn", NULL, MTX_DEF);
-
         /*
          * Reserve some special page table entries/VA space for temporary
          * mapping of pages.
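
For reference, the change replaces the turnstile-blocked global DI generation counter with epoch(9): pmap_delayed_invl_started() and pmap_delayed_invl_finished() now bracket a preemptible epoch section, and pmap_delayed_invl_wait() reduces to a single epoch_wait_preempt() call, which returns only once every section that was active at the time of the call has exited. The following is a minimal sketch of that pattern, using the same single-argument preemptible epoch(9) KPI the patch itself uses; my_epoch, frob(), and frob_destroy() are hypothetical names for illustration, not part of the patch.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/epoch.h>

static epoch_t my_epoch;

static void
my_epoch_init(void *arg __unused)
{
        /* Allocate a preemptible epoch once epoch(9) itself is up. */
        my_epoch = epoch_alloc(EPOCH_PREEMPT);
}
SYSINIT(my_epoch_init, SI_SUB_TASKQ + 1, SI_ORDER_ANY, my_epoch_init, NULL);

/* Section side: the analogue of pmap_delayed_invl_started()/_finished(). */
static void
frob(void)
{
        epoch_enter_preempt(my_epoch);
        /* ... use structures whose liveness the epoch guarantees ... */
        epoch_exit_preempt(my_epoch);
}

/* Waiter side: the analogue of pmap_delayed_invl_wait(). */
static void
frob_destroy(void)
{
        /*
         * After epoch_wait_preempt() returns, every thread that was
         * inside an epoch section when it was called has exited it,
         * so the structures can be freed or repurposed safely.
         */
        epoch_wait_preempt(my_epoch);
        /* ... reclaim ... */
}

Compared with the removed turnstile scheme, waiters no longer compare per-page generation numbers against a global counter; they simply pay for at most one grace period, while threads entering a DI block pay only the per-CPU cost of entering and exiting a preemptible epoch section.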