Changeset View
Changeset View
Standalone View
Standalone View
sys/arm64/arm64/pmap.c
Show First 20 Lines • Show All 6,581 Lines • ▼ Show 20 Lines | pmap_alloc_asid(pmap_t pmap) | ||||
} | } | ||||
bit_set(set->asid_set, new_asid); | bit_set(set->asid_set, new_asid); | ||||
set->asid_next = new_asid + 1; | set->asid_next = new_asid + 1; | ||||
pmap->pm_cookie = COOKIE_FROM(new_asid, set->asid_epoch); | pmap->pm_cookie = COOKIE_FROM(new_asid, set->asid_epoch); | ||||
out: | out: | ||||
mtx_unlock_spin(&set->asid_set_mutex); | mtx_unlock_spin(&set->asid_set_mutex); | ||||
} | } | ||||
/*
 * Flags OR-ed into every value returned by pmap_to_ttbr0(), e.g. TTBR_CnP
 * once all CPUs have enabled the Common-not-Private feature.  Only written
 * inside an smp_rendezvous (see pmap_set_cnp()), so no lock is needed.
 */
static uint64_t __read_mostly ttbr_flags;
kib: You could replace this boolean with the value to OR into ttbr. Then you can avoid one branch in pmap_to_ttbr0().
/* | /* | ||||
* Compute the value that should be stored in ttbr0 to activate the specified | * Compute the value that should be stored in ttbr0 to activate the specified | ||||
* pmap. This value may change from time to time. | * pmap. This value may change from time to time. | ||||
*/ | */ | ||||
uint64_t | uint64_t | ||||
pmap_to_ttbr0(pmap_t pmap) | pmap_to_ttbr0(pmap_t pmap) | ||||
{ | { | ||||
uint64_t ttbr; | |||||
return (ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)) | | ttbr = pmap->pm_ttbr; | ||||
pmap->pm_ttbr); | ttbr |= ASID_TO_OPERAND(COOKIE_TO_ASID(pmap->pm_cookie)); | ||||
ttbr |= ttbr_flags; | |||||
return (ttbr); | |||||
} | } | ||||
/*
 * smp_rendezvous() callback that enables the Common-not-Private (CnP)
 * translation table bit on the local CPU: TTBR_CnP is OR-ed into both
 * ttbr0_el1 and ttbr1_el1 and the TLB is invalidated.  The single CPU
 * whose id matches *arg additionally records the flag in ttbr_flags so
 * that all later pmap_to_ttbr0() calls include it.
 */
static void
pmap_set_cnp(void *arg)
{
	uint64_t ttbr0, ttbr1;
	u_int cpuid;

	cpuid = *(u_int *)arg;
	if (cpuid == curcpu) {
		/*
		 * Set the flags while all CPUs are handling the
		 * smp_rendezvous so will not call pmap_to_ttbr0. Any calls
		 * to pmap_to_ttbr0 after this will have the CnP flag set.
		 * The dsb after invalidating the TLB will act as a barrier
		 * to ensure all CPUs can observe this change.
		 */
		ttbr_flags |= TTBR_CnP;
	}

	/* OR the CnP flag into the current base-register values. */
	ttbr0 = READ_SPECIALREG(ttbr0_el1);
	ttbr0 |= TTBR_CnP;
	ttbr1 = READ_SPECIALREG(ttbr1_el1);
	ttbr1 |= TTBR_CnP;

	/* Update ttbr{0,1}_el1 with the CnP flag */
	WRITE_SPECIALREG(ttbr0_el1, ttbr0);
	WRITE_SPECIALREG(ttbr1_el1, ttbr1);
	isb();
	/* Invalidate stale entries; "is" makes it inner-shareable wide. */
	__asm __volatile("tlbi vmalle1is");
	dsb(ish);
	isb();
}
/*
 * Enable the Common-not-Private feature on all CPUs, if the hardware
 * implements it (ID_AA64MMFR2_EL1.CnP != none).  This is deferred until
 * SI_SUB_SMP so every CPU is up and participates in the rendezvous:
 * setting ttbr_flags from inside the rendezvous guarantees no CPU can be
 * concurrently computing a ttbr0 value without the CnP bit.
 */
static void
pmap_init_cnp(void *dummy __unused)
{
	uint64_t reg;
	u_int cpuid;

	/*
	 * NOTE(review): assumes get_kernel_reg() returns false when the
	 * feature register is unavailable/non-uniform — confirm its contract.
	 */
	if (!get_kernel_reg(ID_AA64MMFR2_EL1, &reg))
		return;

	if (ID_AA64MMFR2_CnP_VAL(reg) != ID_AA64MMFR2_CnP_NONE) {
		if (bootverbose)
			printf("Enabling CnP\n");
		/* Tell the callback which CPU should update ttbr_flags. */
		cpuid = curcpu;
		smp_rendezvous(NULL, pmap_set_cnp, NULL, &cpuid);
	}
}
SYSINIT(pmap_init_cnp, SI_SUB_SMP, SI_ORDER_ANY, pmap_init_cnp, NULL);
static bool | static bool | ||||
pmap_activate_int(pmap_t pmap) | pmap_activate_int(pmap_t pmap) | ||||
{ | { | ||||
struct asid_set *set; | struct asid_set *set; | ||||
int epoch; | int epoch; | ||||
KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap")); | KASSERT(PCPU_GET(curpmap) != NULL, ("no active pmap")); | ||||
▲ Show 20 Lines • Show All 649 Lines • Show Last 20 Lines |
You could replace this boolean with the value to OR into ttbr. Then you can avoid one branch in pmap_to_ttbr0().