Changeset View
Standalone View
sys/amd64/amd64/minidump_machdep.c
Show First 20 Lines • Show All 159 Lines • ▼ Show 20 Lines | |||||
/* A fake page table page, to avoid having to handle both 4K and 2M pages */ | /* A fake page table page, to avoid having to handle both 4K and 2M pages */ | ||||
static pd_entry_t fakepd[NPDEPG]; | static pd_entry_t fakepd[NPDEPG]; | ||||
int | int | ||||
cpu_minidumpsys(struct dumperinfo *di, struct minidumpstate *state) | cpu_minidumpsys(struct dumperinfo *di, struct minidumpstate *state) | ||||
{ | { | ||||
uint32_t pmapsize; | uint32_t pmapsize; | ||||
vm_offset_t va; | vm_offset_t va, kva_end; | ||||
int error; | int error; | ||||
uint64_t *pml4, *pdp, *pd, *pt, pa; | uint64_t *pml4, *pdp, *pd, *pt, pa; | ||||
int i, ii, j, k, n; | uint64_t pdpe, pde, pte; | ||||
int ii, j, k, n; | |||||
int retry_count; | int retry_count; | ||||
struct minidumphdr mdhdr; | struct minidumphdr mdhdr; | ||||
retry_count = 0; | retry_count = 0; | ||||
retry: | retry: | ||||
retry_count++; | retry_count++; | ||||
/* Walk page table pages, set bits in vm_page_dump */ | /* Snapshot the KVA upper bound in case it grows. */ | ||||
kva_end = MAX(KERNBASE + nkpt * NBPDR, kernel_vm_end); | |||||
/* | |||||
* Walk the kernel page table pages, setting the active entries in the | |||||
* dump bitmap. | |||||
* | |||||
* NB: for a live dump, we may be racing with updates to the page | |||||
* tables, so care must be taken to read each entry only once. | |||||
*/ | |||||
pmapsize = 0; | pmapsize = 0; | ||||
for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + nkpt * NBPDR, | for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; ) { | ||||
kernel_vm_end); ) { | |||||
/* | /* | ||||
* We always write a page, even if it is zero. Each | * We always write a page, even if it is zero. Each | ||||
* page written corresponds to 1GB of space | * page written corresponds to 1GB of space | ||||
*/ | */ | ||||
pmapsize += PAGE_SIZE; | pmapsize += PAGE_SIZE; | ||||
ii = pmap_pml4e_index(va); | ii = pmap_pml4e_index(va); | ||||
pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii; | pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii; | ||||
pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); | pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); | ||||
i = pmap_pdpe_index(va); | pdpe = pdp[pmap_pdpe_index(va)]; | ||||
markj (Inline Comment): There is no guarantee in general that the compiled code will load PTEs only once. atomic_load_* is needed, I believe.
mhorne (Author, Unsubmitted, Done): Right, I should have realized this. Will update shortly.
if ((pdp[i] & PG_V) == 0) { | if ((pdpe & PG_V) == 0) { | ||||
va += NBPDP; | va += NBPDP; | ||||
continue; | continue; | ||||
} | } | ||||
/* | /* | ||||
* 1GB page is represented as 512 2MB pages in a dump. | * 1GB page is represented as 512 2MB pages in a dump. | ||||
*/ | */ | ||||
if ((pdp[i] & PG_PS) != 0) { | if ((pdpe & PG_PS) != 0) { | ||||
va += NBPDP; | va += NBPDP; | ||||
pa = pdp[i] & PG_PS_FRAME; | pa = pdpe & PG_PS_FRAME; | ||||
for (n = 0; n < NPDEPG * NPTEPG; n++) { | for (n = 0; n < NPDEPG * NPTEPG; n++) { | ||||
if (dump_page_is_dumpable(pa)) | if (dump_page_is_dumpable(pa)) | ||||
dump_add_page(pa); | dump_add_page(pa); | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
} | } | ||||
continue; | continue; | ||||
} | } | ||||
pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME); | pd = (uint64_t *)PHYS_TO_DMAP(pdpe & PG_FRAME); | ||||
for (n = 0; n < NPDEPG; n++, va += NBPDR) { | for (n = 0; n < NPDEPG; n++, va += NBPDR) { | ||||
j = pmap_pde_index(va); | pde = pd[pmap_pde_index(va)]; | ||||
if ((pd[j] & PG_V) == 0) | if ((pde & PG_V) == 0) | ||||
continue; | continue; | ||||
if ((pd[j] & PG_PS) != 0) { | if ((pde & PG_PS) != 0) { | ||||
/* This is an entire 2M page. */ | /* This is an entire 2M page. */ | ||||
pa = pd[j] & PG_PS_FRAME; | pa = pde & PG_PS_FRAME; | ||||
for (k = 0; k < NPTEPG; k++) { | for (k = 0; k < NPTEPG; k++) { | ||||
if (dump_page_is_dumpable(pa)) | if (dump_page_is_dumpable(pa)) | ||||
dump_add_page(pa); | dump_add_page(pa); | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
} | } | ||||
continue; | continue; | ||||
} | } | ||||
pa = pd[j] & PG_FRAME; | pa = pde & PG_FRAME; | ||||
/* set bit for this PTE page */ | /* set bit for this PTE page */ | ||||
if (dump_page_is_dumpable(pa)) | if (dump_page_is_dumpable(pa)) | ||||
dump_add_page(pa); | dump_add_page(pa); | ||||
/* and for each valid page in this 2MB block */ | /* and for each valid page in this 2MB block */ | ||||
pt = (uint64_t *)PHYS_TO_DMAP(pd[j] & PG_FRAME); | pt = (uint64_t *)PHYS_TO_DMAP(pde & PG_FRAME); | ||||
for (k = 0; k < NPTEPG; k++) { | for (k = 0; k < NPTEPG; k++) { | ||||
if ((pt[k] & PG_V) == 0) | pte = pt[k]; | ||||
if ((pte & PG_V) == 0) | |||||
continue; | continue; | ||||
pa = pt[k] & PG_FRAME; | pa = pte & PG_FRAME; | ||||
if (dump_page_is_dumpable(pa)) | if (dump_page_is_dumpable(pa)) | ||||
dump_add_page(pa); | dump_add_page(pa); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
/* Calculate dump size. */ | /* Calculate dump size. */ | ||||
dumpsize = pmapsize; | dumpsize = pmapsize; | ||||
▲ Show 20 Lines • Show All 59 Lines • ▼ Show 20 Lines | retry: | ||||
/* Dump bitmap */ | /* Dump bitmap */ | ||||
error = blk_write(di, (char *)vm_page_dump, 0, | error = blk_write(di, (char *)vm_page_dump, 0, | ||||
round_page(BITSET_SIZE(vm_page_dump_pages))); | round_page(BITSET_SIZE(vm_page_dump_pages))); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
/* Dump kernel page directory pages */ | /* Dump kernel page directory pages */ | ||||
bzero(fakepd, sizeof(fakepd)); | bzero(fakepd, sizeof(fakepd)); | ||||
for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + nkpt * NBPDR, | for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; va += NBPDP) { | ||||
kernel_vm_end); va += NBPDP) { | |||||
ii = pmap_pml4e_index(va); | ii = pmap_pml4e_index(va); | ||||
pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii; | pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii; | ||||
pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); | pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); | ||||
i = pmap_pdpe_index(va); | pdpe = pdp[pmap_pdpe_index(va)]; | ||||
/* We always write a page, even if it is zero */ | /* We always write a page, even if it is zero */ | ||||
if ((pdp[i] & PG_V) == 0) { | if ((pdpe & PG_V) == 0) { | ||||
error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); | error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
/* flush, in case we reuse fakepd in the same block */ | /* flush, in case we reuse fakepd in the same block */ | ||||
error = blk_flush(di); | error = blk_flush(di); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
continue; | continue; | ||||
} | } | ||||
/* 1GB page is represented as 512 2MB pages in a dump */ | /* 1GB page is represented as 512 2MB pages in a dump */ | ||||
if ((pdp[i] & PG_PS) != 0) { | if ((pdpe & PG_PS) != 0) { | ||||
/* PDPE and PDP have identical layout in this case */ | /* PDPE and PDP have identical layout in this case */ | ||||
fakepd[0] = pdp[i]; | fakepd[0] = pdpe; | ||||
for (j = 1; j < NPDEPG; j++) | for (j = 1; j < NPDEPG; j++) | ||||
fakepd[j] = fakepd[j - 1] + NBPDR; | fakepd[j] = fakepd[j - 1] + NBPDR; | ||||
error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); | error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
/* flush, in case we reuse fakepd in the same block */ | /* flush, in case we reuse fakepd in the same block */ | ||||
error = blk_flush(di); | error = blk_flush(di); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
bzero(fakepd, sizeof(fakepd)); | bzero(fakepd, sizeof(fakepd)); | ||||
continue; | continue; | ||||
} | } | ||||
pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME); | pd = (uint64_t *)PHYS_TO_DMAP(pdpe & PG_FRAME); | ||||
error = blk_write(di, (char *)pd, 0, PAGE_SIZE); | error = blk_write(di, (char *)pd, 0, PAGE_SIZE); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
error = blk_flush(di); | error = blk_flush(di); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
} | } | ||||
Show All 40 Lines |
There is no guarantee in general that the compiled code will load PTEs only once. atomic_load_* is needed, I believe.