Changeset View
Changeset View
Standalone View
Standalone View
sys/amd64/amd64/minidump_machdep.c
Show First 20 Lines • Show All 46 Lines • ▼ Show 20 Lines | |||||
#include <vm/vm_page.h> | #include <vm/vm_page.h> | ||||
#include <vm/vm_phys.h> | #include <vm/vm_phys.h> | ||||
#include <vm/vm_dumpset.h> | #include <vm/vm_dumpset.h> | ||||
#include <vm/pmap.h> | #include <vm/pmap.h> | ||||
#include <machine/atomic.h> | #include <machine/atomic.h> | ||||
#include <machine/elf.h> | #include <machine/elf.h> | ||||
#include <machine/md_var.h> | #include <machine/md_var.h> | ||||
#include <machine/minidump.h> | #include <machine/minidump.h> | ||||
#include <machine/vmparam.h> | |||||
CTASSERT(sizeof(struct kerneldumpheader) == 512); | CTASSERT(sizeof(struct kerneldumpheader) == 512); | ||||
static struct kerneldumpheader kdh; | static struct kerneldumpheader kdh; | ||||
/* Handle chunked writes. */ | /* Handle chunked writes. */ | ||||
static size_t fragsz; | static size_t fragsz; | ||||
static void *dump_va; | static void *dump_va; | ||||
▲ Show 20 Lines • Show All 95 Lines • ▼ Show 20 Lines | |||||
/* A fake page table page, to avoid having to handle both 4K and 2M pages */ | /* A fake page table page, to avoid having to handle both 4K and 2M pages */ | ||||
static pd_entry_t fakepd[NPDEPG]; | static pd_entry_t fakepd[NPDEPG]; | ||||
int | int | ||||
cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state) | cpu_minidumpsys(struct dumperinfo *di, const struct minidumpstate *state) | ||||
{ | { | ||||
uint32_t pmapsize; | uint32_t pmapsize; | ||||
vm_offset_t va; | vm_offset_t va, kva_end; | ||||
int error; | int error; | ||||
uint64_t *pml4, *pdp, *pd, *pt, pa; | uint64_t *pml4, *pdp, *pd, *pt, pa; | ||||
int i, ii, j, k, n; | uint64_t pdpe, pde, pte; | ||||
int ii, j, k, n; | |||||
int retry_count; | int retry_count; | ||||
struct minidumphdr mdhdr; | struct minidumphdr mdhdr; | ||||
retry_count = 0; | retry_count = 0; | ||||
retry: | retry: | ||||
retry_count++; | retry_count++; | ||||
/* Walk page table pages, set bits in vm_page_dump */ | /* Snapshot the KVA upper bound in case it grows. */ | ||||
kva_end = MAX(KERNBASE + nkpt * NBPDR, kernel_vm_end); | |||||
/* | |||||
* Walk the kernel page table pages, setting the active entries in the | |||||
* dump bitmap. | |||||
* | |||||
* NB: for a live dump, we may be racing with updates to the page | |||||
* tables, so care must be taken to read each entry only once. | |||||
*/ | |||||
pmapsize = 0; | pmapsize = 0; | ||||
for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + nkpt * NBPDR, | for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; ) { | ||||
kernel_vm_end); ) { | |||||
/* | /* | ||||
* We always write a page, even if it is zero. Each | * We always write a page, even if it is zero. Each | ||||
* page written corresponds to 1GB of space | * page written corresponds to 1GB of space | ||||
*/ | */ | ||||
pmapsize += PAGE_SIZE; | pmapsize += PAGE_SIZE; | ||||
ii = pmap_pml4e_index(va); | ii = pmap_pml4e_index(va); | ||||
pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii; | pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii; | ||||
pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); | pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); | ||||
i = pmap_pdpe_index(va); | pdpe = atomic_load_64(&pdp[pmap_pdpe_index(va)]); | ||||
if ((pdp[i] & PG_V) == 0) { | if ((pdpe & PG_V) == 0) { | ||||
markj: There is no guarantee in general that the compiled code will load PTEs only once. atomic_load_* is needed, I believe.
mhorne: Right, I should have realized this. Will update shortly.
va += NBPDP; | va += NBPDP; | ||||
continue; | continue; | ||||
} | } | ||||
/* | /* | ||||
* 1GB page is represented as 512 2MB pages in a dump. | * 1GB page is represented as 512 2MB pages in a dump. | ||||
*/ | */ | ||||
if ((pdp[i] & PG_PS) != 0) { | if ((pdpe & PG_PS) != 0) { | ||||
va += NBPDP; | va += NBPDP; | ||||
pa = pdp[i] & PG_PS_FRAME; | pa = pdpe & PG_PS_FRAME; | ||||
for (n = 0; n < NPDEPG * NPTEPG; n++) { | for (n = 0; n < NPDEPG * NPTEPG; n++) { | ||||
if (vm_phys_is_dumpable(pa)) | if (vm_phys_is_dumpable(pa)) | ||||
dump_add_page(pa); | dump_add_page(pa); | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
} | } | ||||
continue; | continue; | ||||
} | } | ||||
pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME); | pd = (uint64_t *)PHYS_TO_DMAP(pdpe & PG_FRAME); | ||||
for (n = 0; n < NPDEPG; n++, va += NBPDR) { | for (n = 0; n < NPDEPG; n++, va += NBPDR) { | ||||
j = pmap_pde_index(va); | pde = atomic_load_64(pd[pmap_pde_index(va)]); | ||||
if ((pd[j] & PG_V) == 0) | if ((pde & PG_V) == 0) | ||||
continue; | continue; | ||||
if ((pd[j] & PG_PS) != 0) { | if ((pde & PG_PS) != 0) { | ||||
/* This is an entire 2M page. */ | /* This is an entire 2M page. */ | ||||
pa = pd[j] & PG_PS_FRAME; | pa = pde & PG_PS_FRAME; | ||||
for (k = 0; k < NPTEPG; k++) { | for (k = 0; k < NPTEPG; k++) { | ||||
if (vm_phys_is_dumpable(pa)) | if (vm_phys_is_dumpable(pa)) | ||||
dump_add_page(pa); | dump_add_page(pa); | ||||
pa += PAGE_SIZE; | pa += PAGE_SIZE; | ||||
} | } | ||||
continue; | continue; | ||||
} | } | ||||
pa = pd[j] & PG_FRAME; | pa = pde & PG_FRAME; | ||||
/* set bit for this PTE page */ | /* set bit for this PTE page */ | ||||
if (vm_phys_is_dumpable(pa)) | if (vm_phys_is_dumpable(pa)) | ||||
dump_add_page(pa); | dump_add_page(pa); | ||||
/* and for each valid page in this 2MB block */ | /* and for each valid page in this 2MB block */ | ||||
pt = (uint64_t *)PHYS_TO_DMAP(pd[j] & PG_FRAME); | pt = (uint64_t *)PHYS_TO_DMAP(pde & PG_FRAME); | ||||
for (k = 0; k < NPTEPG; k++) { | for (k = 0; k < NPTEPG; k++) { | ||||
if ((pt[k] & PG_V) == 0) | pte = atomic_load_64(&pt[k]); | ||||
if ((pte & PG_V) == 0) | |||||
continue; | continue; | ||||
pa = pt[k] & PG_FRAME; | pa = pte & PG_FRAME; | ||||
if (vm_phys_is_dumpable(pa)) | if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) | ||||
dump_add_page(pa); | dump_add_page(pa); | ||||
} | } | ||||
} | } | ||||
} | } | ||||
/* Calculate dump size. */ | /* Calculate dump size. */ | ||||
dumpsize = pmapsize; | dumpsize = pmapsize; | ||||
dumpsize += round_page(msgbufp->msg_size); | dumpsize += round_page(msgbufp->msg_size); | ||||
dumpsize += round_page(sizeof(dump_avail)); | dumpsize += round_page(sizeof(dump_avail)); | ||||
dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages)); | dumpsize += round_page(BITSET_SIZE(vm_page_dump_pages)); | ||||
VM_PAGE_DUMP_FOREACH(pa) { | VM_PAGE_DUMP_FOREACH(pa) { | ||||
/* Clear out undumpable pages now if needed */ | /* Clear out undumpable pages now if needed */ | ||||
if (vm_phys_is_dumpable(pa)) { | if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) { | ||||
dumpsize += PAGE_SIZE; | dumpsize += PAGE_SIZE; | ||||
} else { | } else { | ||||
dump_drop_page(pa); | dump_drop_page(pa); | ||||
} | } | ||||
} | } | ||||
dumpsize += PAGE_SIZE; | dumpsize += PAGE_SIZE; | ||||
wdog_next = progress = dumpsize; | wdog_next = progress = dumpsize; | ||||
▲ Show 20 Lines • Show All 45 Lines • ▼ Show 20 Lines | retry: | ||||
/* Dump bitmap */ | /* Dump bitmap */ | ||||
error = blk_write(di, (char *)vm_page_dump, 0, | error = blk_write(di, (char *)vm_page_dump, 0, | ||||
round_page(BITSET_SIZE(vm_page_dump_pages))); | round_page(BITSET_SIZE(vm_page_dump_pages))); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
/* Dump kernel page directory pages */ | /* Dump kernel page directory pages */ | ||||
bzero(fakepd, sizeof(fakepd)); | bzero(fakepd, sizeof(fakepd)); | ||||
for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + nkpt * NBPDR, | for (va = VM_MIN_KERNEL_ADDRESS; va < kva_end; va += NBPDP) { | ||||
kernel_vm_end); va += NBPDP) { | |||||
ii = pmap_pml4e_index(va); | ii = pmap_pml4e_index(va); | ||||
pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii; | pml4 = (uint64_t *)PHYS_TO_DMAP(KPML4phys) + ii; | ||||
pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); | pdp = (uint64_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME); | ||||
i = pmap_pdpe_index(va); | pdpe = atomic_load_64(&pdp[pmap_pdpe_index(va)]); | ||||
/* We always write a page, even if it is zero */ | /* We always write a page, even if it is zero */ | ||||
if ((pdp[i] & PG_V) == 0) { | if ((pdpe & PG_V) == 0) { | ||||
error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); | error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
/* flush, in case we reuse fakepd in the same block */ | /* flush, in case we reuse fakepd in the same block */ | ||||
error = blk_flush(di); | error = blk_flush(di); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
continue; | continue; | ||||
} | } | ||||
/* 1GB page is represented as 512 2MB pages in a dump */ | /* 1GB page is represented as 512 2MB pages in a dump */ | ||||
if ((pdp[i] & PG_PS) != 0) { | if ((pdpe & PG_PS) != 0) { | ||||
/* PDPE and PDP have identical layout in this case */ | /* PDPE and PDP have identical layout in this case */ | ||||
fakepd[0] = pdp[i]; | fakepd[0] = pdpe; | ||||
for (j = 1; j < NPDEPG; j++) | for (j = 1; j < NPDEPG; j++) | ||||
fakepd[j] = fakepd[j - 1] + NBPDR; | fakepd[j] = fakepd[j - 1] + NBPDR; | ||||
error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); | error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
/* flush, in case we reuse fakepd in the same block */ | /* flush, in case we reuse fakepd in the same block */ | ||||
error = blk_flush(di); | error = blk_flush(di); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
bzero(fakepd, sizeof(fakepd)); | bzero(fakepd, sizeof(fakepd)); | ||||
continue; | continue; | ||||
} | } | ||||
pd = (uint64_t *)PHYS_TO_DMAP(pdp[i] & PG_FRAME); | pa = pdpe & PG_FRAME; | ||||
if (PHYS_IN_DMAP(pa) && vm_phys_is_dumpable(pa)) { | |||||
pd = (uint64_t *)PHYS_TO_DMAP(pa); | |||||
error = blk_write(di, (char *)pd, 0, PAGE_SIZE); | error = blk_write(di, (char *)pd, 0, PAGE_SIZE); | ||||
} else { | |||||
/* Malformed pa, write the zeroed fakepd. */ | |||||
error = blk_write(di, (char *)&fakepd, 0, PAGE_SIZE); | |||||
} | |||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
error = blk_flush(di); | error = blk_flush(di); | ||||
if (error) | if (error) | ||||
goto fail; | goto fail; | ||||
} | } | ||||
/* Dump memory chunks */ | /* Dump memory chunks */ | ||||
Show All 39 Lines |
There is no guarantee in general that the compiled code will load PTEs only once. atomic_load_* is needed, I believe.