Changeset View
Changeset View
Standalone View
Standalone View
lib/libkvm/kvm_private.c
Show First 20 Lines • Show All 284 Lines • ▼ Show 20 Lines | _kvm_map_get(kvm_t *kd, u_long pa, unsigned int page_size) | ||||
addr = (uintptr_t)kd->page_map + off; | addr = (uintptr_t)kd->page_map + off; | ||||
if (off >= kd->pt_sparse_off) | if (off >= kd->pt_sparse_off) | ||||
addr = (uintptr_t)kd->sparse_map + (off - kd->pt_sparse_off); | addr = (uintptr_t)kd->sparse_map + (off - kd->pt_sparse_off); | ||||
return (void *)addr; | return (void *)addr; | ||||
} | } | ||||
int | int | ||||
_kvm_pt_init(kvm_t *kd, size_t map_len, off_t map_off, off_t sparse_off, | _kvm_pt_init(kvm_t *kd, off_t dump_avail_off, size_t map_len, off_t map_off, | ||||
int page_size, int word_size) | off_t sparse_off, int page_size, int word_size) | ||||
{ | { | ||||
uint64_t *addr; | uint64_t *addr; | ||||
uint32_t *popcount_bin; | uint32_t *popcount_bin; | ||||
int bin_popcounts = 0; | int bin_popcounts = 0, i; | ||||
uint64_t pc_bins, res; | uint64_t pc_bins, res; | ||||
ssize_t rd; | ssize_t rd; | ||||
i = 0; | |||||
jhb: So this means we can't examine old crash dumps? Presumably it would be possible to add a… | |||||
scottphAuthorUnsubmitted Done Inline ActionsThis should maintain compatibility with old dumps now, we could even possibly not bump the dump version number, but that seemed kinda wrong so I've updated here and checked for the old version by number, like amd64 was doing before. scottph: This should maintain compatibility with old dumps now, we could even possibly not bump the dump… | |||||
do { | |||||
if (i * sizeof(kpaddr_t) >= kd->dump_avail_size) { | |||||
kd->dump_avail_size = MAX(8 * sizeof(kpaddr_t), | |||||
2 * kd->dump_avail_size); | |||||
kd->dump_avail = realloc(kd->dump_avail, | |||||
kd->dump_avail_size); | |||||
Not Done Inline ActionsNice way to handle this. jhb: Nice way to handle this. | |||||
if (kd->dump_avail == NULL) { | |||||
_kvm_err(kd, kd->program, "cannot allocate %zu " | |||||
"bytes for dump_avail", kd->dump_avail_size); | |||||
return (-1); | |||||
} | |||||
} | |||||
rd = pread(kd->pmfd, &kd->dump_avail[i], 2 * sizeof(kpaddr_t), | |||||
dump_avail_off); | |||||
if (rd < 0 || rd != 2 * sizeof(kpaddr_t)) { | |||||
_kvm_err(kd, kd->program, "cannot read %zu bytes for " | |||||
"dump_avail", 2 * sizeof(kpaddr_t)); | |||||
return (-1); | |||||
} | |||||
i += 2; | |||||
dump_avail_off += 2 * sizeof(kpaddr_t); | |||||
} while (kd->dump_avail[i - 1] != 0 && i < 128); | |||||
if (i >= 128) | |||||
kd->dump_avail[0] = kd->dump_avail[1] = 0; | |||||
/* | /* | ||||
* Map the bitmap specified by the arguments. | * Map the bitmap specified by the arguments. | ||||
*/ | */ | ||||
kd->pt_map = _kvm_malloc(kd, map_len); | kd->pt_map = _kvm_malloc(kd, map_len); | ||||
if (kd->pt_map == NULL) { | if (kd->pt_map == NULL) { | ||||
_kvm_err(kd, kd->program, "cannot allocate %zu bytes for bitmap", | _kvm_err(kd, kd->program, "cannot allocate %zu bytes for bitmap", | ||||
map_len); | map_len); | ||||
return (-1); | return (-1); | ||||
▲ Show 20 Lines • Show All 79 Lines • ▼ Show 20 Lines | _kvm_pmap_init(kvm_t *kd, uint32_t pmap_size, off_t pmap_off) | ||||
if (pread(kd->pmfd, kd->page_map, pmap_size, pmap_off) != exp_len) { | if (pread(kd->pmfd, kd->page_map, pmap_size, pmap_off) != exp_len) { | ||||
_kvm_err(kd, kd->program, "cannot read %d bytes from " | _kvm_err(kd, kd->program, "cannot read %d bytes from " | ||||
"offset %jd for page map", pmap_size, (intmax_t)pmap_off); | "offset %jd for page map", pmap_size, (intmax_t)pmap_off); | ||||
return (-1); | return (-1); | ||||
} | } | ||||
return (0); | return (0); | ||||
} | } | ||||
/*
 * Map a physical address to its bit index in the sparse-page bitmap.
 *
 * kd->dump_avail is a flat list of (start, end) physical address pairs
 * terminated by a pair whose end entry is 0 (see the loader in
 * _kvm_pt_init()).  Bitmap bits exist only for pages inside those
 * ranges, so pa's bit index is its page number minus the pages in the
 * holes below it; 'adj' accumulates the page counts of the ranges that
 * lie entirely below pa.
 *
 * NOTE(review): mixes howmany() (round-up) for range ends with plain
 * division (round-down) for range starts — presumably deliberate so a
 * partial trailing page still gets a bit; confirm against the dump
 * writer.  Also assumes at least one valid range was loaded: if the
 * very first end entry is 0 the loop exits with i == 0 and the final
 * return reads dump_avail[-1] — verify callers cannot hit this.
 */
uint64_t
_kvm_pa_bit_id(kvm_t *kd, uint64_t pa, unsigned int page_size)
{
	uint64_t adj;
	int i;

	adj = 0;
	for (i = 0; kd->dump_avail[i + 1] != 0; i += 2) {
		if (pa >= kd->dump_avail[i + 1]) {
			/* pa is above this range: add its page count. */
			adj += howmany(kd->dump_avail[i + 1], page_size) -
			    kd->dump_avail[i] / page_size;
		} else {
			/* pa is in (or below) this range. */
			return (pa / page_size -
			    kd->dump_avail[i] / page_size + adj);
		}
	}
	/* pa lies beyond the last range; offset from its end. */
	return (adj + pa / page_size -
	    howmany(kd->dump_avail[i - 1], page_size));
}
/*
 * Inverse of _kvm_pa_bit_id(): map a bit index in the sparse-page
 * bitmap back to the physical address of the page it represents.
 *
 * Walks the zero-terminated (start, end) pairs in kd->dump_avail,
 * subtracting each range's page count ('sz') from bit_id until the
 * range containing it is found; the address is then the range start,
 * rounded down to a page boundary, plus the remaining page offset.
 *
 * NOTE(review): assumes at least one valid range exists — with an
 * empty dump_avail the loop exits with i == 0 and the final return
 * reads dump_avail[-1]; verify callers cannot hit this.
 */
uint64_t
_kvm_bit_id_pa(kvm_t *kd, uint64_t bit_id, unsigned int page_size)
{
	uint64_t sz;
	int i;

	for (i = 0; kd->dump_avail[i + 1] != 0; i += 2) {
		/* Pages covered by this range (end rounded up). */
		sz = howmany(kd->dump_avail[i + 1], page_size) -
		    kd->dump_avail[i] / page_size;
		if (bit_id < sz) {
			return (rounddown2(kd->dump_avail[i], page_size) +
			    bit_id * page_size);
		}
		bit_id -= sz;
	}
	/* bit_id lies beyond the last range; offset from its end. */
	return (bit_id * page_size +
	    roundup2(kd->dump_avail[i - 1], page_size));
}
/* | /* | ||||
* Find the offset for the given physical page address; returns -1 otherwise. | * Find the offset for the given physical page address; returns -1 otherwise. | ||||
* | * | ||||
* A page's offset is represented by the sparse page base offset plus the | * A page's offset is represented by the sparse page base offset plus the | ||||
* number of bits set before its bit multiplied by page size. This means | * number of bits set before its bit multiplied by page size. This means | ||||
* that if a page exists in the dump, it's necessary to know how many pages | * that if a page exists in the dump, it's necessary to know how many pages | ||||
* in the dump precede it. Reduce this O(n) counting to O(1) by caching the | * in the dump precede it. Reduce this O(n) counting to O(1) by caching the | ||||
* number of bits set at POPCOUNT_BITS intervals. | * number of bits set at POPCOUNT_BITS intervals. | ||||
* | * | ||||
* Then to find the number of pages before the requested address, simply | * Then to find the number of pages before the requested address, simply | ||||
* index into the cache and count the number of bits set between that cache | * index into the cache and count the number of bits set between that cache | ||||
* bin and the page's bit. Halve the number of bytes that have to be | * bin and the page's bit. Halve the number of bytes that have to be | ||||
* checked by also counting down from the next higher bin if it's closer. | * checked by also counting down from the next higher bin if it's closer. | ||||
*/ | */ | ||||
off_t | off_t | ||||
_kvm_pt_find(kvm_t *kd, uint64_t pa, unsigned int page_size) | _kvm_pt_find(kvm_t *kd, uint64_t pa, unsigned int page_size) | ||||
{ | { | ||||
uint64_t *bitmap = kd->pt_map; | uint64_t *bitmap = kd->pt_map; | ||||
uint64_t pte_bit_id = pa / page_size; | uint64_t pte_bit_id = _kvm_pa_bit_id(kd, pa, page_size); | ||||
uint64_t pte_u64 = pte_bit_id / BITS_IN(*bitmap); | uint64_t pte_u64 = pte_bit_id / BITS_IN(*bitmap); | ||||
uint64_t popcount_id = pte_bit_id / POPCOUNT_BITS; | uint64_t popcount_id = pte_bit_id / POPCOUNT_BITS; | ||||
uint64_t pte_mask = 1ULL << (pte_bit_id % BITS_IN(*bitmap)); | uint64_t pte_mask = 1ULL << (pte_bit_id % BITS_IN(*bitmap)); | ||||
uint64_t bitN; | uint64_t bitN; | ||||
uint32_t count; | uint32_t count; | ||||
/* Check whether the page address requested is in the dump. */ | /* Check whether the page address requested is in the dump. */ | ||||
if (pte_bit_id >= (kd->pt_map_size * NBBY) || | if (pte_bit_id >= (kd->pt_map_size * NBBY) || | ||||
▲ Show 20 Lines • Show All 285 Lines • ▼ Show 20 Lines | _kvm_bitmap_init(struct kvm_bitmap *bm, u_long bitmapsize, u_long *idx) | ||||
bm->map = calloc(bitmapsize, sizeof *bm->map); | bm->map = calloc(bitmapsize, sizeof *bm->map); | ||||
if (bm->map == NULL) | if (bm->map == NULL) | ||||
return (0); | return (0); | ||||
bm->size = bitmapsize; | bm->size = bitmapsize; | ||||
return (1); | return (1); | ||||
} | } | ||||
void | void | ||||
_kvm_bitmap_set(struct kvm_bitmap *bm, u_long pa, unsigned int page_size) | _kvm_bitmap_set(struct kvm_bitmap *bm, u_long bm_index) | ||||
{ | { | ||||
u_long bm_index = pa / page_size; | |||||
uint8_t *byte = &bm->map[bm_index / 8]; | uint8_t *byte = &bm->map[bm_index / 8]; | ||||
*byte |= (1UL << (bm_index % 8)); | *byte |= (1UL << (bm_index % 8)); | ||||
} | } | ||||
int | int | ||||
_kvm_bitmap_next(struct kvm_bitmap *bm, u_long *idx) | _kvm_bitmap_next(struct kvm_bitmap *bm, u_long *idx) | ||||
{ | { | ||||
▲ Show 20 Lines • Show All 42 Lines • Show Last 20 Lines |
So this means we can't examine old crash dumps? Presumably it would be possible to add a fallback for the previous version if the architecture-specific backends passed in a "0" as the offset which would leave kd->dump_avail as NULL and then the two helper routines could use the old 'pa / idx' logic. In particular, it would be kind of annoying on stable that you can't examine an old crash dump, or that kgdb on head can't examine a stable crash dump, etc. Keep in mind that kgdb can do cross-debug (so you can copy the vmcrash from an RPi running head off onto a stable/12 amd64 host to debug).
Also, given that we are bumping the version, we could include the size of the dump_avail array in the header to avoid the need for the realloc loop. That would also let you mmap() the array directly instead of reading it with pread().