Changeset View
Standalone View
sys/arm/arm/unwind.c
Show All 26 Lines | |||||
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>

#include <machine/stack.h>

#include "linker_if.h"
static MALLOC_DEFINE(M_ARMUNWIND, "unwind info", "unwind info for arm"); | |||||
/*
 * Definitions for the instruction interpreter.
 *
 * The ARM EABI specifies how to perform the frame unwinding in the
 * Exception Handling ABI for the ARM Architecture document. To perform
 * the unwind we need to know the initial frame pointer, stack pointer,
 * link register and program counter. We then find the entry within the
 * index table that points to the function the program counter is within.
▲ Show 20 Lines • Show All 52 Lines • ▼ Show 20 Lines | |||||
/*
 * Expand a 31-bit signed (prel31) value to a 32-bit signed value by
 * sign-extending from bit 30.  Implemented with an explicit test of the
 * sign bit instead of "((int32_t)v << 1) / 2", because left-shifting a
 * signed value whose result is not representable is undefined behavior.
 * The unsigned-to-signed conversion below is implementation-defined and
 * is two's complement on every supported target.
 */
static __inline int32_t
expand_prel31(uint32_t prel31)
{

	if ((prel31 & 0x40000000u) != 0)
		return ((int32_t)(prel31 | 0x80000000u));
	return ((int32_t)(prel31 & 0x3fffffffu));
}
struct search_context { | struct module_idx { | ||||
uint32_t addr; | SLIST_ENTRY(module_idx) module_next; | ||||
caddr_t exidx_start; | caddr_t address; | ||||
caddr_t exidx_end; | size_t size; | ||||
caddr_t start; | |||||
caddr_t end; | |||||
}; | }; | ||||
static int | static SLIST_HEAD(slisthead, module_idx) modules = | ||||
module_search(linker_file_t lf, void *context) | SLIST_HEAD_INITIALIZER(head); | ||||
static struct mtx mtx_modules; | |||||
static void | |||||
unwind_kld_load(void *arg __unused, linker_file_t lf) | |||||
markj: These should be static. | |||||
{ | { | ||||
struct search_context *sc = context; | caddr_t exidx_start, exidx_end; | ||||
struct module_idx *m, *newentry; | |||||
linker_symval_t symval; | linker_symval_t symval; | ||||
c_linker_sym_t sym; | c_linker_sym_t sym; | ||||
if (lf->address <= (caddr_t)sc->addr && | |||||
(lf->address + lf->size) >= (caddr_t)sc->addr) { | |||||
if ((LINKER_LOOKUP_SYMBOL(lf, "__exidx_start", &sym) == 0 || | if ((LINKER_LOOKUP_SYMBOL(lf, "__exidx_start", &sym) == 0 || | ||||
LINKER_LOOKUP_SYMBOL(lf, "exidx_start", &sym) == 0) && | LINKER_LOOKUP_SYMBOL(lf, "_exidx_start", &sym) == 0) && | ||||
Done Inline ActionsThe indentation on this line and the next looks wrong. markj: The indentation on this line and the next looks wrong. | |||||
LINKER_SYMBOL_VALUES(lf, sym, &symval) == 0) | LINKER_SYMBOL_VALUES(lf, sym, &symval) == 0) | ||||
sc->exidx_start = symval.value; | exidx_start = symval.value; | ||||
else | |||||
return; | |||||
if ((LINKER_LOOKUP_SYMBOL(lf, "__exidx_end", &sym) == 0 || | if ((LINKER_LOOKUP_SYMBOL(lf, "__exidx_end", &sym) == 0 || | ||||
LINKER_LOOKUP_SYMBOL(lf, "exidx_end", &sym) == 0) && | LINKER_LOOKUP_SYMBOL(lf, "_exidx_end", &sym) == 0) && | ||||
LINKER_SYMBOL_VALUES(lf, sym, &symval) == 0) | LINKER_SYMBOL_VALUES(lf, sym, &symval) == 0) | ||||
sc->exidx_end = symval.value; | exidx_end = symval.value; | ||||
else | |||||
return; | |||||
if (sc->exidx_start != NULL && sc->exidx_end != NULL) | newentry = malloc(sizeof(*newentry), M_ARMUNWIND, M_WAITOK); | ||||
Done Inline ActionsThis should be declared at the beginning of the function; the first argument should be sizeof(*newentry). markj: This should be declared at the beginning of the function; the first argument should be sizeof… | |||||
mtx_lock(&mtx_modules); | |||||
SLIST_FOREACH(m, &modules, module_next) { | |||||
if (m->address == 0 || m->address == lf->address) { | |||||
m->start = exidx_start; | |||||
m->end = exidx_end; | |||||
m->size = lf->size; | |||||
// set address last | |||||
andrewUnsubmitted Done Inline ActionsShould be a /* ... */ comment andrew: Should be a `/* ... */` comment | |||||
howard0su_gmail.comAuthorUnsubmitted Not Done Inline Actionsi will fix this. howard0su_gmail.com: i will fix this. | |||||
atomic_store_rel_ptr((uint32_t*)&m->address, (uint32_t)lf->address); | |||||
Done Inline ActionsWe need to use atomic_store_rel_ptr to store m->address, otherwise the hardware may reorder the previous stores. andrew: We need to use atomic_store_rel_ptr to store m->address, otherwise the hardware may reorder the… | |||||
mtx_unlock(&mtx_modules); | |||||
free(newentry, M_ARMUNWIND); | |||||
return; | |||||
} | |||||
} | |||||
newentry->start = exidx_start; | |||||
newentry->end = exidx_end; | |||||
newentry->size = lf->size; | |||||
newentry->address = lf->address; | |||||
SLIST_INSERT_HEAD(&modules, newentry, module_next); | |||||
andrewUnsubmitted Not Done Inline ActionsThis is unsafe as SLIST_INSERT_HEAD doesn't have any memory ordering guarantee. We need to ensure the above entry is observable and the next pointer is valid before setting the new head. andrew: This is unsafe as `SLIST_INSERT_HEAD` doesn't have any memory ordering guarantee. We need to… | |||||
howard0su_gmail.comAuthorUnsubmitted Not Done Inline Actionsthis is done in scope of mtx_modules. unload will not happen here. howard0su_gmail.com: this is done in scope of mtx_modules. unload will not happen here. | |||||
Not Done Inline ActionsI'm not concerned about unload, my worry is with find_index. It iterates over the list without the lock however if the head was updated before the next pointer it may only see a single item in the list. To fix this we would need to manually write the two items with a barrier after setting the next pointer, but before setting the new head. andrew: I'm not concerned about unload, my worry is with `find_index`. It iterates over the list… | |||||
mtx_unlock(&mtx_modules); | |||||
} | |||||
static void | |||||
unwind_kld_unload(void *arg, const char *filename, caddr_t address, size_t length) | |||||
{ | |||||
struct module_idx *m; | |||||
mtx_lock(&mtx_modules); | |||||
SLIST_FOREACH(m, &modules, module_next) { | |||||
if (m->address == address) { | |||||
m->address = 0; | |||||
mtx_unlock(&mtx_modules); | |||||
return; | |||||
} | |||||
Done Inline ActionsI think this is safe without using an atomic function as word sized and aligned writes are atomic. andrew: I think this is safe without using an atomic function as word sized and aligned writes are… | |||||
} | |||||
mtx_unlock(&mtx_modules); | |||||
} | |||||
static int | |||||
module_search(linker_file_t lf, void* arg) | |||||
{ | |||||
unwind_kld_load(arg, lf); | |||||
return (1); | return (1); | ||||
panic("Invalid module %s, no unwind tables\n", lf->filename); | |||||
} | } | ||||
eventhandler_tag unwind_kld_load_tag; | |||||
eventhandler_tag unwind_kld_unload_tag; | |||||
static int | |||||
unwind_initialize(void) | |||||
{ | |||||
mtx_init(&mtx_modules, "unwind module lock", NULL, MTX_DEF); | |||||
linker_file_foreach(module_search, NULL); | |||||
/* Register callbacks for linker file load and unload events. */ | |||||
unwind_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, | |||||
unwind_kld_load, NULL, EVENTHANDLER_PRI_ANY); | |||||
unwind_kld_unload_tag = EVENTHANDLER_REGISTER(kld_unload, | |||||
unwind_kld_unload, NULL, EVENTHANDLER_PRI_ANY); | |||||
return (0); | return (0); | ||||
} | } | ||||
SYSINIT(unwind_init, SI_SUB_KLD, SI_ORDER_ANY, unwind_initialize, NULL); | |||||
/* | /* | ||||
* Perform a binary search of the index table to find the function | * Perform a binary search of the index table to find the function | ||||
* with the largest address that doesn't exceed addr. | * with the largest address that doesn't exceed addr. | ||||
*/ | */ | ||||
static struct unwind_idx * | static struct unwind_idx * | ||||
find_index(uint32_t addr, int search_modules) | find_index(uint32_t addr) | ||||
{ | { | ||||
struct search_context sc; | |||||
caddr_t idx_start, idx_end; | caddr_t idx_start, idx_end; | ||||
caddr_t daddr, dstart, dend; | |||||
unsigned int min, mid, max; | unsigned int min, mid, max; | ||||
struct unwind_idx *start; | struct unwind_idx *start = NULL; | ||||
struct unwind_idx *item; | struct unwind_idx *item; | ||||
int32_t prel31_addr; | int32_t prel31_addr; | ||||
uint32_t func_addr; | uint32_t func_addr; | ||||
struct module_idx *m; | |||||
size_t size; | |||||
SLIST_FOREACH(m, &modules, module_next) { | |||||
do { | |||||
daddr = (caddr_t)atomic_load_acq_ptr((uint32_t *)&m->address); | |||||
Done Inline ActionsThere is an issue here if, between reading m->address, and using it, the module we are looking at is unloaded and a new module reuses the entry. I think the following code should be safe. It does assume the module we are interested in doesn't get unloaded from under us. I think this is a safe assumption as, if it is unloaded we will have issues when the thread returns to the stack from that module, it won't have anything to run. uintptr_t a; do { a = atomic_load_acq_ptr(m->address); if (a == 0) break; start = m->start; size = m->size; /* Ensure m->start and m->size have been loaded before m->address */ atomic_thread_fence_acq(); } while (a != m->address); if (a != 0 && a >= addr && (a + size) <= addr) { ... } andrew: There is an issue here if, between reading `m->address`, and using it, the module we are… | |||||
Done Inline ActionsI don't really agree that this is a safe assumption: the kldunload_try event fires before any module unload/shutdown/quiesce handlers are called. At this point, threads may still be validly executing code within the module. This might be addressed by using the kldunload event handler rather than kldunload_try; I'm not sure why the latter is being used here, since it never tries to veto the unload. markj: I don't really agree that this is a safe assumption: the kldunload_try event fires before any… | |||||
if (daddr == 0) | |||||
break; | |||||
size = m->size; | |||||
dstart = m->start; | |||||
dend = m->end; | |||||
/* Ensure start, end, size have been loaded before address */ | |||||
atomic_thread_fence_acq(); | |||||
} while (daddr != m->address); | |||||
if (daddr != 0 && daddr <= (caddr_t)addr && | |||||
(daddr + size) >= (caddr_t)addr) { | |||||
andrewUnsubmitted Not Done Inline ActionsIf the statement doesn't fit on one line the second line should be indented 2 tabs followed by 4 spaces. andrew: If the statement doesn't fit on one line the second line should be indented 2 tabs followed by… | |||||
start = (struct unwind_idx *)dstart; | |||||
idx_start = dstart; | |||||
idx_end = dend; | |||||
break; | |||||
} | |||||
} | |||||
if (start == NULL) { | |||||
start = (struct unwind_idx *)&exidx_start; | start = (struct unwind_idx *)&exidx_start; | ||||
idx_start = (caddr_t)&exidx_start; | idx_start = (caddr_t)&exidx_start; | ||||
idx_end = (caddr_t)&exidx_end; | idx_end = (caddr_t)&exidx_end; | ||||
/* This may acquire a lock */ | |||||
if (search_modules) { | |||||
bzero(&sc, sizeof(sc)); | |||||
sc.addr = addr; | |||||
if (linker_file_foreach(module_search, &sc) != 0 && | |||||
sc.exidx_start != NULL && sc.exidx_end != NULL) { | |||||
start = (struct unwind_idx *)sc.exidx_start; | |||||
idx_start = sc.exidx_start; | |||||
idx_end = sc.exidx_end; | |||||
} | } | ||||
} | |||||
min = 0; | min = 0; | ||||
max = (idx_end - idx_start) / sizeof(struct unwind_idx); | max = (idx_end - idx_start) / sizeof(struct unwind_idx); | ||||
while (min != max) { | while (min != max) { | ||||
mid = min + (max - min + 1) / 2; | mid = min + (max - min + 1) / 2; | ||||
item = &start[mid]; | item = &start[mid]; | ||||
▲ Show 20 Lines • Show All 198 Lines • ▼ Show 20 Lines | if (state->registers[PC] == 0) { | ||||
if (state->start_pc != state->registers[PC]) | if (state->start_pc != state->registers[PC]) | ||||
state->update_mask |= 1 << PC; | state->update_mask |= 1 << PC; | ||||
} | } | ||||
return 0; | return 0; | ||||
} | } | ||||
int | int | ||||
unwind_stack_one(struct unwind_state *state, int can_lock) | unwind_stack_one(struct unwind_state *state) | ||||
{ | { | ||||
struct unwind_idx *index; | struct unwind_idx *index; | ||||
int finished; | int finished; | ||||
/* Reset the mask of updated registers */ | /* Reset the mask of updated registers */ | ||||
state->update_mask = 0; | state->update_mask = 0; | ||||
/* The pc value is correct and will be overwritten, save it */ | /* The pc value is correct and will be overwritten, save it */ | ||||
state->start_pc = state->registers[PC]; | state->start_pc = state->registers[PC]; | ||||
/* Find the item to run */ | /* Find the item to run */ | ||||
index = find_index(state->start_pc, can_lock); | index = find_index(state->start_pc); | ||||
finished = 0; | finished = 0; | ||||
if (index->insn != EXIDX_CANTUNWIND) { | if (index->insn != EXIDX_CANTUNWIND) { | ||||
if (index->insn & (1U << 31)) { | if (index->insn & (1U << 31)) { | ||||
/* The data is within the instruction */ | /* The data is within the instruction */ | ||||
state->insn = &index->insn; | state->insn = &index->insn; | ||||
} else { | } else { | ||||
/* A prel31 offset to the unwind table */ | /* A prel31 offset to the unwind table */ | ||||
Show All 14 Lines |
These should be static.