Changeset View
Standalone View
sys/arm/arm/unwind.c
Show All 26 Lines | |||||
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF | ||||
* ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
*/ | */ | ||||
#include <sys/cdefs.h> | #include <sys/cdefs.h> | ||||
__FBSDID("$FreeBSD$"); | __FBSDID("$FreeBSD$"); | ||||
#include <sys/param.h> | #include <sys/param.h> | ||||
#include <sys/types.h> | |||||
#include <sys/systm.h> | #include <sys/systm.h> | ||||
#include <sys/eventhandler.h> | |||||
#include <sys/kernel.h> | |||||
#include <sys/linker.h> | #include <sys/linker.h> | ||||
#include <sys/lock.h> | |||||
#include <sys/malloc.h> | |||||
#include <sys/mutex.h> | |||||
#include <sys/queue.h> | |||||
#include <machine/stack.h> | #include <machine/stack.h> | ||||
#include "linker_if.h" | #include "linker_if.h" | ||||
/* | /* | ||||
* Definitions for the instruction interpreter. | * Definitions for the instruction interpreter. | ||||
* | * | ||||
Show All 11 Lines | |||||
* patterns to encode that steps to take to update the stack pointer and | * patterns to encode that steps to take to update the stack pointer and | ||||
* link register to the correct values at the start of the function. | * link register to the correct values at the start of the function. | ||||
*/ | */ | ||||
/* A special case when we are unable to unwind past this function */ | /* A special case when we are unable to unwind past this function */ | ||||
#define EXIDX_CANTUNWIND 1 | #define EXIDX_CANTUNWIND 1 | ||||
/* | /* | ||||
* These are set in the linker script. Their addresses will be | |||||
* either the start or end of the exception table or index. | |||||
*/ | |||||
extern int exidx_start, exidx_end; | |||||
/* | |||||
* Entry types. | * Entry types. | ||||
* These are the only entry types that have been seen in the kernel. | * These are the only entry types that have been seen in the kernel. | ||||
*/ | */ | ||||
#define ENTRY_MASK 0xff000000 | #define ENTRY_MASK 0xff000000 | ||||
#define ENTRY_ARM_SU16 0x80000000 | #define ENTRY_ARM_SU16 0x80000000 | ||||
#define ENTRY_ARM_LU16 0x81000000 | #define ENTRY_ARM_LU16 0x81000000 | ||||
/* Instruction masks. */ | /* Instruction masks. */ | ||||
Show All 24 Lines | |||||
/* Expand a 31-bit signed value to a 32-bit signed value */
static __inline int32_t
expand_prel31(uint32_t prel31)
{

	/*
	 * Shift left as an unsigned value (well-defined even when bit 30
	 * is set), then arithmetic-shift right one bit to replicate bit 30
	 * into the sign bit.  The previous "(int32_t)x << 1 ... / 2" form
	 * left-shifted a signed value into the sign bit, which is
	 * undefined behavior in C.
	 */
	return ((int32_t)(prel31 << 1)) >> 1;
}
struct search_context { | struct module_idx { | ||||
uint32_t addr; | SLIST_ENTRY(module_idx) module_next; | ||||
caddr_t exidx_start; | caddr_t address; | ||||
caddr_t exidx_end; | size_t size; | ||||
caddr_t start; | |||||
caddr_t end; | |||||
}; | }; | ||||
static int | SLIST_HEAD(slisthead, module_idx) modules = | ||||
module_search(linker_file_t lf, void *context) | SLIST_HEAD_INITIALIZER(head); | ||||
struct mtx mtx_modules; | |||||
static void | |||||
unwind_kld_load(void *arg __unused, linker_file_t lf) | |||||
markj: These should be static. | |||||
{ | { | ||||
struct search_context *sc = context; | caddr_t exidx_start = 0, exidx_end = 0; | ||||
struct module_idx *m; | |||||
linker_symval_t symval; | linker_symval_t symval; | ||||
c_linker_sym_t sym; | c_linker_sym_t sym; | ||||
if (lf->address <= (caddr_t)sc->addr && | |||||
(lf->address + lf->size) >= (caddr_t)sc->addr) { | |||||
if ((LINKER_LOOKUP_SYMBOL(lf, "__exidx_start", &sym) == 0 || | if ((LINKER_LOOKUP_SYMBOL(lf, "__exidx_start", &sym) == 0 || | ||||
LINKER_LOOKUP_SYMBOL(lf, "exidx_start", &sym) == 0) && | LINKER_LOOKUP_SYMBOL(lf, "_exidx_start", &sym) == 0) && | ||||
Done Inline ActionsThe indentation on this line and the next looks wrong. markj: The indentation on this line and the next looks wrong. | |||||
LINKER_SYMBOL_VALUES(lf, sym, &symval) == 0) | LINKER_SYMBOL_VALUES(lf, sym, &symval) == 0) | ||||
sc->exidx_start = symval.value; | exidx_start = symval.value; | ||||
if ((LINKER_LOOKUP_SYMBOL(lf, "__exidx_end", &sym) == 0 || | if ((LINKER_LOOKUP_SYMBOL(lf, "__exidx_end", &sym) == 0 || | ||||
LINKER_LOOKUP_SYMBOL(lf, "exidx_end", &sym) == 0) && | LINKER_LOOKUP_SYMBOL(lf, "_exidx_end", &sym) == 0) && | ||||
LINKER_SYMBOL_VALUES(lf, sym, &symval) == 0) | LINKER_SYMBOL_VALUES(lf, sym, &symval) == 0) | ||||
sc->exidx_end = symval.value; | exidx_end = symval.value; | ||||
if (sc->exidx_start != NULL && sc->exidx_end != NULL) | if (exidx_start == 0 || exidx_end == 0) { | ||||
return; | |||||
} | |||||
struct module_idx *newentry = malloc(sizeof(struct module_idx), M_TEMP, M_WAITOK); | |||||
Done Inline ActionsThis should be declared at the beginning of the function; the first argument should be sizeof(*newentry). markj: This should be declared at the beginning of the function; the first argument should be sizeof… | |||||
mtx_lock(&mtx_modules); | |||||
SLIST_FOREACH(m, &modules, module_next) { | |||||
if (m->address == 0 || m->address == lf->address) { | |||||
m->start = exidx_start; | |||||
m->end = exidx_end; | |||||
m->size = lf->size; | |||||
// set address last | |||||
Done Inline ActionsShould be a /* ... */ comment andrew: Should be a `/* ... */` comment | |||||
Not Done Inline Actionsi will fix this. howard0su_gmail.com: i will fix this. | |||||
m->address = lf->address; | |||||
andrewUnsubmitted Done Inline ActionsWe need to use atomic_store_rel_ptr to store m->address, otherwise the hardware may reorder the previous stores. andrew: We need to use atomic_store_rel_ptr to store m->address, otherwise the hardware may reorder the… | |||||
mtx_unlock(&mtx_modules); | |||||
free(newentry, M_TEMP); | |||||
return; | |||||
} | |||||
} | |||||
newentry->start = exidx_start; | |||||
newentry->end = exidx_end; | |||||
newentry->size = lf->size; | |||||
newentry->address = lf->address; | |||||
SLIST_INSERT_HEAD(&modules, newentry, module_next); | |||||
Not Done Inline ActionsThis is unsafe as SLIST_INSERT_HEAD doesn't have any memory ordering guarantee. We need to ensure the above entry is observable and the next pointer is valid before setting the new head. andrew: This is unsafe as `SLIST_INSERT_HEAD` doesn't have any memory ordering guarantee. We need to… | |||||
Not Done Inline Actionsthis is done in scope of mtx_modules. unload will not happen here. howard0su_gmail.com: this is done in scope of mtx_modules. unload will not happen here. | |||||
mtx_unlock(&mtx_modules); | |||||
} | |||||
static void | |||||
unwind_kld_unload_try(void *arg __unused, linker_file_t lf, int *error) | |||||
{ | |||||
if (*error != 0) | |||||
/* We already have an error, so don't do anything. */ | |||||
Not Done Inline ActionsI'm not concerned about unload, my worry is with find_index. It iterates over the list without the lock however if the head was updated before the next pointer it may only see a single item in the list. To fix this we would need to manually write the two items with a barrier after setting the next pointer, but before setting the new head. andrew: I'm not concerned about unload, my worry is with `find_index`. It iterates over the list… | |||||
return; | |||||
struct module_idx *m; | |||||
mtx_lock(&mtx_modules); | |||||
SLIST_FOREACH(m, &modules, module_next) { | |||||
if (m->address == lf->address) { | |||||
m->address = 0; | |||||
andrewUnsubmitted Done Inline ActionsI think this is safe without using an atomic function as word sized and aligned writes are atomic. andrew: I think this is safe without using an atomic function as word sized and aligned writes are… | |||||
mtx_unlock(&mtx_modules); | |||||
return; | |||||
} | |||||
} | |||||
mtx_unlock(&mtx_modules); | |||||
} | |||||
/*
 * linker_file_foreach() callback used at initialization time to register
 * the unwind tables of every file already loaded.  Must return 0 to
 * continue the iteration: a non-zero return makes linker_file_foreach()
 * stop after the first linker file, leaving preloaded modules without
 * unwind info.
 */
static int
module_search(linker_file_t lf, void *arg)
{

	unwind_kld_load(arg, lf);
	return (0);
}
eventhandler_tag unwind_kld_load_tag; | |||||
eventhandler_tag unwind_kld_unload_try_tag; | |||||
static int unwind_initialize() | |||||
{ | |||||
mtx_init(&mtx_modules, "unwind module lock", NULL, MTX_DEF); | |||||
linker_file_foreach(module_search, NULL); | |||||
/* Register callbacks for linker file load and unload events. */ | |||||
unwind_kld_load_tag = EVENTHANDLER_REGISTER(kld_load, | |||||
unwind_kld_load, NULL, EVENTHANDLER_PRI_ANY); | |||||
unwind_kld_unload_try_tag = EVENTHANDLER_REGISTER(kld_unload_try, | |||||
unwind_kld_unload_try, NULL, EVENTHANDLER_PRI_ANY); | |||||
return (0); | return (0); | ||||
} | } | ||||
SYSINIT(unwind_init, SI_SUB_KLD, SI_ORDER_ANY, unwind_initialize, NULL); | |||||
/* | /* | ||||
* Perform a binary search of the index table to find the function | * Perform a binary search of the index table to find the function | ||||
* with the largest address that doesn't exceed addr. | * with the largest address that doesn't exceed addr. | ||||
*/ | */ | ||||
static struct unwind_idx * | static struct unwind_idx * | ||||
find_index(uint32_t addr, int search_modules) | find_index(uint32_t addr) | ||||
{ | { | ||||
struct search_context sc; | |||||
caddr_t idx_start, idx_end; | caddr_t idx_start, idx_end; | ||||
unsigned int min, mid, max; | unsigned int min, mid, max; | ||||
struct unwind_idx *start; | struct unwind_idx *start = NULL; | ||||
struct unwind_idx *item; | struct unwind_idx *item; | ||||
int32_t prel31_addr; | int32_t prel31_addr; | ||||
uint32_t func_addr; | uint32_t func_addr; | ||||
struct module_idx *m; | |||||
start = (struct unwind_idx *)&exidx_start; | SLIST_FOREACH(m, &modules, module_next) { | ||||
idx_start = (caddr_t)&exidx_start; | if (m->address != 0 && m->address <= (caddr_t)addr && | ||||
idx_end = (caddr_t)&exidx_end; | (m->address + m->size) >= (caddr_t)addr) { | ||||
start = (struct unwind_idx *)m->start; | |||||
Done Inline ActionsThere is an issue here if, between reading m->address, and using it, the module we are looking at is unloaded and a new module reuses the entry. I think the following code should be safe. It does assume the module we are interested in doesn't get unloaded from under us. I think this is a safe assumption as, if it is unloaded we will have issues when the thread returns to the stack from that module, it won't have anything to run. uintptr_t a; do { a = atomic_load_acq_ptr(&m->address); if (a == 0) break; start = m->start; size = m->size; /* Ensure m->start and m->size have been loaded before m->address */ atomic_thread_fence_acq(); } while (a != m->address); if (a != 0 && a <= addr && (a + size) >= addr) { ... } andrew: There is an issue here if, between reading `m->address`, and using it, the module we are… | |||||
Done Inline ActionsI don't really agree that this is a safe assumption: the kldunload_try event fires before any module unload/shutdown/quiesce handlers are called. At this point, threads may still be validly executing code within the module. This might be addressed by using the kldunload event handler rather than kldunload_try; I'm not sure why the latter is being used here, since it never tries to veto the unload. markj: I don't really agree that this is a safe assumption: the kldunload_try event fires before any… | |||||
/* This may acquire a lock */ | idx_start = m->start; | ||||
if (search_modules) { | idx_end = m->end; | ||||
bzero(&sc, sizeof(sc)); | break; | ||||
sc.addr = addr; | |||||
if (linker_file_foreach(module_search, &sc) != 0 && | |||||
sc.exidx_start != NULL && sc.exidx_end != NULL) { | |||||
start = (struct unwind_idx *)sc.exidx_start; | |||||
idx_start = sc.exidx_start; | |||||
idx_end = sc.exidx_end; | |||||
} | } | ||||
} | } | ||||
if (start == NULL) { | |||||
return (NULL); | |||||
} | |||||
min = 0; | min = 0; | ||||
max = (idx_end - idx_start) / sizeof(struct unwind_idx); | max = (idx_end - idx_start) / sizeof(struct unwind_idx); | ||||
Not Done Inline ActionsIf the statement doesn't fit on one line the second line should be indented 2 tabs followed by 4 spaces. andrew: If the statement doesn't fit on one line the second line should be indented 2 tabs followed by… | |||||
while (min != max) { | while (min != max) { | ||||
mid = min + (max - min + 1) / 2; | mid = min + (max - min + 1) / 2; | ||||
item = &start[mid]; | item = &start[mid]; | ||||
prel31_addr = expand_prel31(item->offset); | prel31_addr = expand_prel31(item->offset); | ||||
func_addr = (uint32_t)&item->offset + prel31_addr; | func_addr = (uint32_t)&item->offset + prel31_addr; | ||||
▲ Show 20 Lines • Show All 194 Lines • ▼ Show 20 Lines | if (state->registers[PC] == 0) { | ||||
if (state->start_pc != state->registers[PC]) | if (state->start_pc != state->registers[PC]) | ||||
state->update_mask |= 1 << PC; | state->update_mask |= 1 << PC; | ||||
} | } | ||||
return 0; | return 0; | ||||
} | } | ||||
int | int | ||||
unwind_stack_one(struct unwind_state *state, int can_lock) | unwind_stack_one(struct unwind_state *state) | ||||
{ | { | ||||
struct unwind_idx *index; | struct unwind_idx *index; | ||||
int finished; | int finished; | ||||
/* Reset the mask of updated registers */ | /* Reset the mask of updated registers */ | ||||
state->update_mask = 0; | state->update_mask = 0; | ||||
/* The pc value is correct and will be overwritten, save it */ | /* The pc value is correct and will be overwritten, save it */ | ||||
state->start_pc = state->registers[PC]; | state->start_pc = state->registers[PC]; | ||||
/* Find the item to run */ | /* Find the item to run */ | ||||
index = find_index(state->start_pc, can_lock); | index = find_index(state->start_pc); | ||||
if (index == NULL) | |||||
return 1; | |||||
finished = 0; | finished = 0; | ||||
if (index->insn != EXIDX_CANTUNWIND) { | if (index->insn != EXIDX_CANTUNWIND) { | ||||
if (index->insn & (1U << 31)) { | if (index->insn & (1U << 31)) { | ||||
/* The data is within the instruction */ | /* The data is within the instruction */ | ||||
state->insn = &index->insn; | state->insn = &index->insn; | ||||
} else { | } else { | ||||
/* A prel31 offset to the unwind table */ | /* A prel31 offset to the unwind table */ | ||||
Show All 14 Lines |
These should be static.