diff --git a/libexec/rtld-elf/riscv/reloc.c b/libexec/rtld-elf/riscv/reloc.c
index 5ea005a813cb..aa2cc97ae769 100644
--- a/libexec/rtld-elf/riscv/reloc.c
+++ b/libexec/rtld-elf/riscv/reloc.c
@@ -1,409 +1,505 @@
/*-
* Copyright (c) 2015-2017 Ruslan Bukin
* All rights reserved.
*
* This software was developed by SRI International and the University of
* Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
* ("CTSRD"), as part of the DARPA CRASH research programme.
*
* This software was developed by the University of Cambridge Computer
* Laboratory as part of the CTSRD Project, with support from the UK Higher
* Education Innovation Fund (HEIF).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include
#include
#include "debug.h"
#include "rtld.h"
#include "rtld_printf.h"
/*
* It is possible for the compiler to emit relocations for unaligned data.
* We handle this situation with these inlines.
*/
#define RELOC_ALIGNED_P(x) \
(((uintptr_t)(x) & (sizeof(void *) - 1)) == 0)
uint64_t
set_gp(Obj_Entry *obj)
{
uint64_t old;
SymLook req;
uint64_t gp;
int res;
__asm __volatile("mv %0, gp" : "=r"(old));
symlook_init(&req, "__global_pointer$");
req.ventry = NULL;
req.flags = SYMLOOK_EARLY;
res = symlook_obj(&req, obj);
if (res == 0) {
gp = req.sym_out->st_value;
__asm __volatile("mv gp, %0" :: "r"(gp));
}
return (old);
}
void
init_pltgot(Obj_Entry *obj)
{
if (obj->pltgot != NULL) {
obj->pltgot[0] = (Elf_Addr)&_rtld_bind_start;
obj->pltgot[1] = (Elf_Addr)obj;
}
}
int
do_copy_relocations(Obj_Entry *dstobj)
{
const Obj_Entry *srcobj, *defobj;
const Elf_Rela *relalim;
const Elf_Rela *rela;
const Elf_Sym *srcsym;
const Elf_Sym *dstsym;
const void *srcaddr;
const char *name;
void *dstaddr;
SymLook req;
size_t size;
int res;
/*
* COPY relocs are invalid outside of the main program
*/
assert(dstobj->mainprog);
relalim = (const Elf_Rela *)((const char *)dstobj->rela +
dstobj->relasize);
for (rela = dstobj->rela; rela < relalim; rela++) {
if (ELF_R_TYPE(rela->r_info) != R_RISCV_COPY)
continue;
dstaddr = (void *)(dstobj->relocbase + rela->r_offset);
dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info);
name = dstobj->strtab + dstsym->st_name;
size = dstsym->st_size;
symlook_init(&req, name);
req.ventry = fetch_ventry(dstobj, ELF_R_SYM(rela->r_info));
req.flags = SYMLOOK_EARLY;
for (srcobj = globallist_next(dstobj); srcobj != NULL;
srcobj = globallist_next(srcobj)) {
res = symlook_obj(&req, srcobj);
if (res == 0) {
srcsym = req.sym_out;
defobj = req.defobj_out;
break;
}
}
if (srcobj == NULL) {
_rtld_error(
"Undefined symbol \"%s\" referenced from COPY relocation in %s",
name, dstobj->path);
return (-1);
}
srcaddr = (const void *)(defobj->relocbase + srcsym->st_value);
memcpy(dstaddr, srcaddr, size);
}
return (0);
}
/*
* Process the PLT relocations.
*/
int
reloc_plt(Obj_Entry *obj, int flags __unused, RtldLockState *lockstate __unused)
{
const Elf_Rela *relalim;
const Elf_Rela *rela;
relalim = (const Elf_Rela *)((const char *)obj->pltrela +
obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
Elf_Addr *where;
- assert(ELF_R_TYPE(rela->r_info) == R_RISCV_JUMP_SLOT);
-
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
- *where += (Elf_Addr)obj->relocbase;
+
+ switch (ELF_R_TYPE(rela->r_info)) {
+ case R_RISCV_JUMP_SLOT:
+ *where += (Elf_Addr)obj->relocbase;
+ break;
+ case R_RISCV_IRELATIVE:
+ obj->irelative = true;
+ break;
+ default:
+ _rtld_error("Unknown relocation type %u in PLT",
+ (unsigned int)ELF_R_TYPE(rela->r_info));
+ return (-1);
+ }
}
return (0);
}
/*
* LD_BIND_NOW was set - force relocation for all jump slots
*/
int
reloc_jmpslots(Obj_Entry *obj, int flags, RtldLockState *lockstate)
{
const Obj_Entry *defobj;
const Elf_Rela *relalim;
const Elf_Rela *rela;
const Elf_Sym *def;
relalim = (const Elf_Rela *)((const char *)obj->pltrela +
obj->pltrelasize);
for (rela = obj->pltrela; rela < relalim; rela++) {
Elf_Addr *where;
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
switch(ELF_R_TYPE(rela->r_info)) {
case R_RISCV_JUMP_SLOT:
def = find_symdef(ELF_R_SYM(rela->r_info), obj,
&defobj, SYMLOOK_IN_PLT | flags, NULL, lockstate);
if (def == NULL) {
dbg("reloc_jmpslots: sym not found");
return (-1);
}
+ if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
+ obj->gnu_ifunc = true;
+ continue;
+ }
+
*where = (Elf_Addr)(defobj->relocbase + def->st_value);
break;
default:
_rtld_error("Unknown relocation type %x in jmpslot",
(unsigned int)ELF_R_TYPE(rela->r_info));
return (-1);
}
}
return (0);
}
+static void
+reloc_iresolve_one(Obj_Entry *obj, const Elf_Rela *rela,
+ RtldLockState *lockstate)
+{
+ Elf_Addr *where, target, *ptr;
+
+ ptr = (Elf_Addr *)(obj->relocbase + rela->r_addend);
+ where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
+ lock_release(rtld_bind_lock, lockstate);
+ target = call_ifunc_resolver(ptr);
+ wlock_acquire(rtld_bind_lock, lockstate);
+ *where = target;
+}
+
int
-reloc_iresolve(Obj_Entry *obj __unused,
- struct Struct_RtldLockState *lockstate __unused)
+reloc_iresolve(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{
+ const Elf_Rela *relalim;
+ const Elf_Rela *rela;
+
+ if (!obj->irelative)
+ return (0);
- /* XXX not implemented */
+ obj->irelative = false;
+ relalim = (const Elf_Rela *)((const char *)obj->pltrela +
+ obj->pltrelasize);
+ for (rela = obj->pltrela; rela < relalim; rela++) {
+ if (ELF_R_TYPE(rela->r_info) == R_RISCV_IRELATIVE)
+ reloc_iresolve_one(obj, rela, lockstate);
+ }
return (0);
}
int
-reloc_iresolve_nonplt(Obj_Entry *obj __unused,
- struct Struct_RtldLockState *lockstate __unused)
+reloc_iresolve_nonplt(Obj_Entry *obj, struct Struct_RtldLockState *lockstate)
{
+ const Elf_Rela *relalim;
+ const Elf_Rela *rela;
- /* XXX not implemented */
+ if (!obj->irelative_nonplt)
+ return (0);
+
+ obj->irelative_nonplt = false;
+ relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
+ for (rela = obj->rela; rela < relalim; rela++) {
+ if (ELF_R_TYPE(rela->r_info) == R_RISCV_IRELATIVE)
+ reloc_iresolve_one(obj, rela, lockstate);
+ }
return (0);
}
int
-reloc_gnu_ifunc(Obj_Entry *obj __unused, int flags __unused,
- struct Struct_RtldLockState *lockstate __unused)
+reloc_gnu_ifunc(Obj_Entry *obj, int flags,
+ struct Struct_RtldLockState *lockstate)
{
+ const Elf_Rela *relalim;
+ const Elf_Rela *rela;
+ Elf_Addr *where, target;
+ const Elf_Sym *def;
+ const Obj_Entry *defobj;
+
+ if (!obj->gnu_ifunc)
+ return (0);
- /* XXX not implemented */
+ relalim = (const Elf_Rela *)((const char *)obj->pltrela + obj->pltrelasize);
+ for (rela = obj->pltrela; rela < relalim; rela++) {
+ if (ELF_R_TYPE(rela->r_info) == R_RISCV_JUMP_SLOT) {
+ where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
+ def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj,
+ SYMLOOK_IN_PLT | flags, NULL, lockstate);
+ if (def == NULL)
+ return (-1);
+ if (ELF_ST_TYPE(def->st_info) != STT_GNU_IFUNC)
+ continue;
+
+ lock_release(rtld_bind_lock, lockstate);
+ target = (Elf_Addr)rtld_resolve_ifunc(defobj, def);
+ wlock_acquire(rtld_bind_lock, lockstate);
+ reloc_jmpslot(where, target, defobj, obj,
+ (const Elf_Rel *)rela);
+ }
+ }
+ obj->gnu_ifunc = false;
return (0);
}
Elf_Addr
reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
const Obj_Entry *defobj __unused, const Obj_Entry *obj __unused,
const Elf_Rel *rel)
{
- assert(ELF_R_TYPE(rel->r_info) == R_RISCV_JUMP_SLOT);
+ assert(ELF_R_TYPE(rel->r_info) == R_RISCV_JUMP_SLOT ||
+ ELF_R_TYPE(rel->r_info) == R_RISCV_IRELATIVE);
if (*where != target && !ld_bind_not)
*where = target;
return (target);
}
/*
* Process non-PLT relocations
*/
int
reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld, int flags,
RtldLockState *lockstate)
{
const Obj_Entry *defobj;
const Elf_Rela *relalim;
const Elf_Rela *rela;
const Elf_Sym *def;
SymCache *cache;
- Elf_Addr *where;
+ Elf_Addr *where, symval;
unsigned long symnum;
- if ((flags & SYMLOOK_IFUNC) != 0)
- /* XXX not implemented */
- return (0);
-
/*
* The dynamic loader may be called from a thread, we have
* limited amounts of stack available so we cannot use alloca().
*/
if (obj == obj_rtld)
cache = NULL;
else
cache = calloc(obj->dynsymcount, sizeof(SymCache));
/* No need to check for NULL here */
relalim = (const Elf_Rela *)((const char *)obj->rela + obj->relasize);
for (rela = obj->rela; rela < relalim; rela++) {
where = (Elf_Addr *)(obj->relocbase + rela->r_offset);
symnum = ELF_R_SYM(rela->r_info);
switch (ELF_R_TYPE(rela->r_info)) {
case R_RISCV_JUMP_SLOT:
/* This will be handled by the plt/jmpslot routines */
break;
case R_RISCV_NONE:
break;
case R_RISCV_64:
def = find_symdef(symnum, obj, &defobj, flags, cache,
lockstate);
if (def == NULL)
return (-1);
- *where = (Elf_Addr)(defobj->relocbase + def->st_value +
- rela->r_addend);
+ /*
+ * If the symbol is an IFUNC, only perform the
+ * relocation when the caller allowed it by passing
+ * the SYMLOOK_IFUNC flag. Skip the relocation
+ * otherwise.
+ */
+ if (ELF_ST_TYPE(def->st_info) == STT_GNU_IFUNC) {
+ if ((flags & SYMLOOK_IFUNC) == 0) {
+ obj->non_plt_gnu_ifunc = true;
+ continue;
+ }
+ symval = (Elf_Addr)rtld_resolve_ifunc(defobj,
+ def);
+ } else {
+ if ((flags & SYMLOOK_IFUNC) != 0)
+ continue;
+ symval = (Elf_Addr)(defobj->relocbase +
+ def->st_value);
+ }
+
+ *where = symval + rela->r_addend;
break;
case R_RISCV_TLS_DTPMOD64:
def = find_symdef(symnum, obj, &defobj, flags, cache,
lockstate);
if (def == NULL)
return -1;
*where += (Elf_Addr)defobj->tlsindex;
break;
case R_RISCV_COPY:
/*
* These are deferred until all other relocations have
* been done. All we do here is make sure that the
* COPY relocation is not in a shared library. They
* are allowed only in executable files.
*/
if (!obj->mainprog) {
_rtld_error("%s: Unexpected R_RISCV_COPY "
"relocation in shared library", obj->path);
return (-1);
}
break;
case R_RISCV_TLS_DTPREL64:
def = find_symdef(symnum, obj, &defobj, flags, cache,
lockstate);
if (def == NULL)
return (-1);
/*
* We lazily allocate offsets for static TLS as we
* see the first relocation that references the
* TLS block. This allows us to support (small
* amounts of) static TLS in dynamically loaded
* modules. If we run out of space, we generate an
* error.
*/
if (!defobj->tls_static) {
if (!allocate_tls_offset(
__DECONST(Obj_Entry *, defobj))) {
_rtld_error(
"%s: No space available for static "
"Thread Local Storage", obj->path);
return (-1);
}
}
*where += (Elf_Addr)(def->st_value + rela->r_addend
- TLS_DTV_OFFSET);
break;
case R_RISCV_TLS_TPREL64:
def = find_symdef(symnum, obj, &defobj, flags, cache,
lockstate);
if (def == NULL)
return (-1);
/*
* We lazily allocate offsets for static TLS as we
* see the first relocation that references the
* TLS block. This allows us to support (small
* amounts of) static TLS in dynamically loaded
* modules. If we run out of space, we generate an
* error.
*/
if (!defobj->tls_static) {
if (!allocate_tls_offset(
__DECONST(Obj_Entry *, defobj))) {
_rtld_error(
"%s: No space available for static "
"Thread Local Storage", obj->path);
return (-1);
}
}
*where = (def->st_value + rela->r_addend +
defobj->tlsoffset - TLS_TP_OFFSET - TLS_TCB_SIZE);
break;
case R_RISCV_RELATIVE:
*where = (Elf_Addr)(obj->relocbase + rela->r_addend);
break;
+ case R_RISCV_IRELATIVE:
+ obj->irelative_nonplt = true;
+ break;
default:
rtld_printf("%s: Unhandled relocation %lu\n",
obj->path, ELF_R_TYPE(rela->r_info));
return (-1);
}
}
return (0);
}
+unsigned long elf_hwcap;
+
void
-ifunc_init(Elf_Auxinfo *aux_info[__min_size(AT_COUNT)] __unused)
+ifunc_init(Elf_Auxinfo *aux_info[__min_size(AT_COUNT)])
{
-
+ if (aux_info[AT_HWCAP] != NULL)
+ elf_hwcap = aux_info[AT_HWCAP]->a_un.a_val;
}
void
allocate_initial_tls(Obj_Entry *objs)
{
/*
* Fix the size of the static TLS block by using the maximum
* offset allocated so far and adding a bit for dynamic modules to
* use.
*/
tls_static_space = tls_last_offset + tls_last_size +
ld_static_tls_extra;
_tcb_set(allocate_tls(objs, NULL, TLS_TCB_SIZE, TLS_TCB_ALIGN));
}
void *
__tls_get_addr(tls_index* ti)
{
uintptr_t **dtvp;
void *p;
dtvp = &_tcb_get()->tcb_dtv;
p = tls_get_addr_common(dtvp, ti->ti_module, ti->ti_offset);
return ((char*)p + TLS_DTV_OFFSET);
}
diff --git a/libexec/rtld-elf/riscv/rtld_machdep.h b/libexec/rtld-elf/riscv/rtld_machdep.h
index fb5f5643efc6..c6600b583612 100644
--- a/libexec/rtld-elf/riscv/rtld_machdep.h
+++ b/libexec/rtld-elf/riscv/rtld_machdep.h
@@ -1,110 +1,113 @@
/*-
* Copyright (c) 1999, 2000 John D. Polstra.
* Copyright (c) 2015 Ruslan Bukin
* All rights reserved.
*
* Portions of this software were developed by SRI International and the
* University of Cambridge Computer Laboratory under DARPA/AFRL contract
* FA8750-10-C-0237 ("CTSRD"), as part of the DARPA CRASH research programme.
*
* Portions of this software were developed by the University of Cambridge
* Computer Laboratory as part of the CTSRD Project, with support from the
* UK Higher Education Innovation Fund (HEIF).
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#ifndef RTLD_MACHDEP_H
#define RTLD_MACHDEP_H 1
#include
#include
#include
struct Struct_Obj_Entry;
#define MD_OBJ_ENTRY
uint64_t set_gp(struct Struct_Obj_Entry *obj);
/* Return the address of the .dynamic section in the dynamic linker. */
#define rtld_dynamic(obj) \
({ \
Elf_Addr _dynamic_addr; \
__asm __volatile("lla %0, _DYNAMIC" : "=r"(_dynamic_addr)); \
(const Elf_Dyn *)_dynamic_addr; \
})
/* No arch-specific dynamic tags */
#define arch_digest_dynamic(obj, dynp) false
/* No architecture specific notes */
#define arch_digest_note(obj, note) false
Elf_Addr reloc_jmpslot(Elf_Addr *where, Elf_Addr target,
const struct Struct_Obj_Entry *defobj, const struct Struct_Obj_Entry *obj,
const Elf_Rel *rel);
#define make_function_pointer(def, defobj) \
((defobj)->relocbase + (def)->st_value)
#define call_initfini_pointer(obj, target) \
({ \
uint64_t old0; \
old0 = set_gp(obj); \
(((InitFunc)(target))()); \
__asm __volatile("mv gp, %0" :: "r"(old0)); \
})
#define call_init_pointer(obj, target) \
({ \
uint64_t old1; \
old1 = set_gp(obj); \
(((InitArrFunc)(target))(main_argc, main_argv, environ)); \
__asm __volatile("mv gp, %0" :: "r"(old1)); \
})
+extern unsigned long elf_hwcap;
#define call_ifunc_resolver(ptr) \
- (((Elf_Addr (*)(void))ptr)())
+ (((Elf_Addr (*)(unsigned long, unsigned long, unsigned long, \
+ unsigned long, unsigned long, unsigned long, unsigned long, \
+ unsigned long))ptr)(elf_hwcap, 0, 0, 0, 0, 0, 0, 0))
/*
* TLS
*/
#define round(size, align) \
(((size) + (align) - 1) & ~((align) - 1))
#define calculate_first_tls_offset(size, align, offset) \
TLS_TCB_SIZE
#define calculate_tls_offset(prev_offset, prev_size, size, align, offset) \
round(prev_offset + prev_size, align)
#define calculate_tls_post_size(align) 0
typedef struct {
unsigned long ti_module;
unsigned long ti_offset;
} tls_index;
extern void *__tls_get_addr(tls_index* ti);
#define md_abi_variant_hook(x)
#endif
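
Note (not part of the patch): for reviewers who want to see the consumer side, below is a minimal sketch of a GNU ifunc whose resolver keys off the AT_HWCAP word that ifunc_init() now captures into elf_hwcap and that call_ifunc_resolver() forwards as the first resolver argument. Everything in the sketch (my_strlen, resolve_my_strlen, the HWCAP_ISA_V bit) is hypothetical and only illustrates the calling convention; none of it is taken from the patch or from FreeBSD headers.

/*
 * Illustrative sketch only; nothing here is part of the patch.  The
 * function names and the HWCAP_ISA_V bit are made up for the example.
 */
#include <stddef.h>

#define	HWCAP_ISA_BIT(c)	(1ul << ((c) - 'a'))
#define	HWCAP_ISA_V		HWCAP_ISA_BIT('v')	/* assumed vector bit */

static size_t
my_strlen_scalar(const char *s)
{
	const char *p;

	for (p = s; *p != '\0'; p++)
		continue;
	return ((size_t)(p - s));
}

static size_t
my_strlen_vector(const char *s)
{

	/* Stand-in; a real version would use the V extension. */
	return (my_strlen_scalar(s));
}

/*
 * The resolver runs under rtld's control.  With this patch it receives
 * elf_hwcap as its first argument; the remaining argument registers are
 * passed as zero and ignored here.
 */
static size_t (*
resolve_my_strlen(unsigned long hwcap))(const char *)
{

	if ((hwcap & HWCAP_ISA_V) != 0)
		return (my_strlen_vector);
	return (my_strlen_scalar);
}

size_t	my_strlen(const char *)
	    __attribute__((ifunc("resolve_my_strlen")));

At startup, reloc_iresolve() (for R_RISCV_IRELATIVE) or reloc_gnu_ifunc() (for STT_GNU_IFUNC jump slots) would invoke resolve_my_strlen() once, with the bind lock dropped around the call, and write the returned implementation address into the GOT/PLT slot.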