Index: head/libexec/rtld-elf/alpha/reloc.c =================================================================== --- head/libexec/rtld-elf/alpha/reloc.c (revision 85003) +++ head/libexec/rtld-elf/alpha/reloc.c (revision 85004) @@ -1,482 +1,486 @@ /*- * Copyright 1996, 1997, 1998, 1999 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Dynamic linker for ELF. * * John Polstra . */ #include #include #include #include #include #include #include #include #include #include #include #include "debug.h" #include "rtld.h" extern Elf_Dyn _DYNAMIC; /* * Macros for loading/storing unaligned 64-bit values. These are * needed because relocations can point to unaligned data. This * occurs in the DWARF2 exception frame tables generated by the * compiler, for instance. 
* * We don't use these when relocating jump slots and GOT entries, * since they are guaranteed to be aligned. */ #define load64(p) ({ \ Elf_Addr __res; \ __asm__("ldq_u %0,%1" : "=r"(__res) : "m"(*(p))); \ __res; }) #define store64(p, v) \ __asm__("stq_u %1,%0" : "=m"(*(p)) : "r"(v)) /* Relocate a non-PLT object with addend. */ static int reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela, SymCache *cache) { Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset); switch (ELF_R_TYPE(rela->r_info)) { case R_ALPHA_NONE: break; case R_ALPHA_REFQUAD: { const Elf_Sym *def; const Obj_Entry *defobj; def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, false, cache); if (def == NULL) return -1; store64(where, (Elf_Addr) (defobj->relocbase + def->st_value) + load64(where) + rela->r_addend); } break; case R_ALPHA_GLOB_DAT: { const Elf_Sym *def; const Obj_Entry *defobj; Elf_Addr val; def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, false, cache); if (def == NULL) return -1; val = (Elf_Addr) (defobj->relocbase + def->st_value + rela->r_addend); if (load64(where) != val) store64(where, val); } break; case R_ALPHA_RELATIVE: { if (obj != obj_rtld || (caddr_t)where < (caddr_t)_GLOBAL_OFFSET_TABLE_ || (caddr_t)where >= (caddr_t)&_DYNAMIC) store64(where, load64(where) + (Elf_Addr) obj->relocbase); } break; case R_ALPHA_COPY: { /* * These are deferred until all other relocations * have been done. All we do here is make sure * that the COPY relocation is not in a shared * library. They are allowed only in executable * files. */ if (!obj->mainprog) { _rtld_error("%s: Unexpected R_COPY " " relocation in shared library", obj->path); return -1; } } break; default: _rtld_error("%s: Unsupported relocation type %d" " in non-PLT relocations\n", obj->path, ELF_R_TYPE(rela->r_info)); return -1; } return(0); } /* Process the non-PLT relocations. 
*/ int reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld) { const Elf_Rel *rellim; const Elf_Rel *rel; const Elf_Rela *relalim; const Elf_Rela *rela; SymCache *cache; cache = (SymCache *)alloca(obj->nchains * sizeof(SymCache)); if (cache != NULL) memset(cache, 0, obj->nchains * sizeof(SymCache)); /* Perform relocations without addend if there are any: */ rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize); for (rel = obj->rel; obj->rel != NULL && rel < rellim; rel++) { Elf_Rela locrela; locrela.r_info = rel->r_info; locrela.r_offset = rel->r_offset; locrela.r_addend = 0; if (reloc_non_plt_obj(obj_rtld, obj, &locrela, cache)) return -1; } /* Perform relocations with addend if there are any: */ relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize); for (rela = obj->rela; obj->rela != NULL && rela < relalim; rela++) { if (reloc_non_plt_obj(obj_rtld, obj, rela, cache)) return -1; } return 0; } /* Process the PLT relocations. */ int reloc_plt(Obj_Entry *obj) { /* All PLT relocations are the same kind: either Elf_Rel or Elf_Rela. */ if (obj->pltrelsize != 0) { const Elf_Rel *rellim; const Elf_Rel *rel; rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize); for (rel = obj->pltrel; rel < rellim; rel++) { Elf_Addr *where; assert(ELF_R_TYPE(rel->r_info) == R_ALPHA_JMP_SLOT); /* Relocate the GOT slot pointing into the PLT. */ where = (Elf_Addr *)(obj->relocbase + rel->r_offset); *where += (Elf_Addr)obj->relocbase; } } else { const Elf_Rela *relalim; const Elf_Rela *rela; relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize); for (rela = obj->pltrela; rela < relalim; rela++) { Elf_Addr *where; assert(ELF_R_TYPE(rela->r_info) == R_ALPHA_JMP_SLOT); /* Relocate the GOT slot pointing into the PLT. */ where = (Elf_Addr *)(obj->relocbase + rela->r_offset); *where += (Elf_Addr)obj->relocbase; } } return 0; } /* Relocate the jump slots in an object. 
*/ int reloc_jmpslots(Obj_Entry *obj) { if (obj->jmpslots_done) return 0; /* All PLT relocations are the same kind: either Elf_Rel or Elf_Rela. */ if (obj->pltrelsize != 0) { const Elf_Rel *rellim; const Elf_Rel *rel; rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize); for (rel = obj->pltrel; rel < rellim; rel++) { Elf_Addr *where; const Elf_Sym *def; const Obj_Entry *defobj; assert(ELF_R_TYPE(rel->r_info) == R_ALPHA_JMP_SLOT); where = (Elf_Addr *)(obj->relocbase + rel->r_offset); def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL); if (def == NULL) return -1; reloc_jmpslot(where, - (Elf_Addr)(defobj->relocbase + def->st_value)); + (Elf_Addr)(defobj->relocbase + def->st_value), + defobj); } } else { const Elf_Rela *relalim; const Elf_Rela *rela; relalim = (const Elf_Rela *)((char *)obj->pltrela + obj->pltrelasize); for (rela = obj->pltrela; rela < relalim; rela++) { Elf_Addr *where; const Elf_Sym *def; const Obj_Entry *defobj; assert(ELF_R_TYPE(rela->r_info) == R_ALPHA_JMP_SLOT); where = (Elf_Addr *)(obj->relocbase + rela->r_offset); def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, true, NULL); if (def == NULL) return -1; reloc_jmpslot(where, - (Elf_Addr)(defobj->relocbase + def->st_value)); + (Elf_Addr)(defobj->relocbase + def->st_value), + defobj); } } obj->jmpslots_done = true; return 0; } /* Fixup the jump slot at "where" to transfer control to "target". */ -void -reloc_jmpslot(Elf_Addr *where, Elf_Addr target) +Elf_Addr +reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *obj) { Elf_Addr stubaddr; dbg(" reloc_jmpslot: where=%p, target=%p", (void *)where, (void *)target); stubaddr = *where; if (stubaddr != target) { int64_t delta; u_int32_t inst[3]; int instct; Elf_Addr pc; int64_t idisp; u_int32_t *stubptr; /* Point this GOT entry directly at the target. */ *where = target; /* * There may be multiple GOT tables, each with an entry * pointing to the stub in the PLT. 
But we can only find and * fix up the first GOT entry. So we must rewrite the stub as * well, to perform a call to the target if it is executed. * * When the stub gets control, register pv ($27) contains its * address. We adjust its value so that it points to the * target, and then jump indirect through it. * * Each PLT entry has room for 3 instructions. If the * adjustment amount fits in a signed 32-bit integer, we can * simply add it to register pv. Otherwise we must load the * GOT entry itself into the pv register. */ delta = target - stubaddr; dbg(" stubaddr=%p, where-stubaddr=%ld, delta=%ld", (void *)stubaddr, (long)where - (long)stubaddr, (long)delta); instct = 0; if ((int32_t)delta == delta) { /* * We can adjust pv with a LDA, LDAH sequence. * * First build an LDA instruction to adjust the low 16 bits. */ inst[instct++] = 0x08 << 26 | 27 << 21 | 27 << 16 | (delta & 0xffff); dbg(" LDA $27,%d($27)", (int16_t)delta); /* * Adjust the delta to account for the effects of the LDA, * including sign-extension. */ delta -= (int16_t)delta; if (delta != 0) { /* Build an LDAH instruction to adjust the high 16 bits. */ inst[instct++] = 0x09 << 26 | 27 << 21 | 27 << 16 | (delta >> 16 & 0xffff); dbg(" LDAH $27,%d($27)", (int16_t)(delta >> 16)); } } else { int64_t dhigh; /* We must load the GOT entry from memory. */ delta = (Elf_Addr)where - stubaddr; /* * If the GOT entry is too far away from the PLT entry, * then punt. This PLT entry will have to be looked up * manually for all GOT entries except the first one. * The program will still run, albeit very slowly. It's * extremely unlikely that this case could ever arise in * practice, but we might as well handle it correctly if * it does. */ if ((int32_t)delta != delta) { dbg(" PLT stub too far from GOT to relocate"); - return; + return target; } dhigh = delta - (int16_t)delta; if (dhigh != 0) { /* Build an LDAH instruction to adjust the high 16 bits. 
*/ inst[instct++] = 0x09 << 26 | 27 << 21 | 27 << 16 | (dhigh >> 16 & 0xffff); dbg(" LDAH $27,%d($27)", (int16_t)(dhigh >> 16)); } /* Build an LDQ to load the GOT entry. */ inst[instct++] = 0x29 << 26 | 27 << 21 | 27 << 16 | (delta & 0xffff); dbg(" LDQ $27,%d($27)", (int16_t)delta); } /* * Build a JMP or BR instruction to jump to the target. If * the instruction displacement fits in a sign-extended 21-bit * field, we can use the more efficient BR instruction. * Otherwise we have to jump indirect through the pv register. */ pc = stubaddr + 4 * (instct + 1); idisp = (int64_t)(target - pc) >> 2; if (-0x100000 <= idisp && idisp < 0x100000) { inst[instct++] = 0x30 << 26 | 31 << 21 | (idisp & 0x1fffff); dbg(" BR $31,%p", (void *)target); } else { inst[instct++] = 0x1a << 26 | 31 << 21 | 27 << 16 | (idisp & 0x3fff); dbg(" JMP $31,($27),%d", (int)(idisp & 0x3fff)); } /* * Fill in the tail of the PLT entry first for reentrancy. * Until we have overwritten the first instruction (an * unconditional branch), the remaining instructions have no * effect. */ stubptr = (u_int32_t *)stubaddr; while (instct > 1) { instct--; stubptr[instct] = inst[instct]; } /* * Commit the tail of the instruction sequence to memory * before overwriting the first instruction. */ __asm__ __volatile__("wmb" : : : "memory"); stubptr[0] = inst[0]; } + + return target; } /* Process an R_ALPHA_COPY relocation. 
*/ static int do_copy_relocation(Obj_Entry *dstobj, const Elf_Rela *rela) { void *dstaddr; const Elf_Sym *dstsym; const char *name; unsigned long hash; size_t size; const void *srcaddr; const Elf_Sym *srcsym; Obj_Entry *srcobj; dstaddr = (void *) (dstobj->relocbase + rela->r_offset); dstsym = dstobj->symtab + ELF_R_SYM(rela->r_info); name = dstobj->strtab + dstsym->st_name; hash = elf_hash(name); size = dstsym->st_size; for (srcobj = dstobj->next; srcobj != NULL; srcobj = srcobj->next) if ((srcsym = symlook_obj(name, hash, srcobj, false)) != NULL) break; if (srcobj == NULL) { _rtld_error("Undefined symbol \"%s\" referenced from COPY" " relocation in %s", name, dstobj->path); return -1; } srcaddr = (const void *) (srcobj->relocbase + srcsym->st_value); memcpy(dstaddr, srcaddr, size); return 0; } /* * Process the special R_ALPHA_COPY relocations in the main program. These * copy data from a shared object into a region in the main program's BSS * segment. * * Returns 0 on success, -1 on failure. */ int do_copy_relocations(Obj_Entry *dstobj) { const Elf_Rel *rellim; const Elf_Rel *rel; const Elf_Rela *relalim; const Elf_Rela *rela; assert(dstobj->mainprog); /* COPY relocations are invalid elsewhere */ rellim = (const Elf_Rel *) ((caddr_t) dstobj->rel + dstobj->relsize); for (rel = dstobj->rel; dstobj->rel != NULL && rel < rellim; rel++) { if (ELF_R_TYPE(rel->r_info) == R_ALPHA_COPY) { Elf_Rela locrela; locrela.r_info = rel->r_info; locrela.r_offset = rel->r_offset; locrela.r_addend = 0; if (do_copy_relocation(dstobj, &locrela)) return -1; } } relalim = (const Elf_Rela *) ((caddr_t) dstobj->rela + dstobj->relasize); for (rela = dstobj->rela; dstobj->rela != NULL && rela < relalim; rela++) { if (ELF_R_TYPE(rela->r_info) == R_ALPHA_COPY) { if (do_copy_relocation(dstobj, rela)) return -1; } } return 0; } /* Initialize the special PLT entries. 
*/ void init_pltgot(Obj_Entry *obj) { if (obj->pltgot != NULL && (obj->pltrelsize != 0 || obj->pltrelasize != 0)) { /* This function will be called to perform the relocation. */ obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start; /* Identify this shared object */ obj->pltgot[3] = (Elf_Addr) obj; } } Index: head/libexec/rtld-elf/alpha/rtld_machdep.h =================================================================== --- head/libexec/rtld-elf/alpha/rtld_machdep.h (revision 85003) +++ head/libexec/rtld-elf/alpha/rtld_machdep.h (revision 85004) @@ -1,43 +1,49 @@ /*- * Copyright (c) 1999, 2000 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * $FreeBSD$ */ #ifndef RTLD_MACHDEP_H #define RTLD_MACHDEP_H 1 +struct Struct_Obj_Entry; + /* Return the address of the .dynamic section in the dynamic linker. */ #define rtld_dynamic(obj) (&_DYNAMIC) -void reloc_jmpslot(Elf_Addr *, Elf_Addr); +Elf_Addr reloc_jmpslot(Elf_Addr *, Elf_Addr, + const struct Struct_Obj_Entry *obj); + +#define make_function_pointer(def, defobj) \ + ((defobj)->relocbase + (def)->st_value) /* Atomic operations. */ int cmp0_and_store_int(volatile int *, int); void atomic_add_int(volatile int *, int); void atomic_incr_int(volatile int *); void atomic_decr_int(volatile int *); #endif Index: head/libexec/rtld-elf/amd64/reloc.c =================================================================== --- head/libexec/rtld-elf/amd64/reloc.c (revision 85003) +++ head/libexec/rtld-elf/amd64/reloc.c (revision 85004) @@ -1,253 +1,254 @@ /*- * Copyright 1996, 1997, 1998, 1999 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Dynamic linker for ELF. * * John Polstra . */ #include #include #include #include #include #include #include #include #include #include #include #include "debug.h" #include "rtld.h" /* * Process the special R_386_COPY relocations in the main program. These * copy data from a shared object into a region in the main program's BSS * segment. * * Returns 0 on success, -1 on failure. */ int do_copy_relocations(Obj_Entry *dstobj) { const Elf_Rel *rellim; const Elf_Rel *rel; assert(dstobj->mainprog); /* COPY relocations are invalid elsewhere */ rellim = (const Elf_Rel *) ((caddr_t) dstobj->rel + dstobj->relsize); for (rel = dstobj->rel; rel < rellim; rel++) { if (ELF_R_TYPE(rel->r_info) == R_386_COPY) { void *dstaddr; const Elf_Sym *dstsym; const char *name; unsigned long hash; size_t size; const void *srcaddr; const Elf_Sym *srcsym; Obj_Entry *srcobj; dstaddr = (void *) (dstobj->relocbase + rel->r_offset); dstsym = dstobj->symtab + ELF_R_SYM(rel->r_info); name = dstobj->strtab + dstsym->st_name; hash = elf_hash(name); size = dstsym->st_size; for (srcobj = dstobj->next; srcobj != NULL; srcobj = srcobj->next) if ((srcsym = symlook_obj(name, hash, srcobj, false)) != NULL) break; if (srcobj == NULL) { _rtld_error("Undefined symbol \"%s\" referenced from COPY" " relocation in %s", name, dstobj->path); return -1; } srcaddr = (const void *) (srcobj->relocbase + srcsym->st_value); memcpy(dstaddr, srcaddr, size); } } return 0; } /* Initialize the special GOT 
entries. */ void init_pltgot(Obj_Entry *obj) { if (obj->pltgot != NULL) { obj->pltgot[1] = (Elf_Addr) obj; obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start; } } /* Process the non-PLT relocations. */ int reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld) { const Elf_Rel *rellim; const Elf_Rel *rel; SymCache *cache; cache = (SymCache *)alloca(obj->nchains * sizeof(SymCache)); if (cache != NULL) memset(cache, 0, obj->nchains * sizeof(SymCache)); rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize); for (rel = obj->rel; rel < rellim; rel++) { Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rel->r_offset); switch (ELF_R_TYPE(rel->r_info)) { case R_386_NONE: break; case R_386_32: { const Elf_Sym *def; const Obj_Entry *defobj; def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, false, cache); if (def == NULL) return -1; *where += (Elf_Addr) (defobj->relocbase + def->st_value); } break; case R_386_PC32: /* * I don't think the dynamic linker should ever see this * type of relocation. But the binutils-2.6 tools sometimes * generate it. */ { const Elf_Sym *def; const Obj_Entry *defobj; def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, false, cache); if (def == NULL) return -1; *where += (Elf_Addr) (defobj->relocbase + def->st_value) - (Elf_Addr) where; } break; case R_386_COPY: /* * These are deferred until all other relocations have * been done. All we do here is make sure that the COPY * relocation is not in a shared library. They are allowed * only in executable files. 
*/ if (!obj->mainprog) { _rtld_error("%s: Unexpected R_386_COPY relocation" " in shared library", obj->path); return -1; } break; case R_386_GLOB_DAT: { const Elf_Sym *def; const Obj_Entry *defobj; def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, false, cache); if (def == NULL) return -1; *where = (Elf_Addr) (defobj->relocbase + def->st_value); } break; case R_386_RELATIVE: *where += (Elf_Addr) obj->relocbase; break; default: _rtld_error("%s: Unsupported relocation type %d" " in non-PLT relocations\n", obj->path, ELF_R_TYPE(rel->r_info)); return -1; } } return 0; } /* Process the PLT relocations. */ int reloc_plt(Obj_Entry *obj) { const Elf_Rel *rellim; const Elf_Rel *rel; rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize); for (rel = obj->pltrel; rel < rellim; rel++) { Elf_Addr *where; assert(ELF_R_TYPE(rel->r_info) == R_386_JMP_SLOT); /* Relocate the GOT slot pointing into the PLT. */ where = (Elf_Addr *)(obj->relocbase + rel->r_offset); *where += (Elf_Addr)obj->relocbase; } return 0; } /* Relocate the jump slots in an object. 
*/ int reloc_jmpslots(Obj_Entry *obj) { const Elf_Rel *rellim; const Elf_Rel *rel; if (obj->jmpslots_done) return 0; rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize); for (rel = obj->pltrel; rel < rellim; rel++) { - Elf_Addr *where; + Elf_Addr *where, target; const Elf_Sym *def; const Obj_Entry *defobj; assert(ELF_R_TYPE(rel->r_info) == R_386_JMP_SLOT); where = (Elf_Addr *)(obj->relocbase + rel->r_offset); def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL); if (def == NULL) return -1; - reloc_jmpslot(where, (Elf_Addr)(defobj->relocbase + def->st_value)); + target = (Elf_Addr)(defobj->relocbase + def->st_value); + reloc_jmpslot(where, target, defobj); } obj->jmpslots_done = true; return 0; } Index: head/libexec/rtld-elf/amd64/rtld_machdep.h =================================================================== --- head/libexec/rtld-elf/amd64/rtld_machdep.h (revision 85003) +++ head/libexec/rtld-elf/amd64/rtld_machdep.h (revision 85004) @@ -1,65 +1,73 @@ /*- * Copyright (c) 1999, 2000 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef RTLD_MACHDEP_H #define RTLD_MACHDEP_H 1 +struct Struct_Obj_Entry; + /* Return the address of the .dynamic section in the dynamic linker. */ #define rtld_dynamic(obj) \ ((const Elf_Dyn *)((obj)->relocbase + (Elf_Addr)&_DYNAMIC)) /* Fixup the jump slot at "where" to transfer control to "target". */ -#define reloc_jmpslot(where, target) \ - do { \ - dbg("reloc_jmpslot: *%p = %p", (void *)(where), \ - (void *)(target)); \ - (*(Elf_Addr *)(where) = (Elf_Addr)(target)); \ - } while (0) +static inline Elf_Addr +reloc_jmpslot(Elf_Addr *where, Elf_Addr target, + const struct Struct_Obj_Entry *obj) +{ + dbg("reloc_jmpslot: *%p = %p", (void *)(where), + (void *)(target)); + (*(Elf_Addr *)(where) = (Elf_Addr)(target)); + return target; +} + +#define make_function_pointer(def, defobj) \ + ((defobj)->relocbase + (def)->st_value) static inline void atomic_decr_int(volatile int *p) { __asm __volatile ("lock; decl %0" : "=m"(*p) : "0"(*p) : "cc"); } static inline void atomic_incr_int(volatile int *p) { __asm __volatile ("lock; incl %0" : "=m"(*p) : "0"(*p) : "cc"); } static inline void atomic_add_int(volatile int *p, int val) { __asm __volatile ("lock; addl %1, %0" : "=m"(*p) : "ri"(val), "0"(*p) : "cc"); } #endif Index: head/libexec/rtld-elf/i386/reloc.c =================================================================== --- head/libexec/rtld-elf/i386/reloc.c (revision 85003) +++ head/libexec/rtld-elf/i386/reloc.c (revision 85004) @@ -1,253 +1,254 
@@ /*- * Copyright 1996, 1997, 1998, 1999 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Dynamic linker for ELF. * * John Polstra . */ #include #include #include #include #include #include #include #include #include #include #include #include "debug.h" #include "rtld.h" /* * Process the special R_386_COPY relocations in the main program. These * copy data from a shared object into a region in the main program's BSS * segment. * * Returns 0 on success, -1 on failure. 
*/ int do_copy_relocations(Obj_Entry *dstobj) { const Elf_Rel *rellim; const Elf_Rel *rel; assert(dstobj->mainprog); /* COPY relocations are invalid elsewhere */ rellim = (const Elf_Rel *) ((caddr_t) dstobj->rel + dstobj->relsize); for (rel = dstobj->rel; rel < rellim; rel++) { if (ELF_R_TYPE(rel->r_info) == R_386_COPY) { void *dstaddr; const Elf_Sym *dstsym; const char *name; unsigned long hash; size_t size; const void *srcaddr; const Elf_Sym *srcsym; Obj_Entry *srcobj; dstaddr = (void *) (dstobj->relocbase + rel->r_offset); dstsym = dstobj->symtab + ELF_R_SYM(rel->r_info); name = dstobj->strtab + dstsym->st_name; hash = elf_hash(name); size = dstsym->st_size; for (srcobj = dstobj->next; srcobj != NULL; srcobj = srcobj->next) if ((srcsym = symlook_obj(name, hash, srcobj, false)) != NULL) break; if (srcobj == NULL) { _rtld_error("Undefined symbol \"%s\" referenced from COPY" " relocation in %s", name, dstobj->path); return -1; } srcaddr = (const void *) (srcobj->relocbase + srcsym->st_value); memcpy(dstaddr, srcaddr, size); } } return 0; } /* Initialize the special GOT entries. */ void init_pltgot(Obj_Entry *obj) { if (obj->pltgot != NULL) { obj->pltgot[1] = (Elf_Addr) obj; obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start; } } /* Process the non-PLT relocations. 
*/ int reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld) { const Elf_Rel *rellim; const Elf_Rel *rel; SymCache *cache; cache = (SymCache *)alloca(obj->nchains * sizeof(SymCache)); if (cache != NULL) memset(cache, 0, obj->nchains * sizeof(SymCache)); rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize); for (rel = obj->rel; rel < rellim; rel++) { Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rel->r_offset); switch (ELF_R_TYPE(rel->r_info)) { case R_386_NONE: break; case R_386_32: { const Elf_Sym *def; const Obj_Entry *defobj; def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, false, cache); if (def == NULL) return -1; *where += (Elf_Addr) (defobj->relocbase + def->st_value); } break; case R_386_PC32: /* * I don't think the dynamic linker should ever see this * type of relocation. But the binutils-2.6 tools sometimes * generate it. */ { const Elf_Sym *def; const Obj_Entry *defobj; def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, false, cache); if (def == NULL) return -1; *where += (Elf_Addr) (defobj->relocbase + def->st_value) - (Elf_Addr) where; } break; case R_386_COPY: /* * These are deferred until all other relocations have * been done. All we do here is make sure that the COPY * relocation is not in a shared library. They are allowed * only in executable files. */ if (!obj->mainprog) { _rtld_error("%s: Unexpected R_386_COPY relocation" " in shared library", obj->path); return -1; } break; case R_386_GLOB_DAT: { const Elf_Sym *def; const Obj_Entry *defobj; def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, false, cache); if (def == NULL) return -1; *where = (Elf_Addr) (defobj->relocbase + def->st_value); } break; case R_386_RELATIVE: *where += (Elf_Addr) obj->relocbase; break; default: _rtld_error("%s: Unsupported relocation type %d" " in non-PLT relocations\n", obj->path, ELF_R_TYPE(rel->r_info)); return -1; } } return 0; } /* Process the PLT relocations. 
*/ int reloc_plt(Obj_Entry *obj) { const Elf_Rel *rellim; const Elf_Rel *rel; rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize); for (rel = obj->pltrel; rel < rellim; rel++) { Elf_Addr *where; assert(ELF_R_TYPE(rel->r_info) == R_386_JMP_SLOT); /* Relocate the GOT slot pointing into the PLT. */ where = (Elf_Addr *)(obj->relocbase + rel->r_offset); *where += (Elf_Addr)obj->relocbase; } return 0; } /* Relocate the jump slots in an object. */ int reloc_jmpslots(Obj_Entry *obj) { const Elf_Rel *rellim; const Elf_Rel *rel; if (obj->jmpslots_done) return 0; rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize); for (rel = obj->pltrel; rel < rellim; rel++) { - Elf_Addr *where; + Elf_Addr *where, target; const Elf_Sym *def; const Obj_Entry *defobj; assert(ELF_R_TYPE(rel->r_info) == R_386_JMP_SLOT); where = (Elf_Addr *)(obj->relocbase + rel->r_offset); def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL); if (def == NULL) return -1; - reloc_jmpslot(where, (Elf_Addr)(defobj->relocbase + def->st_value)); + target = (Elf_Addr)(defobj->relocbase + def->st_value); + reloc_jmpslot(where, target, defobj); } obj->jmpslots_done = true; return 0; } Index: head/libexec/rtld-elf/i386/rtld_machdep.h =================================================================== --- head/libexec/rtld-elf/i386/rtld_machdep.h (revision 85003) +++ head/libexec/rtld-elf/i386/rtld_machdep.h (revision 85004) @@ -1,65 +1,73 @@ /*- * Copyright (c) 1999, 2000 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef RTLD_MACHDEP_H #define RTLD_MACHDEP_H 1 +struct Struct_Obj_Entry; + /* Return the address of the .dynamic section in the dynamic linker. */ #define rtld_dynamic(obj) \ ((const Elf_Dyn *)((obj)->relocbase + (Elf_Addr)&_DYNAMIC)) /* Fixup the jump slot at "where" to transfer control to "target". 
*/ -#define reloc_jmpslot(where, target) \ - do { \ - dbg("reloc_jmpslot: *%p = %p", (void *)(where), \ - (void *)(target)); \ - (*(Elf_Addr *)(where) = (Elf_Addr)(target)); \ - } while (0) +static inline Elf_Addr +reloc_jmpslot(Elf_Addr *where, Elf_Addr target, + const struct Struct_Obj_Entry *obj) +{ + dbg("reloc_jmpslot: *%p = %p", (void *)(where), + (void *)(target)); + (*(Elf_Addr *)(where) = (Elf_Addr)(target)); + return target; +} + +#define make_function_pointer(def, defobj) \ + ((defobj)->relocbase + (def)->st_value) static inline void atomic_decr_int(volatile int *p) { __asm __volatile ("lock; decl %0" : "=m"(*p) : "0"(*p) : "cc"); } static inline void atomic_incr_int(volatile int *p) { __asm __volatile ("lock; incl %0" : "=m"(*p) : "0"(*p) : "cc"); } static inline void atomic_add_int(volatile int *p, int val) { __asm __volatile ("lock; addl %1, %0" : "=m"(*p) : "ri"(val), "0"(*p) : "cc"); } #endif Index: head/libexec/rtld-elf/ia64/Makefile.inc =================================================================== --- head/libexec/rtld-elf/ia64/Makefile.inc (nonexistent) +++ head/libexec/rtld-elf/ia64/Makefile.inc (revision 85004) @@ -0,0 +1,2 @@ +# $FreeBSD$ +LDFLAGS+= -Wl,--export-dynamic Property changes on: head/libexec/rtld-elf/ia64/Makefile.inc ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/libexec/rtld-elf/ia64/lockdflt.c =================================================================== --- head/libexec/rtld-elf/ia64/lockdflt.c (nonexistent) +++ head/libexec/rtld-elf/ia64/lockdflt.c (revision 85004) @@ -0,0 +1,181 @@ +/*- + * Copyright 1999, 2000 John D. Polstra. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +/* + * Thread locking implementation for the dynamic linker. + * + * We use the "simple, non-scalable reader-preference lock" from: + * + * J. M. Mellor-Crummey and M. L. Scott. "Scalable Reader-Writer + * Synchronization for Shared-Memory Multiprocessors." 3rd ACM Symp. on + * Principles and Practice of Parallel Programming, April 1991. + * + * In this algorithm the lock is a single word. Its low-order bit is + * set when a writer holds the lock. The remaining high-order bits + * contain a count of readers desiring the lock. The algorithm requires + * atomic "compare_and_store" and "add" operations, which we implement + * using assembly language sequences in "rtld_start.S". + * + * These are spinlocks. When spinning we call nanosleep() for 1 + * microsecond each time around the loop. 
This will most likely yield + * the CPU to other threads (including, we hope, the lockholder) allowing + * them to make some progress. + */ + +#include +#include +#include + +#include "debug.h" +#include "rtld.h" + +/* + * This value of CACHE_LINE_SIZE is conservative. The actual size + * is 32 on the 21064, 21064A, 21066, 21066A, and 21164. It is 64 + * on the 21264. Compaq recommends sequestering each lock in its own + * 128-byte block to allow for future implementations with larger + * cache lines. + */ +#define CACHE_LINE_SIZE 128 + +#define WAFLAG 0x1 /* A writer holds the lock */ +#define RC_INCR 0x2 /* Adjusts count of readers desiring lock */ + +typedef struct Struct_Lock { + volatile int lock; + void *base; +} Lock; + +static const struct timespec usec = { 0, 1000 }; /* 1 usec. */ +static sigset_t fullsigmask, oldsigmask; + +static void * +lock_create(void *context) +{ + void *base; + char *p; + uintptr_t r; + Lock *l; + + /* + * Arrange for the lock to occupy its own cache line. First, we + * optimistically allocate just a cache line, hoping that malloc + * will give us a well-aligned block of memory. If that doesn't + * work, we allocate a larger block and take a well-aligned cache + * line from it. 
+ */ + base = xmalloc(CACHE_LINE_SIZE); + p = (char *)base; + if ((uintptr_t)p % CACHE_LINE_SIZE != 0) { + free(base); + base = xmalloc(2 * CACHE_LINE_SIZE); + p = (char *)base; + if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0) + p += CACHE_LINE_SIZE - r; + } + l = (Lock *)p; + l->base = base; + l->lock = 0; + return l; +} + +static void +lock_destroy(void *lock) +{ + Lock *l = (Lock *)lock; + + free(l->base); +} + +static void +rlock_acquire(void *lock) +{ + Lock *l = (Lock *)lock; + + atomic_add_int(&l->lock, RC_INCR); + while (l->lock & WAFLAG) + nanosleep(&usec, NULL); +} + +static void +wlock_acquire(void *lock) +{ + Lock *l = (Lock *)lock; + sigset_t tmp_oldsigmask; + + for ( ; ; ) { + sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask); + if (cmp0_and_store_int(&l->lock, WAFLAG) == 0) + break; + sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL); + nanosleep(&usec, NULL); + } + oldsigmask = tmp_oldsigmask; +} + +static void +rlock_release(void *lock) +{ + Lock *l = (Lock *)lock; + + atomic_add_int(&l->lock, -RC_INCR); +} + +static void +wlock_release(void *lock) +{ + Lock *l = (Lock *)lock; + + atomic_add_int(&l->lock, -WAFLAG); + sigprocmask(SIG_SETMASK, &oldsigmask, NULL); +} + +void +lockdflt_init(LockInfo *li) +{ + li->context = NULL; + li->lock_create = lock_create; + li->rlock_acquire = rlock_acquire; + li->wlock_acquire = wlock_acquire; + li->rlock_release = rlock_release; + li->wlock_release = wlock_release; + li->lock_destroy = lock_destroy; + li->context_destroy = NULL; + /* + * Construct a mask to block all signals except traps which might + * conceivably be generated within the dynamic linker itself. 
+ */ + sigfillset(&fullsigmask); + sigdelset(&fullsigmask, SIGILL); + sigdelset(&fullsigmask, SIGTRAP); + sigdelset(&fullsigmask, SIGABRT); + sigdelset(&fullsigmask, SIGEMT); + sigdelset(&fullsigmask, SIGFPE); + sigdelset(&fullsigmask, SIGBUS); + sigdelset(&fullsigmask, SIGSEGV); + sigdelset(&fullsigmask, SIGSYS); +} Property changes on: head/libexec/rtld-elf/ia64/lockdflt.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/libexec/rtld-elf/ia64/reloc.c =================================================================== --- head/libexec/rtld-elf/ia64/reloc.c (nonexistent) +++ head/libexec/rtld-elf/ia64/reloc.c (revision 85004) @@ -0,0 +1,435 @@ +/*- + * Copyright 1996, 1997, 1998, 1999 John D. Polstra. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD$ + */ + +/* + * Dynamic linker for ELF. + * + * John Polstra . + */ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "debug.h" +#include "rtld.h" + +extern Elf_Dyn _DYNAMIC; + +/* + * Macros for loading/storing unaligned 64-bit values. These are + * needed because relocations can point to unaligned data. This + * occurs in the DWARF2 exception frame tables generated by the + * compiler, for instance. + * + * We don't use these when relocating jump slots and GOT entries, + * since they are guaranteed to be aligned. + * + * XXX dfr stub for now. + */ +#define load64(p) (*(u_int64_t *) (p)) +#define store64(p, v) (*(u_int64_t *) (p) = (v)) + +/* Allocate an @fptr. */ + +#define FPTR_CHUNK_SIZE 64 + +struct fptr_chunk { + struct fptr fptrs[FPTR_CHUNK_SIZE]; +}; + +static struct fptr_chunk first_chunk; +static struct fptr_chunk *current_chunk = &first_chunk; +static struct fptr *next_fptr = &first_chunk.fptrs[0]; +static struct fptr *last_fptr = &first_chunk.fptrs[FPTR_CHUNK_SIZE]; + +/* + * We use static storage initially so that we don't have to call + * malloc during init_rtld(). 
+ */ +static struct fptr * +alloc_fptr(Elf_Addr target, Elf_Addr gp) +{ + struct fptr* fptr; + + if (next_fptr == last_fptr) { + current_chunk = malloc(sizeof(struct fptr_chunk)); + next_fptr = ¤t_chunk->fptrs[0]; + last_fptr = ¤t_chunk->fptrs[FPTR_CHUNK_SIZE]; + } + fptr = next_fptr; + next_fptr++; + fptr->target = target; + fptr->gp = gp; + return fptr; +} + +/* Relocate a non-PLT object with addend. */ +static int +reloc_non_plt_obj(Obj_Entry *obj_rtld, Obj_Entry *obj, const Elf_Rela *rela, + SymCache *cache, struct fptr **fptrs) +{ + Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rela->r_offset); + + switch (ELF_R_TYPE(rela->r_info)) { + case R_IA64_REL64LSB: + /* + * We handle rtld's relocations in rtld_start.S + */ + if (obj != obj_rtld) + store64(where, + load64(where) + (Elf_Addr) obj->relocbase); + break; + + case R_IA64_DIR64LSB: { + const Elf_Sym *def; + const Obj_Entry *defobj; + Elf_Addr target; + + def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, + false, cache); + if (def == NULL) + return -1; + target = (Elf_Addr) (defobj->relocbase + def->st_value); + store64(where, target + rela->r_addend); + break; + } + + case R_IA64_FPTR64LSB: { + /* + * We have to make sure that all @fptr references to + * the same function are identical so that code can + * compare function pointers. We actually only bother + * to ensure this within a single object. If the + * caller's alloca failed, we don't even ensure that. + */ + const Elf_Sym *def; + const Obj_Entry *defobj; + struct fptr *fptr = 0; + Elf_Addr target, gp; + + /* + * Not sure why the call to find_symdef() doesn't work + * properly (it fails if the symbol is local). Perhaps + * this is a toolchain issue - revisit after we + * upgrade the ia64 toolchain. 
+ */ + def = find_symdef(ELF_R_SYM(rela->r_info), obj, &defobj, + false, cache); + if (def == NULL) { + def = &obj->symtab[ELF_R_SYM(rela->r_info)]; + defobj = obj; + } + target = (Elf_Addr) (defobj->relocbase + def->st_value); + gp = (Elf_Addr) defobj->pltgot; + + /* + * Find the @fptr, using fptrs as a helper. + */ + if (fptrs) + fptr = fptrs[ELF_R_SYM(rela->r_info)]; + if (!fptr) { + fptr = alloc_fptr(target, gp); + if (fptrs) + fptrs[ELF_R_SYM(rela->r_info)] = fptr; + } + store64(where, (Elf_Addr) fptr); + break; + } + + default: + _rtld_error("%s: Unsupported relocation type %d" + " in non-PLT relocations\n", obj->path, + ELF_R_TYPE(rela->r_info)); + return -1; + } + + return(0); +} + +/* Process the non-PLT relocations. */ +int +reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld) +{ + const Elf_Rel *rellim; + const Elf_Rel *rel; + const Elf_Rela *relalim; + const Elf_Rela *rela; + SymCache *cache; + struct fptr **fptrs; + + cache = (SymCache *)alloca(obj->nchains * sizeof(SymCache)); + if (cache != NULL) + memset(cache, 0, obj->nchains * sizeof(SymCache)); + + /* + * When relocating rtld itself, we need to avoid using malloc. 
+ */ + if (obj == obj_rtld) + fptrs = (struct fptr **) + alloca(obj->nchains * sizeof(struct fptr *)); + else + fptrs = (struct fptr **) + malloc(obj->nchains * sizeof(struct fptr *)); + + if (fptrs == NULL) + return -1; + memset(fptrs, 0, obj->nchains * sizeof(struct fptr *)); + + /* Perform relocations without addend if there are any: */ + rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize); + for (rel = obj->rel; obj->rel != NULL && rel < rellim; rel++) { + Elf_Rela locrela; + + locrela.r_info = rel->r_info; + locrela.r_offset = rel->r_offset; + locrela.r_addend = 0; + if (reloc_non_plt_obj(obj_rtld, obj, &locrela, cache, fptrs)) + return -1; + } + + /* Perform relocations with addend if there are any: */ + relalim = (const Elf_Rela *) ((caddr_t) obj->rela + obj->relasize); + for (rela = obj->rela; obj->rela != NULL && rela < relalim; rela++) { + if (reloc_non_plt_obj(obj_rtld, obj, rela, cache, fptrs)) + return -1; + } + + /* + * Remember the fptrs in case of later calls to dlsym(). Don't + * bother for rtld - we will lazily create a table in + * make_function_pointer(). At this point we still can't risk + * calling malloc(). + */ + if (obj != obj_rtld) + obj->priv = fptrs; + else + obj->priv = NULL; + + return 0; +} + +/* Process the PLT relocations. */ +int +reloc_plt(Obj_Entry *obj) +{ + /* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */ + if (obj->pltrelsize != 0) { + const Elf_Rel *rellim; + const Elf_Rel *rel; + + rellim = (const Elf_Rel *) + ((char *)obj->pltrel + obj->pltrelsize); + for (rel = obj->pltrel; rel < rellim; rel++) { + Elf_Addr *where; + + assert(ELF_R_TYPE(rel->r_info) == R_IA64_IPLTLSB); + + /* Relocate the @fptr pointing into the PLT. 
*/ + where = (Elf_Addr *)(obj->relocbase + rel->r_offset); + *where += (Elf_Addr)obj->relocbase; + } + } else { + const Elf_Rela *relalim; + const Elf_Rela *rela; + + relalim = (const Elf_Rela *) + ((char *)obj->pltrela + obj->pltrelasize); + for (rela = obj->pltrela; rela < relalim; rela++) { + Elf_Addr *where; + + assert(ELF_R_TYPE(rela->r_info) == R_IA64_IPLTLSB); + + /* Relocate the @fptr pointing into the PLT. */ + where = (Elf_Addr *)(obj->relocbase + rela->r_offset); + *where += (Elf_Addr)obj->relocbase; + } + } + return 0; +} + +/* Relocate the jump slots in an object. */ +int +reloc_jmpslots(Obj_Entry *obj) +{ + if (obj->jmpslots_done) + return 0; + /* All PLT relocations are the same kind: Elf_Rel or Elf_Rela. */ + if (obj->pltrelsize != 0) { + const Elf_Rel *rellim; + const Elf_Rel *rel; + + rellim = (const Elf_Rel *) + ((char *)obj->pltrel + obj->pltrelsize); + for (rel = obj->pltrel; rel < rellim; rel++) { + Elf_Addr *where; + const Elf_Sym *def; + const Obj_Entry *defobj; + + assert(ELF_R_TYPE(rel->r_info) == R_IA64_IPLTLSB); + where = (Elf_Addr *)(obj->relocbase + rel->r_offset); + def = find_symdef(ELF_R_SYM(rel->r_info), obj, + &defobj, true, NULL); + if (def == NULL) + return -1; + reloc_jmpslot(where, + (Elf_Addr)(defobj->relocbase + + def->st_value), + defobj); + } + } else { + const Elf_Rela *relalim; + const Elf_Rela *rela; + + relalim = (const Elf_Rela *) + ((char *)obj->pltrela + obj->pltrelasize); + for (rela = obj->pltrela; rela < relalim; rela++) { + Elf_Addr *where; + const Elf_Sym *def; + const Obj_Entry *defobj; + + /* assert(ELF_R_TYPE(rela->r_info) == R_ALPHA_JMP_SLOT); */ + where = (Elf_Addr *)(obj->relocbase + rela->r_offset); + def = find_symdef(ELF_R_SYM(rela->r_info), obj, + &defobj, true, NULL); + if (def == NULL) + return -1; + reloc_jmpslot(where, + (Elf_Addr)(defobj->relocbase + + def->st_value), + defobj); + } + } + obj->jmpslots_done = true; + return 0; +} + +/* Fixup the jump slot at "where" to transfer control to 
"target". */ +Elf_Addr +reloc_jmpslot(Elf_Addr *where, Elf_Addr target, const Obj_Entry *obj) +{ + Elf_Addr stubaddr; + + dbg(" reloc_jmpslot: where=%p, target=%p, gp=%p", + (void *)where, (void *)target, (void *)obj->pltgot); + stubaddr = *where; + if (stubaddr != target) { + + /* + * Point this @fptr directly at the target. Update the + * gp value first so that we don't break another cpu + * which is currently executing the PLT entry. + */ + where[1] = (Elf_Addr) obj->pltgot; + ia64_mf(); + where[0] = target; + ia64_mf(); + } + + /* + * The caller needs an @fptr for the adjusted entry. The PLT + * entry serves this purpose nicely. + */ + return (Elf_Addr) where; +} + +/* + * XXX ia64 doesn't seem to have copy relocations. + * + * Returns 0 on success, -1 on failure. + */ +int +do_copy_relocations(Obj_Entry *dstobj) +{ + + return 0; +} + +/* + * Return the @fptr representing a given function symbol. + */ +void * +make_function_pointer(const Elf_Sym *sym, const Obj_Entry *obj) +{ + struct fptr **fptrs = obj->priv; + int index = sym - obj->symtab; + + if (!fptrs) { + /* + * This should only happen for something like + * dlsym("dlopen"). Actually, I'm not sure it can ever + * happen. + */ + fptrs = (struct fptr **) + malloc(obj->nchains * sizeof(struct fptr *)); + memset(fptrs, 0, obj->nchains * sizeof(struct fptr *)); + ((Obj_Entry*) obj)->priv = fptrs; + } + if (!fptrs[index]) { + Elf_Addr target, gp; + target = (Elf_Addr) (obj->relocbase + sym->st_value); + gp = (Elf_Addr) obj->pltgot; + fptrs[index] = alloc_fptr(target, gp); + } + return fptrs[index]; +} + +/* Initialize the special PLT entries. */ +void +init_pltgot(Obj_Entry *obj) +{ + const Elf_Dyn *dynp; + Elf_Addr *pltres = 0; + + /* + * Find the PLT RESERVE section. 
+ */ + for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) { + if (dynp->d_tag == DT_IA64_PLT_RESERVE) + pltres = (u_int64_t *) + (obj->relocbase + dynp->d_un.d_ptr); + } + if (!pltres) + errx(1, "Can't find DT_IA64_PLT_RESERVE entry"); + + /* + * The PLT RESERVE section is used to get values to pass to + * _rtld_bind when lazy binding. + */ + pltres[0] = (Elf_Addr) obj; + pltres[1] = FPTR_TARGET(_rtld_bind_start); + pltres[2] = FPTR_GP(_rtld_bind_start); +} Property changes on: head/libexec/rtld-elf/ia64/reloc.c ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/libexec/rtld-elf/ia64/rtld_machdep.h =================================================================== --- head/libexec/rtld-elf/ia64/rtld_machdep.h (nonexistent) +++ head/libexec/rtld-elf/ia64/rtld_machdep.h (revision 85004) @@ -0,0 +1,57 @@ +/*- + * Copyright (c) 1999, 2000 John D. Polstra. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef RTLD_MACHDEP_H +#define RTLD_MACHDEP_H 1 + +/* + * Macros for cracking ia64 function pointers. + */ +struct fptr { + Elf_Addr target; + Elf_Addr gp; +}; + +#define FPTR_TARGET(f) (((struct fptr *) (f))->target) +#define FPTR_GP(f) (((struct fptr *) (f))->gp) + +/* Return the address of the .dynamic section in the dynamic linker. */ +#define rtld_dynamic(obj) (&_DYNAMIC) + +struct Struct_Obj_Entry; + +Elf_Addr reloc_jmpslot(Elf_Addr *, Elf_Addr, const struct Struct_Obj_Entry *); +void *make_function_pointer(const Elf_Sym *, const struct Struct_Obj_Entry *); + +/* Atomic operations. */ +int cmp0_and_store_int(volatile int *, int); +void atomic_add_int(volatile int *, int); +void atomic_incr_int(volatile int *); +void atomic_decr_int(volatile int *); + +#endif Property changes on: head/libexec/rtld-elf/ia64/rtld_machdep.h ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/libexec/rtld-elf/ia64/rtld_start.S =================================================================== --- head/libexec/rtld-elf/ia64/rtld_start.S (nonexistent) +++ head/libexec/rtld-elf/ia64/rtld_start.S (revision 85004) @@ -0,0 +1,306 @@ +/* $FreeBSD$ */ +/* From: NetBSD: rtld_start.S,v 1.1 1996/12/16 20:38:09 cgd Exp */ + +/* + * Copyright 1996 Matt Thomas + * Copyright 2000 John D. Polstra + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include + +ENTRY(_rtld_start, 0) + alloc r2=ar.pfs,3,0,3,0 + ;; +1: mov r14=ip // calculate gp + addl r3=@gprel(1b),r0 + ;; + sub gp=r14,r3 + ;; + .section .sdata +2: data4 @ltv(1b) // unrelocated address of 1b + .align 8 + .previous + add r15=@gprel(2b),gp + ;; + ld8 r15=[r15] + ;; + sub out0=r14,r15 // out0 is image base address + br.call.sptk.many rp=_rtld_reloc // fixup image + + add sp=-16,sp // 16 bytes for us, 16 for _rtld + ;; + mov out0=in0 + add out1=16,sp // address for exit proc + add out2=24,sp // address for obj_main + + br.call.sptk.many rp=_rtld // r8=_rtld(sp, &exit_proc, &obj_main) + + add r16=16,sp // address for exit proc + ;; + ld8 r15=[r16] // read exit proc + add sp=16,sp // readjust stack + mov b7=r8 // address of real _start + ;; + alloc r2=ar.pfs,0,0,3,0 // dump register frame + mov out2=r15 + + br.call.sptk.many rp=b7 // transfer to main program + br.call.sptk.many rp=exit // die +END(_rtld_start) + +/* + * _rtld_bind_start: lookup a lazy binding and transfer to real target + * + * Arguments: + * r1 gp value for rtld + * r15 Index in plt + * r16 Obj_Entry of caller + * in0-in7 Arguments for target procedure + * rp Return address back to caller + */ +ENTRY(_rtld_bind_start, 0) +{ .mii + alloc loc0=ar.pfs,8,6,3,0 // space to save r8-r11 + add r17=16-8*16,sp // leave 16 bytes for _rtld_bind + add r18=32-8*16,sp + ;; +} { .mii + mov loc2=r8 // structure return address + add sp=-8*16,sp // space to save f8-f15 + mov loc1=rp + ;; +} { .mii + stf.spill [r17]=f8,32 // save float arguments + mov loc3=r9 // language specific + mov loc4=r10 // language specific +} { .mii + stf.spill [r18]=f9,32 + mov loc5=r11 // language specific + shl out1=r15,4 // 16 * index + ;; +} { .mmi + stf.spill [r17]=f10,32 + stf.spill [r18]=f11,32 + mov out0=r16 // Obj_Entry for caller + ;; +} { .mmi + stf.spill [r17]=f12,32 + stf.spill [r18]=f13,32 + shladd out1=r15,3,out1 // rela offset = 24 * index + ;; +} { .mmb + stf.spill [r17]=f14,32 + stf.spill 
[r18]=f15,32 + br.call.sptk.many rp=_rtld_bind +} { .mii + ld8 r14=[r8],8 // target address + add r17=16,sp + add r18=32,sp + ;; +} { .mii + ld8 r1=[r8] // target gp + mov ar.pfs=loc0 // clean up + mov rp=loc1 +} { .mmi + ldf.fill f8=[r17],32 // restore float arguments + ldf.fill f9=[r18],32 + mov r8=loc2 // restore structure pointer + ;; +} { .mmi + ldf.fill f10=[r17],32 + ldf.fill f11=[r18],32 + mov r9=loc3 + ;; +} { .mmi + ldf.fill f12=[r17],32 + ldf.fill f13=[r18],32 + mov r10=loc4 + ;; +} { .mmi + ldf.fill f14=[r17],32 + ldf.fill f15=[r18],32 + mov r11=loc5 + ;; +} { .mii + nop.m 0 + mov b7=r14 + add sp=8*16,sp + ;; +} { .mib + alloc r14=ar.pfs,0,0,8,0 // drop our register frame + nop.i 0 + br.sptk.many b7 // jump to target +} +END(_rtld_bind_start) + +/* + * int cmp0_and_store_int(volatile int *p, int newval); + * + * If an int holds 0, store newval into it; else do nothing. Returns + * the previous value. + */ +ENTRY(cmp0_and_store_int, 2) + mov ar.ccv=0 + ;; + cmpxchg4.acq r8=[in0],in1,ar.ccv + br.ret.sptk.many rp +END(cmp0_and_store_int) + +ENTRY(atomic_add_int, 2) +1: ld4 r14=[in0] + ;; + mov ar.ccv=r14 + add r15=in1,r14 + ;; + cmpxchg4.acq r16=[in0],r15,ar.ccv + ;; + cmp.ne p6,p0=r14,r16 +(p6) br.cond.spnt.few 1b + br.ret.sptk.many rp +END(atomic_add_int) + +/* Atomically increment an int. */ +ENTRY(atomic_incr_int, 1) +1: ld4 r14=[in0] + ;; + mov ar.ccv=r14 + add r15=1,r14 + ;; + cmpxchg4.acq r16=[in0],r15,ar.ccv + ;; + cmp.ne p6,p0=r14,r16 +(p6) br.cond.spnt.few 1b + br.ret.sptk.many rp +END(atomic_incr_int) + +/* Atomically decrement an int. */ +ENTRY(atomic_decr_int, 1) +1: ld4 r14=[in0] + ;; + mov ar.ccv=r14 + add r15=-1,r14 + ;; + cmpxchg4.acq r16=[in0],r15,ar.ccv + ;; + cmp.ne p6,p0=r14,r16 +(p6) br.cond.spnt.few 1b + br.ret.sptk.many rp +END(atomic_decr_int) + +#define DT_NULL 0 /* Terminating entry. */ +#define DT_RELA 7 /* Address of ElfNN_Rela relocations. */ +#define DT_RELASZ 8 /* Total size of ElfNN_Rela relocations. 
*/ +#define DT_RELAENT 9 /* Size of each ElfNN_Rela relocation entry. */ + +#define R_IA64_NONE 0 /* None */ +#define R_IA64_DIR64LSB 0x27 /* word64 LSB S + A */ +#define R_IA64_REL64LSB 0x6f /* word64 LSB BD + A */ + +/* + * _rtld_reloc: relocate the rtld image, apart from @fptrs. + * + * Assumes that rtld was linked at zero and that we only need to + * handle REL64LSB and DIR64LSB relocations. + * + * Arguments: + * r1 gp value for rtld + * in0 rtld base address + */ +STATIC_ENTRY(_rtld_reloc, 1) + alloc loc0=ar.pfs,1,2,0,0 + mov loc1=rp + ;; + movl r15=@gprel(_DYNAMIC) // find _DYNAMIC etc. + ;; + add r15=r15,gp // relocate _DYNAMIC etc. + ;; +1: ld8 r16=[r15],8 // read r15->d_tag + ;; + ld8 r17=[r15],8 // and r15->d_val + ;; + cmp.eq p6,p0=DT_NULL,r16 // done? +(p6) br.cond.dpnt.few 2f + ;; + cmp.eq p6,p0=DT_RELA,r16 + ;; +(p6) add r18=r17,in0 // found rela section + ;; + cmp.eq p6,p0=DT_RELASZ,r16 + ;; +(p6) mov r19=r17 // found rela size + ;; + cmp.eq p6,p0=DT_RELAENT,r16 + ;; +(p6) mov r22=r17 // found rela entry size + ;; + br.sptk.few 1b + +2: + ld8 r15=[r18],8 // read r_offset + ;; + ld8 r16=[r18],8 // read r_info + add r15=r15,in0 // relocate r_offset + ;; + ld8 r17=[r18],8 // read r_addend + sub r19=r19,r22 // update relasz + + extr.u r23=r16,0,32 // ELF64_R_TYPE(r16) + ;; + cmp.eq p6,p0=R_IA64_NONE,r23 +(p6) br.cond.dpnt.few 3f + ;; + cmp.eq p6,p0=R_IA64_DIR64LSB,r23 + ;; +(p6) br.cond.dptk.few 4f + ;; + cmp.eq p6,p0=R_IA64_REL64LSB,r23 + ;; +(p6) br.cond.dptk.few 4f + ;; + +3: cmp.ltu p6,p0=0,r19 // more? 
+(p6) br.cond.dptk.few 2b // loop + + mov r8=0 // success return value + ;; + br.cond.sptk.few 9f // done + +4: + ld8 r16=[r15] // read value + ;; + add r16=r16,in0 // relocate it + ;; + st8 [r15]=r16 // and store it back + br.cond.sptk.few 3b + +9: + mov ar.pfs=loc0 + mov rp=loc1 + ;; + br.ret.sptk.few rp + +END(_rtld_reloc) Property changes on: head/libexec/rtld-elf/ia64/rtld_start.S ___________________________________________________________________ Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Index: head/libexec/rtld-elf/map_object.c =================================================================== --- head/libexec/rtld-elf/map_object.c (revision 85003) +++ head/libexec/rtld-elf/map_object.c (revision 85004) @@ -1,309 +1,310 @@ /*- * Copyright 1996-1998 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #include #include #include #include #include #include #include #include +#include "debug.h" #include "rtld.h" static int protflags(int); /* Elf flags -> mmap protection */ /* * Map a shared object into memory. The "fd" argument is a file descriptor, * which must be open on the object and positioned at its beginning. * The "path" argument is a pathname that is used only for error messages. * * The return value is a pointer to a newly-allocated Obj_Entry structure * for the shared object. Returns NULL on failure. 
*/ Obj_Entry * map_object(int fd, const char *path, const struct stat *sb) { Obj_Entry *obj; union { Elf_Ehdr hdr; char buf[PAGE_SIZE]; } u; int nbytes; Elf_Phdr *phdr; Elf_Phdr *phlimit; Elf_Phdr *segs[2]; int nsegs; Elf_Phdr *phdyn; Elf_Phdr *phphdr; Elf_Phdr *phinterp; caddr_t mapbase; size_t mapsize; Elf_Off base_offset; Elf_Addr base_vaddr; Elf_Addr base_vlimit; caddr_t base_addr; Elf_Off data_offset; Elf_Addr data_vaddr; Elf_Addr data_vlimit; caddr_t data_addr; Elf_Addr clear_vaddr; caddr_t clear_addr; size_t nclear; Elf_Addr bss_vaddr; Elf_Addr bss_vlimit; caddr_t bss_addr; if ((nbytes = read(fd, u.buf, PAGE_SIZE)) == -1) { _rtld_error("%s: read error: %s", path, strerror(errno)); return NULL; } /* Make sure the file is valid */ if (nbytes < sizeof(Elf_Ehdr) || u.hdr.e_ident[EI_MAG0] != ELFMAG0 || u.hdr.e_ident[EI_MAG1] != ELFMAG1 || u.hdr.e_ident[EI_MAG2] != ELFMAG2 || u.hdr.e_ident[EI_MAG3] != ELFMAG3) { _rtld_error("%s: invalid file format", path); return NULL; } if (u.hdr.e_ident[EI_CLASS] != ELF_TARG_CLASS || u.hdr.e_ident[EI_DATA] != ELF_TARG_DATA) { _rtld_error("%s: unsupported file layout", path); return NULL; } if (u.hdr.e_ident[EI_VERSION] != EV_CURRENT || u.hdr.e_version != EV_CURRENT) { _rtld_error("%s: unsupported file version", path); return NULL; } if (u.hdr.e_type != ET_EXEC && u.hdr.e_type != ET_DYN) { _rtld_error("%s: unsupported file type", path); return NULL; } if (u.hdr.e_machine != ELF_TARG_MACH) { _rtld_error("%s: unsupported machine", path); return NULL; } /* * We rely on the program header being in the first page. This is * not strictly required by the ABI specification, but it seems to * always true in practice. And, it simplifies things considerably. 
*/ if (u.hdr.e_phentsize != sizeof(Elf_Phdr)) { _rtld_error( "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path); return NULL; } if (u.hdr.e_phoff + u.hdr.e_phnum*sizeof(Elf_Phdr) > nbytes) { _rtld_error("%s: program header too large", path); return NULL; } /* * Scan the program header entries, and save key information. * * We rely on there being exactly two load segments, text and data, * in that order. */ phdr = (Elf_Phdr *) (u.buf + u.hdr.e_phoff); phlimit = phdr + u.hdr.e_phnum; nsegs = 0; phdyn = phphdr = phinterp = NULL; while (phdr < phlimit) { switch (phdr->p_type) { case PT_INTERP: phinterp = phdr; break; case PT_LOAD: if (nsegs >= 2) { _rtld_error("%s: too many PT_LOAD segments", path); return NULL; } segs[nsegs] = phdr; ++nsegs; break; case PT_PHDR: phphdr = phdr; break; case PT_DYNAMIC: phdyn = phdr; break; } ++phdr; } if (phdyn == NULL) { _rtld_error("%s: object is not dynamically-linked", path); return NULL; } if (nsegs < 2) { _rtld_error("%s: too few PT_LOAD segments", path); return NULL; } if (segs[0]->p_align < PAGE_SIZE || segs[1]->p_align < PAGE_SIZE) { _rtld_error("%s: PT_LOAD segments not page-aligned", path); return NULL; } /* * Map the entire address space of the object, to stake out our * contiguous region, and to establish the base address for relocation. */ base_offset = trunc_page(segs[0]->p_offset); base_vaddr = trunc_page(segs[0]->p_vaddr); base_vlimit = round_page(segs[1]->p_vaddr + segs[1]->p_memsz); mapsize = base_vlimit - base_vaddr; base_addr = u.hdr.e_type == ET_EXEC ? 
(caddr_t) base_vaddr : NULL; mapbase = mmap(base_addr, mapsize, protflags(segs[0]->p_flags), MAP_PRIVATE, fd, base_offset); if (mapbase == (caddr_t) -1) { _rtld_error("%s: mmap of entire address space failed: %s", path, strerror(errno)); return NULL; } if (base_addr != NULL && mapbase != base_addr) { _rtld_error("%s: mmap returned wrong address: wanted %p, got %p", path, base_addr, mapbase); munmap(mapbase, mapsize); return NULL; } /* Overlay the data segment onto the proper region. */ data_offset = trunc_page(segs[1]->p_offset); data_vaddr = trunc_page(segs[1]->p_vaddr); data_vlimit = round_page(segs[1]->p_vaddr + segs[1]->p_filesz); data_addr = mapbase + (data_vaddr - base_vaddr); if (mmap(data_addr, data_vlimit - data_vaddr, protflags(segs[1]->p_flags), MAP_PRIVATE|MAP_FIXED, fd, data_offset) == (caddr_t) -1) { _rtld_error("%s: mmap of data failed: %s", path, strerror(errno)); return NULL; } /* Clear any BSS in the last page of the data segment. */ clear_vaddr = segs[1]->p_vaddr + segs[1]->p_filesz; clear_addr = mapbase + (clear_vaddr - base_vaddr); if ((nclear = data_vlimit - clear_vaddr) > 0) memset(clear_addr, 0, nclear); /* Overlay the BSS segment onto the proper region. 
*/ bss_vaddr = data_vlimit; bss_vlimit = round_page(segs[1]->p_vaddr + segs[1]->p_memsz); bss_addr = mapbase + (bss_vaddr - base_vaddr); if (bss_vlimit > bss_vaddr) { /* There is something to do */ if (mmap(bss_addr, bss_vlimit - bss_vaddr, protflags(segs[1]->p_flags), MAP_PRIVATE|MAP_FIXED|MAP_ANON, -1, 0) == (caddr_t) -1) { _rtld_error("%s: mmap of bss failed: %s", path, strerror(errno)); return NULL; } } obj = obj_new(); if (sb != NULL) { obj->dev = sb->st_dev; obj->ino = sb->st_ino; } obj->mapbase = mapbase; obj->mapsize = mapsize; obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) - base_vaddr; obj->vaddrbase = base_vaddr; obj->relocbase = mapbase - base_vaddr; obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr); if (u.hdr.e_entry != 0) obj->entry = (caddr_t) (obj->relocbase + u.hdr.e_entry); if (phphdr != NULL) { obj->phdr = (const Elf_Phdr *) (obj->relocbase + phphdr->p_vaddr); obj->phsize = phphdr->p_memsz; } if (phinterp != NULL) obj->interp = (const char *) (obj->relocbase + phinterp->p_vaddr); return obj; } void obj_free(Obj_Entry *obj) { Objlist_Entry *elm; free(obj->path); while (obj->needed != NULL) { Needed_Entry *needed = obj->needed; obj->needed = needed->next; free(needed); } while (!STAILQ_EMPTY(&obj->dldags)) { elm = STAILQ_FIRST(&obj->dldags); STAILQ_REMOVE_HEAD(&obj->dldags, link); free(elm); } while (!STAILQ_EMPTY(&obj->dagmembers)) { elm = STAILQ_FIRST(&obj->dagmembers); STAILQ_REMOVE_HEAD(&obj->dagmembers, link); free(elm); } free(obj); } Obj_Entry * obj_new(void) { Obj_Entry *obj; obj = CNEW(Obj_Entry); STAILQ_INIT(&obj->dldags); STAILQ_INIT(&obj->dagmembers); return obj; } /* * Given a set of ELF protection flags, return the corresponding protection * flags for MMAP. 
*/ static int protflags(int elfflags) { int prot = 0; if (elfflags & PF_R) prot |= PROT_READ; if (elfflags & PF_W) prot |= PROT_WRITE; if (elfflags & PF_X) prot |= PROT_EXEC; return prot; } Index: head/libexec/rtld-elf/rtld.c =================================================================== --- head/libexec/rtld-elf/rtld.c (revision 85003) +++ head/libexec/rtld-elf/rtld.c (revision 85004) @@ -1,2112 +1,2137 @@ /*- * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * Dynamic linker for ELF. * * John Polstra . 
 */

#ifndef __GNUC__
#error "GCC is needed to compile this file"
#endif

/* NOTE(review): the header names below were lost in extraction — see the
 * original file for the actual <...> includes. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#include "debug.h"
#include "rtld.h"

#define END_SYM		"_end"
#define PATH_RTLD	"/usr/libexec/ld-elf.so.1"

/* Types. */
typedef void (*func_ptr_type)();

/*
 * This structure provides a reentrant way to keep a list of objects and
 * check which ones have already been processed in some way.
 */
typedef struct Struct_DoneList {
    const Obj_Entry **objs;		/* Array of object pointers */
    unsigned int num_alloc;		/* Allocated size of the array */
    unsigned int num_used;		/* Number of array slots used */
} DoneList;

/*
 * Function declarations.
 */
static const char *basename(const char *);
static void die(void);
static void digest_dynamic(Obj_Entry *);
static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *);
static Obj_Entry *dlcheck(void *);
static bool donelist_check(DoneList *, const Obj_Entry *);
static void errmsg_restore(char *);
static char *errmsg_save(void);
static char *find_library(const char *, const Obj_Entry *);
static const char *gethints(void);
static void init_dag(Obj_Entry *);
static void init_dag1(Obj_Entry *root, Obj_Entry *obj, DoneList *);
static void init_rtld(caddr_t);
static void initlist_add_neededs(Needed_Entry *needed, Objlist *list);
static void initlist_add_objects(Obj_Entry *obj, Obj_Entry **tail,
  Objlist *list);
static bool is_exported(const Elf_Sym *);
static void linkmap_add(Obj_Entry *);
static void linkmap_delete(Obj_Entry *);
static int load_needed_objects(Obj_Entry *);
static int load_preload_objects(void);
static Obj_Entry *load_object(char *);
static void lock_check(void);
static Obj_Entry *obj_from_addr(const void *);
static void objlist_call_fini(Objlist *);
static void objlist_call_init(Objlist *);
static void objlist_clear(Objlist *);
static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *);
static void objlist_init(Objlist *);
static void objlist_push_head(Objlist *, Obj_Entry *);
static void objlist_push_tail(Objlist *, Obj_Entry *);
static void objlist_remove(Objlist *, Obj_Entry *);
static void objlist_remove_unref(Objlist *);
static int relocate_objects(Obj_Entry *, bool);
static void rtld_exit(void);
static char *search_library_path(const char *, const char *);
static void set_program_var(const char *, const void *);
static const Elf_Sym *symlook_default(const char *, unsigned long hash,
  const Obj_Entry *refobj, const Obj_Entry **defobj_out, bool in_plt);
static const Elf_Sym *symlook_list(const char *, unsigned long,
  Objlist *, const Obj_Entry **, bool in_plt, DoneList *);
static void trace_loaded_objects(Obj_Entry *obj);
static void unload_object(Obj_Entry *);
static void unref_dag(Obj_Entry *);

void r_debug_state(struct r_debug*, struct link_map*);
void xprintf(const char *, ...);

/*
 * Data declarations.
 */
static char *error_message;	/* Message for dlerror(), or NULL */
struct r_debug r_debug;		/* for GDB; */
static bool trust;		/* False for setuid and setgid programs */
static char *ld_bind_now;	/* Environment variable for immediate binding */
static char *ld_debug;		/* Environment variable for debugging */
static char *ld_library_path;	/* Environment variable for search path */
static char *ld_preload;	/* Environment variable for libraries to load first */
static char *ld_tracing;	/* Called from ldd to print libs */
static Obj_Entry *obj_list;	/* Head of linked list of shared objects */
static Obj_Entry **obj_tail;	/* Link field of last object in list */
static Obj_Entry *obj_main;	/* The main program shared object */
static Obj_Entry obj_rtld;	/* The dynamic linker shared object */
static unsigned int obj_count;	/* Number of objects in obj_list */

static Objlist list_global =	/* Objects dlopened with RTLD_GLOBAL */
  STAILQ_HEAD_INITIALIZER(list_global);
static Objlist list_main =	/* Objects loaded at program startup */
  STAILQ_HEAD_INITIALIZER(list_main);
static Objlist list_fini =	/* Objects needing fini() calls */
  STAILQ_HEAD_INITIALIZER(list_fini);

static LockInfo lockinfo;

static Elf_Sym sym_zero;	/* For resolving undefined weak refs. */

#define GDB_STATE(s,m)	r_debug.r_state = s; r_debug_state(&r_debug,m);

extern Elf_Dyn _DYNAMIC;
#pragma weak _DYNAMIC

/*
 * These are the functions the dynamic linker exports to application
 * programs.  They are the only symbols the dynamic linker is willing
 * to export from itself.
 */
static func_ptr_type exports[] = {
    (func_ptr_type) &_rtld_error,
    (func_ptr_type) &dlclose,
    (func_ptr_type) &dlerror,
    (func_ptr_type) &dlopen,
    (func_ptr_type) &dlsym,
    (func_ptr_type) &dladdr,
    (func_ptr_type) &dllockinit,
    NULL
};

/*
 * Global declarations normally provided by crt1.  The dynamic linker is
 * not built with crt1, so we have to provide them ourselves.
 */
char *__progname;
char **environ;

/*
 * Fill in a DoneList with an allocation large enough to hold all of
 * the currently-loaded objects.  Keep this as a macro since it calls
 * alloca and we want that to occur within the scope of the caller.
 */
#define donelist_init(dlp)					\
    ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]),	\
    assert((dlp)->objs != NULL),				\
    (dlp)->num_alloc = obj_count,				\
    (dlp)->num_used = 0)

/* Take the read lock and record the acquisition for lock_check(). */
static __inline void
rlock_acquire(void)
{
    lockinfo.rlock_acquire(lockinfo.thelock);
    atomic_incr_int(&lockinfo.rcount);
    lock_check();
}

/* Take the write lock and record the acquisition for lock_check(). */
static __inline void
wlock_acquire(void)
{
    lockinfo.wlock_acquire(lockinfo.thelock);
    atomic_incr_int(&lockinfo.wcount);
    lock_check();
}

/* Drop the read lock; decrement the count before releasing. */
static __inline void
rlock_release(void)
{
    atomic_decr_int(&lockinfo.rcount);
    lockinfo.rlock_release(lockinfo.thelock);
}

/* Drop the write lock; decrement the count before releasing. */
static __inline void
wlock_release(void)
{
    atomic_decr_int(&lockinfo.wcount);
    lockinfo.wlock_release(lockinfo.thelock);
}

/*
 * Main entry point for dynamic linking.  The first argument is the
 * stack pointer.  The stack is expected to be laid out as described
 * in the SVR4 ABI specification, Intel 386 Processor Supplement.
 * Specifically, the stack pointer points to a word containing
 * ARGC.  Following that in the stack is a null-terminated sequence
 * of pointers to argument strings.  Then comes a null-terminated
 * sequence of pointers to environment strings.  Finally, there is a
 * sequence of "auxiliary vector" entries.
 *
 * The second argument points to a place to store the dynamic linker's
 * exit procedure pointer and the third to a place to store the main
 * program's object.
 *
 * The return value is the main program's entry point.
 */
func_ptr_type
_rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp)
{
    Elf_Auxinfo *aux_info[AT_COUNT];
    int i;
    int argc;
    char **argv;
    char **env;
    Elf_Auxinfo *aux;
    Elf_Auxinfo *auxp;
    const char *argv0;
    Obj_Entry *obj;
    Obj_Entry **preload_tail;
    Objlist initlist;

    /*
     * On entry, the dynamic linker itself has not been relocated yet.
     * Be very careful not to reference any global data until after
     * init_rtld has returned.  It is OK to reference file-scope statics
     * and string constants, and to call static and global functions.
     */

    /* Find the auxiliary vector on the stack. */
    argc = *sp++;
    argv = (char **) sp;
    sp += argc + 1;	/* Skip over arguments and NULL terminator */
    env = (char **) sp;
    while (*sp++ != 0)	/* Skip over environment, and NULL terminator */
	;
    aux = (Elf_Auxinfo *) sp;

    /* Digest the auxiliary vector. */
    for (i = 0;  i < AT_COUNT;  i++)
	aux_info[i] = NULL;
    for (auxp = aux;  auxp->a_type != AT_NULL;  auxp++) {
	if (auxp->a_type < AT_COUNT)
	    aux_info[auxp->a_type] = auxp;
    }

    /* Initialize and relocate ourselves. */
    assert(aux_info[AT_BASE] != NULL);
    init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr);

    __progname = obj_rtld.path;
    argv0 = argv[0] != NULL ? argv[0] : "(null)";
    environ = env;

    /* setuid/setgid programs must not honor the tracing environment. */
    trust = geteuid() == getuid() && getegid() == getgid();

    ld_bind_now = getenv("LD_BIND_NOW");
    if (trust) {
	ld_debug = getenv("LD_DEBUG");
	ld_library_path = getenv("LD_LIBRARY_PATH");
	ld_preload = getenv("LD_PRELOAD");
    }
    ld_tracing = getenv("LD_TRACE_LOADED_OBJECTS");

    if (ld_debug != NULL && *ld_debug != '\0')
	debug = 1;
    dbg("%s is initialized, base address = %p", __progname,
      (caddr_t) aux_info[AT_BASE]->a_un.a_ptr);
    dbg("RTLD dynamic = %p", obj_rtld.dynamic);
    dbg("RTLD pltgot = %p", obj_rtld.pltgot);

    /*
     * Load the main program, or process its program header if it is
     * already loaded.
     */
    if (aux_info[AT_EXECFD] != NULL) {	/* Load the main program. */
	int fd = aux_info[AT_EXECFD]->a_un.a_val;
	dbg("loading main program");
	obj_main = map_object(fd, argv0, NULL);
	close(fd);
	if (obj_main == NULL)
	    die();
    } else {				/* Main program already loaded. */
	const Elf_Phdr *phdr;
	int phnum;
	caddr_t entry;

	dbg("processing main program's program header");
	assert(aux_info[AT_PHDR] != NULL);
	phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr;
	assert(aux_info[AT_PHNUM] != NULL);
	phnum = aux_info[AT_PHNUM]->a_un.a_val;
	assert(aux_info[AT_PHENT] != NULL);
	assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr));
	assert(aux_info[AT_ENTRY] != NULL);
	entry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr;
	if ((obj_main = digest_phdr(phdr, phnum, entry, argv0)) == NULL)
	    die();
    }

    obj_main->path = xstrdup(argv0);
    obj_main->mainprog = true;

    /*
     * Get the actual dynamic linker pathname from the executable if
     * possible.  (It should always be possible.)  That ensures that
     * gdb will find the right dynamic linker even if a non-standard
     * one is being used.
     */
    if (obj_main->interp != NULL &&
      strcmp(obj_main->interp, obj_rtld.path) != 0) {
	free(obj_rtld.path);
	obj_rtld.path = xstrdup(obj_main->interp);
    }

    digest_dynamic(obj_main);

    linkmap_add(obj_main);
    linkmap_add(&obj_rtld);

    /* Link the main program into the list of objects. */
    *obj_tail = obj_main;
    obj_tail = &obj_main->next;
    obj_count++;
    obj_main->refcount++;

    /* Make sure we don't call the main program's init and fini functions. */
    obj_main->init = obj_main->fini = NULL;

    /* Initialize a fake symbol for resolving undefined weak references. */
    sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE);
    sym_zero.st_shndx = SHN_ABS;

    dbg("loading LD_PRELOAD libraries");
    if (load_preload_objects() == -1)
	die();
    preload_tail = obj_tail;

    dbg("loading needed objects");
    if (load_needed_objects(obj_main) == -1)
	die();

    /* Make a list of all objects loaded at startup. */
    for (obj = obj_list;  obj != NULL;  obj = obj->next)
	objlist_push_tail(&list_main, obj);

    if (ld_tracing) {		/* We're done */
	trace_loaded_objects(obj_main);
	exit(0);
    }

    if (relocate_objects(obj_main,
	ld_bind_now != NULL && *ld_bind_now != '\0') == -1)
	die();

    dbg("doing copy relocations");
    if (do_copy_relocations(obj_main) == -1)
	die();

    dbg("initializing key program variables");
    set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : "");
    set_program_var("environ", env);

    dbg("initializing thread locks");
    lockdflt_init(&lockinfo);
    lockinfo.thelock = lockinfo.lock_create(lockinfo.context);

    /* Make a list of init functions to call. */
    objlist_init(&initlist);
    initlist_add_objects(obj_list, preload_tail, &initlist);

    r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */

    objlist_call_init(&initlist);
    wlock_acquire();
    objlist_clear(&initlist);
    wlock_release();

    dbg("transferring control to program entry point = %p", obj_main->entry);

    /* Return the exit procedure and the program entry point. */
    *exit_proc = rtld_exit;
    *objp = obj_main;
    return (func_ptr_type) obj_main->entry;
}

/*
 * Lazy-binding entry point, called from the PLT trampoline with the
 * object and the offset of the relocation to resolve.
 */
Elf_Addr
_rtld_bind(Obj_Entry *obj, Elf_Word reloff)
{
    const Elf_Rel *rel;
    const Elf_Sym *def;
    const Obj_Entry *defobj;
    Elf_Addr *where;
    Elf_Addr target;

    rlock_acquire();
    if (obj->pltrel)
	rel = (const Elf_Rel *) ((caddr_t) obj->pltrel + reloff);
    else
	rel = (const Elf_Rel *) ((caddr_t) obj->pltrela + reloff);

    where = (Elf_Addr *) (obj->relocbase + rel->r_offset);
    def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL);
    if (def == NULL)
	die();

    target = (Elf_Addr)(defobj->relocbase + def->st_value);

    dbg("\"%s\" in \"%s\" ==> %p in \"%s\"",
      defobj->strtab + def->st_name, basename(obj->path),
      (void *)target, basename(defobj->path));

-    reloc_jmpslot(where, target);
+    /*
+     * Write the new contents for the jmpslot. Note that depending on
+     * architecture, the value which we need to return back to the
+     * lazy binding trampoline may or may not be the target
+     * address. The value returned from reloc_jmpslot() is the value
+     * that the trampoline needs.
+     */
+    target = reloc_jmpslot(where, target, defobj);
    rlock_release();
    return target;
}

/*
 * Error reporting function.  Use it like printf.  If formats the message
 * into a buffer, and sets things up so that the next call to dlerror()
 * will return the message.
 */
void
_rtld_error(const char *fmt, ...)
{
    static char buf[512];
    va_list ap;

    va_start(ap, fmt);
    vsnprintf(buf, sizeof buf, fmt, ap);
    error_message = buf;
    va_end(ap);
}

/*
 * Return a dynamically-allocated copy of the current error message, if any.
 */
static char *
errmsg_save(void)
{
    return error_message == NULL ? NULL : xstrdup(error_message);
}

/*
 * Restore the current error message from a copy which was previously saved
 * by errmsg_save().  The copy is freed.
 */
static void
errmsg_restore(char *saved_msg)
{
    if (saved_msg == NULL)
	error_message = NULL;
    else {
	_rtld_error("%s", saved_msg);
	free(saved_msg);
    }
}

/* Return the final component of a pathname, or the whole name if no '/'. */
static const char *
basename(const char *name)
{
    const char *p = strrchr(name, '/');
    return p != NULL ? p + 1 : name;
}

/* Print the pending dlerror() message (or a generic one) and exit. */
static void
die(void)
{
    const char *msg = dlerror();

    if (msg == NULL)
	msg = "Fatal error";
    errx(1, "%s", msg);
}

/*
 * Process a shared object's DYNAMIC section, and save the important
 * information in its Obj_Entry structure.
 */
static void
digest_dynamic(Obj_Entry *obj)
{
    const Elf_Dyn *dynp;
    Needed_Entry **needed_tail = &obj->needed;
    const Elf_Dyn *dyn_rpath = NULL;
    int plttype = DT_REL;

    for (dynp = obj->dynamic;  dynp->d_tag != DT_NULL;  dynp++) {
	switch (dynp->d_tag) {

	case DT_REL:
	    obj->rel = (const Elf_Rel *) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_RELSZ:
	    obj->relsize = dynp->d_un.d_val;
	    break;

	case DT_RELENT:
	    assert(dynp->d_un.d_val == sizeof(Elf_Rel));
	    break;

	case DT_JMPREL:
	    obj->pltrel = (const Elf_Rel *)
	      (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_PLTRELSZ:
	    obj->pltrelsize = dynp->d_un.d_val;
	    break;

	case DT_RELA:
	    obj->rela = (const Elf_Rela *) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_RELASZ:
	    obj->relasize = dynp->d_un.d_val;
	    break;

	case DT_RELAENT:
	    assert(dynp->d_un.d_val == sizeof(Elf_Rela));
	    break;

	case DT_PLTREL:
	    plttype = dynp->d_un.d_val;
	    assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA);
	    break;

	case DT_SYMTAB:
	    obj->symtab = (const Elf_Sym *)
	      (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_SYMENT:
	    assert(dynp->d_un.d_val == sizeof(Elf_Sym));
	    break;

	case DT_STRTAB:
	    obj->strtab = (const char *) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_STRSZ:
	    obj->strsize = dynp->d_un.d_val;
	    break;

	case DT_HASH:
	    {
-		const Elf_Addr *hashtab = (const Elf_Addr *)
+		const Elf_Hashelt *hashtab = (const Elf_Hashelt *)
		  (obj->relocbase + dynp->d_un.d_ptr);
		/* SysV hash layout: nbuckets, nchains, buckets[], chains[]. */
		obj->nbuckets = hashtab[0];
		obj->nchains = hashtab[1];
		obj->buckets = hashtab + 2;
		obj->chains = obj->buckets + obj->nbuckets;
	    }
	    break;

	case DT_NEEDED:
	    if (!obj->rtld) {
		Needed_Entry *nep = NEW(Needed_Entry);
		nep->name = dynp->d_un.d_val;
		nep->obj = NULL;
		nep->next = NULL;

		*needed_tail = nep;
		needed_tail = &nep->next;
	    }
	    break;

	case DT_PLTGOT:
	    obj->pltgot = (Elf_Addr *) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_TEXTREL:
	    obj->textrel = true;
	    break;

	case DT_SYMBOLIC:
	    obj->symbolic = true;
	    break;

	case DT_RPATH:
	    /*
	     * We have to wait until later to process this, because we
	     * might not have gotten the address of the string table yet.
	     */
	    dyn_rpath = dynp;
	    break;

	case DT_SONAME:
	    /* Not used by the dynamic linker. */
	    break;

	case DT_INIT:
	    obj->init = (InitFunc) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_FINI:
	    obj->fini = (InitFunc) (obj->relocbase + dynp->d_un.d_ptr);
	    break;

	case DT_DEBUG:
	    /* XXX - not implemented yet */
	    dbg("Filling in DT_DEBUG entry");
	    ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug;
	    break;

	default:
	    dbg("Ignoring d_tag %d = %#x", dynp->d_tag, dynp->d_tag);
	    break;
	}
    }

    obj->traced = false;

    /* If the PLT relocations are Rela, move them to the rela fields. */
    if (plttype == DT_RELA) {
	obj->pltrela = (const Elf_Rela *) obj->pltrel;
	obj->pltrel = NULL;
	obj->pltrelasize = obj->pltrelsize;
	obj->pltrelsize = 0;
    }

    if (dyn_rpath != NULL)
	obj->rpath = obj->strtab + dyn_rpath->d_un.d_val;
}

/*
 * Process a shared object's program header.  This is used only for the
 * main program, when the kernel has already loaded the main program
 * into memory before calling the dynamic linker.  It creates and
 * returns an Obj_Entry structure.
 */
static Obj_Entry *
digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path)
{
    Obj_Entry *obj;
    const Elf_Phdr *phlimit = phdr + phnum;
    const Elf_Phdr *ph;
    int nsegs = 0;

    obj = obj_new();
    for (ph = phdr;  ph < phlimit;  ph++) {
	switch (ph->p_type) {

	case PT_PHDR:
	    if ((const Elf_Phdr *)ph->p_vaddr != phdr) {
		_rtld_error("%s: invalid PT_PHDR", path);
		return NULL;
	    }
	    obj->phdr = (const Elf_Phdr *) ph->p_vaddr;
	    obj->phsize = ph->p_memsz;
	    break;

	case PT_INTERP:
	    obj->interp = (const char *) ph->p_vaddr;
	    break;

	case PT_LOAD:
	    if (nsegs >= 2) {
		_rtld_error("%s: too many PT_LOAD segments", path);
		return NULL;
	    }
	    if (nsegs == 0) {	/* First load segment */
		obj->vaddrbase = trunc_page(ph->p_vaddr);
		obj->mapbase = (caddr_t) obj->vaddrbase;
		obj->relocbase = obj->mapbase - obj->vaddrbase;
		obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) -
		  obj->vaddrbase;
	    } else {		/* Last load segment */
		obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) -
		  obj->vaddrbase;
	    }
	    nsegs++;
	    break;

	case PT_DYNAMIC:
	    obj->dynamic = (const Elf_Dyn *) ph->p_vaddr;
	    break;
	}
    }
    if (nsegs < 2) {
	_rtld_error("%s: too few PT_LOAD segments", path);
	return NULL;
    }

    obj->entry = entry;
    return obj;
}

/* Validate a dlopen()-style handle: it must be a live, dlopened object. */
static Obj_Entry *
dlcheck(void *handle)
{
    Obj_Entry *obj;

    for (obj = obj_list;  obj != NULL;  obj = obj->next)
	if (obj == (Obj_Entry *) handle)
	    break;

    if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) {
	_rtld_error("Invalid shared object handle %p", handle);
	return NULL;
    }
    return obj;
}

/*
 * If the given object is already in the donelist, return true.  Otherwise
 * add the object to the list and return false.
 */
static bool
donelist_check(DoneList *dlp, const Obj_Entry *obj)
{
    unsigned int i;

    for (i = 0;  i < dlp->num_used;  i++)
	if (dlp->objs[i] == obj)
	    return true;
    /*
     * Our donelist allocation should always be sufficient.  But if
     * our threads locking isn't working properly, more shared objects
     * could have been loaded since we allocated the list.  That should
     * never happen, but we'll handle it properly just in case it does.
     */
    if (dlp->num_used < dlp->num_alloc)
	dlp->objs[dlp->num_used++] = obj;
    return false;
}

/*
 * Hash function for symbol table lookup.  Don't even think about changing
 * this.  It is specified by the System V ABI.
 */
unsigned long
elf_hash(const char *name)
{
    const unsigned char *p = (const unsigned char *) name;
    unsigned long h = 0;
    unsigned long g;

    while (*p != '\0') {
	h = (h << 4) + *p++;
	if ((g = h & 0xf0000000) != 0)
	    h ^= g >> 24;
	h &= ~g;
    }
    return h;
}

/*
 * Find the library with the given name, and return its full pathname.
 * The returned string is dynamically allocated.  Generates an error
 * message and returns NULL if the library cannot be found.
 *
 * If the second argument is non-NULL, then it refers to an already-
 * loaded shared object, whose library search path will be searched.
 *
 * The search order is:
 *   rpath in the referencing file
 *   LD_LIBRARY_PATH
 *   ldconfig hints
 *   /usr/lib
 */
static char *
find_library(const char *name, const Obj_Entry *refobj)
{
    char *pathname;

    if (strchr(name, '/') != NULL) {	/* Hard coded pathname */
	if (name[0] != '/' && !trust) {
	    _rtld_error("Absolute pathname required for shared object \"%s\"",
	      name);
	    return NULL;
	}
	return xstrdup(name);
    }

    dbg(" Searching for \"%s\"", name);

    if ((refobj != NULL &&
      (pathname = search_library_path(name, refobj->rpath)) != NULL) ||
      (pathname = search_library_path(name, ld_library_path)) != NULL ||
      (pathname = search_library_path(name, gethints())) != NULL ||
      (pathname = search_library_path(name, STANDARD_LIBRARY_PATH)) != NULL)
	return pathname;

    _rtld_error("Shared object \"%s\" not found", name);
    return NULL;
}

/*
 * Given a symbol number in a referencing object, find the corresponding
 * definition of the symbol.  Returns a pointer to the symbol, or NULL if
 * no definition was found.  Returns a pointer to the Obj_Entry of the
 * defining object via the reference parameter DEFOBJ_OUT.
*/ const Elf_Sym * find_symdef(unsigned long symnum, const Obj_Entry *refobj, const Obj_Entry **defobj_out, bool in_plt, SymCache *cache) { const Elf_Sym *ref; const Elf_Sym *def; const Obj_Entry *defobj; const char *name; unsigned long hash; /* * If we have already found this symbol, get the information from * the cache. */ if (symnum >= refobj->nchains) return NULL; /* Bad object */ if (cache != NULL && cache[symnum].sym != NULL) { *defobj_out = cache[symnum].obj; return cache[symnum].sym; } ref = refobj->symtab + symnum; name = refobj->strtab + ref->st_name; hash = elf_hash(name); defobj = NULL; def = symlook_default(name, hash, refobj, &defobj, in_plt); /* * If we found no definition and the reference is weak, treat the * symbol as having the value zero. */ if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) { def = &sym_zero; defobj = obj_main; } if (def != NULL) { *defobj_out = defobj; /* Record the information in the cache to avoid subsequent lookups. */ if (cache != NULL) { cache[symnum].sym = def; cache[symnum].obj = defobj; } - } else - _rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name); + } else { + if (refobj != &obj_rtld) + _rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name); + } return def; } /* * Return the search path from the ldconfig hints file, reading it if * necessary. Returns NULL if there are problems with the hints file, * or if the search path there is empty. */ static const char * gethints(void) { static char *hints; if (hints == NULL) { int fd; struct elfhints_hdr hdr; char *p; /* Keep from trying again in case the hints file is bad. 
*/ hints = ""; if ((fd = open(_PATH_ELF_HINTS, O_RDONLY)) == -1) return NULL; if (read(fd, &hdr, sizeof hdr) != sizeof hdr || hdr.magic != ELFHINTS_MAGIC || hdr.version != 1) { close(fd); return NULL; } p = xmalloc(hdr.dirlistlen + 1); if (lseek(fd, hdr.strtab + hdr.dirlist, SEEK_SET) == -1 || read(fd, p, hdr.dirlistlen + 1) != hdr.dirlistlen + 1) { free(p); close(fd); return NULL; } hints = p; close(fd); } return hints[0] != '\0' ? hints : NULL; } static void init_dag(Obj_Entry *root) { DoneList donelist; donelist_init(&donelist); init_dag1(root, root, &donelist); } static void init_dag1(Obj_Entry *root, Obj_Entry *obj, DoneList *dlp) { const Needed_Entry *needed; if (donelist_check(dlp, obj)) return; objlist_push_tail(&obj->dldags, root); objlist_push_tail(&root->dagmembers, obj); for (needed = obj->needed; needed != NULL; needed = needed->next) if (needed->obj != NULL) init_dag1(root, needed->obj, dlp); } /* * Initialize the dynamic linker. The argument is the address at which * the dynamic linker has been mapped into memory. The primary task of * this function is to relocate the dynamic linker. */ static void init_rtld(caddr_t mapbase) { /* * Conjure up an Obj_Entry structure for the dynamic linker. * * The "path" member is supposed to be dynamically-allocated, but we * aren't yet initialized sufficiently to do that. Below we will * replace the static version with a dynamically-allocated copy. */ obj_rtld.path = PATH_RTLD; obj_rtld.rtld = true; obj_rtld.mapbase = mapbase; #ifdef PIC obj_rtld.relocbase = mapbase; #endif if (&_DYNAMIC != 0) { obj_rtld.dynamic = rtld_dynamic(&obj_rtld); digest_dynamic(&obj_rtld); assert(obj_rtld.needed == NULL); assert(!obj_rtld.textrel); /* * Temporarily put the dynamic linker entry into the object list, so * that symbols can be found. */ obj_list = &obj_rtld; obj_tail = &obj_rtld.next; obj_count = 1; relocate_objects(&obj_rtld, true); } /* Make the object list empty again. 
*/ obj_list = NULL; obj_tail = &obj_list; obj_count = 0; /* Replace the path with a dynamically allocated copy. */ obj_rtld.path = xstrdup(obj_rtld.path); r_debug.r_brk = r_debug_state; r_debug.r_state = RT_CONSISTENT; } /* * Add the init functions from a needed object list (and its recursive * needed objects) to "list". This is not used directly; it is a helper * function for initlist_add_objects(). The write lock must be held * when this function is called. */ static void initlist_add_neededs(Needed_Entry *needed, Objlist *list) { /* Recursively process the successor needed objects. */ if (needed->next != NULL) initlist_add_neededs(needed->next, list); /* Process the current needed object. */ if (needed->obj != NULL) initlist_add_objects(needed->obj, &needed->obj->next, list); } /* * Scan all of the DAGs rooted in the range of objects from "obj" to * "tail" and add their init functions to "list". This recurses over * the DAGs and ensure the proper init ordering such that each object's * needed libraries are initialized before the object itself. At the * same time, this function adds the objects to the global finalization * list "list_fini" in the opposite order. The write lock must be * held when this function is called. */ static void initlist_add_objects(Obj_Entry *obj, Obj_Entry **tail, Objlist *list) { if (obj->init_done) return; obj->init_done = true; /* Recursively process the successor objects. */ if (&obj->next != tail) initlist_add_objects(obj->next, tail, list); /* Recursively process the needed objects. */ if (obj->needed != NULL) initlist_add_neededs(obj->needed, list); /* Add the object to the init list. */ if (obj->init != NULL) objlist_push_tail(list, obj); /* Add the object to the global fini list in the reverse order. 
*/ if (obj->fini != NULL) objlist_push_head(&list_fini, obj); } +#ifndef FPTR_TARGET +#define FPTR_TARGET(f) ((Elf_Addr) (f)) +#endif + static bool is_exported(const Elf_Sym *def) { - func_ptr_type value; + Elf_Addr value; const func_ptr_type *p; - value = (func_ptr_type)(obj_rtld.relocbase + def->st_value); - for (p = exports; *p != NULL; p++) - if (*p == value) + value = (Elf_Addr)(obj_rtld.relocbase + def->st_value); + for (p = exports; *p != NULL; p++) + if (FPTR_TARGET(*p) == value) return true; return false; } /* * Given a shared object, traverse its list of needed objects, and load * each of them. Returns 0 on success. Generates an error message and * returns -1 on failure. */ static int load_needed_objects(Obj_Entry *first) { Obj_Entry *obj; for (obj = first; obj != NULL; obj = obj->next) { Needed_Entry *needed; for (needed = obj->needed; needed != NULL; needed = needed->next) { const char *name = obj->strtab + needed->name; char *path = find_library(name, obj); needed->obj = NULL; if (path == NULL && !ld_tracing) return -1; if (path) { needed->obj = load_object(path); if (needed->obj == NULL && !ld_tracing) return -1; /* XXX - cleanup */ } } } return 0; } static int load_preload_objects(void) { char *p = ld_preload; static const char delim[] = " \t:;"; if (p == NULL) return NULL; p += strspn(p, delim); while (*p != '\0') { size_t len = strcspn(p, delim); char *path; char savech; savech = p[len]; p[len] = '\0'; if ((path = find_library(p, NULL)) == NULL) return -1; if (load_object(path) == NULL) return -1; /* XXX - cleanup */ p[len] = savech; p += len; p += strspn(p, delim); } return 0; } /* * Load a shared object into memory, if it is not already loaded. The * argument must be a string allocated on the heap. This function assumes * responsibility for freeing it when necessary. * * Returns a pointer to the Obj_Entry for the object. Returns NULL * on failure. 
*/ static Obj_Entry * load_object(char *path) { Obj_Entry *obj; int fd = -1; struct stat sb; for (obj = obj_list->next; obj != NULL; obj = obj->next) if (strcmp(obj->path, path) == 0) break; /* * If we didn't find a match by pathname, open the file and check * again by device and inode. This avoids false mismatches caused * by multiple links or ".." in pathnames. * * To avoid a race, we open the file and use fstat() rather than * using stat(). */ if (obj == NULL) { if ((fd = open(path, O_RDONLY)) == -1) { _rtld_error("Cannot open \"%s\"", path); return NULL; } if (fstat(fd, &sb) == -1) { _rtld_error("Cannot fstat \"%s\"", path); close(fd); return NULL; } for (obj = obj_list->next; obj != NULL; obj = obj->next) { if (obj->ino == sb.st_ino && obj->dev == sb.st_dev) { close(fd); break; } } } if (obj == NULL) { /* First use of this object, so we must map it in */ dbg("loading \"%s\"", path); obj = map_object(fd, path, &sb); close(fd); if (obj == NULL) { free(path); return NULL; } obj->path = path; digest_dynamic(obj); *obj_tail = obj; obj_tail = &obj->next; obj_count++; linkmap_add(obj); /* for GDB */ dbg(" %p .. %p: %s", obj->mapbase, obj->mapbase + obj->mapsize - 1, obj->path); if (obj->textrel) dbg(" WARNING: %s has impure text", obj->path); } else free(path); obj->refcount++; return obj; } /* * Check for locking violations and die if one is found. */ static void lock_check(void) { int rcount, wcount; rcount = lockinfo.rcount; wcount = lockinfo.wcount; assert(rcount >= 0); assert(wcount >= 0); if (wcount > 1 || (wcount != 0 && rcount != 0)) { _rtld_error("Application locking error: %d readers and %d writers" " in dynamic linker. 
See DLLOCKINIT(3) in manual pages.", rcount, wcount); die(); } } static Obj_Entry * obj_from_addr(const void *addr) { unsigned long endhash; Obj_Entry *obj; endhash = elf_hash(END_SYM); for (obj = obj_list; obj != NULL; obj = obj->next) { const Elf_Sym *endsym; if (addr < (void *) obj->mapbase) continue; if ((endsym = symlook_obj(END_SYM, endhash, obj, true)) == NULL) continue; /* No "end" symbol?! */ if (addr < (void *) (obj->relocbase + endsym->st_value)) return obj; } return NULL; } /* * Call the finalization functions for each of the objects in "list" * which are unreferenced. All of the objects are expected to have * non-NULL fini functions. */ static void objlist_call_fini(Objlist *list) { Objlist_Entry *elm; char *saved_msg; /* * Preserve the current error message since a fini function might * call into the dynamic linker and overwrite it. */ saved_msg = errmsg_save(); STAILQ_FOREACH(elm, list, link) { if (elm->obj->refcount == 0) { dbg("calling fini function for %s", elm->obj->path); (*elm->obj->fini)(); } } errmsg_restore(saved_msg); } /* * Call the initialization functions for each of the objects in * "list". All of the objects are expected to have non-NULL init * functions. */ static void objlist_call_init(Objlist *list) { Objlist_Entry *elm; char *saved_msg; /* * Preserve the current error message since an init function might * call into the dynamic linker and overwrite it. 
*/ saved_msg = errmsg_save(); STAILQ_FOREACH(elm, list, link) { dbg("calling init function for %s", elm->obj->path); (*elm->obj->init)(); } errmsg_restore(saved_msg); } static void objlist_clear(Objlist *list) { Objlist_Entry *elm; while (!STAILQ_EMPTY(list)) { elm = STAILQ_FIRST(list); STAILQ_REMOVE_HEAD(list, link); free(elm); } } static Objlist_Entry * objlist_find(Objlist *list, const Obj_Entry *obj) { Objlist_Entry *elm; STAILQ_FOREACH(elm, list, link) if (elm->obj == obj) return elm; return NULL; } static void objlist_init(Objlist *list) { STAILQ_INIT(list); } static void objlist_push_head(Objlist *list, Obj_Entry *obj) { Objlist_Entry *elm; elm = NEW(Objlist_Entry); elm->obj = obj; STAILQ_INSERT_HEAD(list, elm, link); } static void objlist_push_tail(Objlist *list, Obj_Entry *obj) { Objlist_Entry *elm; elm = NEW(Objlist_Entry); elm->obj = obj; STAILQ_INSERT_TAIL(list, elm, link); } static void objlist_remove(Objlist *list, Obj_Entry *obj) { Objlist_Entry *elm; if ((elm = objlist_find(list, obj)) != NULL) { STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link); free(elm); } } /* * Remove all of the unreferenced objects from "list". */ static void objlist_remove_unref(Objlist *list) { Objlist newlist; Objlist_Entry *elm; STAILQ_INIT(&newlist); while (!STAILQ_EMPTY(list)) { elm = STAILQ_FIRST(list); STAILQ_REMOVE_HEAD(list, link); if (elm->obj->refcount == 0) free(elm); else STAILQ_INSERT_TAIL(&newlist, elm, link); } *list = newlist; } /* * Relocate newly-loaded shared objects. The argument is a pointer to * the Obj_Entry for the first such object. All objects from the first * to the end of the list of objects are relocated. Returns 0 on success, * or -1 on failure. 
*/ static int relocate_objects(Obj_Entry *first, bool bind_now) { Obj_Entry *obj; for (obj = first; obj != NULL; obj = obj->next) { if (obj != &obj_rtld) dbg("relocating \"%s\"", obj->path); if (obj->nbuckets == 0 || obj->nchains == 0 || obj->buckets == NULL || obj->symtab == NULL || obj->strtab == NULL) { _rtld_error("%s: Shared object has no run-time symbol table", obj->path); return -1; } if (obj->textrel) { /* There are relocations to the write-protected text segment. */ if (mprotect(obj->mapbase, obj->textsize, PROT_READ|PROT_WRITE|PROT_EXEC) == -1) { _rtld_error("%s: Cannot write-enable text segment: %s", obj->path, strerror(errno)); return -1; } } /* Process the non-PLT relocations. */ if (reloc_non_plt(obj, &obj_rtld)) return -1; if (obj->textrel) { /* Re-protected the text segment. */ if (mprotect(obj->mapbase, obj->textsize, PROT_READ|PROT_EXEC) == -1) { _rtld_error("%s: Cannot write-protect text segment: %s", obj->path, strerror(errno)); return -1; } } /* Process the PLT relocations. */ if (reloc_plt(obj) == -1) return -1; /* Relocate the jump slots if we are doing immediate binding. */ if (bind_now) if (reloc_jmpslots(obj) == -1) return -1; /* * Set up the magic number and version in the Obj_Entry. These * were checked in the crt1.o from the original ElfKit, so we * set them for backward compatibility. */ obj->magic = RTLD_MAGIC; obj->version = RTLD_VERSION; /* Set the special PLT or GOT entries. */ init_pltgot(obj); } return 0; } /* * Cleanup procedure. It will be called (by the atexit mechanism) just * before the process exits. */ static void rtld_exit(void) { Obj_Entry *obj; dbg("rtld_exit()"); wlock_acquire(); /* Clear all the reference counts so the fini functions will be called. */ for (obj = obj_list; obj != NULL; obj = obj->next) obj->refcount = 0; wlock_release(); objlist_call_fini(&list_fini); /* No need to remove the items from the list, since we are exiting. 
*/ } static char * search_library_path(const char *name, const char *path) { size_t namelen = strlen(name); const char *p = path; if (p == NULL) return NULL; p += strspn(p, ":;"); while (*p != '\0') { size_t len = strcspn(p, ":;"); if (*p == '/' || trust) { char *pathname; const char *dir = p; size_t dirlen = len; pathname = xmalloc(dirlen + 1 + namelen + 1); strncpy(pathname, dir, dirlen); pathname[dirlen] = '/'; strcpy(pathname + dirlen + 1, name); dbg(" Trying \"%s\"", pathname); if (access(pathname, F_OK) == 0) /* We found it */ return pathname; free(pathname); } p += len; p += strspn(p, ":;"); } return NULL; } int dlclose(void *handle) { Obj_Entry *root; wlock_acquire(); root = dlcheck(handle); if (root == NULL) { wlock_release(); return -1; } /* Unreference the object and its dependencies. */ root->dl_refcount--; unref_dag(root); if (root->refcount == 0) { /* * The object is no longer referenced, so we must unload it. * First, call the fini functions with no locks held. */ wlock_release(); objlist_call_fini(&list_fini); wlock_acquire(); objlist_remove_unref(&list_fini); /* Finish cleaning up the newly-unreferenced objects. */ GDB_STATE(RT_DELETE,&root->linkmap); unload_object(root); GDB_STATE(RT_CONSISTENT,NULL); } wlock_release(); return 0; } const char * dlerror(void) { char *msg = error_message; error_message = NULL; return msg; } /* * This function is deprecated and has no effect. */ void dllockinit(void *context, void *(*lock_create)(void *context), void (*rlock_acquire)(void *lock), void (*wlock_acquire)(void *lock), void (*lock_release)(void *lock), void (*lock_destroy)(void *lock), void (*context_destroy)(void *context)) { static void *cur_context; static void (*cur_context_destroy)(void *); /* Just destroy the context from the previous call, if necessary. 
*/ if (cur_context_destroy != NULL) cur_context_destroy(cur_context); cur_context = context; cur_context_destroy = context_destroy; } void * dlopen(const char *name, int mode) { Obj_Entry **old_obj_tail; Obj_Entry *obj; Objlist initlist; objlist_init(&initlist); wlock_acquire(); GDB_STATE(RT_ADD,NULL); old_obj_tail = obj_tail; obj = NULL; if (name == NULL) { obj = obj_main; obj->refcount++; } else { char *path = find_library(name, obj_main); if (path != NULL) obj = load_object(path); } if (obj) { obj->dl_refcount++; if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL) objlist_push_tail(&list_global, obj); mode &= RTLD_MODEMASK; if (*old_obj_tail != NULL) { /* We loaded something new. */ assert(*old_obj_tail == obj); if (load_needed_objects(obj) == -1 || (init_dag(obj), relocate_objects(obj, mode == RTLD_NOW)) == -1) { obj->dl_refcount--; unref_dag(obj); if (obj->refcount == 0) unload_object(obj); obj = NULL; } else { /* Make list of init functions to call. */ initlist_add_objects(obj, &obj->next, &initlist); } } } GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL); /* Call the init functions with no locks held. */ wlock_release(); objlist_call_init(&initlist); wlock_acquire(); objlist_clear(&initlist); wlock_release(); return obj; } void * dlsym(void *handle, const char *name) { const Obj_Entry *obj; unsigned long hash; const Elf_Sym *def; const Obj_Entry *defobj; hash = elf_hash(name); def = NULL; defobj = NULL; rlock_acquire(); if (handle == NULL || handle == RTLD_NEXT || handle == RTLD_DEFAULT) { void *retaddr; retaddr = __builtin_return_address(0); /* __GNUC__ only */ if ((obj = obj_from_addr(retaddr)) == NULL) { _rtld_error("Cannot determine caller's shared object"); rlock_release(); return NULL; } if (handle == NULL) { /* Just the caller's shared object. 
*/ def = symlook_obj(name, hash, obj, true); defobj = obj; } else if (handle == RTLD_NEXT) { /* Objects after caller's */ while ((obj = obj->next) != NULL) { if ((def = symlook_obj(name, hash, obj, true)) != NULL) { defobj = obj; break; } } } else { assert(handle == RTLD_DEFAULT); def = symlook_default(name, hash, obj, &defobj, true); } } else { if ((obj = dlcheck(handle)) == NULL) { rlock_release(); return NULL; } if (obj->mainprog) { DoneList donelist; /* Search main program and all libraries loaded by it. */ donelist_init(&donelist); def = symlook_list(name, hash, &list_main, &defobj, true, &donelist); } else { /* * XXX - This isn't correct. The search should include the whole * DAG rooted at the given object. */ def = symlook_obj(name, hash, obj, true); defobj = obj; } } if (def != NULL) { rlock_release(); - return defobj->relocbase + def->st_value; + + /* + * The value required by the caller is derived from the value + * of the symbol. For the ia64 architecture, we need to + * construct a function descriptor which the caller can use to + * call the function with the right 'gp' value. For other + * architectures and for non-functions, the value is simply + * the relocated value of the symbol. + */ + if (ELF_ST_TYPE(def->st_info) == STT_FUNC) + return make_function_pointer(def, defobj); + else + return defobj->relocbase + def->st_value; } _rtld_error("Undefined symbol \"%s\"", name); rlock_release(); return NULL; } int dladdr(const void *addr, Dl_info *info) { const Obj_Entry *obj; const Elf_Sym *def; void *symbol_addr; unsigned long symoffset; rlock_acquire(); obj = obj_from_addr(addr); if (obj == NULL) { _rtld_error("No shared object contains address"); rlock_release(); return 0; } info->dli_fname = obj->path; info->dli_fbase = obj->mapbase; info->dli_saddr = (void *)0; info->dli_sname = NULL; /* * Walk the symbol list looking for the symbol whose address is * closest to the address sent in. 
*/ for (symoffset = 0; symoffset < obj->nchains; symoffset++) { def = obj->symtab + symoffset; /* * For skip the symbol if st_shndx is either SHN_UNDEF or * SHN_COMMON. */ if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON) continue; /* * If the symbol is greater than the specified address, or if it * is further away from addr than the current nearest symbol, * then reject it. */ symbol_addr = obj->relocbase + def->st_value; if (symbol_addr > addr || symbol_addr < info->dli_saddr) continue; /* Update our idea of the nearest symbol. */ info->dli_sname = obj->strtab + def->st_name; info->dli_saddr = symbol_addr; /* Exact match? */ if (info->dli_saddr == addr) break; } rlock_release(); return 1; } static void linkmap_add(Obj_Entry *obj) { struct link_map *l = &obj->linkmap; struct link_map *prev; obj->linkmap.l_name = obj->path; obj->linkmap.l_addr = obj->mapbase; obj->linkmap.l_ld = obj->dynamic; #ifdef __mips__ /* GDB needs load offset on MIPS to use the symbols */ obj->linkmap.l_offs = obj->relocbase; #endif if (r_debug.r_map == NULL) { r_debug.r_map = l; return; } /* * Scan to the end of the list, but not past the entry for the * dynamic linker, which we want to keep at the very end. */ for (prev = r_debug.r_map; prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap; prev = prev->l_next) ; /* Link in the new entry. */ l->l_prev = prev; l->l_next = prev->l_next; if (l->l_next != NULL) l->l_next->l_prev = l; prev->l_next = l; } static void linkmap_delete(Obj_Entry *obj) { struct link_map *l = &obj->linkmap; if (l->l_prev == NULL) { if ((r_debug.r_map = l->l_next) != NULL) l->l_next->l_prev = NULL; return; } if ((l->l_prev->l_next = l->l_next) != NULL) l->l_next->l_prev = l->l_prev; } /* * Function for the debugger to set a breakpoint on to gain control. * * The two parameters allow the debugger to easily find and determine * what the runtime loader is doing and to whom it is doing it. 
* * When the loadhook trap is hit (r_debug_state, set at program * initialization), the arguments can be found on the stack: * * +8 struct link_map *m * +4 struct r_debug *rd * +0 RetAddr */ void r_debug_state(struct r_debug* rd, struct link_map *m) { } /* * Set a pointer variable in the main program to the given value. This * is used to set key variables such as "environ" before any of the * init functions are called. */ static void set_program_var(const char *name, const void *value) { const Obj_Entry *obj; unsigned long hash; hash = elf_hash(name); for (obj = obj_main; obj != NULL; obj = obj->next) { const Elf_Sym *def; if ((def = symlook_obj(name, hash, obj, false)) != NULL) { const void **addr; addr = (const void **)(obj->relocbase + def->st_value); dbg("\"%s\": *%p <-- %p", name, addr, value); *addr = value; break; } } } /* * Given a symbol name in a referencing object, find the corresponding * definition of the symbol. Returns a pointer to the symbol, or NULL if * no definition was found. Returns a pointer to the Obj_Entry of the * defining object via the reference parameter DEFOBJ_OUT. */ static const Elf_Sym * symlook_default(const char *name, unsigned long hash, const Obj_Entry *refobj, const Obj_Entry **defobj_out, bool in_plt) { DoneList donelist; const Elf_Sym *def; const Elf_Sym *symp; const Obj_Entry *obj; const Obj_Entry *defobj; const Objlist_Entry *elm; def = NULL; defobj = NULL; donelist_init(&donelist); /* Look first in the referencing object if linked symbolically. */ if (refobj->symbolic && !donelist_check(&donelist, refobj)) { symp = symlook_obj(name, hash, refobj, in_plt); if (symp != NULL) { def = symp; defobj = refobj; } } /* Search all objects loaded at program start up. 
*/ if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { symp = symlook_list(name, hash, &list_main, &obj, in_plt, &donelist); if (symp != NULL && (def == NULL || ELF_ST_BIND(symp->st_info) != STB_WEAK)) { def = symp; defobj = obj; } } /* Search all dlopened DAGs containing the referencing object. */ STAILQ_FOREACH(elm, &refobj->dldags, link) { if (def != NULL && ELF_ST_BIND(def->st_info) != STB_WEAK) break; symp = symlook_list(name, hash, &elm->obj->dagmembers, &obj, in_plt, &donelist); if (symp != NULL && (def == NULL || ELF_ST_BIND(symp->st_info) != STB_WEAK)) { def = symp; defobj = obj; } } /* Search all RTLD_GLOBAL objects. */ if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { symp = symlook_list(name, hash, &list_global, &obj, in_plt, &donelist); if (symp != NULL && (def == NULL || ELF_ST_BIND(symp->st_info) != STB_WEAK)) { def = symp; defobj = obj; } } /* * Search the dynamic linker itself, and possibly resolve the * symbol from there. This is how the application links to * dynamic linker services such as dlopen. Only the values listed * in the "exports" array can be resolved from the dynamic linker. 
*/ if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { symp = symlook_obj(name, hash, &obj_rtld, in_plt); if (symp != NULL && is_exported(symp)) { def = symp; defobj = &obj_rtld; } } if (def != NULL) *defobj_out = defobj; return def; } static const Elf_Sym * symlook_list(const char *name, unsigned long hash, Objlist *objlist, const Obj_Entry **defobj_out, bool in_plt, DoneList *dlp) { const Elf_Sym *symp; const Elf_Sym *def; const Obj_Entry *defobj; const Objlist_Entry *elm; def = NULL; defobj = NULL; STAILQ_FOREACH(elm, objlist, link) { if (donelist_check(dlp, elm->obj)) continue; if ((symp = symlook_obj(name, hash, elm->obj, in_plt)) != NULL) { if (def == NULL || ELF_ST_BIND(symp->st_info) != STB_WEAK) { def = symp; defobj = elm->obj; if (ELF_ST_BIND(def->st_info) != STB_WEAK) break; } } } if (def != NULL) *defobj_out = defobj; return def; } /* * Search the symbol table of a single shared object for a symbol of * the given name. Returns a pointer to the symbol, or NULL if no * definition was found. * * The symbol's hash value is passed in for efficiency reasons; that * eliminates many recomputations of the hash value. */ const Elf_Sym * symlook_obj(const char *name, unsigned long hash, const Obj_Entry *obj, bool in_plt) { if (obj->buckets != NULL) { unsigned long symnum = obj->buckets[hash % obj->nbuckets]; while (symnum != STN_UNDEF) { const Elf_Sym *symp; const char *strp; if (symnum >= obj->nchains) return NULL; /* Bad object */ symp = obj->symtab + symnum; strp = obj->strtab + symp->st_name; if (name[0] == strp[0] && strcmp(name, strp) == 0) return symp->st_shndx != SHN_UNDEF || (!in_plt && symp->st_value != 0 && ELF_ST_TYPE(symp->st_info) == STT_FUNC) ? 
symp : NULL; symnum = obj->chains[symnum]; } } return NULL; } static void trace_loaded_objects(Obj_Entry *obj) { char *fmt1, *fmt2, *fmt, *main_local; int c; if ((main_local = getenv("LD_TRACE_LOADED_OBJECTS_PROGNAME")) == NULL) main_local = ""; if ((fmt1 = getenv("LD_TRACE_LOADED_OBJECTS_FMT1")) == NULL) fmt1 = "\t%o => %p (%x)\n"; if ((fmt2 = getenv("LD_TRACE_LOADED_OBJECTS_FMT2")) == NULL) fmt2 = "\t%o (%x)\n"; for (; obj; obj = obj->next) { Needed_Entry *needed; char *name, *path; bool is_lib; for (needed = obj->needed; needed; needed = needed->next) { if (needed->obj != NULL) { if (needed->obj->traced) continue; needed->obj->traced = true; path = needed->obj->path; } else path = "not found"; name = (char *)obj->strtab + needed->name; is_lib = strncmp(name, "lib", 3) == 0; /* XXX - bogus */ fmt = is_lib ? fmt1 : fmt2; while ((c = *fmt++) != '\0') { switch (c) { default: putchar(c); continue; case '\\': switch (c = *fmt) { case '\0': continue; case 'n': putchar('\n'); break; case 't': putchar('\t'); break; } break; case '%': switch (c = *fmt) { case '\0': continue; case '%': default: putchar(c); break; case 'A': printf("%s", main_local); break; case 'a': printf("%s", obj_main->path); break; case 'o': printf("%s", name); break; #if 0 case 'm': printf("%d", sodp->sod_major); break; case 'n': printf("%d", sodp->sod_minor); break; #endif case 'p': printf("%s", path); break; case 'x': printf("%p", needed->obj ? needed->obj->mapbase : 0); break; } break; } ++fmt; } } } } /* * Unload a dlopened object and its dependencies from memory and from * our data structures. It is assumed that the DAG rooted in the * object has already been unreferenced, and that the object has a * reference count of 0. */ static void unload_object(Obj_Entry *root) { Obj_Entry *obj; Obj_Entry **linkp; Objlist_Entry *elm; assert(root->refcount == 0); /* Remove the DAG from all objects' DAG lists. 
*/ STAILQ_FOREACH(elm, &root->dagmembers , link) objlist_remove(&elm->obj->dldags, root); /* Remove the DAG from the RTLD_GLOBAL list. */ objlist_remove(&list_global, root); /* Unmap all objects that are no longer referenced. */ linkp = &obj_list->next; while ((obj = *linkp) != NULL) { if (obj->refcount == 0) { dbg("unloading \"%s\"", obj->path); munmap(obj->mapbase, obj->mapsize); linkmap_delete(obj); *linkp = obj->next; obj_count--; obj_free(obj); } else linkp = &obj->next; } obj_tail = linkp; } static void unref_dag(Obj_Entry *root) { const Needed_Entry *needed; if (root->refcount == 0) return; root->refcount--; if (root->refcount == 0) for (needed = root->needed; needed != NULL; needed = needed->next) if (needed->obj != NULL) unref_dag(needed->obj); } /* * Non-mallocing printf, for use by malloc itself. * XXX - This doesn't belong in this module. */ void xprintf(const char *fmt, ...) { char buf[256]; va_list ap; va_start(ap, fmt); vsprintf(buf, fmt, ap); (void)write(STDOUT_FILENO, buf, strlen(buf)); va_end(ap); } Index: head/libexec/rtld-elf/rtld.h =================================================================== --- head/libexec/rtld-elf/rtld.h (revision 85003) +++ head/libexec/rtld-elf/rtld.h (revision 85004) @@ -1,203 +1,204 @@ /*- * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ #ifndef RTLD_H /* { */ #define RTLD_H 1 #include #include #include #include #include #include #include "rtld_machdep.h" #ifndef STANDARD_LIBRARY_PATH #define STANDARD_LIBRARY_PATH "/usr/lib/elf:/usr/lib" #endif #define NEW(type) ((type *) xmalloc(sizeof(type))) #define CNEW(type) ((type *) xcalloc(sizeof(type))) /* We might as well do booleans like C++. */ typedef unsigned char bool; #define false 0 #define true 1 struct stat; struct Struct_Obj_Entry; /* Lists of shared objects */ typedef struct Struct_Objlist_Entry { STAILQ_ENTRY(Struct_Objlist_Entry) link; struct Struct_Obj_Entry *obj; } Objlist_Entry; typedef STAILQ_HEAD(Struct_Objlist, Struct_Objlist_Entry) Objlist; /* Types of init and fini functions */ typedef void (*InitFunc)(void); /* Lists of shared object dependencies */ typedef struct Struct_Needed_Entry { struct Struct_Needed_Entry *next; struct Struct_Obj_Entry *obj; unsigned long name; /* Offset of name in string table */ } Needed_Entry; /* Lock object */ typedef struct Struct_LockInfo { void *context; /* Client context for creating locks */ void *thelock; /* The one big lock */ /* Debugging aids. 
*/ volatile int rcount; /* Number of readers holding lock */ volatile int wcount; /* Number of writers holding lock */ /* Methods */ void *(*lock_create)(void *context); void (*rlock_acquire)(void *lock); void (*wlock_acquire)(void *lock); void (*rlock_release)(void *lock); void (*wlock_release)(void *lock); void (*lock_destroy)(void *lock); void (*context_destroy)(void *context); } LockInfo; /* * Shared object descriptor. * * Items marked with "(%)" are dynamically allocated, and must be freed * when the structure is destroyed. * * CAUTION: It appears that the JDK port peeks into these structures. * It looks at "next" and "mapbase" at least. Don't add new members * near the front, until this can be straightened out. */ typedef struct Struct_Obj_Entry { /* * These two items have to be set right for compatibility with the * original ElfKit crt1.o. */ Elf_Word magic; /* Magic number (sanity check) */ Elf_Word version; /* Version number of struct format */ struct Struct_Obj_Entry *next; char *path; /* Pathname of underlying file (%) */ int refcount; int dl_refcount; /* Number of times loaded by dlopen */ /* These items are computed by map_object() or by digest_phdr(). */ caddr_t mapbase; /* Base address of mapped region */ size_t mapsize; /* Size of mapped region in bytes */ size_t textsize; /* Size of text segment in bytes */ Elf_Addr vaddrbase; /* Base address in shared object file */ caddr_t relocbase; /* Relocation constant = mapbase - vaddrbase */ const Elf_Dyn *dynamic; /* Dynamic section */ caddr_t entry; /* Entry point */ const Elf_Phdr *phdr; /* Program header if it is mapped, else NULL */ size_t phsize; /* Size of program header in bytes */ const char *interp; /* Pathname of the interpreter, if any */ /* Items from the dynamic section. 
*/ Elf_Addr *pltgot; /* PLT or GOT, depending on architecture */ const Elf_Rel *rel; /* Relocation entries */ unsigned long relsize; /* Size in bytes of relocation info */ const Elf_Rela *rela; /* Relocation entries with addend */ unsigned long relasize; /* Size in bytes of addend relocation info */ const Elf_Rel *pltrel; /* PLT relocation entries */ unsigned long pltrelsize; /* Size in bytes of PLT relocation info */ const Elf_Rela *pltrela; /* PLT relocation entries with addend */ unsigned long pltrelasize; /* Size in bytes of PLT addend reloc info */ const Elf_Sym *symtab; /* Symbol table */ const char *strtab; /* String table */ unsigned long strsize; /* Size in bytes of string table */ - const Elf_Addr *buckets; /* Hash table buckets array */ + const Elf_Hashelt *buckets; /* Hash table buckets array */ unsigned long nbuckets; /* Number of buckets */ - const Elf_Addr *chains; /* Hash table chain array */ + const Elf_Hashelt *chains; /* Hash table chain array */ unsigned long nchains; /* Number of chains */ const char *rpath; /* Search path specified in object */ Needed_Entry *needed; /* Shared objects needed by this one (%) */ InitFunc init; /* Initialization function to call */ InitFunc fini; /* Termination function to call */ bool mainprog; /* True if this is the main program */ bool rtld; /* True if this is the dynamic linker */ bool textrel; /* True if there are relocations to text seg */ bool symbolic; /* True if generated with "-Bsymbolic" */ bool traced; /* Already printed in ldd trace output */ bool jmpslots_done; /* Already have relocated the jump slots */ bool init_done; /* Already have added object to init list */ struct link_map linkmap; /* for GDB */ Objlist dldags; /* Object belongs to these dlopened DAGs (%) */ Objlist dagmembers; /* DAG has these members (%) */ dev_t dev; /* Object's filesystem's device */ ino_t ino; /* Object's inode number */ + void *priv; /* Platform-dependant */ } Obj_Entry; #define RTLD_MAGIC 0xd550b87a #define RTLD_VERSION 
1 /* * Symbol cache entry used during relocation to avoid multiple lookups * of the same symbol. */ typedef struct Struct_SymCache { const Elf_Sym *sym; /* Symbol table entry */ const Obj_Entry *obj; /* Shared object which defines it */ } SymCache; extern void _rtld_error(const char *, ...) __printflike(1, 2); extern Obj_Entry *map_object(int, const char *, const struct stat *); extern void *xcalloc(size_t); extern void *xmalloc(size_t); extern char *xstrdup(const char *); extern Elf_Addr _GLOBAL_OFFSET_TABLE_[]; /* * Function declarations. */ int do_copy_relocations(Obj_Entry *); unsigned long elf_hash(const char *); const Elf_Sym *find_symdef(unsigned long, const Obj_Entry *, const Obj_Entry **, bool, SymCache *); void init_pltgot(Obj_Entry *); void lockdflt_init(LockInfo *); void obj_free(Obj_Entry *); Obj_Entry *obj_new(void); int reloc_non_plt(Obj_Entry *, Obj_Entry *); int reloc_plt(Obj_Entry *); int reloc_jmpslots(Obj_Entry *); void _rtld_bind_start(void); const Elf_Sym *symlook_obj(const char *, unsigned long, const Obj_Entry *, bool); #endif /* } */