diff --git a/src/bin/rtld-elf/Makefile b/src/bin/rtld-elf/Makefile
new file mode 100644
index 0000000..5c97230
--- /dev/null
+++ b/src/bin/rtld-elf/Makefile
@@ -0,0 +1,58 @@
+# $Id$
+# Application Makefile (C) 2002-2004 The UbixOS Project
+
+# Include Global 'Source' Options
+include ../../Makefile.inc
+include ../Makefile.inc
+
+INCLUDES = -I../../include.new -I./
+
+CFLAGS+= -Wall -DFREEBSD_ELF -DIN_RTLD -DPIC
+CFLAGS = -O2 -fno-strict-aliasing -pipe -Wall -DFREEBSD_ELF -DIN_RTLD -I./ -fpic -elf -DPIC -std=gnu99 -Wformat=2 -Wno-format-extra-args -Werror -DDEBUG
+
+
+#Linker
+LD = ld
+
+#Binary File Name
+OUTPUT = ld-elf.so.1
+
+#Delete Program
+REMOVE = rm -f
+
+#Objects
+OBJS = rtld_start.o reloc.o rtld.o rtld_lock.o map_object.o malloc.o xmalloc.o debug.o libmap.o
+
+LIBRARIES = #../../lib/libc/libc.so
+
+
+# Link The Binary
+$(OUTPUT) : $(OBJS)
+#	$(CC) -fpic -nostdlib -shared -Wl,-soname,$(OUTPUT) -e .rtld_start -o $(OUTPUT) $(OBJS) $(LIBRARIES) $(SUBS)
+#	$(CC) $(CFLAGS) -nostdlib -e .rtld_start -elf -shared -Wl,-Bsymbolic -o $(OUTPUT) $(OBJS) -lc_pic
+#	$(CC) $(CFLAGS) -nostdlib -e .rtld_start -elf -shared -Wl,-Bsymbolic -o $(OUTPUT) $(OBJS) ./libc_pic.a #-lc_pic
+	$(CC) $(CFLAGS) -nostdlib -e .rtld_start -elf -shared -Wl,-Bsymbolic -o $(OUTPUT) $(OBJS) ../../lib/libc/pic.a #-lc_pic
+#	strip $(OUTPUT)
+
+# Compile the source files
+.cpp.o:
+	$(CXX) -Wall -O $(CFLAGS) $(INCLUDES) -c -o $@ $<
+
+.cc.o:
+	$(CXX) -Wall -O $(CFLAGS) $(INCLUDES) -c -o $@ $<
+
+.cc.s:
+	$(CXX) -Wall -O $(CFLAGS) $(INCLUDES) -S -o $@ $<
+
+.c.o:
+	$(CC) -Wall -O $(CFLAGS) $(INCLUDES) -c -o $@ $<
+
+.c.s:
+	$(CC) -Wall -O $(CFLAGS) $(INCLUDES) -S -o $@ $<
+
+.S.o:
+	$(CC) -Wall $(CFLAGS) $(INCLUDES) -c -o $@ $<
+
+# Clean Up The junk
+clean:
+	$(REMOVE) $(OBJS) $(OUTPUT)
diff --git a/src/bin/rtld-elf/debug.c b/src/bin/rtld-elf/debug.c
new file mode 100644
index 0000000..3a83b66
--- /dev/null
+++ b/src/bin/rtld-elf/debug.c
@@ -0,0 +1,143 @@
+/*-
+ * Copyright 1996-1998 John D. Polstra.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: src/libexec/rtld-elf/debug.c,v 1.4 2003/06/19 16:09:18 mdodd Exp $
+ */
+
+/*
+ * Support for printing debugging messages.
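+ * Output goes to stderr and is produced only when the global `debug' flag
+ * is nonzero (rtld turns it on when its debugging environment variable is
+ * set); when DEBUG is not defined, the dbg() macro compiles away entirely.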
+ */ + +#include +#include + +#include "debug.h" +#include "rtld.h" + +static const char rel_header[] = + " symbol name r_info r_offset st_value st_size address value\n" + " ------------------------------------------------------------------------------\n"; +static const char rel_format[] = " %-25s %6lx %08lx %08lx %7d %10p %08lx\n"; + +int debug = 0; + +void +debug_printf(const char *format, ...) +{ + if (debug) { + va_list ap; + va_start(ap, format); + + fflush(stdout); + vfprintf(stderr, format, ap); + putc('\n', stderr); + + va_end(ap); + } +} + +void +dump_relocations (Obj_Entry *obj0) +{ + Obj_Entry *obj; + + for (obj = obj0; obj != NULL; obj = obj->next) { + dump_obj_relocations(obj); + } +} + +void +dump_obj_relocations (Obj_Entry *obj) +{ + + printf("Object \"%s\", relocbase %p\n", obj->path, obj->relocbase); + + if (obj->relsize) { + printf("Non-PLT Relocations: %ld\n", + (obj->relsize / sizeof(Elf_Rel))); + dump_Elf_Rel(obj, obj->rel, obj->relsize); + } + + if (obj->relasize) { + printf("Non-PLT Relocations with Addend: %ld\n", + (obj->relasize / sizeof(Elf_Rela))); + dump_Elf_Rela(obj, obj->rela, obj->relasize); + } + + if (obj->pltrelsize) { + printf("PLT Relocations: %ld\n", + (obj->pltrelsize / sizeof(Elf_Rel))); + dump_Elf_Rel(obj, obj->pltrel, obj->pltrelsize); + } + + if (obj->pltrelasize) { + printf("PLT Relocations with Addend: %ld\n", + (obj->pltrelasize / sizeof(Elf_Rela))); + dump_Elf_Rela(obj, obj->pltrela, obj->pltrelasize); + } +} + +void +dump_Elf_Rel (Obj_Entry *obj, const Elf_Rel *rel0, u_long relsize) +{ + const Elf_Rel *rel; + const Elf_Rel *rellim; + const Elf_Sym *sym; + Elf_Addr *dstaddr; + + printf("%s", rel_header); + rellim = (const Elf_Rel *)((const char *)rel0 + relsize); + for (rel = rel0; rel < rellim; rel++) { + dstaddr = (Elf_Addr *)(obj->relocbase + rel->r_offset); + sym = obj->symtab + ELF_R_SYM(rel->r_info); + printf(rel_format, + obj->strtab + sym->st_name, + (u_long)rel->r_info, (u_long)rel->r_offset, + (u_long)sym->st_value, (int)sym->st_size, + dstaddr, (u_long)*dstaddr); + } + return; +} + +void +dump_Elf_Rela (Obj_Entry *obj, const Elf_Rela *rela0, u_long relasize) +{ + const Elf_Rela *rela; + const Elf_Rela *relalim; + const Elf_Sym *sym; + Elf_Addr *dstaddr; + + printf("%s", rel_header); + relalim = (const Elf_Rela *)((const char *)rela0 + relasize); + for (rela = rela0; rela < relalim; rela++) { + dstaddr = (Elf_Addr *)(obj->relocbase + rela->r_offset); + sym = obj->symtab + ELF_R_SYM(rela->r_info); + printf(rel_format, + obj->strtab + sym->st_name, + (u_long)rela->r_info, (u_long)rela->r_offset, + (u_long)sym->st_value, (int)sym->st_size, + dstaddr, (u_long)*dstaddr); + } + return; +} diff --git a/src/bin/rtld-elf/debug.h b/src/bin/rtld-elf/debug.h new file mode 100644 index 0000000..68faa0a --- /dev/null +++ b/src/bin/rtld-elf/debug.h @@ -0,0 +1,66 @@ +/*- + * Copyright 1996-1998 John D. Polstra. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD: src/libexec/rtld-elf/debug.h,v 1.6 2004/03/21 01:21:26 peter Exp $ + */ + +/* + * Support for printing debugging messages. + */ + +#ifndef DEBUG_H +#define DEBUG_H 1 + +#ifndef __GNUC__ +#error "This file must be compiled with GCC" +#endif + +#include + +#include +#include + +extern void debug_printf(const char *, ...) __printflike(1, 2); +extern int debug; + +#ifdef DEBUG +#define dbg(format, args...) debug_printf(format , ## args) +#else +#define dbg(format, args...) ((void) 0) +#endif + +#ifndef COMPAT_32BIT +#define _MYNAME "ld-elf.so.1" +#else +#define _MYNAME "ld-elf32.so.1" +#endif + +#define assert(cond) ((cond) ? (void) 0 : \ + (msg(_MYNAME ": assert failed: " __FILE__ ":" \ + __XSTRING(__LINE__) "\n"), abort())) +#define msg(s) write(STDOUT_FILENO, s, strlen(s)) +#define trace() msg(_MYNAME ": " __XSTRING(__LINE__) "\n") + + +#endif /* DEBUG_H */ diff --git a/src/bin/rtld-elf/libmap.c b/src/bin/rtld-elf/libmap.c new file mode 100644 index 0000000..b78fe4c --- /dev/null +++ b/src/bin/rtld-elf/libmap.c @@ -0,0 +1,371 @@ +/* + * $FreeBSD: src/libexec/rtld-elf/libmap.c,v 1.14 2005/02/04 02:46:41 mdodd Exp $ + */ + +#include +#include +#include +#include +#include +#include + +#include "debug.h" +#include "rtld.h" +#include "libmap.h" + +#ifndef _PATH_LIBMAP_CONF +#define _PATH_LIBMAP_CONF "/etc/libmap.conf" +#endif + +#ifdef COMPAT_32BIT +#undef _PATH_LIBMAP_CONF +#define _PATH_LIBMAP_CONF "/etc/libmap32.conf" +#endif + +TAILQ_HEAD(lm_list, lm); +struct lm { + char *f; + char *t; + + TAILQ_ENTRY(lm) lm_link; +}; + +TAILQ_HEAD(lmp_list, lmp) lmp_head = TAILQ_HEAD_INITIALIZER(lmp_head); +struct lmp { + char *p; + enum { T_EXACT=0, T_BASENAME, T_DIRECTORY } type; + struct lm_list lml; + TAILQ_ENTRY(lmp) lmp_link; +}; + +static int lm_count; + +static void lmc_parse (FILE *); +static void lm_add (const char *, const char *, const char *); +static void lm_free (struct lm_list *); +static char * lml_find (struct lm_list *, const char *); +static struct lm_list * lmp_find (const char *); +static struct lm_list * lmp_init (char *); +static const char * quickbasename (const char *); +static int readstrfn (void * cookie, char *buf, int len); +static int closestrfn (void * cookie); + +#define iseol(c) (((c) == '#') || ((c) == '\0') || \ + ((c) == '\n') || ((c) == '\r')) + +int +lm_init (char *libmap_override) +{ + FILE *fp; + + dbg("%s(\"%s\")", __func__, libmap_override); + + TAILQ_INIT(&lmp_head); + + fp = fopen(_PATH_LIBMAP_CONF, "r"); + if (fp) { + lmc_parse(fp); + fclose(fp); + } + + if (libmap_override) { + char *p; + /* do some character replacement to make $LIBMAP look like a + text file, then "open" it with funopen */ + libmap_override = xstrdup(libmap_override); + + for (p = libmap_override; *p; p++) { + switch (*p) { + case '=': + 
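+				/* '=' becomes the whitespace separator lmc_parse() expects */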
*p = ' '; break; + case ',': + *p = '\n'; break; + } + } + fp = funopen(libmap_override, readstrfn, NULL, NULL, closestrfn); + if (fp) { + lmc_parse(fp); + fclose(fp); + } + } + + return (lm_count == 0); +} + +static void +lmc_parse (FILE *fp) +{ + char *cp; + char *f, *t, *c, *p; + char prog[MAXPATHLEN]; + char line[MAXPATHLEN + 2]; + + dbg("%s(%p)", __func__, fp); + + p = NULL; + while ((cp = fgets(line, MAXPATHLEN + 1, fp)) != NULL) { + t = f = c = NULL; + + /* Skip over leading space */ + while (isspace(*cp)) cp++; + + /* Found a comment or EOL */ + if (iseol(*cp)) continue; + + /* Found a constraint selector */ + if (*cp == '[') { + cp++; + + /* Skip leading space */ + while (isspace(*cp)) cp++; + + /* Found comment, EOL or end of selector */ + if (iseol(*cp) || *cp == ']') + continue; + + c = cp++; + /* Skip to end of word */ + while (!isspace(*cp) && !iseol(*cp) && *cp != ']') + cp++; + + /* Skip and zero out trailing space */ + while (isspace(*cp)) *cp++ = '\0'; + + /* Check if there is a closing brace */ + if (*cp != ']') continue; + + /* Terminate string if there was no trailing space */ + *cp++ = '\0'; + + /* + * There should be nothing except whitespace or comment + from this point to the end of the line. + */ + while(isspace(*cp)) cp++; + if (!iseol(*cp)) continue; + + strcpy(prog, c); + p = prog; + continue; + } + + /* Parse the 'from' candidate. */ + f = cp++; + while (!isspace(*cp) && !iseol(*cp)) cp++; + + /* Skip and zero out the trailing whitespace */ + while (isspace(*cp)) *cp++ = '\0'; + + /* Found a comment or EOL */ + if (iseol(*cp)) continue; + + /* Parse 'to' mapping */ + t = cp++; + while (!isspace(*cp) && !iseol(*cp)) cp++; + + /* Skip and zero out the trailing whitespace */ + while (isspace(*cp)) *cp++ = '\0'; + + /* Should be no extra tokens at this point */ + if (!iseol(*cp)) continue; + + *cp = '\0'; + lm_add(p, f, t); + } +} + +static void +lm_free (struct lm_list *lml) +{ + struct lm *lm; + + dbg("%s(%p)", __func__, lml); + + while (!TAILQ_EMPTY(lml)) { + lm = TAILQ_FIRST(lml); + TAILQ_REMOVE(lml, lm, lm_link); + free(lm->f); + free(lm->t); + free(lm); + } + return; +} + +void +lm_fini (void) +{ + struct lmp *lmp; + + dbg("%s()", __func__); + + while (!TAILQ_EMPTY(&lmp_head)) { + lmp = TAILQ_FIRST(&lmp_head); + TAILQ_REMOVE(&lmp_head, lmp, lmp_link); + free(lmp->p); + lm_free(&lmp->lml); + free(lmp); + } + return; +} + +static void +lm_add (const char *p, const char *f, const char *t) +{ + struct lm_list *lml; + struct lm *lm; + + if (p == NULL) + p = "$DEFAULT$"; + + dbg("%s(\"%s\", \"%s\", \"%s\")", __func__, p, f, t); + + if ((lml = lmp_find(p)) == NULL) + lml = lmp_init(xstrdup(p)); + + lm = xmalloc(sizeof(struct lm)); + lm->f = xstrdup(f); + lm->t = xstrdup(t); + TAILQ_INSERT_HEAD(lml, lm, lm_link); + lm_count++; +} + +char * +lm_find (const char *p, const char *f) +{ + struct lm_list *lml; + char *t; + + dbg("%s(\"%s\", \"%s\")", __func__, p, f); + + if (p != NULL && (lml = lmp_find(p)) != NULL) { + t = lml_find(lml, f); + if (t != NULL) { + /* + * Add a global mapping if we have + * a successful constrained match. 
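+			 * Later unconstrained lookups for the same name are then
+			 * satisfied from the $DEFAULT$ list without walking the
+			 * per-program lists again.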
+ */ + lm_add(NULL, f, t); + return (t); + } + } + lml = lmp_find("$DEFAULT$"); + if (lml != NULL) + return (lml_find(lml, f)); + else + return (NULL); +} + +/* Given a libmap translation list and a library name, return the + replacement library, or NULL */ +#ifdef COMPAT_32BIT +char * +lm_findn (const char *p, const char *f, const int n) +{ + char pathbuf[64], *s, *t; + + if (n < sizeof(pathbuf) - 1) { + memcpy(pathbuf, f, n); + pathbuf[n] = '\0'; + s = pathbuf; + } else { + s = xmalloc(n + 1); + strcpy(s, f); + } + t = lm_find(p, s); + if (s != pathbuf) + free(s); + return (t); +} +#endif + +static char * +lml_find (struct lm_list *lmh, const char *f) +{ + struct lm *lm; + + dbg("%s(%p, \"%s\")", __func__, lmh, f); + + TAILQ_FOREACH(lm, lmh, lm_link) + if (strcmp(f, lm->f) == 0) + return (lm->t); + return (NULL); +} + +/* Given an executable name, return a pointer to the translation list or + NULL if no matches */ +static struct lm_list * +lmp_find (const char *n) +{ + struct lmp *lmp; + + dbg("%s(\"%s\")", __func__, n); + + TAILQ_FOREACH(lmp, &lmp_head, lmp_link) + if ((lmp->type == T_EXACT && strcmp(n, lmp->p) == 0) || + (lmp->type == T_DIRECTORY && strncmp(n, lmp->p, strlen(lmp->p)) == 0) || + (lmp->type == T_BASENAME && strcmp(quickbasename(n), lmp->p) == 0)) + return (&lmp->lml); + return (NULL); +} + +static struct lm_list * +lmp_init (char *n) +{ + struct lmp *lmp; + + dbg("%s(\"%s\")", __func__, n); + + lmp = xmalloc(sizeof(struct lmp)); + lmp->p = n; + if (n[strlen(n)-1] == '/') + lmp->type = T_DIRECTORY; + else if (strchr(n,'/') == NULL) + lmp->type = T_BASENAME; + else + lmp->type = T_EXACT; + TAILQ_INIT(&lmp->lml); + TAILQ_INSERT_HEAD(&lmp_head, lmp, lmp_link); + + return (&lmp->lml); +} + +/* libc basename is overkill. Return a pointer to the character after the + last /, or the original string if there are no slashes. */ +static const char * +quickbasename (const char *path) +{ + const char *p = path; + for (; *path; path++) { + if (*path == '/') + p = path+1; + } + return (p); +} + +static int +readstrfn(void * cookie, char *buf, int len) +{ + static char *current; + static int left; + int copied; + + copied = 0; + if (!current) { + current = cookie; + left = strlen(cookie); + } + while (*current && left && len) { + *buf++ = *current++; + left--; + len--; + copied++; + } + return copied; +} + +static int +closestrfn(void * cookie) +{ + free(cookie); + return 0; +} diff --git a/src/bin/rtld-elf/libmap.h b/src/bin/rtld-elf/libmap.h new file mode 100644 index 0000000..6b0991d --- /dev/null +++ b/src/bin/rtld-elf/libmap.h @@ -0,0 +1,10 @@ +/* + * $FreeBSD: src/libexec/rtld-elf/libmap.h,v 1.4 2005/02/04 02:46:41 mdodd Exp $ + */ + +int lm_init (char *); +void lm_fini (void); +char * lm_find (const char *, const char *); +#ifdef COMPAT_32BIT +char * lm_findn (const char *, const char *, const int); +#endif diff --git a/src/bin/rtld-elf/malloc.c b/src/bin/rtld-elf/malloc.c new file mode 100644 index 0000000..c8fa19e --- /dev/null +++ b/src/bin/rtld-elf/malloc.c @@ -0,0 +1,502 @@ +/*- + * Copyright (c) 1983 Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#if defined(LIBC_SCCS) && !defined(lint) +/*static char *sccsid = "from: @(#)malloc.c 5.11 (Berkeley) 2/23/91";*/ +static char *rcsid = "$FreeBSD: src/libexec/rtld-elf/malloc.c,v 1.10 2003/08/22 02:22:59 imp Exp $"; +#endif /* LIBC_SCCS and not lint */ + +/* + * malloc.c (Caltech) 2/21/82 + * Chris Kingsley, kingsley@cit-20. + * + * This is a very fast storage allocator. It allocates blocks of a small + * number of different sizes, and keeps free lists of each size. Blocks that + * don't exactly fit are passed up to the next larger size. In this + * implementation, the available sizes are 2^n-4 (or 2^n-10) bytes long. + * This is designed for use in a virtual memory environment. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifndef BSD +#define MAP_COPY MAP_PRIVATE +#define MAP_FILE 0 +#define MAP_ANON 0 +#endif + +#ifndef BSD /* Need do better than this */ +#define NEED_DEV_ZERO 1 +#endif + +static void morecore(); +static int findbucket(); + +/* + * Pre-allocate mmap'ed pages + */ +#define NPOOLPAGES (32*1024/pagesz) +static caddr_t pagepool_start, pagepool_end; +static int morepages(); + +/* + * The overhead on a block is at least 4 bytes. When free, this space + * contains a pointer to the next free block, and the bottom two bits must + * be zero. When in use, the first byte is set to MAGIC, and the second + * byte is the size index. The remaining bytes are for alignment. + * If range checking is enabled then a second word holds the size of the + * requested block, less 1, rounded up to a multiple of sizeof(RMAGIC). + * The order of elements is critical: ov_magic must overlay the low order + * bits of ov_next, and ov_magic can not be a valid ov_next bit pattern. 
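+ * (MAGIC is 0xef, whose low bits are set, so it can never be mistaken for
+ * one of the word-aligned free-list pointers.)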
+ */ +union overhead { + union overhead *ov_next; /* when free */ + struct { + u_char ovu_magic; /* magic number */ + u_char ovu_index; /* bucket # */ +#ifdef RCHECK + u_short ovu_rmagic; /* range magic number */ + u_int ovu_size; /* actual block size */ +#endif + } ovu; +#define ov_magic ovu.ovu_magic +#define ov_index ovu.ovu_index +#define ov_rmagic ovu.ovu_rmagic +#define ov_size ovu.ovu_size +}; + +#define MAGIC 0xef /* magic # on accounting info */ +#define RMAGIC 0x5555 /* magic # on range info */ + +#ifdef RCHECK +#define RSLOP sizeof (u_short) +#else +#define RSLOP 0 +#endif + +/* + * nextf[i] is the pointer to the next free block of size 2^(i+3). The + * smallest allocatable block is 8 bytes. The overhead information + * precedes the data area returned to the user. + */ +#define NBUCKETS 30 +static union overhead *nextf[NBUCKETS]; + +static int pagesz; /* page size */ +static int pagebucket; /* page size bucket */ + +#ifdef MSTATS +/* + * nmalloc[i] is the difference between the number of mallocs and frees + * for a given block size. + */ +static u_int nmalloc[NBUCKETS]; +#include +#endif + +#if defined(MALLOC_DEBUG) || defined(RCHECK) +#define ASSERT(p) if (!(p)) botch("p") +#include +static void +botch(s) + char *s; +{ + fprintf(stderr, "\r\nassertion botched: %s\r\n", s); + (void) fflush(stderr); /* just in case user buffered it */ + abort(); +} +#else +#define ASSERT(p) +#endif + +/* Debugging stuff */ +static void xprintf(const char *, ...); +#define TRACE() xprintf("TRACE %s:%d\n", __FILE__, __LINE__) + +void * +malloc(nbytes) + size_t nbytes; +{ + register union overhead *op; + register int bucket; + register long n; + register unsigned amt; + + /* + * First time malloc is called, setup page size and + * align break pointer so all data will be page aligned. + */ + if (pagesz == 0) { + pagesz = n = getpagesize(); + if (morepages(NPOOLPAGES) == 0) + return NULL; + op = (union overhead *)(pagepool_start); + n = n - sizeof (*op) - ((long)op & (n - 1)); + if (n < 0) + n += pagesz; + if (n) { + pagepool_start += n; + } + bucket = 0; + amt = 8; + while ((unsigned)pagesz > amt) { + amt <<= 1; + bucket++; + } + pagebucket = bucket; + } + /* + * Convert amount of memory requested into closest block size + * stored in hash buckets which satisfies request. + * Account for space used per block for accounting. + */ + if (nbytes <= (unsigned long)(n = pagesz - sizeof (*op) - RSLOP)) { +#ifndef RCHECK + amt = 8; /* size of first bucket */ + bucket = 0; +#else + amt = 16; /* size of first bucket */ + bucket = 1; +#endif + n = -(sizeof (*op) + RSLOP); + } else { + amt = pagesz; + bucket = pagebucket; + } + while (nbytes > amt + n) { + amt <<= 1; + if (amt == 0) + return (NULL); + bucket++; + } + /* + * If nothing in hash bucket right now, + * request more memory from the system. + */ + if ((op = nextf[bucket]) == NULL) { + morecore(bucket); + if ((op = nextf[bucket]) == NULL) + return (NULL); + } + /* remove from linked list */ + nextf[bucket] = op->ov_next; + op->ov_magic = MAGIC; + op->ov_index = bucket; +#ifdef MSTATS + nmalloc[bucket]++; +#endif +#ifdef RCHECK + /* + * Record allocated size of block and + * bound space with magic numbers. + */ + op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); + op->ov_rmagic = RMAGIC; + *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; +#endif + return ((char *)(op + 1)); +} + +/* + * Allocate more memory to the indicated bucket. 
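+ * Requests smaller than a page are carved into nblks pieces from one page
+ * of the mmap'ed pool and chained onto nextf[bucket]; larger requests get
+ * a single block of sz plus one page.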
+ */ +static void +morecore(bucket) + int bucket; +{ + register union overhead *op; + register int sz; /* size of desired block */ + int amt; /* amount to allocate */ + int nblks; /* how many blocks we get */ + + /* + * sbrk_size <= 0 only for big, FLUFFY, requests (about + * 2^30 bytes on a VAX, I think) or for a negative arg. + */ + sz = 1 << (bucket + 3); +#ifdef MALLOC_DEBUG + ASSERT(sz > 0); +#else + if (sz <= 0) + return; +#endif + if (sz < pagesz) { + amt = pagesz; + nblks = amt / sz; + } else { + amt = sz + pagesz; + nblks = 1; + } + if (amt > pagepool_end - pagepool_start) + if (morepages(amt/pagesz + NPOOLPAGES) == 0) + return; + op = (union overhead *)pagepool_start; + pagepool_start += amt; + + /* + * Add new memory allocated to that on + * free list for this hash bucket. + */ + nextf[bucket] = op; + while (--nblks > 0) { + op->ov_next = (union overhead *)((caddr_t)op + sz); + op = (union overhead *)((caddr_t)op + sz); + } +} + +void +free(cp) + void *cp; +{ + register int size; + register union overhead *op; + + if (cp == NULL) + return; + op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); +#ifdef MALLOC_DEBUG + ASSERT(op->ov_magic == MAGIC); /* make sure it was in use */ +#else + if (op->ov_magic != MAGIC) + return; /* sanity */ +#endif +#ifdef RCHECK + ASSERT(op->ov_rmagic == RMAGIC); + ASSERT(*(u_short *)((caddr_t)(op + 1) + op->ov_size) == RMAGIC); +#endif + size = op->ov_index; + ASSERT(size < NBUCKETS); + op->ov_next = nextf[size]; /* also clobbers ov_magic */ + nextf[size] = op; +#ifdef MSTATS + nmalloc[size]--; +#endif +} + +/* + * When a program attempts "storage compaction" as mentioned in the + * old malloc man page, it realloc's an already freed block. Usually + * this is the last block it freed; occasionally it might be farther + * back. We have to search all the free lists for the block in order + * to determine its bucket: 1st we make one pass thru the lists + * checking only the first block in each; if that fails we search + * ``realloc_srchlen'' blocks in each list for a match (the variable + * is extern so the caller can modify it). If that fails we just copy + * however many bytes was given to realloc() and hope it's not huge. + */ +int realloc_srchlen = 4; /* 4 should be plenty, -1 =>'s whole list */ + +void * +realloc(cp, nbytes) + void *cp; + size_t nbytes; +{ + register u_int onb; + register int i; + union overhead *op; + char *res; + int was_alloced = 0; + + if (cp == NULL) + return (malloc(nbytes)); + op = (union overhead *)((caddr_t)cp - sizeof (union overhead)); + if (op->ov_magic == MAGIC) { + was_alloced++; + i = op->ov_index; + } else { + /* + * Already free, doing "compaction". + * + * Search for the old block of memory on the + * free list. First, check the most common + * case (last element free'd), then (this failing) + * the last ``realloc_srchlen'' items free'd. + * If all lookups fail, then assume the size of + * the memory block being realloc'd is the + * largest possible (so that all "nbytes" of new + * memory are copied into). Note that this could cause + * a memory fault if the old area was tiny, and the moon + * is gibbous. However, that is very unlikely. 
+ */ + if ((i = findbucket(op, 1)) < 0 && + (i = findbucket(op, realloc_srchlen)) < 0) + i = NBUCKETS; + } + onb = 1 << (i + 3); + if (onb < (u_int)pagesz) + onb -= sizeof (*op) + RSLOP; + else + onb += pagesz - sizeof (*op) - RSLOP; + /* avoid the copy if same size block */ + if (was_alloced) { + if (i) { + i = 1 << (i + 2); + if (i < pagesz) + i -= sizeof (*op) + RSLOP; + else + i += pagesz - sizeof (*op) - RSLOP; + } + if (nbytes <= onb && nbytes > (size_t)i) { +#ifdef RCHECK + op->ov_size = (nbytes + RSLOP - 1) & ~(RSLOP - 1); + *(u_short *)((caddr_t)(op + 1) + op->ov_size) = RMAGIC; +#endif + return(cp); + } else + free(cp); + } + if ((res = malloc(nbytes)) == NULL) + return (NULL); + if (cp != res) /* common optimization if "compacting" */ + bcopy(cp, res, (nbytes < onb) ? nbytes : onb); + return (res); +} + +/* + * Search ``srchlen'' elements of each free list for a block whose + * header starts at ``freep''. If srchlen is -1 search the whole list. + * Return bucket number, or -1 if not found. + */ +static int +findbucket(freep, srchlen) + union overhead *freep; + int srchlen; +{ + register union overhead *p; + register int i, j; + + for (i = 0; i < NBUCKETS; i++) { + j = 0; + for (p = nextf[i]; p && j != srchlen; p = p->ov_next) { + if (p == freep) + return (i); + j++; + } + } + return (-1); +} + +#ifdef MSTATS +/* + * mstats - print out statistics about malloc + * + * Prints two lines of numbers, one showing the length of the free list + * for each size category, the second showing the number of mallocs - + * frees for each size category. + */ +mstats(s) + char *s; +{ + register int i, j; + register union overhead *p; + int totfree = 0, + totused = 0; + + fprintf(stderr, "Memory allocation statistics %s\nfree:\t", s); + for (i = 0; i < NBUCKETS; i++) { + for (j = 0, p = nextf[i]; p; p = p->ov_next, j++) + ; + fprintf(stderr, " %d", j); + totfree += j * (1 << (i + 3)); + } + fprintf(stderr, "\nused:\t"); + for (i = 0; i < NBUCKETS; i++) { + fprintf(stderr, " %d", nmalloc[i]); + totused += nmalloc[i] * (1 << (i + 3)); + } + fprintf(stderr, "\n\tTotal in use: %d, total free: %d\n", + totused, totfree); +} +#endif + + +static int +morepages(n) +int n; +{ + int fd = -1; + int offset; + +#ifdef NEED_DEV_ZERO + fd = open(_PATH_DEVZERO, O_RDWR, 0); + if (fd == -1) + perror(_PATH_DEVZERO); +#endif + + if (pagepool_end - pagepool_start > pagesz) { + caddr_t addr = (caddr_t) + (((long)pagepool_start + pagesz - 1) & ~(pagesz - 1)); + if (munmap(addr, pagepool_end - addr) != 0) + warn("morepages: munmap %p", addr); + } + + offset = (long)pagepool_start - ((long)pagepool_start & ~(pagesz - 1)); + + if ((pagepool_start = mmap(0, n * pagesz, + PROT_READ|PROT_WRITE, + MAP_ANON|MAP_COPY, fd, 0)) == (caddr_t)-1) { + xprintf("Cannot map anonymous memory"); + return 0; + } + pagepool_end = pagepool_start + n * pagesz; + pagepool_start += offset; + +#ifdef NEED_DEV_ZERO + close(fd); +#endif + return n; +} + +/* + * Non-mallocing printf, for use by malloc itself. + */ +static void +xprintf(const char *fmt, ...) +{ + char buf[256]; + va_list ap; + + va_start(ap, fmt); + vsprintf(buf, fmt, ap); + (void)write(STDOUT_FILENO, buf, strlen(buf)); + va_end(ap); +} diff --git a/src/bin/rtld-elf/map_object.c b/src/bin/rtld-elf/map_object.c new file mode 100644 index 0000000..8d5f24a --- /dev/null +++ b/src/bin/rtld-elf/map_object.c @@ -0,0 +1,372 @@ +/*- + * Copyright 1996-1998 John D. Polstra. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD: src/libexec/rtld-elf/map_object.c,v 1.16 2005/02/27 12:55:40 dfr Exp $ + */ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "debug.h" +#include "rtld.h" + +static Elf_Ehdr *get_elf_header (int, const char *); +static int convert_prot(int); /* Elf flags -> mmap protection */ +static int convert_flags(int); /* Elf flags -> mmap flags */ + +/* + * Map a shared object into memory. The "fd" argument is a file descriptor, + * which must be open on the object and positioned at its beginning. + * The "path" argument is a pathname that is used only for error messages. + * + * The return value is a pointer to a newly-allocated Obj_Entry structure + * for the shared object. Returns NULL on failure. + */ +Obj_Entry * +map_object(int fd, const char *path, const struct stat *sb) +{ + Obj_Entry *obj; + Elf_Ehdr *hdr; + int i; + Elf_Phdr *phdr; + Elf_Phdr *phlimit; + Elf_Phdr **segs; + int nsegs; + Elf_Phdr *phdyn; + Elf_Phdr *phphdr; + Elf_Phdr *phinterp; + Elf_Phdr *phtls; + caddr_t mapbase; + size_t mapsize; + Elf_Off base_offset; + Elf_Addr base_vaddr; + Elf_Addr base_vlimit; + caddr_t base_addr; + Elf_Off data_offset; + Elf_Addr data_vaddr; + Elf_Addr data_vlimit; + caddr_t data_addr; + int data_prot; + int data_flags; + Elf_Addr clear_vaddr; + caddr_t clear_addr; + caddr_t clear_page; + size_t nclear; + Elf_Addr bss_vaddr; + Elf_Addr bss_vlimit; + caddr_t bss_addr; + + hdr = get_elf_header(fd, path); + if (hdr == NULL) + return (NULL); + + /* + * Scan the program header entries, and save key information. + * + * We rely on there being exactly two load segments, text and data, + * in that order. 
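+     * Every PT_LOAD segment must be page-aligned.  The first segment is
+     * already covered by the single mmap() that reserves the whole address
+     * range below, so the per-segment loop skips the remap for segment 0.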
+ */ + phdr = (Elf_Phdr *) ((char *)hdr + hdr->e_phoff); + phlimit = phdr + hdr->e_phnum; + nsegs = -1; + phdyn = phphdr = phinterp = phtls = NULL; + segs = alloca(sizeof(segs[0]) * hdr->e_phnum); + while (phdr < phlimit) { + switch (phdr->p_type) { + + case PT_INTERP: + phinterp = phdr; + break; + + case PT_LOAD: + segs[++nsegs] = phdr; + if (segs[nsegs]->p_align < PAGE_SIZE) { + _rtld_error("%s: PT_LOAD segment %d not page-aligned", + path, nsegs); + return NULL; + } + break; + + case PT_PHDR: + phphdr = phdr; + break; + + case PT_DYNAMIC: + phdyn = phdr; + break; + + case PT_TLS: + phtls = phdr; + break; + } + + ++phdr; + } + if (phdyn == NULL) { + _rtld_error("%s: object is not dynamically-linked", path); + return NULL; + } + + if (nsegs < 0) { + _rtld_error("%s: too few PT_LOAD segments", path); + return NULL; + } + + /* + * Map the entire address space of the object, to stake out our + * contiguous region, and to establish the base address for relocation. + */ + base_offset = trunc_page(segs[0]->p_offset); + base_vaddr = trunc_page(segs[0]->p_vaddr); + base_vlimit = round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz); + mapsize = base_vlimit - base_vaddr; + base_addr = hdr->e_type == ET_EXEC ? (caddr_t) base_vaddr : NULL; + + mapbase = mmap(base_addr, mapsize, convert_prot(segs[0]->p_flags), + convert_flags(segs[0]->p_flags), fd, base_offset); + if (mapbase == (caddr_t) -1) { + _rtld_error("%s: mmap of entire address space failed: %s", + path, strerror(errno)); + return NULL; + } + if (base_addr != NULL && mapbase != base_addr) { + _rtld_error("%s: mmap returned wrong address: wanted %p, got %p", + path, base_addr, mapbase); + munmap(mapbase, mapsize); + return NULL; + } + + for (i = 0; i <= nsegs; i++) { + /* Overlay the segment onto the proper region. */ + data_offset = trunc_page(segs[i]->p_offset); + data_vaddr = trunc_page(segs[i]->p_vaddr); + data_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_filesz); + data_addr = mapbase + (data_vaddr - base_vaddr); + data_prot = convert_prot(segs[i]->p_flags); + data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED; + /* Do not call mmap on the first segment - this is redundant */ + if (i && mmap(data_addr, data_vlimit - data_vaddr, data_prot, + data_flags, fd, data_offset) == (caddr_t) -1) { + _rtld_error("%s: mmap of data failed: %s", path, strerror(errno)); + return NULL; + } + + /* Clear any BSS in the last page of the segment. */ + clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz; + clear_addr = mapbase + (clear_vaddr - base_vaddr); + clear_page = mapbase + (trunc_page(clear_vaddr) - base_vaddr); + if ((nclear = data_vlimit - clear_vaddr) > 0) { + /* Make sure the end of the segment is writable */ + if ((data_prot & PROT_WRITE) == 0 && + -1 == mprotect(clear_page, PAGE_SIZE, data_prot|PROT_WRITE)) { + _rtld_error("%s: mprotect failed: %s", path, + strerror(errno)); + return NULL; + } + + memset(clear_addr, 0, nclear); + + /* Reset the data protection back */ + if ((data_prot & PROT_WRITE) == 0) + mprotect(clear_page, PAGE_SIZE, data_prot); + } + + /* Overlay the BSS segment onto the proper region. 
*/ + bss_vaddr = data_vlimit; + bss_vlimit = round_page(segs[i]->p_vaddr + segs[i]->p_memsz); + bss_addr = mapbase + (bss_vaddr - base_vaddr); + if (bss_vlimit > bss_vaddr) { /* There is something to do */ + if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot, + MAP_PRIVATE|MAP_FIXED|MAP_ANON, -1, 0) == (caddr_t) -1) { + _rtld_error("%s: mmap of bss failed: %s", path, + strerror(errno)); + return NULL; + } + } + } + + obj = obj_new(); + if (sb != NULL) { + obj->dev = sb->st_dev; + obj->ino = sb->st_ino; + } + obj->mapbase = mapbase; + obj->mapsize = mapsize; + obj->textsize = round_page(segs[0]->p_vaddr + segs[0]->p_memsz) - + base_vaddr; + obj->vaddrbase = base_vaddr; + obj->relocbase = mapbase - base_vaddr; + obj->dynamic = (const Elf_Dyn *) (obj->relocbase + phdyn->p_vaddr); + if (hdr->e_entry != 0) + obj->entry = (caddr_t) (obj->relocbase + hdr->e_entry); + if (phphdr != NULL) { + obj->phdr = (const Elf_Phdr *) (obj->relocbase + phphdr->p_vaddr); + obj->phsize = phphdr->p_memsz; + } + if (phinterp != NULL) + obj->interp = (const char *) (obj->relocbase + phinterp->p_vaddr); + if (phtls != NULL) { + tls_dtv_generation++; + obj->tlsindex = ++tls_max_index; + obj->tlssize = phtls->p_memsz; + obj->tlsalign = phtls->p_align; + obj->tlsinitsize = phtls->p_filesz; + obj->tlsinit = mapbase + phtls->p_vaddr; + } + return obj; +} + +static Elf_Ehdr * +get_elf_header (int fd, const char *path) +{ + static union { + Elf_Ehdr hdr; + char buf[PAGE_SIZE]; + } u; + ssize_t nbytes; + + if ((nbytes = read(fd, u.buf, PAGE_SIZE)) == -1) { + _rtld_error("%s: read error: %s", path, strerror(errno)); + return NULL; + } + + /* Make sure the file is valid */ + if (nbytes < (ssize_t)sizeof(Elf_Ehdr) || !IS_ELF(u.hdr)) { + _rtld_error("%s: invalid file format", path); + return NULL; + } + if (u.hdr.e_ident[EI_CLASS] != ELF_TARG_CLASS + || u.hdr.e_ident[EI_DATA] != ELF_TARG_DATA) { + _rtld_error("%s: unsupported file layout", path); + return NULL; + } + if (u.hdr.e_ident[EI_VERSION] != EV_CURRENT + || u.hdr.e_version != EV_CURRENT) { + _rtld_error("%s: unsupported file version", path); + return NULL; + } + if (u.hdr.e_type != ET_EXEC && u.hdr.e_type != ET_DYN) { + _rtld_error("%s: unsupported file type", path); + return NULL; + } + if (u.hdr.e_machine != ELF_TARG_MACH) { + _rtld_error("%s: unsupported machine", path); + return NULL; + } + + /* + * We rely on the program header being in the first page. This is + * not strictly required by the ABI specification, but it seems to + * always true in practice. And, it simplifies things considerably. 
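+     * The check below enforces that: the entire program header table must
+     * fit inside the PAGE_SIZE bytes that were just read.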
+ */ + if (u.hdr.e_phentsize != sizeof(Elf_Phdr)) { + _rtld_error( + "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path); + return NULL; + } + if (u.hdr.e_phoff + u.hdr.e_phnum * sizeof(Elf_Phdr) > (size_t)nbytes) { + _rtld_error("%s: program header too large", path); + return NULL; + } + + return (&u.hdr); +} + +void +obj_free(Obj_Entry *obj) +{ + Objlist_Entry *elm; + + if (obj->tls_done) { + free_tls_offset(obj); + } + free(obj->path); + while (obj->needed != NULL) { + Needed_Entry *needed = obj->needed; + obj->needed = needed->next; + free(needed); + } + while (!STAILQ_EMPTY(&obj->dldags)) { + elm = STAILQ_FIRST(&obj->dldags); + STAILQ_REMOVE_HEAD(&obj->dldags, link); + free(elm); + } + while (!STAILQ_EMPTY(&obj->dagmembers)) { + elm = STAILQ_FIRST(&obj->dagmembers); + STAILQ_REMOVE_HEAD(&obj->dagmembers, link); + free(elm); + } + free(obj->origin_path); + free(obj->priv); + free(obj); +} + +Obj_Entry * +obj_new(void) +{ + Obj_Entry *obj; + + obj = CNEW(Obj_Entry); + STAILQ_INIT(&obj->dldags); + STAILQ_INIT(&obj->dagmembers); + return obj; +} + +/* + * Given a set of ELF protection flags, return the corresponding protection + * flags for MMAP. + */ +static int +convert_prot(int elfflags) +{ + int prot = 0; + if (elfflags & PF_R) + prot |= PROT_READ; + if (elfflags & PF_W) + prot |= PROT_WRITE; + if (elfflags & PF_X) + prot |= PROT_EXEC; + return prot; +} + +static int +convert_flags(int elfflags) +{ + int flags = MAP_PRIVATE; /* All mappings are private */ + + /* + * Readonly mappings are marked "MAP_NOCORE", because they can be + * reconstructed by a debugger. + */ + if (!(elfflags & PF_W)) + flags |= MAP_NOCORE; + return flags; +} diff --git a/src/bin/rtld-elf/reloc.c b/src/bin/rtld-elf/reloc.c new file mode 100644 index 0000000..ba9be47 --- /dev/null +++ b/src/bin/rtld-elf/reloc.c @@ -0,0 +1,367 @@ +/*- + * Copyright 1996, 1997, 1998, 1999 John D. Polstra. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $FreeBSD: src/libexec/rtld-elf/i386/reloc.c,v 1.18 2005/06/29 23:15:36 peter Exp $ + */ + +/* + * Dynamic linker for ELF. + * + * John Polstra . 
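+ *
+ * This file holds the i386-specific relocation handling: COPY and non-PLT
+ * relocations, PLT/jump-slot fixups, and the TLS __tls_get_addr entry
+ * points.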
+ */ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "debug.h" +#include "rtld.h" + +/* + * Process the special R_386_COPY relocations in the main program. These + * copy data from a shared object into a region in the main program's BSS + * segment. + * + * Returns 0 on success, -1 on failure. + */ +int +do_copy_relocations(Obj_Entry *dstobj) +{ + const Elf_Rel *rellim; + const Elf_Rel *rel; + + assert(dstobj->mainprog); /* COPY relocations are invalid elsewhere */ + + rellim = (const Elf_Rel *) ((caddr_t) dstobj->rel + dstobj->relsize); + for (rel = dstobj->rel; rel < rellim; rel++) { + if (ELF_R_TYPE(rel->r_info) == R_386_COPY) { + void *dstaddr; + const Elf_Sym *dstsym; + const char *name; + unsigned long hash; + size_t size; + const void *srcaddr; + const Elf_Sym *srcsym; + Obj_Entry *srcobj; + + dstaddr = (void *) (dstobj->relocbase + rel->r_offset); + dstsym = dstobj->symtab + ELF_R_SYM(rel->r_info); + name = dstobj->strtab + dstsym->st_name; + hash = elf_hash(name); + size = dstsym->st_size; + + for (srcobj = dstobj->next; srcobj != NULL; srcobj = srcobj->next) + if ((srcsym = symlook_obj(name, hash, srcobj, false)) != NULL) + break; + + if (srcobj == NULL) { + _rtld_error("Undefined symbol \"%s\" referenced from COPY" + " relocation in %s", name, dstobj->path); + return -1; + } + + srcaddr = (const void *) (srcobj->relocbase + srcsym->st_value); + memcpy(dstaddr, srcaddr, size); + } + } + + return 0; +} + +/* Initialize the special GOT entries. */ +void +init_pltgot(Obj_Entry *obj) +{ + if (obj->pltgot != NULL) { + obj->pltgot[1] = (Elf_Addr) obj; + obj->pltgot[2] = (Elf_Addr) &_rtld_bind_start; + } +} + +/* Process the non-PLT relocations. */ +int +reloc_non_plt(Obj_Entry *obj, Obj_Entry *obj_rtld) +{ + const Elf_Rel *rellim; + const Elf_Rel *rel; + SymCache *cache; + int bytes = obj->nchains * sizeof(SymCache); + int r = -1; + + /* + * The dynamic loader may be called from a thread, we have + * limited amounts of stack available so we cannot use alloca(). + */ + cache = mmap(NULL, bytes, PROT_READ|PROT_WRITE, MAP_ANON, -1, 0); + if (cache == MAP_FAILED) + cache = NULL; + + rellim = (const Elf_Rel *) ((caddr_t) obj->rel + obj->relsize); + for (rel = obj->rel; rel < rellim; rel++) { + Elf_Addr *where = (Elf_Addr *) (obj->relocbase + rel->r_offset); + + switch (ELF_R_TYPE(rel->r_info)) { + + case R_386_NONE: + break; + + case R_386_32: + { + const Elf_Sym *def; + const Obj_Entry *defobj; + + def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, + false, cache); + + if (def == NULL) + goto done; + *where += (Elf_Addr) (defobj->relocbase + def->st_value); + } + break; + + case R_386_PC32: + /* + * I don't think the dynamic linker should ever see this + * type of relocation. But the binutils-2.6 tools sometimes + * generate it. + */ + { + const Elf_Sym *def; + const Obj_Entry *defobj; +//dbg("A.1"); + def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, + false, cache); +//dbg("A.2"); + if (def == NULL) + goto done; + + *where += + (Elf_Addr) (defobj->relocbase + def->st_value) - + (Elf_Addr) where; + } + break; + + case R_386_COPY: + /* + * These are deferred until all other relocations have + * been done. All we do here is make sure that the COPY + * relocation is not in a shared library. They are allowed + * only in executable files. 
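+	     * The copying itself is done later by do_copy_relocations(),
+	     * which rtld runs on the main program once the other
+	     * relocations are finished.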
+ */ +//dbg("386_COPY"); + if (!obj->mainprog) { + _rtld_error("%s: Unexpected R_386_COPY relocation" + " in shared library", obj->path); + goto done; + } + break; + + case R_386_GLOB_DAT: + { +//dbg("386_GD"); + const Elf_Sym *def; + const Obj_Entry *defobj; + + def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, + false, cache); + if (def == NULL) + goto done; + + *where = (Elf_Addr) (defobj->relocbase + def->st_value); + } + break; + + case R_386_RELATIVE: + *where += (Elf_Addr) obj->relocbase; + break; + + case R_386_TLS_TPOFF: + { + const Elf_Sym *def; + const Obj_Entry *defobj; + + def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, + false, cache); + if (def == NULL) + goto done; + + /* + * We lazily allocate offsets for static TLS as we + * see the first relocation that references the + * TLS block. This allows us to support (small + * amounts of) static TLS in dynamically loaded + * modules. If we run out of space, we generate an + * error. + */ + if (!defobj->tls_done) { + if (!allocate_tls_offset((Obj_Entry*) defobj)) { + _rtld_error("%s: No space available for static " + "Thread Local Storage", obj->path); + goto done; + } + } + + *where += (Elf_Addr) (def->st_value - defobj->tlsoffset); + } + break; + + case R_386_TLS_DTPMOD32: + { + const Elf_Sym *def; + const Obj_Entry *defobj; + + def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, + false, cache); + if (def == NULL) + goto done; + + *where += (Elf_Addr) defobj->tlsindex; + } + break; + + case R_386_TLS_DTPOFF32: + { + const Elf_Sym *def; + const Obj_Entry *defobj; + + def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, + false, cache); + if (def == NULL) + goto done; + + *where += (Elf_Addr) def->st_value; + } + break; + + default: + _rtld_error("%s: Unsupported relocation type %d" + " in non-PLT relocations\n", obj->path, + ELF_R_TYPE(rel->r_info)); + goto done; + } + } + r = 0; +done: + if (cache) + munmap(cache, bytes); + return(r); +} + +/* Process the PLT relocations. */ +int +reloc_plt(Obj_Entry *obj) +{ + const Elf_Rel *rellim; + const Elf_Rel *rel; + + rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize); + for (rel = obj->pltrel; rel < rellim; rel++) { + Elf_Addr *where; + + assert(ELF_R_TYPE(rel->r_info) == R_386_JMP_SLOT); + + /* Relocate the GOT slot pointing into the PLT. */ + where = (Elf_Addr *)(obj->relocbase + rel->r_offset); + *where += (Elf_Addr)obj->relocbase; + } + return 0; +} + +/* Relocate the jump slots in an object. */ +int +reloc_jmpslots(Obj_Entry *obj) +{ + const Elf_Rel *rellim; + const Elf_Rel *rel; + + if (obj->jmpslots_done) + return 0; + rellim = (const Elf_Rel *)((char *)obj->pltrel + obj->pltrelsize); + for (rel = obj->pltrel; rel < rellim; rel++) { + Elf_Addr *where, target; + const Elf_Sym *def; + const Obj_Entry *defobj; + + assert(ELF_R_TYPE(rel->r_info) == R_386_JMP_SLOT); + where = (Elf_Addr *)(obj->relocbase + rel->r_offset); + def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL); + if (def == NULL) + return -1; + target = (Elf_Addr)(defobj->relocbase + def->st_value); + reloc_jmpslot(where, target, defobj, obj, rel); + } + obj->jmpslots_done = true; + return 0; +} + +void +allocate_initial_tls(Obj_Entry *objs) +{ + void* tls; + + /* + * Fix the size of the static TLS block by using the maximum + * offset allocated so far and adding a bit for dynamic modules to + * use. 
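+     * The initial thread's block is then allocated and installed as the
+     * %gs segment base, which is where ___tls_get_addr() finds the dtv.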
+ */ + tls_static_space = tls_last_offset + RTLD_STATIC_TLS_EXTRA; + tls = allocate_tls(objs, NULL, 2*sizeof(Elf_Addr), sizeof(Elf_Addr)); + i386_set_gsbase(tls); +} + +/* GNU ABI */ +__attribute__((__regparm__(1))) +void *___tls_get_addr(tls_index *ti) +{ + Elf_Addr** segbase; + Elf_Addr* dtv; + + __asm __volatile("movl %%gs:0, %0" : "=r" (segbase)); + dtv = segbase[1]; + + return tls_get_addr_common(&segbase[1], ti->ti_module, ti->ti_offset); +} + +/* Sun ABI */ +void *__tls_get_addr(tls_index *ti) +{ + Elf_Addr** segbase; + Elf_Addr* dtv; + + __asm __volatile("movl %%gs:0, %0" : "=r" (segbase)); + dtv = segbase[1]; + + return tls_get_addr_common(&segbase[1], ti->ti_module, ti->ti_offset); +} diff --git a/src/bin/rtld-elf/rtld.c b/src/bin/rtld-elf/rtld.c new file mode 100644 index 0000000..7281c04 --- /dev/null +++ b/src/bin/rtld-elf/rtld.c @@ -0,0 +1,2840 @@ +/*- + * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra. + * Copyright 2003 Alexander Kabaev . + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * $Id$ + */ + +/* + * Dynamic linker for ELF. + * + * John Polstra . + */ + +#ifndef __GNUC__ +#error "GCC is needed to compile this file" +#endif + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "debug.h" +#include "rtld.h" +#include "libmap.h" +#include "rtld_tls.h" + +#ifndef COMPAT_32BIT +#define PATH_RTLD "/libexec/ld-elf.so.1" +#else +#define PATH_RTLD "/libexec/ld-elf32.so.1" +#endif + +/* Types. */ +typedef void (*func_ptr_type)(); +typedef void * (*path_enum_proc) (const char *path, size_t len, void *arg); + +/* + * This structure provides a reentrant way to keep a list of objects and + * check which ones have already been processed in some way. + */ +typedef struct Struct_DoneList { + const Obj_Entry **objs; /* Array of object pointers */ + unsigned int num_alloc; /* Allocated size of the array */ + unsigned int num_used; /* Number of array slots used */ +} DoneList; + +/* + * Function declarations. 
+ */ +static const char *basename(const char *); +static void die(void); +static void digest_dynamic(Obj_Entry *, int); +static Obj_Entry *digest_phdr(const Elf_Phdr *, int, caddr_t, const char *); +static Obj_Entry *dlcheck(void *); +static int do_search_info(const Obj_Entry *obj, int, struct dl_serinfo *); +static bool donelist_check(DoneList *, const Obj_Entry *); +static void errmsg_restore(char *); +static char *errmsg_save(void); +static void *fill_search_info(const char *, size_t, void *); +static char *find_library(const char *, const Obj_Entry *); +static const char *gethints(void); +static void init_dag(Obj_Entry *); +static void init_dag1(Obj_Entry *root, Obj_Entry *obj, DoneList *); +static void init_rtld(caddr_t); +static void initlist_add_neededs(Needed_Entry *needed, Objlist *list); +static void initlist_add_objects(Obj_Entry *obj, Obj_Entry **tail, + Objlist *list); +static bool is_exported(const Elf_Sym *); +static void linkmap_add(Obj_Entry *); +static void linkmap_delete(Obj_Entry *); +static int load_needed_objects(Obj_Entry *); +static int load_preload_objects(void); +static Obj_Entry *load_object(char *); +static Obj_Entry *obj_from_addr(const void *); +static void objlist_call_fini(Objlist *); +static void objlist_call_init(Objlist *); +static void objlist_clear(Objlist *); +static Objlist_Entry *objlist_find(Objlist *, const Obj_Entry *); +static void objlist_init(Objlist *); +static void objlist_push_head(Objlist *, Obj_Entry *); +static void objlist_push_tail(Objlist *, Obj_Entry *); +static void objlist_remove(Objlist *, Obj_Entry *); +static void objlist_remove_unref(Objlist *); +static void *path_enumerate(const char *, path_enum_proc, void *); +static int relocate_objects(Obj_Entry *, bool, Obj_Entry *); +static int rtld_dirname(const char *, char *); +static void rtld_exit(void); +static char *search_library_path(const char *, const char *); +static const void **get_program_var_addr(const char *name); +static void set_program_var(const char *, const void *); +static const Elf_Sym *symlook_default(const char *, unsigned long hash, + const Obj_Entry *refobj, const Obj_Entry **defobj_out, bool in_plt); +static const Elf_Sym *symlook_list(const char *, unsigned long, + Objlist *, const Obj_Entry **, bool in_plt, DoneList *); +static void trace_loaded_objects(Obj_Entry *obj); +static void unlink_object(Obj_Entry *); +static void unload_object(Obj_Entry *); +static void unref_dag(Obj_Entry *); +static void ref_dag(Obj_Entry *); + +void r_debug_state(struct r_debug*, struct link_map*); + +/* + * Data declarations. 
+ */ +static char *error_message; /* Message for dlerror(), or NULL */ +struct r_debug r_debug; /* for GDB; */ +static bool libmap_disable; /* Disable libmap */ +static char *libmap_override; /* Maps to use in addition to libmap.conf */ +static bool trust; /* False for setuid and setgid programs */ +static bool dangerous_ld_env; /* True if environment variables have been + used to affect the libraries loaded */ +static char *ld_bind_now; /* Environment variable for immediate binding */ +static char *ld_debug; /* Environment variable for debugging */ +static char *ld_library_path; /* Environment variable for search path */ +static char *ld_preload; /* Environment variable for libraries to + load first */ +static char *ld_tracing; /* Called from ldd to print libs */ +static Obj_Entry *obj_list; /* Head of linked list of shared objects */ +static Obj_Entry **obj_tail; /* Link field of last object in list */ +static Obj_Entry *obj_main; /* The main program shared object */ +static Obj_Entry obj_rtld; /* The dynamic linker shared object */ +static unsigned int obj_count; /* Number of objects in obj_list */ + +static Objlist list_global = /* Objects dlopened with RTLD_GLOBAL */ + STAILQ_HEAD_INITIALIZER(list_global); +static Objlist list_main = /* Objects loaded at program startup */ + STAILQ_HEAD_INITIALIZER(list_main); +static Objlist list_fini = /* Objects needing fini() calls */ + STAILQ_HEAD_INITIALIZER(list_fini); + +static Elf_Sym sym_zero; /* For resolving undefined weak refs. */ + +#define GDB_STATE(s,m) r_debug.r_state = s; r_debug_state(&r_debug,m); + +extern Elf_Dyn _DYNAMIC; +#pragma weak _DYNAMIC +#ifndef RTLD_IS_DYNAMIC +#define RTLD_IS_DYNAMIC() (&_DYNAMIC != NULL) +#endif + +/* + * These are the functions the dynamic linker exports to application + * programs. They are the only symbols the dynamic linker is willing + * to export from itself. + */ +static func_ptr_type exports[] = { + (func_ptr_type) &_rtld_error, + (func_ptr_type) &dlclose, + (func_ptr_type) &dlerror, + (func_ptr_type) &dlopen, + (func_ptr_type) &dlsym, + (func_ptr_type) &dladdr, + (func_ptr_type) &dllockinit, + (func_ptr_type) &dlinfo, + (func_ptr_type) &_rtld_thread_init, +#ifdef __i386__ + (func_ptr_type) &___tls_get_addr, +#endif + (func_ptr_type) &__tls_get_addr, + (func_ptr_type) &_rtld_allocate_tls, + (func_ptr_type) &_rtld_free_tls, + NULL +}; + +/* + * Global declarations normally provided by crt1. The dynamic linker is + * not built with crt1, so we have to provide them ourselves. + */ +char *__progname; +char **environ; + +/* + * Globals to control TLS allocation. + */ +size_t tls_last_offset; /* Static TLS offset of last module */ +size_t tls_last_size; /* Static TLS size of last module */ +size_t tls_static_space; /* Static TLS space allocated */ +int tls_dtv_generation = 1; /* Used to detect when dtv size changes */ +int tls_max_index = 1; /* Largest module index allocated */ + +/* + * Fill in a DoneList with an allocation large enough to hold all of + * the currently-loaded objects. Keep this as a macro since it calls + * alloca and we want that to occur within the scope of the caller. + */ +#define donelist_init(dlp) \ + ((dlp)->objs = alloca(obj_count * sizeof (dlp)->objs[0]), \ + assert((dlp)->objs != NULL), \ + (dlp)->num_alloc = obj_count, \ + (dlp)->num_used = 0) + +/* + * Main entry point for dynamic linking. The first argument is the + * stack pointer. The stack is expected to be laid out as described + * in the SVR4 ABI specification, Intel 386 Processor Supplement. 
+ * Specifically, the stack pointer points to a word containing + * ARGC. Following that in the stack is a null-terminated sequence + * of pointers to argument strings. Then comes a null-terminated + * sequence of pointers to environment strings. Finally, there is a + * sequence of "auxiliary vector" entries. + * + * The second argument points to a place to store the dynamic linker's + * exit procedure pointer and the third to a place to store the main + * program's object. + * + * The return value is the main program's entry point. + */ +func_ptr_type +_rtld(Elf_Addr *sp, func_ptr_type *exit_proc, Obj_Entry **objp) +{ + Elf_Auxinfo *aux_info[AT_COUNT]; + int i; + int argc; + char **argv; + char **env; + Elf_Auxinfo *aux; + Elf_Auxinfo *auxp; + const char *argv0; + Objlist_Entry *entry; + Obj_Entry *obj; + Obj_Entry **preload_tail; + Objlist initlist; + int lockstate; + + /* + * On entry, the dynamic linker itself has not been relocated yet. + * Be very careful not to reference any global data until after + * init_rtld has returned. It is OK to reference file-scope statics + * and string constants, and to call static and global functions. + */ + + /* Find the auxiliary vector on the stack. */ + argc = *sp++; + argv = (char **) sp; + + sp += argc + 1; /* Skip over arguments and NULL terminator */ + env = (char **) sp; + + while (*sp++ != 0); /* Skip over environment, and NULL terminator */ + + aux = (Elf_Auxinfo *) sp; + + + /* Digest the auxiliary vector. */ + for (i = 0; i < AT_COUNT; i++) + aux_info[i] = NULL; + + for (auxp = aux; auxp->a_type != AT_NULL; auxp++) { + if (auxp->a_type < AT_COUNT) { + aux_info[auxp->a_type] = auxp; + } + } + + /* Initialize and relocate ourselves. */ + assert(aux_info[AT_BASE] != NULL); + + init_rtld((caddr_t) aux_info[AT_BASE]->a_un.a_ptr); + + __progname = obj_rtld.path; + argv0 = argv[0] != NULL ? argv[0] : "(null)"; + environ = env; + + trust = !issetugid(); + + ld_bind_now = getenv(LD_ "BIND_NOW"); + if (trust) { + ld_debug = getenv(LD_ "DEBUG"); + libmap_disable = getenv(LD_ "LIBMAP_DISABLE") != NULL; + libmap_override = getenv(LD_ "LIBMAP"); + ld_library_path = "/lib";//getenv(LD_ "LIBRARY_PATH"); + ld_preload = getenv(LD_ "PRELOAD"); + dangerous_ld_env = libmap_disable || (libmap_override != NULL) || + (ld_library_path != NULL) || (ld_preload != NULL); + } else + dangerous_ld_env = 0; + ld_tracing = getenv(LD_ "TRACE_LOADED_OBJECTS"); + + + if (ld_debug != NULL && *ld_debug != '\0') + debug = 1; + + dbg("%s is initialized, base address = %p", __progname, + (caddr_t) aux_info[AT_BASE]->a_un.a_ptr); + dbg("RTLD dynamic = %p", obj_rtld.dynamic); + dbg("RTLD pltgot = %p", obj_rtld.pltgot); + + /* + * Load the main program, or process its program header if it is + * already loaded. + */ + if (aux_info[AT_EXECFD] != NULL) { /* Load the main program. */ + int fd = aux_info[AT_EXECFD]->a_un.a_val; + dbg("loading main program"); + obj_main = map_object(fd, argv0, NULL); + close(fd); + if (obj_main == NULL) + die(); + } else { /* Main program already loaded. 
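+				 * The kernel has already mapped the executable; below we
+				 * rebuild its Obj_Entry from the program headers and entry
+				 * point passed in the aux vector.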
*/ + const Elf_Phdr *phdr; + int phnum; + caddr_t entry; + + dbg("processing main program's program header"); + assert(aux_info[AT_PHDR] != NULL); + phdr = (const Elf_Phdr *) aux_info[AT_PHDR]->a_un.a_ptr; + assert(aux_info[AT_PHNUM] != NULL); + phnum = aux_info[AT_PHNUM]->a_un.a_val; + assert(aux_info[AT_PHENT] != NULL); + assert(aux_info[AT_PHENT]->a_un.a_val == sizeof(Elf_Phdr)); + assert(aux_info[AT_ENTRY] != NULL); + entry = (caddr_t) aux_info[AT_ENTRY]->a_un.a_ptr; + if ((obj_main = digest_phdr(phdr, phnum, entry, argv0)) == NULL) + die(); + } + + obj_main->path = xstrdup(argv0); + obj_main->mainprog = true; + + /* + * Get the actual dynamic linker pathname from the executable if + * possible. (It should always be possible.) That ensures that + * gdb will find the right dynamic linker even if a non-standard + * one is being used. + */ + if (obj_main->interp != NULL && + strcmp(obj_main->interp, obj_rtld.path) != 0) { + free(obj_rtld.path); + obj_rtld.path = xstrdup(obj_main->interp); + __progname = obj_rtld.path; + } + + + digest_dynamic(obj_main, 0); + + linkmap_add(obj_main); + linkmap_add(&obj_rtld); + + /* Link the main program into the list of objects. */ + *obj_tail = obj_main; + obj_tail = &obj_main->next; + obj_count++; + /* Make sure we don't call the main program's init and fini functions. */ + obj_main->init = obj_main->fini = (Elf_Addr)NULL; + + /* Initialize a fake symbol for resolving undefined weak references. */ + sym_zero.st_info = ELF_ST_INFO(STB_GLOBAL, STT_NOTYPE); + sym_zero.st_shndx = SHN_UNDEF; + + if (!libmap_disable) + libmap_disable = (bool)lm_init(libmap_override); + + dbg("loading LD_PRELOAD libraries"); + if (load_preload_objects() == -1) + die(); + preload_tail = obj_tail; + + dbg("loading needed objects"); + if (load_needed_objects(obj_main) == -1) + die(); + + /* Make a list of all objects loaded at startup. */ + for (obj = obj_list; obj != NULL; obj = obj->next) { + objlist_push_tail(&list_main, obj); + obj->refcount++; + } + + if (ld_tracing) { /* We're done */ + trace_loaded_objects(obj_main); + exit(0); + } + + if (getenv(LD_ "DUMP_REL_PRE") != NULL) { + dump_relocations(obj_main); + exit (0); + } + + /* setup TLS for main thread */ + dbg("initializing initial thread local storage"); + STAILQ_FOREACH(entry, &list_main, link) { + /* + * Allocate all the initial objects out of the static TLS + * block even if they didn't ask for it. + */ + allocate_tls_offset(entry->obj); + } + + allocate_initial_tls(obj_list); + +debug = 1; + if (relocate_objects(obj_main, + ld_bind_now != NULL && *ld_bind_now != '\0', &obj_rtld) == -1) + die(); + + dbg("doing copy relocations"); + if (do_copy_relocations(obj_main) == -1) + die(); + + if (getenv(LD_ "DUMP_REL_POST") != NULL) { + dump_relocations(obj_main); + exit (0); + } + + dbg("initializing key program variables"); + set_program_var("__progname", argv[0] != NULL ? basename(argv[0]) : ""); + set_program_var("environ", env); + + dbg("initializing thread locks"); + lockdflt_init(); + + /* Make a list of init functions to call. */ + objlist_init(&initlist); + initlist_add_objects(obj_list, preload_tail, &initlist); + + r_debug_state(NULL, &obj_main->linkmap); /* say hello to gdb! */ + + objlist_call_init(&initlist); + lockstate = wlock_acquire(rtld_bind_lock); + objlist_clear(&initlist); + wlock_release(rtld_bind_lock, lockstate); + + dbg("transferring control to program entry point = %p", obj_main->entry); + + /* Return the exit procedure and the program entry point. 
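+     * (The assembly stub in rtld_start.S is assumed to hand the exit
+     * procedure to the program's startup code, which registers it with
+     * atexit() before jumping to the entry point.)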
*/ + *exit_proc = rtld_exit; + *objp = obj_main; + return (func_ptr_type) obj_main->entry; +} + +Elf_Addr +_rtld_bind(Obj_Entry *obj, Elf_Size reloff) +{ + const Elf_Rel *rel; + const Elf_Sym *def; + const Obj_Entry *defobj; + Elf_Addr *where; + Elf_Addr target; + int lockstate; + + lockstate = rlock_acquire(rtld_bind_lock); + if (obj->pltrel) + rel = (const Elf_Rel *) ((caddr_t) obj->pltrel + reloff); + else + rel = (const Elf_Rel *) ((caddr_t) obj->pltrela + reloff); + + where = (Elf_Addr *) (obj->relocbase + rel->r_offset); + def = find_symdef(ELF_R_SYM(rel->r_info), obj, &defobj, true, NULL); + if (def == NULL) + die(); + + target = (Elf_Addr)(defobj->relocbase + def->st_value); + + dbg("\"%s\" in \"%s\" ==> %p in \"%s\"", + defobj->strtab + def->st_name, basename(obj->path), + (void *)target, basename(defobj->path)); + + /* + * Write the new contents for the jmpslot. Note that depending on + * architecture, the value which we need to return back to the + * lazy binding trampoline may or may not be the target + * address. The value returned from reloc_jmpslot() is the value + * that the trampoline needs. + */ + target = reloc_jmpslot(where, target, defobj, obj, rel); + rlock_release(rtld_bind_lock, lockstate); + return target; +} + +/* + * Error reporting function. Use it like printf. If formats the message + * into a buffer, and sets things up so that the next call to dlerror() + * will return the message. + */ +void +_rtld_error(const char *fmt, ...) +{ + static char buf[512]; + va_list ap; + + va_start(ap, fmt); + vsnprintf(buf, sizeof buf, fmt, ap); + error_message = buf; + va_end(ap); +} + +/* + * Return a dynamically-allocated copy of the current error message, if any. + */ +static char * +errmsg_save(void) +{ + return error_message == NULL ? NULL : xstrdup(error_message); +} + +/* + * Restore the current error message from a copy which was previously saved + * by errmsg_save(). The copy is freed. + */ +static void +errmsg_restore(char *saved_msg) +{ + if (saved_msg == NULL) + error_message = NULL; + else { + _rtld_error("%s", saved_msg); + free(saved_msg); + } +} + +static const char * +basename(const char *name) +{ + const char *p = strrchr(name, '/'); + return p != NULL ? p + 1 : name; +} + +static void +die(void) +{ + const char *msg = dlerror(); + + if (msg == NULL) + msg = "Fatal error"; + errx(1, "%s", msg); +} + +/* + * Process a shared object's DYNAMIC section, and save the important + * information in its Obj_Entry structure. 
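+ * This records the symbol, string, and hash tables, the relocation
+ * sections, the PLT/GOT information, and the list of DT_NEEDED
+ * libraries, all of which are used by the later loading and
+ * relocation passes.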
+ */ +static void +digest_dynamic(Obj_Entry *obj, int early) +{ + const Elf_Dyn *dynp; + Needed_Entry **needed_tail = &obj->needed; + const Elf_Dyn *dyn_rpath = NULL; + int plttype = DT_REL; + + obj->bind_now = false; + for (dynp = obj->dynamic; dynp->d_tag != DT_NULL; dynp++) { + switch (dynp->d_tag) { + + case DT_REL: + obj->rel = (const Elf_Rel *) (obj->relocbase + dynp->d_un.d_ptr); + break; + + case DT_RELSZ: + obj->relsize = dynp->d_un.d_val; + break; + + case DT_RELENT: + assert(dynp->d_un.d_val == sizeof(Elf_Rel)); + break; + + case DT_JMPREL: + obj->pltrel = (const Elf_Rel *) + (obj->relocbase + dynp->d_un.d_ptr); + break; + + case DT_PLTRELSZ: + obj->pltrelsize = dynp->d_un.d_val; + break; + + case DT_RELA: + obj->rela = (const Elf_Rela *) (obj->relocbase + dynp->d_un.d_ptr); + break; + + case DT_RELASZ: + obj->relasize = dynp->d_un.d_val; + break; + + case DT_RELAENT: + assert(dynp->d_un.d_val == sizeof(Elf_Rela)); + break; + + case DT_PLTREL: + plttype = dynp->d_un.d_val; + assert(dynp->d_un.d_val == DT_REL || plttype == DT_RELA); + break; + + case DT_SYMTAB: + obj->symtab = (const Elf_Sym *) + (obj->relocbase + dynp->d_un.d_ptr); + break; + + case DT_SYMENT: + assert(dynp->d_un.d_val == sizeof(Elf_Sym)); + break; + + case DT_STRTAB: + obj->strtab = (const char *) (obj->relocbase + dynp->d_un.d_ptr); + break; + + case DT_STRSZ: + obj->strsize = dynp->d_un.d_val; + break; + + case DT_HASH: + { + const Elf_Hashelt *hashtab = (const Elf_Hashelt *) + (obj->relocbase + dynp->d_un.d_ptr); + obj->nbuckets = hashtab[0]; + obj->nchains = hashtab[1]; + obj->buckets = hashtab + 2; + obj->chains = obj->buckets + obj->nbuckets; + } + break; + + case DT_NEEDED: + if (!obj->rtld) { + Needed_Entry *nep = NEW(Needed_Entry); + nep->name = dynp->d_un.d_val; + nep->obj = NULL; + nep->next = NULL; + + *needed_tail = nep; + needed_tail = &nep->next; + } + break; + + case DT_PLTGOT: + obj->pltgot = (Elf_Addr *) (obj->relocbase + dynp->d_un.d_ptr); + break; + + case DT_TEXTREL: + obj->textrel = true; + break; + + case DT_SYMBOLIC: + obj->symbolic = true; + break; + + case DT_RPATH: + case DT_RUNPATH: /* XXX: process separately */ + /* + * We have to wait until later to process this, because we + * might not have gotten the address of the string table yet. + */ + dyn_rpath = dynp; + break; + + case DT_SONAME: + /* Not used by the dynamic linker. 
*/ + break; + + case DT_INIT: + obj->init = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr); + break; + + case DT_FINI: + obj->fini = (Elf_Addr) (obj->relocbase + dynp->d_un.d_ptr); + break; + + case DT_DEBUG: + /* XXX - not implemented yet */ + if (!early) + dbg("Filling in DT_DEBUG entry"); + ((Elf_Dyn*)dynp)->d_un.d_ptr = (Elf_Addr) &r_debug; + break; + + case DT_FLAGS: + if (dynp->d_un.d_val & DF_ORIGIN) { + obj->origin_path = xmalloc(PATH_MAX); + if (rtld_dirname(obj->path, obj->origin_path) == -1) + die(); + } + if (dynp->d_un.d_val & DF_SYMBOLIC) + obj->symbolic = true; + if (dynp->d_un.d_val & DF_TEXTREL) + obj->textrel = true; + if (dynp->d_un.d_val & DF_BIND_NOW) + obj->bind_now = true; + if (dynp->d_un.d_val & DF_STATIC_TLS) + ; + break; + + default: + if (!early) { + dbg("Ignoring d_tag %ld = %#lx", (long)dynp->d_tag, + (long)dynp->d_tag); + } + break; + } + } + + obj->traced = false; + + if (plttype == DT_RELA) { + obj->pltrela = (const Elf_Rela *) obj->pltrel; + obj->pltrel = NULL; + obj->pltrelasize = obj->pltrelsize; + obj->pltrelsize = 0; + } + + if (dyn_rpath != NULL) + obj->rpath = obj->strtab + dyn_rpath->d_un.d_val; +} + +/* + * Process a shared object's program header. This is used only for the + * main program, when the kernel has already loaded the main program + * into memory before calling the dynamic linker. It creates and + * returns an Obj_Entry structure. + */ +static Obj_Entry * +digest_phdr(const Elf_Phdr *phdr, int phnum, caddr_t entry, const char *path) +{ + Obj_Entry *obj; + const Elf_Phdr *phlimit = phdr + phnum; + const Elf_Phdr *ph; + int nsegs = 0; + + obj = obj_new(); + for (ph = phdr; ph < phlimit; ph++) { + switch (ph->p_type) { + + case PT_PHDR: + if ((const Elf_Phdr *)ph->p_vaddr != phdr) { + _rtld_error("%s: invalid PT_PHDR", path); + return NULL; + } + obj->phdr = (const Elf_Phdr *) ph->p_vaddr; + obj->phsize = ph->p_memsz; + break; + + case PT_INTERP: + obj->interp = (const char *) ph->p_vaddr; + break; + + case PT_LOAD: + if (nsegs == 0) { /* First load segment */ + obj->vaddrbase = trunc_page(ph->p_vaddr); + obj->mapbase = (caddr_t) obj->vaddrbase; + obj->relocbase = obj->mapbase - obj->vaddrbase; + obj->textsize = round_page(ph->p_vaddr + ph->p_memsz) - + obj->vaddrbase; + } else { /* Last load segment */ + obj->mapsize = round_page(ph->p_vaddr + ph->p_memsz) - + obj->vaddrbase; + } + nsegs++; + break; + + case PT_DYNAMIC: + obj->dynamic = (const Elf_Dyn *) ph->p_vaddr; + break; + + case PT_TLS: + obj->tlsindex = 1; + obj->tlssize = ph->p_memsz; + obj->tlsalign = ph->p_align; + obj->tlsinitsize = ph->p_filesz; + obj->tlsinit = (void*) ph->p_vaddr; + break; + } + } + if (nsegs < 1) { + _rtld_error("%s: too few PT_LOAD segments", path); + return NULL; + } + + obj->entry = entry; + return obj; +} + +static Obj_Entry * +dlcheck(void *handle) +{ + Obj_Entry *obj; + + for (obj = obj_list; obj != NULL; obj = obj->next) + if (obj == (Obj_Entry *) handle) + break; + + if (obj == NULL || obj->refcount == 0 || obj->dl_refcount == 0) { + _rtld_error("Invalid shared object handle %p", handle); + return NULL; + } + return obj; +} + +/* + * If the given object is already in the donelist, return true. Otherwise + * add the object to the list and return false. + */ +static bool +donelist_check(DoneList *dlp, const Obj_Entry *obj) +{ + unsigned int i; + + for (i = 0; i < dlp->num_used; i++) + if (dlp->objs[i] == obj) + return true; + /* + * Our donelist allocation should always be sufficient. 
But if + * our threads locking isn't working properly, more shared objects + * could have been loaded since we allocated the list. That should + * never happen, but we'll handle it properly just in case it does. + */ + if (dlp->num_used < dlp->num_alloc) + dlp->objs[dlp->num_used++] = obj; + return false; +} + +/* + * Hash function for symbol table lookup. Don't even think about changing + * this. It is specified by the System V ABI. + */ +unsigned long +elf_hash(const char *name) +{ + const unsigned char *p = (const unsigned char *) name; + unsigned long h = 0; + unsigned long g; + + while (*p != '\0') { + h = (h << 4) + *p++; + if ((g = h & 0xf0000000) != 0) + h ^= g >> 24; + h &= ~g; + } + return h; +} + +/* + * Find the library with the given name, and return its full pathname. + * The returned string is dynamically allocated. Generates an error + * message and returns NULL if the library cannot be found. + * + * If the second argument is non-NULL, then it refers to an already- + * loaded shared object, whose library search path will be searched. + * + * The search order is: + * LD_LIBRARY_PATH + * rpath in the referencing file + * ldconfig hints + * /lib:/usr/lib + */ +static char * +find_library(const char *xname, const Obj_Entry *refobj) +{ + char *pathname; + char *name; + + if (strchr(xname, '/') != NULL) { /* Hard coded pathname */ + if (xname[0] != '/' && !trust) { + _rtld_error("Absolute pathname required for shared object \"%s\"", + xname); + return NULL; + } + return xstrdup(xname); + } + + if (libmap_disable || (refobj == NULL) || + (name = lm_find(refobj->path, xname)) == NULL) + name = (char *)xname; + + dbg(" Searching for \"%s\"", name); + if ((pathname = search_library_path(name, ld_library_path)) != NULL || + (refobj != NULL && + (pathname = search_library_path(name, refobj->rpath)) != NULL) || + (pathname = search_library_path(name, gethints())) != NULL || + (pathname = search_library_path(name, STANDARD_LIBRARY_PATH)) != NULL) + return pathname; + + if(refobj != NULL && refobj->path != NULL) { + _rtld_error("Shared object \"%s\" not found, required by \"%s\"", + name, basename(refobj->path)); + } else { + _rtld_error("Shared object \"%s\" not found", name); + } + return NULL; +} + +/* + * Given a symbol number in a referencing object, find the corresponding + * definition of the symbol. Returns a pointer to the symbol, or NULL if + * no definition was found. Returns a pointer to the Obj_Entry of the + * defining object via the reference parameter DEFOBJ_OUT. + */ +const Elf_Sym * +find_symdef(unsigned long symnum, const Obj_Entry *refobj, + const Obj_Entry **defobj_out, bool in_plt, SymCache *cache) +{ + const Elf_Sym *ref; + const Elf_Sym *def; + const Obj_Entry *defobj; + const char *name; + unsigned long hash; + + /* + * If we have already found this symbol, get the information from + * the cache. + */ + if (symnum >= refobj->nchains) + return NULL; /* Bad object */ + + if (cache != NULL && cache[symnum].sym != NULL) { + *defobj_out = cache[symnum].obj; + return cache[symnum].sym; + } + + ref = refobj->symtab + symnum; + name = refobj->strtab + ref->st_name; + defobj = NULL; + + /* + * We don't have to do a full scale lookup if the symbol is local. + * We know it will bind to the instance in this load module; to + * which we already have a pointer (ie ref). By not doing a lookup, + * we not only improve performance, but it also avoids unresolvable + * symbols when local symbols are not in the hash table. This has + * been seen with the ia64 toolchain. 
+ */ + if (ELF_ST_BIND(ref->st_info) != STB_LOCAL) { + if (ELF_ST_TYPE(ref->st_info) == STT_SECTION) { + _rtld_error("%s: Bogus symbol table entry %lu", refobj->path, + symnum); + } + hash = elf_hash(name); + def = symlook_default(name, hash, refobj, &defobj, in_plt); + } else { + def = ref; + defobj = refobj; + } + + /* + * If we found no definition and the reference is weak, treat the + * symbol as having the value zero. + */ + if (def == NULL && ELF_ST_BIND(ref->st_info) == STB_WEAK) { + def = &sym_zero; + defobj = obj_main; + } + + if (def != NULL) { + *defobj_out = defobj; + /* Record the information in the cache to avoid subsequent lookups. */ + if (cache != NULL) { + cache[symnum].sym = def; + cache[symnum].obj = defobj; + } + } else { + if (refobj != &obj_rtld) + _rtld_error("%s: Undefined symbol \"%s\"", refobj->path, name); + } + return def; +} + +/* + * Return the search path from the ldconfig hints file, reading it if + * necessary. Returns NULL if there are problems with the hints file, + * or if the search path there is empty. + */ +static const char * +gethints(void) +{ + static char *hints; + + if (hints == NULL) { + int fd; + struct elfhints_hdr hdr; + char *p; + + /* Keep from trying again in case the hints file is bad. */ + hints = ""; + + if ((fd = open(_PATH_ELF_HINTS, O_RDONLY)) == -1) + return NULL; + if (read(fd, &hdr, sizeof hdr) != sizeof hdr || + hdr.magic != ELFHINTS_MAGIC || + hdr.version != 1) { + close(fd); + return NULL; + } + p = xmalloc(hdr.dirlistlen + 1); + if (lseek(fd, hdr.strtab + hdr.dirlist, SEEK_SET) == -1 || + read(fd, p, hdr.dirlistlen + 1) != (ssize_t)hdr.dirlistlen + 1) { + free(p); + close(fd); + return NULL; + } + hints = p; + close(fd); + } + return hints[0] != '\0' ? hints : NULL; +} + +static void +init_dag(Obj_Entry *root) +{ + DoneList donelist; + + donelist_init(&donelist); + init_dag1(root, root, &donelist); +} + +static void +init_dag1(Obj_Entry *root, Obj_Entry *obj, DoneList *dlp) +{ + const Needed_Entry *needed; + + if (donelist_check(dlp, obj)) + return; + + obj->refcount++; + objlist_push_tail(&obj->dldags, root); + objlist_push_tail(&root->dagmembers, obj); + for (needed = obj->needed; needed != NULL; needed = needed->next) + if (needed->obj != NULL) + init_dag1(root, needed->obj, dlp); +} + +/* + * Initialize the dynamic linker. The argument is the address at which + * the dynamic linker has been mapped into memory. The primary task of + * this function is to relocate the dynamic linker. + */ +static void +init_rtld(caddr_t mapbase) +{ + Obj_Entry objtmp; /* Temporary rtld object */ + + /* + * Conjure up an Obj_Entry structure for the dynamic linker. + * + * The "path" member can't be initialized yet because string constatns + * cannot yet be acessed. Below we will set it correctly. + */ + memset(&objtmp, 0, sizeof(objtmp)); + objtmp.path = NULL; + objtmp.rtld = true; + objtmp.mapbase = mapbase; +#ifdef PIC + objtmp.relocbase = mapbase; +#endif + if (RTLD_IS_DYNAMIC()) { + objtmp.dynamic = rtld_dynamic(&objtmp); + digest_dynamic(&objtmp, 1); + assert(objtmp.needed == NULL); + assert(!objtmp.textrel); + + /* + * Temporarily put the dynamic linker entry into the object list, so + * that symbols can be found. + */ + + relocate_objects(&objtmp, true, &objtmp); + } + + /* Initialize the object list. */ + obj_tail = &obj_list; + + /* Now that non-local variables can be accesses, copy out obj_rtld. */ + memcpy(&obj_rtld, &objtmp, sizeof(obj_rtld)); + + /* Replace the path with a dynamically allocated copy. 
 */
+ obj_rtld.path = xstrdup(PATH_RTLD);
+
+ r_debug.r_brk = r_debug_state;
+ r_debug.r_state = RT_CONSISTENT;
+}
+
+/*
+ * Add the init functions from a needed object list (and its recursive
+ * needed objects) to "list". This is not used directly; it is a helper
+ * function for initlist_add_objects(). The write lock must be held
+ * when this function is called.
+ */
+static void
+initlist_add_neededs(Needed_Entry *needed, Objlist *list)
+{
+ /* Recursively process the successor needed objects. */
+ if (needed->next != NULL)
+ initlist_add_neededs(needed->next, list);
+
+ /* Process the current needed object. */
+ if (needed->obj != NULL)
+ initlist_add_objects(needed->obj, &needed->obj->next, list);
+}
+
+/*
+ * Scan all of the DAGs rooted in the range of objects from "obj" to
+ * "tail" and add their init functions to "list". This recurses over
+ * the DAGs and ensures the proper init ordering such that each object's
+ * needed libraries are initialized before the object itself. At the
+ * same time, this function adds the objects to the global finalization
+ * list "list_fini" in the opposite order. The write lock must be
+ * held when this function is called.
+ */
+static void
+initlist_add_objects(Obj_Entry *obj, Obj_Entry **tail, Objlist *list)
+{
+ if (obj->init_done)
+ return;
+ obj->init_done = true;
+
+ /* Recursively process the successor objects. */
+ if (&obj->next != tail)
+ initlist_add_objects(obj->next, tail, list);
+
+ /* Recursively process the needed objects. */
+ if (obj->needed != NULL)
+ initlist_add_neededs(obj->needed, list);
+
+ /* Add the object to the init list. */
+ if (obj->init != (Elf_Addr)NULL)
+ objlist_push_tail(list, obj);
+
+ /* Add the object to the global fini list in the reverse order. */
+ if (obj->fini != (Elf_Addr)NULL)
+ objlist_push_head(&list_fini, obj);
+}
+
+#ifndef FPTR_TARGET
+#define FPTR_TARGET(f) ((Elf_Addr) (f))
+#endif
+
+static bool
+is_exported(const Elf_Sym *def)
+{
+ Elf_Addr value;
+ const func_ptr_type *p;
+
+ value = (Elf_Addr)(obj_rtld.relocbase + def->st_value);
+ for (p = exports; *p != NULL; p++)
+ if (FPTR_TARGET(*p) == value)
+ return true;
+ return false;
+}
+
+/*
+ * Given a shared object, traverse its list of needed objects, and load
+ * each of them. Returns 0 on success. Generates an error message and
+ * returns -1 on failure.
+ */
+static int
+load_needed_objects(Obj_Entry *first)
+{
+ Obj_Entry *obj;
+
+ for (obj = first; obj != NULL; obj = obj->next) {
+ Needed_Entry *needed;
+
+ for (needed = obj->needed; needed != NULL; needed = needed->next) {
+ const char *name = obj->strtab + needed->name;
+ char *path = find_library(name, obj);
+
+ needed->obj = NULL;
+ if (path == NULL && !ld_tracing)
+ return -1;
+ dbg("needed object \"%s\" => [%s]", name,
+ path != NULL ? path : "(not found)");
+ if (path) {
+ needed->obj = load_object(path);
+ if (needed->obj == NULL && !ld_tracing)
+ return -1; /* XXX - cleanup */
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int
+load_preload_objects(void)
+{
+ char *p = ld_preload;
+ static const char delim[] = " \t:;";
+
+ if (p == NULL)
+ return 0;
+
+ p += strspn(p, delim);
+ while (*p != '\0') {
+ size_t len = strcspn(p, delim);
+ char *path;
+ char savech;
+
+ savech = p[len];
+ p[len] = '\0';
+ if ((path = find_library(p, NULL)) == NULL)
+ return -1;
+ if (load_object(path) == NULL)
+ return -1; /* XXX - cleanup */
+ p[len] = savech;
+ p += len;
+ p += strspn(p, delim);
+ }
+ return 0;
+}
+
+/*
+ * Load a shared object into memory, if it is not already loaded. The
+ * argument must be a string allocated on the heap. This function assumes
+ * responsibility for freeing it when necessary.
+ *
+ * Returns a pointer to the Obj_Entry for the object. Returns NULL
+ * on failure.
+ */
+static Obj_Entry *
+load_object(char *path)
+{
+ Obj_Entry *obj;
+ int fd = -1;
+ struct stat sb;
+ struct statfs fs;
+
+ for (obj = obj_list->next; obj != NULL; obj = obj->next)
+ if (strcmp(obj->path, path) == 0)
+ break;
+
+ /*
+ * If we didn't find a match by pathname, open the file and check
+ * again by device and inode. This avoids false mismatches caused
+ * by multiple links or ".." in pathnames.
+ *
+ * To avoid a race, we open the file and use fstat() rather than
+ * using stat().
+ */
+ if (obj == NULL) {
+ if ((fd = open(path, O_RDONLY)) == -1) {
+ _rtld_error("Cannot open \"%s\"", path);
+ return NULL;
+ }
+ if (fstat(fd, &sb) == -1) {
+ _rtld_error("Cannot fstat \"%s\"", path);
+ close(fd);
+ return NULL;
+ }
+ for (obj = obj_list->next; obj != NULL; obj = obj->next) {
+ if (obj->ino == sb.st_ino && obj->dev == sb.st_dev) {
+ close(fd);
+ break;
+ }
+ }
+ }
+
+ if (obj == NULL) { /* First use of this object, so we must map it in */
+ /*
+ * but first, make sure that environment variables haven't been
+ * used to circumvent the noexec flag on a filesystem.
+ */
+ if (dangerous_ld_env) {
+ if (fstatfs(fd, &fs) != 0) {
+ _rtld_error("Cannot fstatfs \"%s\"", path);
+ close(fd);
+ return NULL;
+ }
+ if (fs.f_flags & MNT_NOEXEC) {
+ _rtld_error("Cannot execute objects on %s\n", fs.f_mntonname);
+ close(fd);
+ return NULL;
+ }
+ }
+ dbg("loading \"%s\"", path);
+ obj = map_object(fd, path, &sb);
+ dbg("mapped \"%s\"", path);
+ close(fd);
+ if (obj == NULL) {
+ free(path);
+ return NULL;
+ }
+
+ obj->path = path;
+ digest_dynamic(obj, 0);
+
+ *obj_tail = obj;
+ obj_tail = &obj->next;
+ obj_count++;
+ linkmap_add(obj); /* for GDB & dlinfo() */
+
+ dbg(" %p .. %p: %s", obj->mapbase,
+ obj->mapbase + obj->mapsize - 1, obj->path);
+ if (obj->textrel)
+ dbg(" WARNING: %s has impure text", obj->path);
+ } else
+ free(path);
+
+ return obj;
+}
+
+static Obj_Entry *
+obj_from_addr(const void *addr)
+{
+ Obj_Entry *obj;
+
+ for (obj = obj_list; obj != NULL; obj = obj->next) {
+ if (addr < (void *) obj->mapbase)
+ continue;
+ if (addr < (void *) (obj->mapbase + obj->mapsize))
+ return obj;
+ }
+ return NULL;
+}
+
+/*
+ * Call the finalization functions for each of the objects in "list"
+ * which are unreferenced. All of the objects are expected to have
+ * non-NULL fini functions.
+ */
+static void
+objlist_call_fini(Objlist *list)
+{
+ Objlist_Entry *elm;
+ char *saved_msg;
+
+ /*
+ * Preserve the current error message since a fini function might
+ * call into the dynamic linker and overwrite it.
+ */
+ saved_msg = errmsg_save();
+ STAILQ_FOREACH(elm, list, link) {
+ if (elm->obj->refcount == 0) {
+ dbg("calling fini function for %s at %p", elm->obj->path,
+ (void *)elm->obj->fini);
+ call_initfini_pointer(elm->obj, elm->obj->fini);
+ }
+ }
+ errmsg_restore(saved_msg);
+}
+
+/*
+ * Call the initialization functions for each of the objects in
+ * "list". All of the objects are expected to have non-NULL init
+ * functions.
+ */
+static void
+objlist_call_init(Objlist *list)
+{
+ Objlist_Entry *elm;
+ char *saved_msg;
+
+ /*
+ * Preserve the current error message since an init function might
+ * call into the dynamic linker and overwrite it.
+ */ + saved_msg = errmsg_save(); + STAILQ_FOREACH(elm, list, link) { + dbg("calling init function for %s at %p", elm->obj->path, + (void *)elm->obj->init); + call_initfini_pointer(elm->obj, elm->obj->init); + } + errmsg_restore(saved_msg); +} + +static void +objlist_clear(Objlist *list) +{ + Objlist_Entry *elm; + + while (!STAILQ_EMPTY(list)) { + elm = STAILQ_FIRST(list); + STAILQ_REMOVE_HEAD(list, link); + free(elm); + } +} + +static Objlist_Entry * +objlist_find(Objlist *list, const Obj_Entry *obj) +{ + Objlist_Entry *elm; + + STAILQ_FOREACH(elm, list, link) + if (elm->obj == obj) + return elm; + return NULL; +} + +static void +objlist_init(Objlist *list) +{ + STAILQ_INIT(list); +} + +static void +objlist_push_head(Objlist *list, Obj_Entry *obj) +{ + Objlist_Entry *elm; + + elm = NEW(Objlist_Entry); + elm->obj = obj; + STAILQ_INSERT_HEAD(list, elm, link); +} + +static void +objlist_push_tail(Objlist *list, Obj_Entry *obj) +{ + Objlist_Entry *elm; + + elm = NEW(Objlist_Entry); + elm->obj = obj; + STAILQ_INSERT_TAIL(list, elm, link); +} + +static void +objlist_remove(Objlist *list, Obj_Entry *obj) +{ + Objlist_Entry *elm; + + if ((elm = objlist_find(list, obj)) != NULL) { + STAILQ_REMOVE(list, elm, Struct_Objlist_Entry, link); + free(elm); + } +} + +/* + * Remove all of the unreferenced objects from "list". + */ +static void +objlist_remove_unref(Objlist *list) +{ + Objlist newlist; + Objlist_Entry *elm; + + STAILQ_INIT(&newlist); + while (!STAILQ_EMPTY(list)) { + elm = STAILQ_FIRST(list); + STAILQ_REMOVE_HEAD(list, link); + if (elm->obj->refcount == 0) + free(elm); + else + STAILQ_INSERT_TAIL(&newlist, elm, link); + } + *list = newlist; +} + +/* + * Relocate newly-loaded shared objects. The argument is a pointer to + * the Obj_Entry for the first such object. All objects from the first + * to the end of the list of objects are relocated. Returns 0 on success, + * or -1 on failure. + */ +static int +relocate_objects(Obj_Entry *first, bool bind_now, Obj_Entry *rtldobj) +{ + Obj_Entry *obj; + + for (obj = first; obj != NULL; obj = obj->next) { + if (obj != rtldobj) + dbg("relocating \"%s\"", obj->path); + if (obj->nbuckets == 0 || obj->nchains == 0 || obj->buckets == NULL || + obj->symtab == NULL || obj->strtab == NULL) { + _rtld_error("%s: Shared object has no run-time symbol table", + obj->path); + return -1; + } + + if (obj->textrel) { + /* There are relocations to the write-protected text segment. */ + if (mprotect(obj->mapbase, obj->textsize, + PROT_READ|PROT_WRITE|PROT_EXEC) == -1) { + _rtld_error("%s: Cannot write-enable text segment: %s", + obj->path, strerror(errno)); + return -1; + } + } + + /* Process the non-PLT relocations. */ + if (reloc_non_plt(obj, rtldobj)) + return -1; + + if (obj->textrel) { /* Re-protected the text segment. */ + if (mprotect(obj->mapbase, obj->textsize, + PROT_READ|PROT_EXEC) == -1) { + _rtld_error("%s: Cannot write-protect text segment: %s", + obj->path, strerror(errno)); + return -1; + } + } + + /* Process the PLT relocations. */ + if (reloc_plt(obj) == -1) + return -1; + /* Relocate the jump slots if we are doing immediate binding. */ + if (obj->bind_now || bind_now) + if (reloc_jmpslots(obj) == -1) + return -1; + + /* + * Set up the magic number and version in the Obj_Entry. These + * were checked in the crt1.o from the original ElfKit, so we + * set them for backward compatibility. + */ + obj->magic = RTLD_MAGIC; + obj->version = RTLD_VERSION; + + /* Set the special PLT or GOT entries. 
*/ + init_pltgot(obj); + } + + return 0; +} + +/* + * Cleanup procedure. It will be called (by the atexit mechanism) just + * before the process exits. + */ +static void +rtld_exit(void) +{ + Obj_Entry *obj; + + dbg("rtld_exit()"); + /* Clear all the reference counts so the fini functions will be called. */ + for (obj = obj_list; obj != NULL; obj = obj->next) + obj->refcount = 0; + objlist_call_fini(&list_fini); + /* No need to remove the items from the list, since we are exiting. */ + if (!libmap_disable) + lm_fini(); +} + +static void * +path_enumerate(const char *path, path_enum_proc callback, void *arg) +{ +#ifdef COMPAT_32BIT + const char *trans; +#endif + if (path == NULL) + return (NULL); + + path += strspn(path, ":;"); + while (*path != '\0') { + size_t len; + char *res; + + len = strcspn(path, ":;"); +#ifdef COMPAT_32BIT + trans = lm_findn(NULL, path, len); + if (trans) + res = callback(trans, strlen(trans), arg); + else +#endif + res = callback(path, len, arg); + + if (res != NULL) + return (res); + + path += len; + path += strspn(path, ":;"); + } + + return (NULL); +} + +struct try_library_args { + const char *name; + size_t namelen; + char *buffer; + size_t buflen; +}; + +static void * +try_library_path(const char *dir, size_t dirlen, void *param) +{ + struct try_library_args *arg; + + arg = param; + if (*dir == '/' || trust) { + char *pathname; + + if (dirlen + 1 + arg->namelen + 1 > arg->buflen) + return (NULL); + + pathname = arg->buffer; + strncpy(pathname, dir, dirlen); + pathname[dirlen] = '/'; + strcpy(pathname + dirlen + 1, arg->name); + + dbg(" Trying \"%s\"", pathname); + if (access(pathname, F_OK) == 0) { /* We found it */ + pathname = xmalloc(dirlen + 1 + arg->namelen + 1); + strcpy(pathname, arg->buffer); + return (pathname); + } + } + return (NULL); +} + +static char * +search_library_path(const char *name, const char *path) +{ + char *p; + struct try_library_args arg; + + if (path == NULL) + return NULL; +printf("name: [%s]\n",name); + arg.name = name; + arg.namelen = strlen(name); + arg.buffer = xmalloc(PATH_MAX); + arg.buflen = PATH_MAX; + + p = path_enumerate(path, try_library_path, &arg); + + free(arg.buffer); + + return (p); +} + +int +dlclose(void *handle) +{ + Obj_Entry *root; + int lockstate; + + lockstate = wlock_acquire(rtld_bind_lock); + root = dlcheck(handle); + if (root == NULL) { + wlock_release(rtld_bind_lock, lockstate); + return -1; + } + + /* Unreference the object and its dependencies. */ + root->dl_refcount--; + + unref_dag(root); + + if (root->refcount == 0) { + /* + * The object is no longer referenced, so we must unload it. + * First, call the fini functions with no locks held. + */ + wlock_release(rtld_bind_lock, lockstate); + objlist_call_fini(&list_fini); + lockstate = wlock_acquire(rtld_bind_lock); + objlist_remove_unref(&list_fini); + + /* Finish cleaning up the newly-unreferenced objects. */ + GDB_STATE(RT_DELETE,&root->linkmap); + unload_object(root); + GDB_STATE(RT_CONSISTENT,NULL); + } + wlock_release(rtld_bind_lock, lockstate); + return 0; +} + +const char * +dlerror(void) +{ + char *msg = error_message; + error_message = NULL; + return msg; +} + +/* + * This function is deprecated and has no effect. 
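+ * It is retained only for binary compatibility with old programs that
+ * call dllockinit(); locking is now handled internally by the rtld
+ * lock routines.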
+ */ +void +dllockinit(void *context, + void *(*lock_create)(void *context), + void (*rlock_acquire)(void *lock), + void (*wlock_acquire)(void *lock), + void (*lock_release)(void *lock), + void (*lock_destroy)(void *lock), + void (*context_destroy)(void *context)) +{ + static void *cur_context; + static void (*cur_context_destroy)(void *); + + /* Just destroy the context from the previous call, if necessary. */ + if (cur_context_destroy != NULL) + cur_context_destroy(cur_context); + cur_context = context; + cur_context_destroy = context_destroy; +} + +void * +dlopen(const char *name, int mode) +{ + Obj_Entry **old_obj_tail; + Obj_Entry *obj; + Objlist initlist; + int result, lockstate; + + ld_tracing = (mode & RTLD_TRACE) == 0 ? NULL : "1"; + if (ld_tracing != NULL) + environ = (char **)*get_program_var_addr("environ"); + + objlist_init(&initlist); + + lockstate = wlock_acquire(rtld_bind_lock); + GDB_STATE(RT_ADD,NULL); + + old_obj_tail = obj_tail; + obj = NULL; + if (name == NULL) { + obj = obj_main; + obj->refcount++; + } else { + char *path = find_library(name, obj_main); + if (path != NULL) + obj = load_object(path); + } + + if (obj) { + obj->dl_refcount++; + if (mode & RTLD_GLOBAL && objlist_find(&list_global, obj) == NULL) + objlist_push_tail(&list_global, obj); + mode &= RTLD_MODEMASK; + if (*old_obj_tail != NULL) { /* We loaded something new. */ + assert(*old_obj_tail == obj); + + result = load_needed_objects(obj); + if (result != -1 && ld_tracing) + goto trace; + + if (result == -1 || + (init_dag(obj), relocate_objects(obj, mode == RTLD_NOW, + &obj_rtld)) == -1) { + obj->dl_refcount--; + unref_dag(obj); + if (obj->refcount == 0) + unload_object(obj); + obj = NULL; + } else { + /* Make list of init functions to call. */ + initlist_add_objects(obj, &obj->next, &initlist); + } + } else { + + /* Bump the reference counts for objects on this DAG. */ + ref_dag(obj); + + if (ld_tracing) + goto trace; + } + } + + GDB_STATE(RT_CONSISTENT,obj ? &obj->linkmap : NULL); + + /* Call the init functions with no locks held. */ + wlock_release(rtld_bind_lock, lockstate); + objlist_call_init(&initlist); + lockstate = wlock_acquire(rtld_bind_lock); + objlist_clear(&initlist); + wlock_release(rtld_bind_lock, lockstate); + return obj; +trace: + trace_loaded_objects(obj); + wlock_release(rtld_bind_lock, lockstate); + exit(0); +} + +void * +dlsym(void *handle, const char *name) +{ + const Obj_Entry *obj; + unsigned long hash; + const Elf_Sym *def; + const Obj_Entry *defobj; + int lockstate; + + hash = elf_hash(name); + def = NULL; + defobj = NULL; + + lockstate = rlock_acquire(rtld_bind_lock); + if (handle == NULL || handle == RTLD_NEXT || + handle == RTLD_DEFAULT || handle == RTLD_SELF) { + void *retaddr; + + retaddr = __builtin_return_address(0); /* __GNUC__ only */ + if ((obj = obj_from_addr(retaddr)) == NULL) { + _rtld_error("Cannot determine caller's shared object"); + rlock_release(rtld_bind_lock, lockstate); + return NULL; + } + if (handle == NULL) { /* Just the caller's shared object. */ + def = symlook_obj(name, hash, obj, true); + defobj = obj; + } else if (handle == RTLD_NEXT || /* Objects after caller's */ + handle == RTLD_SELF) { /* ... 
caller included */ + if (handle == RTLD_NEXT) + obj = obj->next; + for (; obj != NULL; obj = obj->next) { + if ((def = symlook_obj(name, hash, obj, true)) != NULL) { + defobj = obj; + break; + } + } + } else { + assert(handle == RTLD_DEFAULT); + def = symlook_default(name, hash, obj, &defobj, true); + } + } else { + if ((obj = dlcheck(handle)) == NULL) { + rlock_release(rtld_bind_lock, lockstate); + return NULL; + } + + if (obj->mainprog) { + DoneList donelist; + + /* Search main program and all libraries loaded by it. */ + donelist_init(&donelist); + def = symlook_list(name, hash, &list_main, &defobj, true, + &donelist); + } else { + /* + * XXX - This isn't correct. The search should include the whole + * DAG rooted at the given object. + */ + def = symlook_obj(name, hash, obj, true); + defobj = obj; + } + } + + if (def != NULL) { + rlock_release(rtld_bind_lock, lockstate); + + /* + * The value required by the caller is derived from the value + * of the symbol. For the ia64 architecture, we need to + * construct a function descriptor which the caller can use to + * call the function with the right 'gp' value. For other + * architectures and for non-functions, the value is simply + * the relocated value of the symbol. + */ + if (ELF_ST_TYPE(def->st_info) == STT_FUNC) + return make_function_pointer(def, defobj); + else + return defobj->relocbase + def->st_value; + } + + _rtld_error("Undefined symbol \"%s\"", name); + rlock_release(rtld_bind_lock, lockstate); + return NULL; +} + +int +dladdr(const void *addr, Dl_info *info) +{ + const Obj_Entry *obj; + const Elf_Sym *def; + void *symbol_addr; + unsigned long symoffset; + int lockstate; + + lockstate = rlock_acquire(rtld_bind_lock); + obj = obj_from_addr(addr); + if (obj == NULL) { + _rtld_error("No shared object contains address"); + rlock_release(rtld_bind_lock, lockstate); + return 0; + } + info->dli_fname = obj->path; + info->dli_fbase = obj->mapbase; + info->dli_saddr = (void *)0; + info->dli_sname = NULL; + + /* + * Walk the symbol list looking for the symbol whose address is + * closest to the address sent in. + */ + for (symoffset = 0; symoffset < obj->nchains; symoffset++) { + def = obj->symtab + symoffset; + + /* + * For skip the symbol if st_shndx is either SHN_UNDEF or + * SHN_COMMON. + */ + if (def->st_shndx == SHN_UNDEF || def->st_shndx == SHN_COMMON) + continue; + + /* + * If the symbol is greater than the specified address, or if it + * is further away from addr than the current nearest symbol, + * then reject it. + */ + symbol_addr = obj->relocbase + def->st_value; + if (symbol_addr > addr || symbol_addr < info->dli_saddr) + continue; + + /* Update our idea of the nearest symbol. */ + info->dli_sname = obj->strtab + def->st_name; + info->dli_saddr = symbol_addr; + + /* Exact match? 
*/ + if (info->dli_saddr == addr) + break; + } + rlock_release(rtld_bind_lock, lockstate); + return 1; +} + +int +dlinfo(void *handle, int request, void *p) +{ + const Obj_Entry *obj; + int error, lockstate; + + lockstate = rlock_acquire(rtld_bind_lock); + + if (handle == NULL || handle == RTLD_SELF) { + void *retaddr; + + retaddr = __builtin_return_address(0); /* __GNUC__ only */ + if ((obj = obj_from_addr(retaddr)) == NULL) + _rtld_error("Cannot determine caller's shared object"); + } else + obj = dlcheck(handle); + + if (obj == NULL) { + rlock_release(rtld_bind_lock, lockstate); + return (-1); + } + + error = 0; + switch (request) { + case RTLD_DI_LINKMAP: + *((struct link_map const **)p) = &obj->linkmap; + break; + case RTLD_DI_ORIGIN: + error = rtld_dirname(obj->path, p); + break; + + case RTLD_DI_SERINFOSIZE: + case RTLD_DI_SERINFO: + error = do_search_info(obj, request, (struct dl_serinfo *)p); + break; + + default: + _rtld_error("Invalid request %d passed to dlinfo()", request); + error = -1; + } + + rlock_release(rtld_bind_lock, lockstate); + + return (error); +} + +struct fill_search_info_args { + int request; + unsigned int flags; + Dl_serinfo *serinfo; + Dl_serpath *serpath; + char *strspace; +}; + +static void * +fill_search_info(const char *dir, size_t dirlen, void *param) +{ + struct fill_search_info_args *arg; + + arg = param; + + if (arg->request == RTLD_DI_SERINFOSIZE) { + arg->serinfo->dls_cnt ++; + arg->serinfo->dls_size += sizeof(Dl_serpath) + dirlen + 1; + } else { + struct dl_serpath *s_entry; + + s_entry = arg->serpath; + s_entry->dls_name = arg->strspace; + s_entry->dls_flags = arg->flags; + + strncpy(arg->strspace, dir, dirlen); + arg->strspace[dirlen] = '\0'; + + arg->strspace += dirlen + 1; + arg->serpath++; + } + + return (NULL); +} + +static int +do_search_info(const Obj_Entry *obj, int request, struct dl_serinfo *info) +{ + struct dl_serinfo _info; + struct fill_search_info_args args; + + args.request = RTLD_DI_SERINFOSIZE; + args.serinfo = &_info; + + _info.dls_size = __offsetof(struct dl_serinfo, dls_serpath); + _info.dls_cnt = 0; + + path_enumerate(ld_library_path, fill_search_info, &args); + path_enumerate(obj->rpath, fill_search_info, &args); + path_enumerate(gethints(), fill_search_info, &args); + path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &args); + + + if (request == RTLD_DI_SERINFOSIZE) { + info->dls_size = _info.dls_size; + info->dls_cnt = _info.dls_cnt; + return (0); + } + + if (info->dls_cnt != _info.dls_cnt || info->dls_size != _info.dls_size) { + _rtld_error("Uninitialized Dl_serinfo struct passed to dlinfo()"); + return (-1); + } + + args.request = RTLD_DI_SERINFO; + args.serinfo = info; + args.serpath = &info->dls_serpath[0]; + args.strspace = (char *)&info->dls_serpath[_info.dls_cnt]; + + args.flags = LA_SER_LIBPATH; + if (path_enumerate(ld_library_path, fill_search_info, &args) != NULL) + return (-1); + + args.flags = LA_SER_RUNPATH; + if (path_enumerate(obj->rpath, fill_search_info, &args) != NULL) + return (-1); + + args.flags = LA_SER_CONFIG; + if (path_enumerate(gethints(), fill_search_info, &args) != NULL) + return (-1); + + args.flags = LA_SER_DEFAULT; + if (path_enumerate(STANDARD_LIBRARY_PATH, fill_search_info, &args) != NULL) + return (-1); + return (0); +} + +static int +rtld_dirname(const char *path, char *bname) +{ + const char *endp; + + /* Empty or NULL string gets treated as "." 
*/ + if (path == NULL || *path == '\0') { + bname[0] = '.'; + bname[1] = '\0'; + return (0); + } + + /* Strip trailing slashes */ + endp = path + strlen(path) - 1; + while (endp > path && *endp == '/') + endp--; + + /* Find the start of the dir */ + while (endp > path && *endp != '/') + endp--; + + /* Either the dir is "/" or there are no slashes */ + if (endp == path) { + bname[0] = *endp == '/' ? '/' : '.'; + bname[1] = '\0'; + return (0); + } else { + do { + endp--; + } while (endp > path && *endp == '/'); + } + + if (endp - path + 2 > PATH_MAX) + { + _rtld_error("Filename is too long: %s", path); + return(-1); + } + + strncpy(bname, path, endp - path + 1); + bname[endp - path + 1] = '\0'; + return (0); +} + +static void +linkmap_add(Obj_Entry *obj) +{ + struct link_map *l = &obj->linkmap; + struct link_map *prev; + + obj->linkmap.l_name = obj->path; + obj->linkmap.l_addr = obj->mapbase; + obj->linkmap.l_ld = obj->dynamic; +#ifdef __mips__ + /* GDB needs load offset on MIPS to use the symbols */ + obj->linkmap.l_offs = obj->relocbase; +#endif + + if (r_debug.r_map == NULL) { + r_debug.r_map = l; + return; + } + + /* + * Scan to the end of the list, but not past the entry for the + * dynamic linker, which we want to keep at the very end. + */ + for (prev = r_debug.r_map; + prev->l_next != NULL && prev->l_next != &obj_rtld.linkmap; + prev = prev->l_next) + ; + + /* Link in the new entry. */ + l->l_prev = prev; + l->l_next = prev->l_next; + if (l->l_next != NULL) + l->l_next->l_prev = l; + prev->l_next = l; +} + +static void +linkmap_delete(Obj_Entry *obj) +{ + struct link_map *l = &obj->linkmap; + + if (l->l_prev == NULL) { + if ((r_debug.r_map = l->l_next) != NULL) + l->l_next->l_prev = NULL; + return; + } + + if ((l->l_prev->l_next = l->l_next) != NULL) + l->l_next->l_prev = l->l_prev; +} + +/* + * Function for the debugger to set a breakpoint on to gain control. + * + * The two parameters allow the debugger to easily find and determine + * what the runtime loader is doing and to whom it is doing it. + * + * When the loadhook trap is hit (r_debug_state, set at program + * initialization), the arguments can be found on the stack: + * + * +8 struct link_map *m + * +4 struct r_debug *rd + * +0 RetAddr + */ +void +r_debug_state(struct r_debug* rd, struct link_map *m) +{ +} + +/* + * Get address of the pointer variable in the main program. + */ +static const void ** +get_program_var_addr(const char *name) +{ + const Obj_Entry *obj; + unsigned long hash; + + hash = elf_hash(name); + for (obj = obj_main; obj != NULL; obj = obj->next) { + const Elf_Sym *def; + + if ((def = symlook_obj(name, hash, obj, false)) != NULL) { + const void **addr; + + addr = (const void **)(obj->relocbase + def->st_value); + return addr; + } + } + return NULL; +} + +/* + * Set a pointer variable in the main program to the given value. This + * is used to set key variables such as "environ" before any of the + * init functions are called. + */ +static void +set_program_var(const char *name, const void *value) +{ + const void **addr; + + if ((addr = get_program_var_addr(name)) != NULL) { + dbg("\"%s\": *%p <-- %p", name, addr, value); + *addr = value; + } +} + +/* + * Given a symbol name in a referencing object, find the corresponding + * definition of the symbol. Returns a pointer to the symbol, or NULL if + * no definition was found. Returns a pointer to the Obj_Entry of the + * defining object via the reference parameter DEFOBJ_OUT. 
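+ *
+ * The search order is: the referencing object itself (if it was linked
+ * symbolically), the objects loaded at program startup, the DAGs of
+ * objects dlopened with RTLD_GLOBAL, the dlopened DAGs containing the
+ * referencing object, and finally the dynamic linker's own exported
+ * symbols.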
+ */ +static const Elf_Sym * +symlook_default(const char *name, unsigned long hash, + const Obj_Entry *refobj, const Obj_Entry **defobj_out, bool in_plt) +{ + DoneList donelist; + const Elf_Sym *def; + const Elf_Sym *symp; + const Obj_Entry *obj; + const Obj_Entry *defobj; + const Objlist_Entry *elm; + def = NULL; + defobj = NULL; + donelist_init(&donelist); + + /* Look first in the referencing object if linked symbolically. */ + if (refobj->symbolic && !donelist_check(&donelist, refobj)) { + symp = symlook_obj(name, hash, refobj, in_plt); + if (symp != NULL) { + def = symp; + defobj = refobj; + } + } + + /* Search all objects loaded at program start up. */ + if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { + symp = symlook_list(name, hash, &list_main, &obj, in_plt, &donelist); + if (symp != NULL && + (def == NULL || ELF_ST_BIND(symp->st_info) != STB_WEAK)) { + def = symp; + defobj = obj; + } + } + + /* Search all DAGs whose roots are RTLD_GLOBAL objects. */ + STAILQ_FOREACH(elm, &list_global, link) { + if (def != NULL && ELF_ST_BIND(def->st_info) != STB_WEAK) + break; + symp = symlook_list(name, hash, &elm->obj->dagmembers, &obj, in_plt, + &donelist); + if (symp != NULL && + (def == NULL || ELF_ST_BIND(symp->st_info) != STB_WEAK)) { + def = symp; + defobj = obj; + } + } + + /* Search all dlopened DAGs containing the referencing object. */ + STAILQ_FOREACH(elm, &refobj->dldags, link) { + if (def != NULL && ELF_ST_BIND(def->st_info) != STB_WEAK) + break; + symp = symlook_list(name, hash, &elm->obj->dagmembers, &obj, in_plt, + &donelist); + if (symp != NULL && + (def == NULL || ELF_ST_BIND(symp->st_info) != STB_WEAK)) { + def = symp; + defobj = obj; + } + } + + /* + * Search the dynamic linker itself, and possibly resolve the + * symbol from there. This is how the application links to + * dynamic linker services such as dlopen. Only the values listed + * in the "exports" array can be resolved from the dynamic linker. + */ + if (def == NULL || ELF_ST_BIND(def->st_info) == STB_WEAK) { + symp = symlook_obj(name, hash, &obj_rtld, in_plt); + if (symp != NULL && is_exported(symp)) { + def = symp; + defobj = &obj_rtld; + } + } + + if (def != NULL) + *defobj_out = defobj; + return def; +} + +static const Elf_Sym * +symlook_list(const char *name, unsigned long hash, Objlist *objlist, + const Obj_Entry **defobj_out, bool in_plt, DoneList *dlp) +{ + const Elf_Sym *symp; + const Elf_Sym *def; + const Obj_Entry *defobj; + const Objlist_Entry *elm; + + def = NULL; + defobj = NULL; + STAILQ_FOREACH(elm, objlist, link) { + if (donelist_check(dlp, elm->obj)) + continue; + if ((symp = symlook_obj(name, hash, elm->obj, in_plt)) != NULL) { + if (def == NULL || ELF_ST_BIND(symp->st_info) != STB_WEAK) { + def = symp; + defobj = elm->obj; + if (ELF_ST_BIND(def->st_info) != STB_WEAK) + break; + } + } + } + if (def != NULL) + *defobj_out = defobj; + return def; +} + +/* + * Search the symbol table of a single shared object for a symbol of + * the given name. Returns a pointer to the symbol, or NULL if no + * definition was found. + * + * The symbol's hash value is passed in for efficiency reasons; that + * eliminates many recomputations of the hash value. 
+ */ +const Elf_Sym * +symlook_obj(const char *name, unsigned long hash, const Obj_Entry *obj, + bool in_plt) +{ + if (obj->buckets != NULL) { + unsigned long symnum = obj->buckets[hash % obj->nbuckets]; + + while (symnum != STN_UNDEF) { + const Elf_Sym *symp; + const char *strp; + + if (symnum >= obj->nchains) + return NULL; /* Bad object */ + symp = obj->symtab + symnum; + strp = obj->strtab + symp->st_name; + + if (name[0] == strp[0] && strcmp(name, strp) == 0) + return symp->st_shndx != SHN_UNDEF || + (!in_plt && symp->st_value != 0 && + ELF_ST_TYPE(symp->st_info) == STT_FUNC) ? symp : NULL; + + symnum = obj->chains[symnum]; + } + } + return NULL; +} + +static void +trace_loaded_objects(Obj_Entry *obj) +{ + char *fmt1, *fmt2, *fmt, *main_local, *list_containers; + int c; + + if ((main_local = getenv(LD_ "TRACE_LOADED_OBJECTS_PROGNAME")) == NULL) + main_local = ""; + + if ((fmt1 = getenv(LD_ "TRACE_LOADED_OBJECTS_FMT1")) == NULL) + fmt1 = "\t%o => %p (%x)\n"; + + if ((fmt2 = getenv(LD_ "TRACE_LOADED_OBJECTS_FMT2")) == NULL) + fmt2 = "\t%o (%x)\n"; + + list_containers = getenv(LD_ "TRACE_LOADED_OBJECTS_ALL"); + + for (; obj; obj = obj->next) { + Needed_Entry *needed; + char *name, *path; + bool is_lib; + + if (list_containers && obj->needed != NULL) + printf("%s:\n", obj->path); + for (needed = obj->needed; needed; needed = needed->next) { + if (needed->obj != NULL) { + if (needed->obj->traced && !list_containers) + continue; + needed->obj->traced = true; + path = needed->obj->path; + } else + path = "not found"; + + name = (char *)obj->strtab + needed->name; + is_lib = strncmp(name, "lib", 3) == 0; /* XXX - bogus */ + + fmt = is_lib ? fmt1 : fmt2; + while ((c = *fmt++) != '\0') { + switch (c) { + default: + putchar(c); + continue; + case '\\': + switch (c = *fmt) { + case '\0': + continue; + case 'n': + putchar('\n'); + break; + case 't': + putchar('\t'); + break; + } + break; + case '%': + switch (c = *fmt) { + case '\0': + continue; + case '%': + default: + putchar(c); + break; + case 'A': + printf("%s", main_local); + break; + case 'a': + printf("%s", obj_main->path); + break; + case 'o': + printf("%s", name); + break; +#if 0 + case 'm': + printf("%d", sodp->sod_major); + break; + case 'n': + printf("%d", sodp->sod_minor); + break; +#endif + case 'p': + printf("%s", path); + break; + case 'x': + printf("%p", needed->obj ? needed->obj->mapbase : 0); + break; + } + break; + } + ++fmt; + } + } + } +} + +/* + * Unload a dlopened object and its dependencies from memory and from + * our data structures. It is assumed that the DAG rooted in the + * object has already been unreferenced, and that the object has a + * reference count of 0. + */ +static void +unload_object(Obj_Entry *root) +{ + Obj_Entry *obj; + Obj_Entry **linkp; + + assert(root->refcount == 0); + + /* + * Pass over the DAG removing unreferenced objects from + * appropriate lists. + */ + unlink_object(root); + + /* Unmap all objects that are no longer referenced. */ + linkp = &obj_list->next; + while ((obj = *linkp) != NULL) { + if (obj->refcount == 0) { + dbg("unloading \"%s\"", obj->path); + munmap(obj->mapbase, obj->mapsize); + linkmap_delete(obj); + *linkp = obj->next; + obj_count--; + obj_free(obj); + } else + linkp = &obj->next; + } + obj_tail = linkp; +} + +static void +unlink_object(Obj_Entry *root) +{ + Objlist_Entry *elm; + + if (root->refcount == 0) { + /* Remove the object from the RTLD_GLOBAL list. */ + objlist_remove(&list_global, root); + + /* Remove the object from all objects' DAG lists. 
*/ + STAILQ_FOREACH(elm, &root->dagmembers , link) { + objlist_remove(&elm->obj->dldags, root); + if (elm->obj != root) + unlink_object(elm->obj); + } + } +} + +static void +ref_dag(Obj_Entry *root) +{ + Objlist_Entry *elm; + + STAILQ_FOREACH(elm, &root->dagmembers , link) + elm->obj->refcount++; +} + +static void +unref_dag(Obj_Entry *root) +{ + Objlist_Entry *elm; + + STAILQ_FOREACH(elm, &root->dagmembers , link) + elm->obj->refcount--; +} + +/* + * Common code for MD __tls_get_addr(). + */ +void * +tls_get_addr_common(Elf_Addr** dtvp, int index, size_t offset) +{ + Elf_Addr* dtv = *dtvp; + int lockstate; + + /* Check dtv generation in case new modules have arrived */ + if (dtv[0] != tls_dtv_generation) { + Elf_Addr* newdtv; + int to_copy; + + lockstate = wlock_acquire(rtld_bind_lock); + newdtv = calloc(1, (tls_max_index + 2) * sizeof(Elf_Addr)); + to_copy = dtv[1]; + if (to_copy > tls_max_index) + to_copy = tls_max_index; + memcpy(&newdtv[2], &dtv[2], to_copy * sizeof(Elf_Addr)); + newdtv[0] = tls_dtv_generation; + newdtv[1] = tls_max_index; + free(dtv); + wlock_release(rtld_bind_lock, lockstate); + *dtvp = newdtv; + } + + /* Dynamically allocate module TLS if necessary */ + if (!dtv[index + 1]) { + /* Signal safe, wlock will block out signals. */ + lockstate = wlock_acquire(rtld_bind_lock); + if (!dtv[index + 1]) + dtv[index + 1] = (Elf_Addr)allocate_module_tls(index); + wlock_release(rtld_bind_lock, lockstate); + } + return (void*) (dtv[index + 1] + offset); +} + +/* XXX not sure what variants to use for arm. */ + +#if defined(__ia64__) || defined(__alpha__) || defined(__powerpc__) + +/* + * Allocate Static TLS using the Variant I method. + */ +void * +allocate_tls(Obj_Entry *objs, void *oldtcb, size_t tcbsize, size_t tcbalign) +{ + Obj_Entry *obj; + char *tcb; + Elf_Addr **tls; + Elf_Addr *dtv; + Elf_Addr addr; + int i; + + if (oldtcb != NULL && tcbsize == TLS_TCB_SIZE) + return (oldtcb); + + assert(tcbsize >= TLS_TCB_SIZE); + tcb = calloc(1, tls_static_space - TLS_TCB_SIZE + tcbsize); + tls = (Elf_Addr **)(tcb + tcbsize - TLS_TCB_SIZE); + + if (oldtcb != NULL) { + memcpy(tls, oldtcb, tls_static_space); + free(oldtcb); + + /* Adjust the DTV. */ + dtv = tls[0]; + for (i = 0; i < dtv[1]; i++) { + if (dtv[i+2] >= (Elf_Addr)oldtcb && + dtv[i+2] < (Elf_Addr)oldtcb + tls_static_space) { + dtv[i+2] = dtv[i+2] - (Elf_Addr)oldtcb + (Elf_Addr)tls; + } + } + } else { + dtv = calloc(tls_max_index + 2, sizeof(Elf_Addr)); + tls[0] = dtv; + dtv[0] = tls_dtv_generation; + dtv[1] = tls_max_index; + + for (obj = objs; obj; obj = obj->next) { + if (obj->tlsoffset) { + addr = (Elf_Addr)tls + obj->tlsoffset; + memset((void*) (addr + obj->tlsinitsize), + 0, obj->tlssize - obj->tlsinitsize); + if (obj->tlsinit) + memcpy((void*) addr, obj->tlsinit, + obj->tlsinitsize); + dtv[obj->tlsindex + 1] = addr; + } + } + } + + return (tcb); +} + +void +free_tls(void *tcb, size_t tcbsize, size_t tcbalign) +{ + Elf_Addr *dtv; + Elf_Addr tlsstart, tlsend; + int dtvsize, i; + + assert(tcbsize >= TLS_TCB_SIZE); + + tlsstart = (Elf_Addr)tcb + tcbsize - TLS_TCB_SIZE; + tlsend = tlsstart + tls_static_space; + + dtv = *(Elf_Addr **)tlsstart; + dtvsize = dtv[1]; + for (i = 0; i < dtvsize; i++) { + if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] >= tlsend)) { + free((void*)dtv[i+2]); + } + } + free(dtv); + free(tcb); +} + +#endif + +#if defined(__i386__) || defined(__amd64__) || defined(__sparc64__) || \ + defined(__arm__) + +/* + * Allocate Static TLS using the Variant II method. 
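+ * In Variant II the static TLS blocks sit immediately below the thread
+ * pointer: "segbase" is the TCB address, segbase[0] points to itself,
+ * segbase[1] holds the DTV, and each module's block is found at
+ * segbase - tlsoffset.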
+ */ +void * +allocate_tls(Obj_Entry *objs, void *oldtls, size_t tcbsize, size_t tcbalign) +{ + Obj_Entry *obj; + size_t size; + char *tls; + Elf_Addr *dtv, *olddtv; + Elf_Addr segbase, oldsegbase, addr; + int i; + + size = round(tls_static_space, tcbalign); + + assert(tcbsize >= 2*sizeof(Elf_Addr)); + tls = malloc(size + tcbsize); + dtv = calloc(1, (tls_max_index + 2) * sizeof(Elf_Addr)); + + segbase = (Elf_Addr)(tls + size); + ((Elf_Addr*)segbase)[0] = segbase; + ((Elf_Addr*)segbase)[1] = (Elf_Addr) dtv; + + dtv[0] = tls_dtv_generation; + dtv[1] = tls_max_index; + + if (oldtls) { + /* + * Copy the static TLS block over whole. + */ + oldsegbase = (Elf_Addr) oldtls; + memcpy((void *)(segbase - tls_static_space), + (const void *)(oldsegbase - tls_static_space), + tls_static_space); + + /* + * If any dynamic TLS blocks have been created tls_get_addr(), + * move them over. + */ + olddtv = ((Elf_Addr**)oldsegbase)[1]; + for (i = 0; i < olddtv[1]; i++) { + if (olddtv[i+2] < oldsegbase - size || olddtv[i+2] > oldsegbase) { + dtv[i+2] = olddtv[i+2]; + olddtv[i+2] = 0; + } + } + + /* + * We assume that this block was the one we created with + * allocate_initial_tls(). + */ + free_tls(oldtls, 2*sizeof(Elf_Addr), sizeof(Elf_Addr)); + } else { + for (obj = objs; obj; obj = obj->next) { + if (obj->tlsoffset) { + addr = segbase - obj->tlsoffset; + memset((void*) (addr + obj->tlsinitsize), + 0, obj->tlssize - obj->tlsinitsize); + if (obj->tlsinit) + memcpy((void*) addr, obj->tlsinit, obj->tlsinitsize); + dtv[obj->tlsindex + 1] = addr; + } + } + } + + return (void*) segbase; +} + +void +free_tls(void *tls, size_t tcbsize, size_t tcbalign) +{ + size_t size; + Elf_Addr* dtv; + int dtvsize, i; + Elf_Addr tlsstart, tlsend; + + /* + * Figure out the size of the initial TLS block so that we can + * find stuff which ___tls_get_addr() allocated dynamically. + */ + size = round(tls_static_space, tcbalign); + + dtv = ((Elf_Addr**)tls)[1]; + dtvsize = dtv[1]; + tlsend = (Elf_Addr) tls; + tlsstart = tlsend - size; + for (i = 0; i < dtvsize; i++) { + if (dtv[i+2] && (dtv[i+2] < tlsstart || dtv[i+2] > tlsend)) { + free((void*) dtv[i+2]); + } + } + + free((void*) tlsstart); +} + +#endif + +/* + * Allocate TLS block for module with given index. + */ +void * +allocate_module_tls(int index) +{ + Obj_Entry* obj; + char* p; + + for (obj = obj_list; obj; obj = obj->next) { + if (obj->tlsindex == index) + break; + } + if (!obj) { + _rtld_error("Can't find module with TLS index %d", index); + die(); + } + + p = malloc(obj->tlssize); + memcpy(p, obj->tlsinit, obj->tlsinitsize); + memset(p + obj->tlsinitsize, 0, obj->tlssize - obj->tlsinitsize); + + return p; +} + +bool +allocate_tls_offset(Obj_Entry *obj) +{ + size_t off; + + if (obj->tls_done) + return true; + + if (obj->tlssize == 0) { + obj->tls_done = true; + return true; + } + + if (obj->tlsindex == 1) + off = calculate_first_tls_offset(obj->tlssize, obj->tlsalign); + else + off = calculate_tls_offset(tls_last_offset, tls_last_size, + obj->tlssize, obj->tlsalign); + + /* + * If we have already fixed the size of the static TLS block, we + * must stay within that size. When allocating the static TLS, we + * leave a small amount of space spare to be used for dynamically + * loading modules which use static TLS. 
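+     * That reserve is normally RTLD_STATIC_TLS_EXTRA bytes (defined in
+     * rtld.h); once it is exhausted this function returns false and the
+     * object cannot be assigned a static TLS offset.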
+ */ + if (tls_static_space) { + if (calculate_tls_end(off, obj->tlssize) > tls_static_space) + return false; + } + + tls_last_offset = obj->tlsoffset = off; + tls_last_size = obj->tlssize; + obj->tls_done = true; + + return true; +} + +void +free_tls_offset(Obj_Entry *obj) +{ +#if defined(__i386__) || defined(__amd64__) || defined(__sparc64__) || \ + defined(__arm__) + /* + * If we were the last thing to allocate out of the static TLS + * block, we give our space back to the 'allocator'. This is a + * simplistic workaround to allow libGL.so.1 to be loaded and + * unloaded multiple times. We only handle the Variant II + * mechanism for now - this really needs a proper allocator. + */ + if (calculate_tls_end(obj->tlsoffset, obj->tlssize) + == calculate_tls_end(tls_last_offset, tls_last_size)) { + tls_last_offset -= obj->tlssize; + tls_last_size = 0; + } +#endif +} + +void * +_rtld_allocate_tls(void *oldtls, size_t tcbsize, size_t tcbalign) +{ + void *ret; + int lockstate; + + lockstate = wlock_acquire(rtld_bind_lock); + ret = allocate_tls(obj_list, oldtls, tcbsize, tcbalign); + wlock_release(rtld_bind_lock, lockstate); + return (ret); +} + +void +_rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign) +{ + int lockstate; + + lockstate = wlock_acquire(rtld_bind_lock); + free_tls(tcb, tcbsize, tcbalign); + wlock_release(rtld_bind_lock, lockstate); +} diff --git a/src/bin/rtld-elf/rtld.h b/src/bin/rtld-elf/rtld.h new file mode 100644 index 0000000..d8dd379 --- /dev/null +++ b/src/bin/rtld-elf/rtld.h @@ -0,0 +1,252 @@ +/*- + * Copyright 1996, 1997, 1998, 1999, 2000 John D. Polstra. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * $FreeBSD: src/libexec/rtld-elf/rtld.h,v 1.35.2.1 2005/12/30 22:13:56 marcel Exp $ + */ + +#ifndef RTLD_H /* { */ +#define RTLD_H 1 + +#include +#include +#include + +#include +#include +#include + +#include "rtld_lock.h" +#include "rtld_machdep.h" + +#ifdef COMPAT_32BIT +#undef STANDARD_LIBRARY_PATH +#undef _PATH_ELF_HINTS +#define _PATH_ELF_HINTS "/var/run/ld-elf32.so.hints" +/* For running 32 bit binaries */ +#define STANDARD_LIBRARY_PATH "/lib32:/usr/lib32" +#define LD_ "LD_32_" +#endif + +#ifndef STANDARD_LIBRARY_PATH +#define STANDARD_LIBRARY_PATH "/lib:/usr/lib" +#endif +#ifndef LD_ +#define LD_ "LD_" +#endif + +#define NEW(type) ((type *) xmalloc(sizeof(type))) +#define CNEW(type) ((type *) xcalloc(sizeof(type))) + +/* We might as well do booleans like C++. */ +typedef unsigned char bool; +#define false 0 +#define true 1 + +extern size_t tls_last_offset; +extern size_t tls_last_size; +extern size_t tls_static_space; +extern int tls_dtv_generation; +extern int tls_max_index; + +struct stat; +struct Struct_Obj_Entry; + +/* Lists of shared objects */ +typedef struct Struct_Objlist_Entry { + STAILQ_ENTRY(Struct_Objlist_Entry) link; + struct Struct_Obj_Entry *obj; +} Objlist_Entry; + +typedef STAILQ_HEAD(Struct_Objlist, Struct_Objlist_Entry) Objlist; + +/* Types of init and fini functions */ +typedef void (*InitFunc)(void); + +/* Lists of shared object dependencies */ +typedef struct Struct_Needed_Entry { + struct Struct_Needed_Entry *next; + struct Struct_Obj_Entry *obj; + unsigned long name; /* Offset of name in string table */ +} Needed_Entry; + +/* Lock object */ +typedef struct Struct_LockInfo { + void *context; /* Client context for creating locks */ + void *thelock; /* The one big lock */ + /* Debugging aids. */ + volatile int rcount; /* Number of readers holding lock */ + volatile int wcount; /* Number of writers holding lock */ + /* Methods */ + void *(*lock_create)(void *context); + void (*rlock_acquire)(void *lock); + void (*wlock_acquire)(void *lock); + void (*rlock_release)(void *lock); + void (*wlock_release)(void *lock); + void (*lock_destroy)(void *lock); + void (*context_destroy)(void *context); +} LockInfo; + +/* + * Shared object descriptor. + * + * Items marked with "(%)" are dynamically allocated, and must be freed + * when the structure is destroyed. + * + * CAUTION: It appears that the JDK port peeks into these structures. + * It looks at "next" and "mapbase" at least. Don't add new members + * near the front, until this can be straightened out. + */ +typedef struct Struct_Obj_Entry { + /* + * These two items have to be set right for compatibility with the + * original ElfKit crt1.o. + */ + Elf_Size magic; /* Magic number (sanity check) */ + Elf_Size version; /* Version number of struct format */ + + struct Struct_Obj_Entry *next; + char *path; /* Pathname of underlying file (%) */ + char *origin_path; /* Directory path of origin file */ + int refcount; + int dl_refcount; /* Number of times loaded by dlopen */ + + /* These items are computed by map_object() or by digest_phdr(). 
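+     * For an object linked at virtual address 0 (the usual case for a
+     * shared library), vaddrbase is 0 and relocbase equals mapbase; any
+     * address taken from the file becomes usable by adding relocbase.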
*/ + caddr_t mapbase; /* Base address of mapped region */ + size_t mapsize; /* Size of mapped region in bytes */ + size_t textsize; /* Size of text segment in bytes */ + Elf_Addr vaddrbase; /* Base address in shared object file */ + caddr_t relocbase; /* Relocation constant = mapbase - vaddrbase */ + const Elf_Dyn *dynamic; /* Dynamic section */ + caddr_t entry; /* Entry point */ + const Elf_Phdr *phdr; /* Program header if it is mapped, else NULL */ + size_t phsize; /* Size of program header in bytes */ + const char *interp; /* Pathname of the interpreter, if any */ + + /* TLS information */ + int tlsindex; /* Index in DTV for this module */ + void *tlsinit; /* Base address of TLS init block */ + size_t tlsinitsize; /* Size of TLS init block for this module */ + size_t tlssize; /* Size of TLS block for this module */ + size_t tlsoffset; /* Offset of static TLS block for this module */ + size_t tlsalign; /* Alignment of static TLS block */ + + /* Items from the dynamic section. */ + Elf_Addr *pltgot; /* PLT or GOT, depending on architecture */ + const Elf_Rel *rel; /* Relocation entries */ + unsigned long relsize; /* Size in bytes of relocation info */ + const Elf_Rela *rela; /* Relocation entries with addend */ + unsigned long relasize; /* Size in bytes of addend relocation info */ + const Elf_Rel *pltrel; /* PLT relocation entries */ + unsigned long pltrelsize; /* Size in bytes of PLT relocation info */ + const Elf_Rela *pltrela; /* PLT relocation entries with addend */ + unsigned long pltrelasize; /* Size in bytes of PLT addend reloc info */ + const Elf_Sym *symtab; /* Symbol table */ + const char *strtab; /* String table */ + unsigned long strsize; /* Size in bytes of string table */ + + const Elf_Hashelt *buckets; /* Hash table buckets array */ + unsigned long nbuckets; /* Number of buckets */ + const Elf_Hashelt *chains; /* Hash table chain array */ + unsigned long nchains; /* Number of chains */ + + const char *rpath; /* Search path specified in object */ + Needed_Entry *needed; /* Shared objects needed by this one (%) */ + + Elf_Addr init; /* Initialization function to call */ + Elf_Addr fini; /* Termination function to call */ + + bool mainprog; /* True if this is the main program */ + bool rtld; /* True if this is the dynamic linker */ + bool textrel; /* True if there are relocations to text seg */ + bool symbolic; /* True if generated with "-Bsymbolic" */ + bool bind_now; /* True if all relocations should be made first */ + bool traced; /* Already printed in ldd trace output */ + bool jmpslots_done; /* Already have relocated the jump slots */ + bool init_done; /* Already have added object to init list */ + bool tls_done; /* Already allocated offset for static TLS */ + + struct link_map linkmap; /* for GDB and dlinfo() */ + Objlist dldags; /* Object belongs to these dlopened DAGs (%) */ + Objlist dagmembers; /* DAG has these members (%) */ + dev_t dev; /* Object's filesystem's device */ + ino_t ino; /* Object's inode number */ + void *priv; /* Platform-dependant */ +} Obj_Entry; + +#define RTLD_MAGIC 0xd550b87a +#define RTLD_VERSION 1 + +#define RTLD_STATIC_TLS_EXTRA 64 + +/* + * Symbol cache entry used during relocation to avoid multiple lookups + * of the same symbol. + */ +typedef struct Struct_SymCache { + const Elf_Sym *sym; /* Symbol table entry */ + const Obj_Entry *obj; /* Shared object which defines it */ +} SymCache; + +extern void _rtld_error(const char *, ...) 
__printflike(1, 2); +extern Obj_Entry *map_object(int, const char *, const struct stat *); +extern void *xcalloc(size_t); +extern void *xmalloc(size_t); +extern char *xstrdup(const char *); +extern Elf_Addr _GLOBAL_OFFSET_TABLE_[]; + +extern void dump_relocations (Obj_Entry *); +extern void dump_obj_relocations (Obj_Entry *); +extern void dump_Elf_Rel (Obj_Entry *, const Elf_Rel *, u_long); +extern void dump_Elf_Rela (Obj_Entry *, const Elf_Rela *, u_long); + +/* + * Function declarations. + */ +unsigned long elf_hash(const char *); +const Elf_Sym *find_symdef(unsigned long, const Obj_Entry *, + const Obj_Entry **, bool, SymCache *); +void init_pltgot(Obj_Entry *); +void lockdflt_init(void); +void obj_free(Obj_Entry *); +Obj_Entry *obj_new(void); +void _rtld_bind_start(void); +const Elf_Sym *symlook_obj(const char *, unsigned long, + const Obj_Entry *, bool); +void *tls_get_addr_common(Elf_Addr** dtvp, int index, size_t offset); +void *allocate_tls(Obj_Entry *, void *, size_t, size_t); +void free_tls(void *, size_t, size_t); +void *allocate_module_tls(int index); +bool allocate_tls_offset(Obj_Entry *obj); +void free_tls_offset(Obj_Entry *obj); + +/* + * MD function declarations. + */ +int do_copy_relocations(Obj_Entry *); +int reloc_non_plt(Obj_Entry *, Obj_Entry *); +int reloc_plt(Obj_Entry *); +int reloc_jmpslots(Obj_Entry *); +void allocate_initial_tls(Obj_Entry *); + +#endif /* } */ diff --git a/src/bin/rtld-elf/rtld_lock.c b/src/bin/rtld-elf/rtld_lock.c new file mode 100644 index 0000000..9a9fa05 --- /dev/null +++ b/src/bin/rtld-elf/rtld_lock.c @@ -0,0 +1,317 @@ +/*- + * Copyright 1999, 2000 John D. Polstra. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * from: FreeBSD: src/libexec/rtld-elf/sparc64/lockdflt.c,v 1.3 2002/10/09 + * $FreeBSD: src/libexec/rtld-elf/rtld_lock.c,v 1.3 2004/11/16 20:45:51 jhb Exp $ + */ + +/* + * Thread locking implementation for the dynamic linker. + * + * We use the "simple, non-scalable reader-preference lock" from: + * + * J. M. Mellor-Crummey and M. L. Scott. "Scalable Reader-Writer + * Synchronization for Shared-Memory Multiprocessors." 3rd ACM Symp. on + * Principles and Practice of Parallel Programming, April 1991. + * + * In this algorithm the lock is a single word. Its low-order bit is + * set when a writer holds the lock. 
The remaining high-order bits + * contain a count of readers desiring the lock. The algorithm requires + * atomic "compare_and_store" and "add" operations, which we implement + * using assembly language sequences in "rtld_start.S". + */ + +#include +#include +#include + +#include "debug.h" +#include "rtld.h" +#include "rtld_machdep.h" + +#define WAFLAG 0x1 /* A writer holds the lock */ +#define RC_INCR 0x2 /* Adjusts count of readers desiring lock */ + +typedef struct Struct_Lock { + volatile int lock; + void *base; +} Lock; + +static sigset_t fullsigmask, oldsigmask; +static int thread_flag; + +static void * +def_lock_create() +{ + void *base; + char *p; + uintptr_t r; + Lock *l; + + /* + * Arrange for the lock to occupy its own cache line. First, we + * optimistically allocate just a cache line, hoping that malloc + * will give us a well-aligned block of memory. If that doesn't + * work, we allocate a larger block and take a well-aligned cache + * line from it. + */ + base = xmalloc(CACHE_LINE_SIZE); + p = (char *)base; + if ((uintptr_t)p % CACHE_LINE_SIZE != 0) { + free(base); + base = xmalloc(2 * CACHE_LINE_SIZE); + p = (char *)base; + if ((r = (uintptr_t)p % CACHE_LINE_SIZE) != 0) + p += CACHE_LINE_SIZE - r; + } + l = (Lock *)p; + l->base = base; + l->lock = 0; + return l; +} + +static void +def_lock_destroy(void *lock) +{ + Lock *l = (Lock *)lock; + + free(l->base); +} + +static void +def_rlock_acquire(void *lock) +{ + Lock *l = (Lock *)lock; + + atomic_add_acq_int(&l->lock, RC_INCR); + while (l->lock & WAFLAG) + ; /* Spin */ +} + +static void +def_wlock_acquire(void *lock) +{ + Lock *l = (Lock *)lock; + sigset_t tmp_oldsigmask; + + for ( ; ; ) { + sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask); + if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG)) + break; + sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL); + } + oldsigmask = tmp_oldsigmask; +} + +static void +def_lock_release(void *lock) +{ + Lock *l = (Lock *)lock; + + if ((l->lock & WAFLAG) == 0) + atomic_add_rel_int(&l->lock, -RC_INCR); + else { + atomic_add_rel_int(&l->lock, -WAFLAG); + sigprocmask(SIG_SETMASK, &oldsigmask, NULL); + } +} + +static int +def_thread_set_flag(int mask) +{ + int old_val = thread_flag; + thread_flag |= mask; + return (old_val); +} + +static int +def_thread_clr_flag(int mask) +{ + int old_val = thread_flag; + thread_flag &= ~mask; + return (old_val); +} + +/* + * Public interface exposed to the rest of the dynamic linker. 
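+ *
+ * rlock_acquire()/wlock_acquire() return 0 when the calling thread
+ * already holds an rtld lock (the per-thread flag set via
+ * thread_mask_set() catches the recursion) and 1 when the lock was
+ * actually taken.  That value must be passed back to the matching
+ * release function, which only drops the lock if it was really
+ * acquired, e.g.:
+ *
+ *	lockstate = wlock_acquire(rtld_bind_lock);
+ *	... manipulate the object lists ...
+ *	wlock_release(rtld_bind_lock, lockstate);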
+ */ +static struct RtldLockInfo lockinfo; +static struct RtldLockInfo deflockinfo; + +static __inline int +thread_mask_set(int mask) +{ + return lockinfo.thread_set_flag(mask); +} + +static __inline void +thread_mask_clear(int mask) +{ + lockinfo.thread_clr_flag(mask); +} + +#define RTLD_LOCK_CNT 2 +struct rtld_lock { + void *handle; + int mask; +} rtld_locks[RTLD_LOCK_CNT]; + +rtld_lock_t rtld_bind_lock = &rtld_locks[0]; +rtld_lock_t rtld_libc_lock = &rtld_locks[1]; + +int +rlock_acquire(rtld_lock_t lock) +{ + if (thread_mask_set(lock->mask)) { + dbg("rlock_acquire: recursed"); + return (0); + } + lockinfo.rlock_acquire(lock->handle); + return (1); +} + +int +wlock_acquire(rtld_lock_t lock) +{ + if (thread_mask_set(lock->mask)) { + dbg("wlock_acquire: recursed"); + return (0); + } + lockinfo.wlock_acquire(lock->handle); + return (1); +} + +void +rlock_release(rtld_lock_t lock, int locked) +{ + if (locked == 0) + return; + thread_mask_clear(lock->mask); + lockinfo.lock_release(lock->handle); +} + +void +wlock_release(rtld_lock_t lock, int locked) +{ + if (locked == 0) + return; + thread_mask_clear(lock->mask); + lockinfo.lock_release(lock->handle); +} + +void +lockdflt_init() +{ + int i; + + deflockinfo.rtli_version = RTLI_VERSION; + deflockinfo.lock_create = def_lock_create; + deflockinfo.lock_destroy = def_lock_destroy; + deflockinfo.rlock_acquire = def_rlock_acquire; + deflockinfo.wlock_acquire = def_wlock_acquire; + deflockinfo.lock_release = def_lock_release; + deflockinfo.thread_set_flag = def_thread_set_flag; + deflockinfo.thread_clr_flag = def_thread_clr_flag; + deflockinfo.at_fork = NULL; + + for (i = 0; i < RTLD_LOCK_CNT; i++) { + rtld_locks[i].mask = (1 << i); + rtld_locks[i].handle = NULL; + } + + memcpy(&lockinfo, &deflockinfo, sizeof(lockinfo)); + _rtld_thread_init(NULL); + /* + * Construct a mask to block all signals except traps which might + * conceivably be generated within the dynamic linker itself. + */ + sigfillset(&fullsigmask); + sigdelset(&fullsigmask, SIGILL); + sigdelset(&fullsigmask, SIGTRAP); + sigdelset(&fullsigmask, SIGABRT); + sigdelset(&fullsigmask, SIGEMT); + sigdelset(&fullsigmask, SIGFPE); + sigdelset(&fullsigmask, SIGBUS); + sigdelset(&fullsigmask, SIGSEGV); + sigdelset(&fullsigmask, SIGSYS); +} + +/* + * Callback function to allow threads implementation to + * register their own locking primitives if the default + * one is not suitable. + * The current context should be the only context + * executing at the invocation time. 
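+ *
+ * A threads library typically fills in a struct RtldLockInfo (see
+ * rtld_lock.h) and calls this once while the process is still single
+ * threaded.  A hypothetical caller might look like:
+ *
+ *	static struct RtldLockInfo li = {
+ *		RTLI_VERSION,
+ *		my_lock_create, my_lock_destroy,
+ *		my_rlock_acquire, my_wlock_acquire, my_lock_release,
+ *		my_thread_set_flag, my_thread_clr_flag,
+ *		NULL				(no at_fork hook)
+ *	};
+ *	_rtld_thread_init(&li);
+ *
+ * Passing NULL reinstalls the default locks.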
+ */ +void +_rtld_thread_init(struct RtldLockInfo *pli) +{ + int flags, i; + void *locks[RTLD_LOCK_CNT]; + + /* disable all locking while this function is running */ + flags = thread_mask_set(~0); + + if (pli == NULL) + pli = &deflockinfo; + + + for (i = 0; i < RTLD_LOCK_CNT; i++) + if ((locks[i] = pli->lock_create()) == NULL) + break; + + if (i < RTLD_LOCK_CNT) { + while (--i >= 0) + pli->lock_destroy(locks[i]); + abort(); + } + + for (i = 0; i < RTLD_LOCK_CNT; i++) { + if (rtld_locks[i].handle == NULL) + continue; + if (flags & rtld_locks[i].mask) + lockinfo.lock_release(rtld_locks[i].handle); + lockinfo.lock_destroy(rtld_locks[i].handle); + } + + for (i = 0; i < RTLD_LOCK_CNT; i++) { + rtld_locks[i].handle = locks[i]; + if (flags & rtld_locks[i].mask) + pli->wlock_acquire(rtld_locks[i].handle); + } + + lockinfo.lock_create = pli->lock_create; + lockinfo.lock_destroy = pli->lock_destroy; + lockinfo.rlock_acquire = pli->rlock_acquire; + lockinfo.wlock_acquire = pli->wlock_acquire; + lockinfo.lock_release = pli->lock_release; + lockinfo.thread_set_flag = pli->thread_set_flag; + lockinfo.thread_clr_flag = pli->thread_clr_flag; + lockinfo.at_fork = pli->at_fork; + + /* restore thread locking state, this time with new locks */ + thread_mask_clear(~0); + thread_mask_set(flags); + dbg("_rtld_thread_init: done"); +} diff --git a/src/bin/rtld-elf/rtld_lock.h b/src/bin/rtld-elf/rtld_lock.h new file mode 100644 index 0000000..906e472 --- /dev/null +++ b/src/bin/rtld-elf/rtld_lock.h @@ -0,0 +1,63 @@ +/*- + * Copyright 2003 Alexander Kabaev. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * $FreeBSD: src/libexec/rtld-elf/rtld_lock.h,v 1.2 2003/06/19 02:39:37 mdodd Exp $ + */ + +#ifndef _RTLD_LOCK_H_ +#define _RTLD_LOCK_H_ + +#define RTLI_VERSION 0x01 + +struct RtldLockInfo +{ + unsigned int rtli_version; + void *(*lock_create)(void); + void (*lock_destroy)(void *); + void (*rlock_acquire)(void *); + void (*wlock_acquire)(void *); + void (*lock_release)(void *); + int (*thread_set_flag)(int); + int (*thread_clr_flag)(int); + void (*at_fork)(void); +}; + +extern void _rtld_thread_init(struct RtldLockInfo *); + +#ifdef IN_RTLD + +struct rtld_lock; +typedef struct rtld_lock *rtld_lock_t; + +extern rtld_lock_t rtld_bind_lock; +extern rtld_lock_t rtld_libc_lock; + +int rlock_acquire(rtld_lock_t); +int wlock_acquire(rtld_lock_t); +void rlock_release(rtld_lock_t, int); +void wlock_release(rtld_lock_t, int); + +#endif /* IN_RTLD */ + +#endif diff --git a/src/bin/rtld-elf/rtld_machdep.h b/src/bin/rtld-elf/rtld_machdep.h new file mode 100644 index 0000000..3bc939e --- /dev/null +++ b/src/bin/rtld-elf/rtld_machdep.h @@ -0,0 +1,77 @@ +/*- + * Copyright (c) 1999, 2000 John D. Polstra. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD: src/libexec/rtld-elf/i386/rtld_machdep.h,v 1.10 2004/08/03 08:50:59 dfr Exp $ + */ + +#ifndef RTLD_MACHDEP_H +#define RTLD_MACHDEP_H 1 + +#include +#include + +#define CACHE_LINE_SIZE 32 + +struct Struct_Obj_Entry; + +/* Return the address of the .dynamic section in the dynamic linker. */ +#define rtld_dynamic(obj) \ + ((const Elf_Dyn *)((obj)->relocbase + (Elf_Addr)&_DYNAMIC)) + +/* Fixup the jump slot at "where" to transfer control to "target". 
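+ * On i386 a jump slot carries no addend; the fixup is simply a store of
+ * the absolute target address into the GOT entry, as the inline below does.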
*/ +static inline Elf_Addr +reloc_jmpslot(Elf_Addr *where, Elf_Addr target, + const struct Struct_Obj_Entry *obj, + const struct Struct_Obj_Entry *refobj, const Elf_Rel *rel) +{ + dbg("reloc_jmpslot: *%p = %p", (void *)(where), + (void *)(target)); + (*(Elf_Addr *)(where) = (Elf_Addr)(target)); + return target; +} + +#define make_function_pointer(def, defobj) \ + ((defobj)->relocbase + (def)->st_value) + +#define call_initfini_pointer(obj, target) \ + (((InitFunc)(target))()) + +#define round(size, align) \ + (((size) + (align) - 1) & ~((align) - 1)) +#define calculate_first_tls_offset(size, align) \ + round(size, align) +#define calculate_tls_offset(prev_offset, prev_size, size, align) \ + round((prev_offset) + (size), align) +#define calculate_tls_end(off, size) (off) + +typedef struct { + unsigned long ti_module; + unsigned long ti_offset; +} tls_index; + +extern void *___tls_get_addr(tls_index *ti) __attribute__((__regparm__(1))); +extern void *__tls_get_addr(tls_index *ti); + +#endif diff --git a/src/bin/rtld-elf/rtld_start.S b/src/bin/rtld-elf/rtld_start.S new file mode 100644 index 0000000..65d900d --- /dev/null +++ b/src/bin/rtld-elf/rtld_start.S @@ -0,0 +1,91 @@ +/*- + * Copyright 1996-1998 John D. Polstra. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * $FreeBSD: src/libexec/rtld-elf/i386/rtld_start.S,v 1.4 2005/05/19 07:32:42 dfr Exp $ + */ + + .text + .align 4 + .globl .rtld_start + .type .rtld_start,@function +.rtld_start: + xorl %ebp,%ebp # Clear frame pointer for good form + movl %esp,%eax # Save initial stack pointer + movl %esp,%esi # Save initial stack pointer + andl $0xfffffff0,%esp # Align stack pointer + subl $16,%esp # A place to store exit procedure addr + movl %esp,%ebx # save address of exit proc + movl %esp,%ecx # construct address of obj_main + addl $4,%ecx + subl $4,%esp # Keep stack aligned + pushl %ecx # Pass address of obj_main + pushl %ebx # Pass address of exit proc + pushl %eax # Pass initial stack pointer to rtld + call _rtld@PLT # Call rtld(sp); returns entry point + addl $16,%esp # Remove arguments from stack + popl %edx # Get exit procedure address + movl %esi,%esp # Ignore obj_main +/* + * At this point, %eax contains the entry point of the main program, and + * %edx contains a pointer to a termination function that should be + * registered with atexit(). (crt1.o registers it.) + */ +.globl .rtld_goto_main +.rtld_goto_main: # This symbol exists just to make debugging easier. + jmp *%eax # Enter main program + + +/* + * Binder entry point. Control is transferred to here by code in the PLT. + * On entry, there are two arguments on the stack. In ascending address + * order, they are (1) "obj", a pointer to the calling object's Obj_Entry, + * and (2) "reloff", the byte offset of the appropriate relocation entry + * in the PLT relocation table. + * + * We are careful to preserve all registers, even the the caller-save + * registers. That is because this code may be invoked by low-level + * assembly-language code that is not ABI-compliant. + */ + .align 4 + .globl _rtld_bind_start + .type _rtld_bind_start,@function +_rtld_bind_start: + pushf # Save eflags + pushl %eax # Save %eax + pushl %edx # Save %edx + pushl %ecx # Save %ecx + pushl 20(%esp) # Copy reloff argument + pushl 20(%esp) # Copy obj argument + + call _rtld_bind@PLT # Transfer control to the binder + /* Now %eax contains the entry point of the function being called. */ + + addl $8,%esp # Discard binder arguments + movl %eax,20(%esp) # Store target over obj argument + popl %ecx # Restore %ecx + popl %edx # Restore %edx + popl %eax # Restore %eax + popf # Restore eflags + leal 4(%esp),%esp # Discard reloff, do not change eflags + ret # "Return" to target address diff --git a/src/bin/rtld-elf/rtld_tls.h b/src/bin/rtld-elf/rtld_tls.h new file mode 100644 index 0000000..4e17934 --- /dev/null +++ b/src/bin/rtld-elf/rtld_tls.h @@ -0,0 +1,69 @@ +/*- + * Copyright (c) 2004 Doug Rabson + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD: src/libexec/rtld-elf/rtld_tls.h,v 1.1 2004/08/03 08:50:58 dfr Exp $ + */ + +/* + * Semi-public interface from thread libraries to rtld for managing + * TLS. + */ + +#ifndef _RTLD_TLS_H_ +#define _RTLD_TLS_H_ + +/* + * Allocate a TLS block for a new thread. The memory allocated will + * include 'tcbsize' bytes aligned to a 'tcbalign' boundary (in bytes) + * for the thread library's private purposes. The location of the TCB + * block is returned by this function. For architectures using + * 'Variant I' TLS, the thread local storage follows the TCB, and for + * 'Variant II', the thread local storage precedes it. For + * architectures using the 'Variant II' model (e.g. i386, amd64, + * sparc64), the TCB must begin with two pointer fields which are used + * by rtld for its TLS implementation. For the 'Variant I' model, the + * TCB must begin with a single pointer field for rtld's + * implementation. + * + * If the value of 'oldtls' is non-NULL, the new TLS block will be + * initialised using the values contained in 'oldtls' and 'oldtls' + * will be freed. This is typically used when initialising a thread + * library to migrate from using the initial bootstrap TLS block + * created by rtld to one which contains suitable thread library + * private data. + * + * The value returned from this function is suitable for installing + * directly into the thread pointer register. + */ +extern void *_rtld_allocate_tls(void* oldtls, size_t tcbsize, size_t tcbalign); + +/* + * Free a TLS block allocated using _rtld_allocate_tls(). The tcbsize + * and tcbalign parameters must be the same as those used to allocate + * the block. + */ +extern void _rtld_free_tls(void *tcb, size_t tcbsize, size_t tcbalign); + +#endif diff --git a/src/bin/rtld-elf/xmalloc.c b/src/bin/rtld-elf/xmalloc.c new file mode 100644 index 0000000..785e162 --- /dev/null +++ b/src/bin/rtld-elf/xmalloc.c @@ -0,0 +1,59 @@ +/*- + * Copyright 1996-1998 John D. Polstra. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD: src/libexec/rtld-elf/xmalloc.c,v 1.3 2003/06/19 05:28:26 mdodd Exp $
+ */
+
+#include <err.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+void *xcalloc(size_t);
+void *xmalloc(size_t);
+char *xstrdup(const char *);
+
+void *
+xcalloc(size_t size)
+{
+    return memset(xmalloc(size), 0, size);
+}
+
+void *
+xmalloc(size_t size)
+{
+    void *p = malloc(size);
+    if (p == NULL)
+	err(1, "Out of memory");
+    return p;
+}
+
+char *
+xstrdup(const char *s)
+{
+    char *p = strdup(s);
+    if (p == NULL)
+	err(1, "Out of memory");
+    return p;
+}
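+
+/*
+ * Note: these wrappers terminate the process with err(1, ...) rather than
+ * returning NULL, so callers such as the NEW() and CNEW() macros in rtld.h
+ * never need to check for allocation failure.
+ */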