/* dynlink.c — dynamic linker. (Recovered from a git-blame web view;
 * blame-table markup stripped.) */
320054e8 | 1 | #define _GNU_SOURCE |
d4db3fa2 | 2 | #define SYSCALL_NO_TLS 1 |
320054e8 DG |
3 | #include <stdio.h> |
4 | #include <stdlib.h> | |
5 | #include <stdarg.h> | |
6 | #include <stddef.h> | |
7 | #include <string.h> | |
8 | #include <unistd.h> | |
9 | #include <stdint.h> | |
10 | #include <elf.h> | |
11 | #include <sys/mman.h> | |
12 | #include <limits.h> | |
13 | #include <fcntl.h> | |
14 | #include <sys/stat.h> | |
15 | #include <errno.h> | |
16 | #include <link.h> | |
17 | #include <setjmp.h> | |
18 | #include <pthread.h> | |
19 | #include <ctype.h> | |
20 | #include <dlfcn.h> | |
f41256b6 DG |
21 | #include <semaphore.h> |
22 | #include <sys/membarrier.h> | |
320054e8 DG |
23 | #include "pthread_impl.h" |
24 | #include "libc.h" | |
25 | #include "dynlink.h" | |
26 | #include "malloc_impl.h" | |
27 | ||
static void error(const char *, ...);

/* Maximum of two powers of two (two's-complement bit trick; both
 * arguments must themselves be powers of two). */
#define MAXP2(a,b) (-(-(a)&-(b)))
/* Round x up to a multiple of y; y must be a power of two. */
#define ALIGN(x,y) ((x)+(y)-1 & -(y))

/* Recover a pointer to the enclosing object of type t from a pointer
 * p to its member m. */
#define container_of(p,t,m) ((t*)((char *)(p)-offsetof(t,m)))
/* Element count of a true array (not valid on pointers). */
#define countof(a) ((sizeof (a))/(sizeof (a)[0]))
320054e8 DG |
/* Module-list descriptor exposed to debuggers via _dl_debug_addr
 * below.  NOTE(review): layout appears to mirror the traditional
 * r_debug protocol (version, link-map head, breakpoint hook, state,
 * ldso base) — confirm against the debugger interface before relying
 * on field semantics. */
struct debug {
	int ver;
	void *head;
	void (*bp)(void);
	int state;
	void *base;
};
43 | ||
/* One dynamically-allocated TLSDESC argument record: args[] holds the
 * (tls module id, offset) pair handed to __tlsdesc_dynamic (see
 * do_relocs).  Nodes are chained per-dso via dso->td_index so they can
 * be released with the module. */
struct td_index {
	size_t args[2];
	struct td_index *next;
};
48 | ||
/* Per-module state for one loaded ELF object, including the main
 * program and ldso itself.  A dso participates in several intrusive
 * lists at once (next/prev load order, syms_next symbol-search order,
 * lazy_next deferred-reloc list, fini_next), which is why list links
 * and payload share one struct. */
struct dso {
#if DL_FDPIC
	struct fdpic_loadmap *loadmap;	/* per-segment address map (FDPIC) */
#else
	unsigned char *base;		/* load bias used by laddr() */
#endif
	char *name;			/* pathname the module was loaded from */
	size_t *dynv;			/* PT_DYNAMIC contents */
	struct dso *next, *prev;	/* global load-order list */

	Phdr *phdr;			/* program headers (for dl_iterate_phdr) */
	int phnum;
	size_t phentsize;
	Sym *syms;			/* dynamic symbol table (DT_SYMTAB) */
	Elf_Symndx *hashtab;		/* SysV hash table, if present */
	uint32_t *ghashtab;		/* GNU hash table, if present */
	int16_t *versym;		/* symbol version words, if present */
	char *strings;			/* dynamic string table (DT_STRTAB) */
	struct dso *syms_next, *lazy_next;
	size_t *lazy, lazy_cnt;		/* deferred relocs, triples of
					 * (offset, info, addend) — see do_relocs */
	unsigned char *map;		/* base of the whole file mapping */
	size_t map_len;
	dev_t dev;			/* identity of the backing file;
					 * presumably used to detect an
					 * already-loaded library — confirm
					 * in load_library */
	ino_t ino;
	char relocated;
	char constructed;
	char kernel_mapped;		/* mapped by the kernel, not map_library */
	char mark;			/* scratch flag for graph traversals */
	char bfs_built;
	char runtime_loaded;		/* loaded after startup (dlopen) */
	struct dso **deps, *needed_by;
	size_t ndeps_direct;
	size_t next_dep;
	int ctor_visitor;
	char *rpath_orig, *rpath;	/* raw and $ORIGIN-expanded rpath */
	struct tls_module tls;
	size_t tls_id;			/* 1-based TLS module id; 0 = no TLS */
	size_t relro_start, relro_end;	/* PT_GNU_RELRO extent, page-aligned */
	uintptr_t *new_dtv;
	unsigned char *new_tls;
	struct td_index *td_index;	/* TLSDESC records to free with module */
	struct dso *fini_next;
	char *shortname;
	/* The member not selected by the #if at the top is still declared
	 * here so generic code compiles; only one of base/loadmap is
	 * meaningful for a given build. */
#if DL_FDPIC
	unsigned char *base;
#else
	struct fdpic_loadmap *loadmap;
#endif
	struct funcdesc {
		void *addr;
		size_t *got;
	} *funcdescs;			/* FDPIC function descriptors */
	size_t *got;
	char buf[];			/* flexible tail for variable-length data */
};
104 | ||
/* Result of a symbol search: the matched symbol together with the
 * module that defines it (both null/zero when not found). */
struct symdef {
	Sym *sym;
	struct dso *dso;
};
109 | ||
/* Statically-allocated initial TLS block, usable before any dynamic
 * allocation is possible.  The leading char forces pt to land at
 * offsetof == the alignment requirement of struct pthread, which
 * MIN_TLS_ALIGN captures below. */
static struct builtin_tls {
	char c;
	struct pthread pt;
	void *space[16];
} builtin_tls[1];
#define MIN_TLS_ALIGN offsetof(struct builtin_tls, pt)
116 | ||
#define ADDEND_LIMIT 4096
/* REL-format addends for ldso's own relocations are saved here so the
 * second relocation pass can reuse them after the inline values have
 * been clobbered (see do_relocs). */
static size_t *saved_addends, *apply_addends_to;

static struct dso ldso;
static struct dso *head, *tail, *fini_head, *syms_tail, *lazy_head;
static char *env_path, *sys_path;
static unsigned long long gencnt;
static int runtime;		/* nonzero for runtime (post-startup) operation;
				 * gates longjmp-based error handling below */
static int ldd_mode;
static int ldso_fail;
static int noload;
static int shutting_down;
static jmp_buf *rtld_fail;	/* longjmp target for fatal runtime errors */
static pthread_rwlock_t lock;
static struct debug debug;
static struct tls_module *tls_tail;
static size_t tls_cnt, tls_offset, tls_align = MIN_TLS_ALIGN;
static size_t static_tls_cnt;
static pthread_mutex_t init_fini_lock;
static pthread_cond_t ctor_cond;
static struct dso *builtin_deps[2];
static struct dso *const no_deps[1];
static struct dso *builtin_ctor_queue[4];
static struct dso **main_ctor_queue;
static struct fdpic_loadmap *app_loadmap;
static struct fdpic_dummy_loadmap app_dummy_loadmap;

/* Exported so debuggers can locate the module list. */
struct debug *_dl_debug_addr = &debug;

extern hidden int __malloc_replaced;

hidden void (*const __init_array_start)(void)=0, (*const __fini_array_start)(void)=0;

extern hidden void (*const __init_array_end)(void), (*const __fini_array_end)(void);

/* When no init/fini arrays are provided, the end symbols alias the
 * (null) start symbols, yielding empty arrays. */
weak_alias(__init_array_start, __init_array_end);
weak_alias(__fini_array_start, __fini_array_end);
154 | ||
/* Local strcmp replacement so symbol resolution never depends on an
 * interposable libc strcmp; the macro below routes all strcmp calls in
 * this file through it.  Standard strcmp contract: <0, 0, >0. */
static int dl_strcmp(const char *l, const char *r)
{
	while (*l && *l == *r) {
		l++;
		r++;
	}
	return *(unsigned char *)l - *(unsigned char *)r;
}
#define strcmp(l,r) dl_strcmp(l,r)
161 | ||
/* Compute load address for a virtual address in a given dso. */
#if DL_FDPIC
static void *laddr(const struct dso *p, size_t v)
{
	size_t j=0;
	if (!p->loadmap) return p->base + v;
	/* Find the segment containing v; unsigned wrap in the subtraction
	 * rejects addresses below a segment's base. */
	for (j=0; v-p->loadmap->segs[j].p_vaddr >= p->loadmap->segs[j].p_memsz; j++);
	return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
}
/* Like laddr, but matches v anywhere within the page-rounded extent of
 * a segment, for page-granular operations (e.g. reclaim). */
static void *laddr_pg(const struct dso *p, size_t v)
{
	size_t j=0;
	size_t pgsz = PAGE_SIZE;
	if (!p->loadmap) return p->base + v;
	for (j=0; ; j++) {
		size_t a = p->loadmap->segs[j].p_vaddr;
		size_t b = a + p->loadmap->segs[j].p_memsz;
		a &= -pgsz;
		b += pgsz-1;
		b &= -pgsz;
		if (v-a<b-a) break;
	}
	return (void *)(v - p->loadmap->segs[j].p_vaddr + p->loadmap->segs[j].addr);
}
/* On FDPIC, a "function address" is a descriptor pairing the code
 * address with the module's GOT. */
#define fpaddr(p, v) ((void (*)())&(struct funcdesc){ \
	laddr(p, v), (p)->got })
#else
#define laddr(p, v) (void *)((p)->base + (v))
#define laddr_pg(p, v) laddr(p, v)
#define fpaddr(p, v) ((void (*)())laddr(p, v))
#endif
193 | ||
/* Decode a zero-terminated array of (tag, value) pairs — e.g. a
 * PT_DYNAMIC table — into a: a[tag] = value for each tag in 1..cnt-1,
 * with a[0] serving as a bitmask of the tags seen. */
static void decode_vec(size_t *v, size_t *a, size_t cnt)
{
	/* Clear every output slot first. */
	memset(a, 0, cnt * sizeof *a);
	/* Walk pairs until the terminating zero tag. */
	while (v[0]) {
		size_t tag = v[0], val = v[1];
		/* Unsigned trick: accepts tags 1..cnt-1, rejects >= cnt. */
		if (tag-1 < cnt-1) {
			a[0] |= 1UL << tag;
			a[tag] = val;
		}
		v += 2;
	}
}
203 | ||
/* Linear search of a zero-terminated (tag, value) pair array for
 * `key`; stores the value through r and returns 1 if found, else 0. */
static int search_vec(size_t *v, size_t *r, size_t key)
{
	while (v[0] != key) {
		if (!v[0]) return 0;
		v += 2;
	}
	*r = v[1];
	return 1;
}
211 | ||
/* Classic SysV ELF symbol hash: shift in 4 bits per character and fold
 * the overflowing top nibble back into the low bits; result is 28 bits. */
static uint32_t sysv_hash(const char *name)
{
	const unsigned char *p = (const void *)name;
	uint_fast32_t h = 0;
	for (; *p; p++) {
		h = (h << 4) + *p;
		h ^= (h >> 24) & 0xf0;
	}
	return h & 0xfffffff;
}
222 | ||
/* GNU hash (the djb2 variant used by DT_GNU_HASH): h = h*33 + c,
 * seeded with 5381, truncated to 32 bits on return. */
static uint32_t gnu_hash(const char *name)
{
	const unsigned char *p = (const void *)name;
	uint_fast32_t h = 5381;
	while (*p)
		h = h*33 + *p++;
	return h;
}
231 | ||
/* Look up symbol s (with precomputed SysV hash h) in dso's SysV hash
 * table.  Table layout per the ELF gABI: [0]=nbucket, [1]=nchain,
 * buckets at [2], chains at [2+nbucket].  Returns the Sym or 0. */
static Sym *sysv_lookup(const char *s, uint32_t h, struct dso *dso)
{
	size_t i;
	Sym *syms = dso->syms;
	Elf_Symndx *hashtab = dso->hashtab;
	char *strings = dso->strings;
	for (i=hashtab[2+h%hashtab[0]]; i; i=hashtab[2+hashtab[0]+i]) {
		/* Entries with negative versym words are skipped. */
		if ((!dso->versym || dso->versym[i] >= 0)
		    && (!strcmp(s, strings+syms[i].st_name)))
			return syms+i;
	}
	return 0;
}
245 | ||
/* Look up s (with precomputed GNU hash h1) in dso's DT_GNU_HASH table.
 * Header layout: [0]=nbuckets, [1]=first hashed symbol index,
 * [2]=bloom filter size in words, [3]=bloom shift; the bloom words,
 * buckets, and per-symbol hash values follow.  The low bit of a stored
 * hash value marks the end of a bucket's chain. */
static Sym *gnu_lookup(uint32_t h1, uint32_t *hashtab, struct dso *dso, const char *s)
{
	uint32_t nbuckets = hashtab[0];
	uint32_t *buckets = hashtab + 4 + hashtab[2]*(sizeof(size_t)/4);
	uint32_t i = buckets[h1 % nbuckets];

	if (!i) return 0;

	uint32_t *hashval = buckets + nbuckets + (i - hashtab[1]);

	/* Compare hashes with the chain-terminator bit forced on in both. */
	for (h1 |= 1; ; i++) {
		uint32_t h2 = *hashval++;
		if ((h1 == (h2|1)) && (!dso->versym || dso->versym[i] >= 0)
		    && !strcmp(s, dso->strings + dso->syms[i].st_name))
			return dso->syms+i;
		if (h2 & 1) break;
	}

	return 0;
}
266 | ||
/* gnu_lookup with a bloom-filter pre-check so definite misses return
 * without touching the buckets.  fofs and fmask are the word index and
 * first-probe bit mask precomputed by the caller from the hash. */
static Sym *gnu_lookup_filtered(uint32_t h1, uint32_t *hashtab, struct dso *dso, const char *s, uint32_t fofs, size_t fmask)
{
	const size_t *bloomwords = (const void *)(hashtab+4);
	size_t f = bloomwords[fofs & (hashtab[2]-1)];
	if (!(f & fmask)) return 0;

	/* Second probe: bit derived from the hash shifted by hashtab[3]. */
	f >>= (h1 >> hashtab[3]) % (8 * sizeof f);
	if (!(f & 1)) return 0;

	return gnu_lookup(h1, hashtab, dso, s);
}
278 | ||
/* Symbol types and bindings acceptable to find_sym. */
#define OK_TYPES (1<<STT_NOTYPE | 1<<STT_OBJECT | 1<<STT_FUNC | 1<<STT_COMMON | 1<<STT_TLS)
#define OK_BINDS (1<<STB_GLOBAL | 1<<STB_WEAK | 1<<STB_GNU_UNIQUE)

/* Arch-specific hook to veto matching undefined symbols; the default
 * rejects none. */
#ifndef ARCH_SYM_REJECT_UND
#define ARCH_SYM_REJECT_UND(s) 0
#endif
285 | ||
/* Search the symbol-resolution chain starting at dso for name s,
 * following syms_next links.  need_def requests an actual definition
 * (undefined entries are skipped).  Returns the first acceptable
 * match; an all-zero symdef means not found.  The GNU hash is always
 * computed up front; the SysV hash lazily, only if some module lacks
 * a GNU hash table. */
static struct symdef find_sym(struct dso *dso, const char *s, int need_def)
{
	uint32_t h = 0, gh = gnu_hash(s), gho = gh / (8*sizeof(size_t)), *ght;
	size_t ghm = 1ul << gh % (8*sizeof(size_t));
	struct symdef def = {0};
	for (; dso; dso=dso->syms_next) {
		Sym *sym;
		if ((ght = dso->ghashtab)) {
			sym = gnu_lookup_filtered(gh, ght, dso, s, gho, ghm);
		} else {
			if (!h) h = sysv_hash(s);
			sym = sysv_lookup(s, h, dso);
		}
		if (!sym) continue;
		/* Undefined entries never satisfy need_def, TLS lookups,
		 * or arch-rejected cases. */
		if (!sym->st_shndx)
			if (need_def || (sym->st_info&0xf) == STT_TLS
			    || ARCH_SYM_REJECT_UND(sym))
				continue;
		/* A zero value is only meaningful for TLS symbols. */
		if (!sym->st_value)
			if ((sym->st_info&0xf) != STT_TLS)
				continue;
		if (!(1<<(sym->st_info&0xf) & OK_TYPES)) continue;
		if (!(1<<(sym->st_info>>4) & OK_BINDS)) continue;
		def.sym = sym;
		def.dso = dso;
		break;
	}
	return def;
}
315 | ||
/* Apply one relocation table to dso.  rel points at records of
 * `stride` words each (2 = REL, 3 = RELA); rel_size is in bytes.
 * During startup, failures are reported via error() and processing
 * continues; at runtime (dlopen) they longjmp to *rtld_fail. */
static void do_relocs(struct dso *dso, size_t *rel, size_t rel_size, size_t stride)
{
	unsigned char *base = dso->base;
	Sym *syms = dso->syms;
	char *strings = dso->strings;
	Sym *sym;
	const char *name;
	void *ctx;
	int type;
	int sym_index;
	struct symdef def;
	size_t *reloc_addr;
	size_t sym_val;
	size_t tls_val;
	size_t addend;
	int skip_relative = 0, reuse_addends = 0, save_slot = 0;

	if (dso == &ldso) {
		/* Only ldso's REL table needs addend saving/reuse. */
		if (rel == apply_addends_to)
			reuse_addends = 1;
		skip_relative = 1;
	}

	for (; rel_size; rel+=stride, rel_size-=stride*sizeof(size_t)) {
		if (skip_relative && IS_RELATIVE(rel[1], dso->syms)) continue;
		type = R_TYPE(rel[1]);
		if (type == REL_NONE) continue;
		reloc_addr = laddr(dso, rel[0]);

		if (stride > 2) {
			addend = rel[2];
		} else if (type==REL_GOT || type==REL_PLT|| type==REL_COPY) {
			addend = 0;
		} else if (reuse_addends) {
			/* Save original addend in stage 2 where the dso
			 * chain consists of just ldso; otherwise read back
			 * saved addend since the inline one was clobbered. */
			if (head==&ldso)
				saved_addends[save_slot] = *reloc_addr;
			addend = saved_addends[save_slot++];
		} else {
			addend = *reloc_addr;
		}

		sym_index = R_SYM(rel[1]);
		if (sym_index) {
			sym = syms + sym_index;
			name = strings + sym->st_name;
			/* COPY relocs search past the head module so they
			 * cannot resolve to the copy itself. */
			ctx = type==REL_COPY ? head->syms_next : head;
			def = (sym->st_info&0xf) == STT_SECTION
				? (struct symdef){ .dso = dso, .sym = sym }
				: find_sym(ctx, name, type==REL_PLT);
			if (!def.sym && (sym->st_shndx != SHN_UNDEF
			    || sym->st_info>>4 != STB_WEAK)) {
				/* GOT/PLT relocs may be deferred into the
				 * module's lazy table for a later pass. */
				if (dso->lazy && (type==REL_PLT || type==REL_GOT)) {
					dso->lazy[3*dso->lazy_cnt+0] = rel[0];
					dso->lazy[3*dso->lazy_cnt+1] = rel[1];
					dso->lazy[3*dso->lazy_cnt+2] = addend;
					dso->lazy_cnt++;
					continue;
				}
				error("Error relocating %s: %s: symbol not found",
					dso->name, name);
				if (runtime) longjmp(*rtld_fail, 1);
				continue;
			}
		} else {
			sym = 0;
			def.sym = 0;
			def.dso = dso;
		}

		sym_val = def.sym ? (size_t)laddr(def.dso, def.sym->st_value) : 0;
		tls_val = def.sym ? def.sym->st_value : 0;

		/* Initial-exec TLS cannot reference a module outside the
		 * static TLS block once the program is running. */
		if ((type == REL_TPOFF || type == REL_TPOFF_NEG)
		    && runtime && def.dso->tls_id > static_tls_cnt) {
			error("Error relocating %s: %s: initial-exec TLS "
				"resolves to dynamic definition in %s",
				dso->name, name, def.dso->name);
			longjmp(*rtld_fail, 1);
		}

		switch(type) {
		case REL_NONE:
			break;
		case REL_OFFSET:
			addend -= (size_t)reloc_addr;
			/* fallthrough */
		case REL_SYMBOLIC:
		case REL_GOT:
		case REL_PLT:
			*reloc_addr = sym_val + addend;
			break;
		case REL_RELATIVE:
			*reloc_addr = (size_t)base + addend;
			break;
		case REL_SYM_OR_REL:
			if (sym) *reloc_addr = sym_val + addend;
			else *reloc_addr = (size_t)base + addend;
			break;
		case REL_COPY:
			memcpy(reloc_addr, (void *)sym_val, sym->st_size);
			break;
		case REL_OFFSET32:
			*(uint32_t *)reloc_addr = sym_val + addend
				- (size_t)reloc_addr;
			break;
		case REL_FUNCDESC:
			*reloc_addr = def.sym ? (size_t)(def.dso->funcdescs
				+ (def.sym - def.dso->syms)) : 0;
			break;
		case REL_FUNCDESC_VAL:
			if ((sym->st_info&0xf) == STT_SECTION) *reloc_addr += sym_val;
			else *reloc_addr = sym_val;
			reloc_addr[1] = def.sym ? (size_t)def.dso->got : 0;
			break;
		case REL_DTPMOD:
			*reloc_addr = def.dso->tls_id;
			break;
		case REL_DTPOFF:
			*reloc_addr = tls_val + addend - DTP_OFFSET;
			break;
#ifdef TLS_ABOVE_TP
		case REL_TPOFF:
			*reloc_addr = tls_val + def.dso->tls.offset + TPOFF_K + addend;
			break;
#else
		case REL_TPOFF:
			*reloc_addr = tls_val - def.dso->tls.offset + addend;
			break;
		case REL_TPOFF_NEG:
			*reloc_addr = def.dso->tls.offset - tls_val + addend;
			break;
#endif
		case REL_TLSDESC:
			if (stride<3) addend = reloc_addr[1];
			if (runtime && def.dso->tls_id > static_tls_cnt) {
				struct td_index *new = malloc(sizeof *new);
				if (!new) {
					error(
					"Error relocating %s: cannot allocate TLSDESC for %s",
					dso->name, sym ? name : "(local)" );
					longjmp(*rtld_fail, 1);
				}
				new->next = dso->td_index;
				dso->td_index = new;
				new->args[0] = def.dso->tls_id;
				new->args[1] = tls_val + addend - DTP_OFFSET;
				reloc_addr[0] = (size_t)__tlsdesc_dynamic;
				reloc_addr[1] = (size_t)new;
			} else {
				reloc_addr[0] = (size_t)__tlsdesc_static;
#ifdef TLS_ABOVE_TP
				reloc_addr[1] = tls_val + def.dso->tls.offset
					+ TPOFF_K + addend;
#else
				reloc_addr[1] = tls_val - def.dso->tls.offset
					+ addend;
#endif
			}
#ifdef TLSDESC_BACKWARDS
			/* Some archs (32-bit ARM at least) invert the order of
			 * the descriptor members. Fix them up here. */
			size_t tmp = reloc_addr[0];
			reloc_addr[0] = reloc_addr[1];
			reloc_addr[1] = tmp;
#endif
			break;
		default:
			error("Error relocating %s: unsupported relocation type %d",
				dso->name, type);
			if (runtime) longjmp(*rtld_fail, 1);
			continue;
		}
	}
}
493 | ||
/* Re-attempt relocations that were deferred because their targets were
 * unavailable.  Modules whose lazy table still has entries afterwards
 * are pushed back onto lazy_head; fully resolved modules release it. */
static void redo_lazy_relocs()
{
	struct dso *p = lazy_head, *next;
	lazy_head = 0;
	for (; p; p=next) {
		next = p->lazy_next;
		size_t size = p->lazy_cnt*3*sizeof(size_t);
		/* Reset the count; do_relocs re-appends entries that must
		 * remain deferred. */
		p->lazy_cnt = 0;
		do_relocs(p, p->lazy, size, 3);
		if (p->lazy_cnt) {
			p->lazy_next = lazy_head;
			lazy_head = p;
		} else {
			free(p->lazy);
			p->lazy = 0;
			p->lazy_next = 0;
		}
	}
}
513 | ||
/* A huge hack: to make up for the wastefulness of shared libraries
 * needing at least a page of dirty memory even if they have no global
 * data, we reclaim the gaps at the beginning and end of writable maps
 * and "donate" them to the heap. */

static void reclaim(struct dso *dso, size_t start, size_t end)
{
	/* Never donate any part of the RELRO region: clamp the range so
	 * it falls entirely outside [relro_start, relro_end). */
	if (start >= dso->relro_start && start < dso->relro_end) start = dso->relro_end;
	if (end >= dso->relro_start && end < dso->relro_end) end = dso->relro_start;
	if (start >= end) return;
	char *base = laddr_pg(dso, start);
	__malloc_donate(base, base+(end-start));
}
527 | ||
/* Donate the unused head and tail slack of every read+write PT_LOAD
 * segment (between the segment bounds and their page boundaries). */
static void reclaim_gaps(struct dso *dso)
{
	Phdr *ph = dso->phdr;
	size_t phcnt = dso->phnum;

	for (; phcnt--; ph=(void *)((char *)ph+dso->phentsize)) {
		if (ph->p_type!=PT_LOAD) continue;
		if ((ph->p_flags&(PF_R|PF_W))!=(PF_R|PF_W)) continue;
		/* Gap from the page boundary up to the segment start... */
		reclaim(dso, ph->p_vaddr & -PAGE_SIZE, ph->p_vaddr);
		/* ...and from the segment end to the next page boundary. */
		reclaim(dso, ph->p_vaddr+ph->p_memsz,
			ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE);
	}
}
541 | ||
542 | static void *mmap_fixed(void *p, size_t n, int prot, int flags, int fd, off_t off) | |
543 | { | |
544 | static int no_map_fixed; | |
545 | char *q; | |
546 | if (!no_map_fixed) { | |
547 | q = mmap(p, n, prot, flags|MAP_FIXED, fd, off); | |
548 | if (!DL_NOMMU_SUPPORT || q != MAP_FAILED || errno != EINVAL) | |
549 | return q; | |
550 | no_map_fixed = 1; | |
551 | } | |
552 | /* Fallbacks for MAP_FIXED failure on NOMMU kernels. */ | |
553 | if (flags & MAP_ANONYMOUS) { | |
554 | memset(p, 0, n); | |
555 | return p; | |
556 | } | |
557 | ssize_t r; | |
558 | if (lseek(fd, off, SEEK_SET) < 0) return MAP_FAILED; | |
559 | for (q=p; n; q+=r, off+=r, n-=r) { | |
560 | r = read(fd, q, n); | |
561 | if (r < 0 && errno != EINTR) return MAP_FAILED; | |
562 | if (!r) { | |
563 | memset(q, 0, n); | |
564 | break; | |
565 | } | |
566 | } | |
567 | return p; | |
568 | } | |
569 | ||
/* Undo map_library: unmap either the per-segment FDPIC mappings (and
 * free the loadmap) or the single contiguous mapping. */
static void unmap_library(struct dso *dso)
{
	if (dso->loadmap) {
		size_t i;
		for (i=0; i<dso->loadmap->nsegs; i++) {
			if (!dso->loadmap->segs[i].p_memsz)
				continue;
			munmap((void *)dso->loadmap->segs[i].addr,
				dso->loadmap->segs[i].p_memsz);
		}
		free(dso->loadmap);
	} else if (dso->map && dso->map_len) {
		munmap(dso->map, dso->map_len);
	}
}
585 | ||
/* Map an ELF object from fd into memory, filling in dso's base, map,
 * phdr, dynv, tls and relro fields.  Handles both regular (single
 * contiguous reservation re-mapped per segment) and FDPIC (separately
 * mapped segments recorded in a loadmap) layouts.  Returns the mapping
 * base, or 0 with errno set (ENOEXEC for malformed files). */
static void *map_library(int fd, struct dso *dso)
{
	/* Ehdr plus ~896 bytes of program headers read in one go. */
	Ehdr buf[(896+sizeof(Ehdr))/sizeof(Ehdr)];
	void *allocated_buf=0;
	size_t phsize;
	size_t addr_min=SIZE_MAX, addr_max=0, map_len;
	size_t this_min, this_max;
	size_t nsegs = 0;
	off_t off_start;
	Ehdr *eh;
	Phdr *ph, *ph0;
	unsigned prot;
	unsigned char *map=MAP_FAILED, *base;
	size_t dyn=0;
	size_t tls_image=0;
	size_t i;

	ssize_t l = read(fd, buf, sizeof buf);
	eh = buf;
	if (l<0) return 0;
	if (l<sizeof *eh || (eh->e_type != ET_DYN && eh->e_type != ET_EXEC))
		goto noexec;
	phsize = eh->e_phentsize * eh->e_phnum;
	/* Get the program headers: from a heap buffer if too big, via
	 * pread if not fully covered by the initial read, or directly
	 * from the initial read. */
	if (phsize > sizeof buf - sizeof *eh) {
		allocated_buf = malloc(phsize);
		if (!allocated_buf) return 0;
		l = pread(fd, allocated_buf, phsize, eh->e_phoff);
		if (l < 0) goto error;
		if (l != phsize) goto noexec;
		ph = ph0 = allocated_buf;
	} else if (eh->e_phoff + phsize > l) {
		l = pread(fd, buf+1, phsize, eh->e_phoff);
		if (l < 0) goto error;
		if (l != phsize) goto noexec;
		ph = ph0 = (void *)(buf + 1);
	} else {
		ph = ph0 = (void *)((char *)buf + eh->e_phoff);
	}
	/* First pass: record PT_DYNAMIC/PT_TLS/PT_GNU_RELRO/PT_GNU_STACK
	 * info and compute the extent of the PT_LOAD segments. */
	for (i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
		if (ph->p_type == PT_DYNAMIC) {
			dyn = ph->p_vaddr;
		} else if (ph->p_type == PT_TLS) {
			tls_image = ph->p_vaddr;
			dso->tls.align = ph->p_align;
			dso->tls.len = ph->p_filesz;
			dso->tls.size = ph->p_memsz;
		} else if (ph->p_type == PT_GNU_RELRO) {
			dso->relro_start = ph->p_vaddr & -PAGE_SIZE;
			dso->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
		} else if (ph->p_type == PT_GNU_STACK) {
			if (!runtime && ph->p_memsz > __default_stacksize) {
				__default_stacksize =
					ph->p_memsz < DEFAULT_STACK_MAX ?
					ph->p_memsz : DEFAULT_STACK_MAX;
			}
		}
		if (ph->p_type != PT_LOAD) continue;
		nsegs++;
		if (ph->p_vaddr < addr_min) {
			addr_min = ph->p_vaddr;
			off_start = ph->p_offset;
			prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
				((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
				((ph->p_flags&PF_X) ? PROT_EXEC : 0));
		}
		if (ph->p_vaddr+ph->p_memsz > addr_max) {
			addr_max = ph->p_vaddr+ph->p_memsz;
		}
	}
	if (!dyn) goto noexec;
	if (DL_FDPIC && !(eh->e_flags & FDPIC_CONSTDISP_FLAG)) {
		/* FDPIC: map each PT_LOAD independently and record the
		 * placements in a loadmap for laddr(). */
		dso->loadmap = calloc(1, sizeof *dso->loadmap
			+ nsegs * sizeof *dso->loadmap->segs);
		if (!dso->loadmap) goto error;
		dso->loadmap->nsegs = nsegs;
		for (ph=ph0, i=0; i<nsegs; ph=(void *)((char *)ph+eh->e_phentsize)) {
			if (ph->p_type != PT_LOAD) continue;
			prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
				((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
				((ph->p_flags&PF_X) ? PROT_EXEC : 0));
			map = mmap(0, ph->p_memsz + (ph->p_vaddr & PAGE_SIZE-1),
				prot, MAP_PRIVATE,
				fd, ph->p_offset & -PAGE_SIZE);
			if (map == MAP_FAILED) {
				unmap_library(dso);
				goto error;
			}
			dso->loadmap->segs[i].addr = (size_t)map +
				(ph->p_vaddr & PAGE_SIZE-1);
			dso->loadmap->segs[i].p_vaddr = ph->p_vaddr;
			dso->loadmap->segs[i].p_memsz = ph->p_memsz;
			i++;
			if (prot & PROT_WRITE) {
				/* Zero the bss tail of this segment and map
				 * anonymous pages past the file-backed part. */
				size_t brk = (ph->p_vaddr & PAGE_SIZE-1)
					+ ph->p_filesz;
				size_t pgbrk = brk + PAGE_SIZE-1 & -PAGE_SIZE;
				size_t pgend = brk + ph->p_memsz - ph->p_filesz
					+ PAGE_SIZE-1 & -PAGE_SIZE;
				if (pgend > pgbrk && mmap_fixed(map+pgbrk,
					pgend-pgbrk, prot,
					MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS,
					-1, off_start) == MAP_FAILED)
					goto error;
				memset(map + brk, 0, pgbrk-brk);
			}
		}
		map = (void *)dso->loadmap->segs[0].addr;
		map_len = 0;
		goto done_mapping;
	}
	addr_max += PAGE_SIZE-1;
	addr_max &= -PAGE_SIZE;
	addr_min &= -PAGE_SIZE;
	off_start &= -PAGE_SIZE;
	map_len = addr_max - addr_min + off_start;
	/* The first time, we map too much, possibly even more than
	 * the length of the file. This is okay because we will not
	 * use the invalid part; we just need to reserve the right
	 * amount of virtual address space to map over later. */
	map = DL_NOMMU_SUPPORT
		? mmap((void *)addr_min, map_len, PROT_READ|PROT_WRITE|PROT_EXEC,
			MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
		: mmap((void *)addr_min, map_len, prot,
			MAP_PRIVATE, fd, off_start);
	if (map==MAP_FAILED) goto error;
	dso->map = map;
	dso->map_len = map_len;
	/* If the loaded file is not relocatable and the requested address is
	 * not available, then the load operation must fail. */
	if (eh->e_type != ET_DYN && addr_min && map!=(void *)addr_min) {
		errno = EBUSY;
		goto error;
	}
	base = map - addr_min;
	dso->phdr = 0;
	dso->phnum = 0;
	/* Second pass: map each PT_LOAD over the reservation with its
	 * proper protections, and zero/extend writable bss tails. */
	for (ph=ph0, i=eh->e_phnum; i; i--, ph=(void *)((char *)ph+eh->e_phentsize)) {
		if (ph->p_type != PT_LOAD) continue;
		/* Check if the programs headers are in this load segment, and
		 * if so, record the address for use by dl_iterate_phdr. */
		if (!dso->phdr && eh->e_phoff >= ph->p_offset
		    && eh->e_phoff+phsize <= ph->p_offset+ph->p_filesz) {
			dso->phdr = (void *)(base + ph->p_vaddr
				+ (eh->e_phoff-ph->p_offset));
			dso->phnum = eh->e_phnum;
			dso->phentsize = eh->e_phentsize;
		}
		this_min = ph->p_vaddr & -PAGE_SIZE;
		this_max = ph->p_vaddr+ph->p_memsz+PAGE_SIZE-1 & -PAGE_SIZE;
		off_start = ph->p_offset & -PAGE_SIZE;
		prot = (((ph->p_flags&PF_R) ? PROT_READ : 0) |
			((ph->p_flags&PF_W) ? PROT_WRITE: 0) |
			((ph->p_flags&PF_X) ? PROT_EXEC : 0));
		/* Reuse the existing mapping for the lowest-address LOAD */
		if ((ph->p_vaddr & -PAGE_SIZE) != addr_min || DL_NOMMU_SUPPORT)
			if (mmap_fixed(base+this_min, this_max-this_min, prot, MAP_PRIVATE|MAP_FIXED, fd, off_start) == MAP_FAILED)
				goto error;
		if (ph->p_memsz > ph->p_filesz && (ph->p_flags&PF_W)) {
			size_t brk = (size_t)base+ph->p_vaddr+ph->p_filesz;
			size_t pgbrk = brk+PAGE_SIZE-1 & -PAGE_SIZE;
			memset((void *)brk, 0, pgbrk-brk & PAGE_SIZE-1);
			if (pgbrk-(size_t)base < this_max && mmap_fixed((void *)pgbrk, (size_t)base+this_max-pgbrk, prot, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) == MAP_FAILED)
				goto error;
		}
	}
	/* DT_TEXTREL means relocations will write into otherwise
	 * read-only segments: open up the whole mapping. */
	for (i=0; ((size_t *)(base+dyn))[i]; i+=2)
		if (((size_t *)(base+dyn))[i]==DT_TEXTREL) {
			if (mprotect(map, map_len, PROT_READ|PROT_WRITE|PROT_EXEC)
			    && errno != ENOSYS)
				goto error;
			break;
		}
done_mapping:
	dso->base = base;
	dso->dynv = laddr(dso, dyn);
	if (dso->tls.size) dso->tls.image = laddr(dso, tls_image);
	free(allocated_buf);
	return map;
noexec:
	errno = ENOEXEC;
error:
	if (map!=MAP_FAILED) unmap_library(dso);
	free(allocated_buf);
	return 0;
}
771 | ||
/* Try to open `name` under each component of the ':'- or '\n'-
 * separated search path s, formatting each candidate into buf.
 * Returns an open fd on success, -1 when the path list is exhausted,
 * or -2 on errors that should stop the search entirely. */
static int path_open(const char *name, const char *s, char *buf, size_t buf_size)
{
	size_t l;
	int fd;
	for (;;) {
		s += strspn(s, ":\n");
		l = strcspn(s, ":\n");
		/* Unsigned trick: terminates on the empty component at the
		 * end of the list (l==0) and rejects components too long
		 * for the %.*s precision below. */
		if (l-1 >= INT_MAX) return -1;
		/* Skip candidates that would be truncated. */
		if (snprintf(buf, buf_size, "%.*s/%s", (int)l, s, name) < buf_size) {
			if ((fd = open(buf, O_RDONLY|O_CLOEXEC))>=0) return fd;
			switch (errno) {
			case ENOENT:
			case ENOTDIR:
			case EACCES:
			case ENAMETOOLONG:
				break;
			default:
				/* Any negative value but -1 will inhibit
				 * further path search. */
				return -2;
			}
		}
		s += l;
	}
}
797 | ||
/* Expand $ORIGIN / ${ORIGIN} references in p->rpath_orig into an
 * allocated p->rpath.  Returns 0 on success or when the rpath is
 * simply unusable (p->rpath left unset), -1 on hard failures
 * (allocation, readlink errors other than the benign set). */
static int fixup_rpath(struct dso *p, char *buf, size_t buf_size)
{
	size_t n, l;
	const char *s, *t, *origin;
	char *d;
	/* Already expanded, or no rpath at all. */
	if (p->rpath || !p->rpath_orig) return 0;
	/* Fast path: no '$' means the string can be used verbatim. */
	if (!strchr(p->rpath_orig, '$')) {
		p->rpath = p->rpath_orig;
		return 0;
	}
	/* Count $ORIGIN occurrences; any other '$' use disables rpath. */
	n = 0;
	s = p->rpath_orig;
	while ((t=strchr(s, '$'))) {
		if (strncmp(t, "$ORIGIN", 7) && strncmp(t, "${ORIGIN}", 9))
			return 0;
		s = t+1;
		n++;
	}
	if (n > SSIZE_MAX/PATH_MAX) return 0;

	if (p->kernel_mapped) {
		/* $ORIGIN searches cannot be performed for the main program
		 * when it is suid/sgid/AT_SECURE. This is because the
		 * pathname is under the control of the caller of execve.
		 * For libraries, however, $ORIGIN can be processed safely
		 * since the library's pathname came from a trusted source
		 * (either system paths or a call to dlopen). */
		if (libc.secure)
			return 0;
		l = readlink("/proc/self/exe", buf, buf_size);
		if (l == -1) switch (errno) {
		case ENOENT:
		case ENOTDIR:
		case EACCES:
			break;
		default:
			return -1;
		}
		if (l >= buf_size)
			return 0;
		buf[l] = 0;
		origin = buf;
	} else {
		origin = p->name;
	}
	t = strrchr(origin, '/');
	if (t) {
		l = t-origin;
	} else {
		/* Normally p->name will always be an absolute or relative
		 * pathname containing at least one '/' character, but in the
		 * case where ldso was invoked as a command to execute a
		 * program in the working directory, app.name may not. Fix. */
		origin = ".";
		l = 1;
	}
	/* Disallow non-absolute origins for suid/sgid/AT_SECURE. */
	if (libc.secure && *origin != '/')
		return 0;
	p->rpath = malloc(strlen(p->rpath_orig) + n*l + 1);
	if (!p->rpath) return -1;

	/* Rewrite, substituting the origin directory for each $ORIGIN. */
	d = p->rpath;
	s = p->rpath_orig;
	while ((t=strchr(s, '$'))) {
		memcpy(d, s, t-s);
		d += t-s;
		memcpy(d, origin, l);
		d += l;
		/* It was determined previously that the '$' is followed
		 * either by "ORIGIN" or "{ORIGIN}". */
		s = t + 7 + 2*(t[1]=='{');
	}
	strcpy(d, s);
	return 0;
}
874 | ||
/* Populate p's symbol/string/hash/got pointers from its PT_DYNAMIC
 * table.  Tags above DYN_CNT (DT_GNU_HASH, DT_VERSYM) are located with
 * search_vec since decode_vec only captures small tag values. */
static void decode_dyn(struct dso *p)
{
	size_t dyn[DYN_CNT];
	decode_vec(p->dynv, dyn, DYN_CNT);
	p->syms = laddr(p, dyn[DT_SYMTAB]);
	p->strings = laddr(p, dyn[DT_STRTAB]);
	if (dyn[0]&(1<<DT_HASH))
		p->hashtab = laddr(p, dyn[DT_HASH]);
	if (dyn[0]&(1<<DT_RPATH))
		p->rpath_orig = p->strings + dyn[DT_RPATH];
	/* DT_RUNPATH wins over DT_RPATH when both are present. */
	if (dyn[0]&(1<<DT_RUNPATH))
		p->rpath_orig = p->strings + dyn[DT_RUNPATH];
	if (dyn[0]&(1<<DT_PLTGOT))
		p->got = laddr(p, dyn[DT_PLTGOT]);
	if (search_vec(p->dynv, dyn, DT_GNU_HASH))
		p->ghashtab = laddr(p, *dyn);
	if (search_vec(p->dynv, dyn, DT_VERSYM))
		p->versym = laddr(p, *dyn);
}
894 | ||
/* Number of dynamic symbols in p.  A SysV hash table stores it
 * directly (nchain); with only a GNU hash table it is recovered by
 * taking the highest bucket start and walking that chain until the
 * terminator bit. */
static size_t count_syms(struct dso *p)
{
	if (p->hashtab) return p->hashtab[1];

	size_t nsym, i;
	uint32_t *buckets = p->ghashtab + 4 + (p->ghashtab[2]*sizeof(size_t)/4);
	uint32_t *hashval;
	for (i = nsym = 0; i < p->ghashtab[0]; i++) {
		if (buckets[i] > nsym)
			nsym = buckets[i];
	}
	if (nsym) {
		hashval = buckets + p->ghashtab[0] + (nsym - p->ghashtab[1]);
		do nsym++;
		while (!(*hashval++ & 1));
	}
	return nsym;
}
913 | ||
914 | static void *dl_mmap(size_t n) | |
915 | { | |
916 | void *p; | |
917 | int prot = PROT_READ|PROT_WRITE, flags = MAP_ANONYMOUS|MAP_PRIVATE; | |
918 | #ifdef SYS_mmap2 | |
919 | p = (void *)__syscall(SYS_mmap2, 0, n, prot, flags, -1, 0); | |
920 | #else | |
921 | p = (void *)__syscall(SYS_mmap, 0, n, prot, flags, -1, 0); | |
922 | #endif | |
f41256b6 | 923 | return (unsigned long)p > -4096UL ? 0 : p; |
320054e8 DG |
924 | } |
925 | ||
/* FDPIC only: build the table of function descriptors (entry address
 * + GOT pointer pairs) for every symbol in p. The first call is for
 * ldso itself, before malloc is usable, so it allocates with dl_mmap;
 * later calls use malloc. */
static void makefuncdescs(struct dso *p)
{
	static int self_done;
	size_t nsym = count_syms(p);
	size_t i, size = nsym * sizeof(*p->funcdescs);

	if (!self_done) {
		p->funcdescs = dl_mmap(size);
		self_done = 1;
	} else {
		p->funcdescs = malloc(size);
	}
	if (!p->funcdescs) {
		/* Before runtime there is no recovery path; crash hard. */
		if (!runtime) a_crash();
		error("Error allocating function descriptors for %s", p->name);
		longjmp(*rtld_fail, 1);
	}
	for (i=0; i<nsym; i++) {
		/* Only defined functions (nonzero section index) get a
		 * real descriptor; everything else gets a null one. */
		if ((p->syms[i].st_info&0xf)==STT_FUNC && p->syms[i].st_shndx) {
			p->funcdescs[i].addr = laddr(p, p->syms[i].st_value);
			p->funcdescs[i].got = p->got;
		} else {
			p->funcdescs[i].addr = 0;
			p->funcdescs[i].got = 0;
		}
	}
}
953 | ||
/* Locate, map, and register the shared library `name`, returning its
 * dso record. Returns an existing record if the library (by shortname
 * or by dev/ino identity) is already loaded; returns 0 with errno set
 * on failure. needed_by records the dependent for rpath searching and
 * diagnostics. */
static struct dso *load_library(const char *name, struct dso *needed_by)
{
	char buf[2*NAME_MAX+2];
	const char *pathname;
	unsigned char *map;
	struct dso *p, temp_dso = {0};
	int fd;
	struct stat st;
	size_t alloc_size;
	int n_th = 0;
	int is_self = 0;

	if (!*name) {
		errno = EINVAL;
		return 0;
	}

	/* Catch and block attempts to reload the implementation itself */
	if (name[0]=='l' && name[1]=='i' && name[2]=='b') {
		/* '.'-terminated list of basenames (after "lib") that all
		 * resolve to this unified libc/ldso implementation. */
		static const char reserved[] =
			"c.pthread.rt.m.dl.util.xnet.";
		const char *rp, *next;
		for (rp=reserved; *rp; rp=next) {
			next = strchr(rp, '.') + 1;
			if (strncmp(name+3, rp, next-rp) == 0)
				break;
		}
		if (*rp) {
			if (ldd_mode) {
				/* Track which names have been resolved
				 * and only report each one once. */
				static unsigned reported;
				unsigned mask = 1U<<(rp-reserved);
				if (!(reported & mask)) {
					reported |= mask;
					dprintf(1, "\t%s => %s (%p)\n",
						name, ldso.name,
						ldso.base);
				}
			}
			is_self = 1;
		}
	}
	if (!strcmp(name, ldso.name)) is_self = 1;
	if (is_self) {
		/* Link ldso into the dso list on first reference. */
		if (!ldso.prev) {
			tail->next = &ldso;
			ldso.prev = tail;
			tail = &ldso;
		}
		return &ldso;
	}
	if (strchr(name, '/')) {
		/* Explicit pathname: open directly, no search. */
		pathname = name;
		fd = open(name, O_RDONLY|O_CLOEXEC);
	} else {
		/* Search for the name to see if it's already loaded */
		for (p=head->next; p; p=p->next) {
			if (p->shortname && !strcmp(p->shortname, name)) {
				return p;
			}
		}
		if (strlen(name) > NAME_MAX) return 0;
		fd = -1;
		/* Search order: LD_LIBRARY_PATH, then each dependent's
		 * rpath walking up the needed_by chain, then sys_path. */
		if (env_path) fd = path_open(name, env_path, buf, sizeof buf);
		for (p=needed_by; fd == -1 && p; p=p->needed_by) {
			if (fixup_rpath(p, buf, sizeof buf) < 0)
				fd = -2; /* Inhibit further search. */
			if (p->rpath)
				fd = path_open(name, p->rpath, buf, sizeof buf);
		}
		if (fd == -1) {
			if (!sys_path) {
				/* Lazily load the default search path from
				 * /etc/ld-musl-$(ARCH).path, optionally
				 * prefixed by ldso's own install prefix
				 * (two path components stripped). */
				char *prefix = 0;
				size_t prefix_len;
				if (ldso.name[0]=='/') {
					char *s, *t, *z;
					for (s=t=z=ldso.name; *s; s++)
						if (*s=='/') z=t, t=s;
					prefix_len = z-ldso.name;
					if (prefix_len < PATH_MAX)
						prefix = ldso.name;
				}
				if (!prefix) {
					prefix = "";
					prefix_len = 0;
				}
				char etc_ldso_path[prefix_len + 1
					+ sizeof "/etc/ld-musl-" LDSO_ARCH ".path"];
				snprintf(etc_ldso_path, sizeof etc_ldso_path,
					"%.*s/etc/ld-musl-" LDSO_ARCH ".path",
					(int)prefix_len, prefix);
				FILE *f = fopen(etc_ldso_path, "rbe");
				if (f) {
					if (getdelim(&sys_path, (size_t[1]){0}, 0, f) <= 0) {
						free(sys_path);
						sys_path = "";
					}
					fclose(f);
				} else if (errno != ENOENT) {
					/* Unexpected open error: disable the
					 * default fallback path entirely. */
					sys_path = "";
				}
			}
			if (!sys_path) sys_path = "/lib:/usr/local/lib:/usr/lib";
			fd = path_open(name, sys_path, buf, sizeof buf);
		}
		pathname = buf;
	}
	if (fd < 0) return 0;
	if (fstat(fd, &st) < 0) {
		close(fd);
		return 0;
	}
	for (p=head->next; p; p=p->next) {
		if (p->dev == st.st_dev && p->ino == st.st_ino) {
			/* If this library was previously loaded with a
			 * pathname but a search found the same inode,
			 * setup its shortname so it can be found by name. */
			if (!p->shortname && pathname != name)
				p->shortname = strrchr(p->name, '/')+1;
			close(fd);
			return p;
		}
	}
	map = noload ? 0 : map_library(fd, &temp_dso);
	close(fd);
	if (!map) return 0;

	/* Avoid the danger of getting two versions of libc mapped into the
	 * same process when an absolute pathname was used. The symbols
	 * checked are chosen to catch both musl and glibc, and to avoid
	 * false positives from interposition-hack libraries. */
	decode_dyn(&temp_dso);
	if (find_sym(&temp_dso, "__libc_start_main", 1).sym &&
	    find_sym(&temp_dso, "stdin", 1).sym) {
		unmap_library(&temp_dso);
		return load_library("libc.so", needed_by);
	}
	/* Past this point, if we haven't reached runtime yet, ldso has
	 * committed either to use the mapped library or to abort execution.
	 * Unmapping is not possible, so we can safely reclaim gaps. */
	if (!runtime) reclaim_gaps(&temp_dso);

	/* Allocate storage for the new DSO. When there is TLS, this
	 * storage must include a reservation for all pre-existing
	 * threads to obtain copies of both the new TLS, and an
	 * extended DTV capable of storing an additional slot for
	 * the newly-loaded DSO. */
	alloc_size = sizeof *p + strlen(pathname) + 1;
	if (runtime && temp_dso.tls.image) {
		size_t per_th = temp_dso.tls.size + temp_dso.tls.align
			+ sizeof(void *) * (tls_cnt+3);
		n_th = libc.threads_minus_1 + 1;
		/* Saturate to SIZE_MAX on overflow so calloc fails. */
		if (n_th > SSIZE_MAX / per_th) alloc_size = SIZE_MAX;
		else alloc_size += n_th * per_th;
	}
	p = calloc(1, alloc_size);
	if (!p) {
		unmap_library(&temp_dso);
		return 0;
	}
	memcpy(p, &temp_dso, sizeof temp_dso);
	p->dev = st.st_dev;
	p->ino = st.st_ino;
	p->needed_by = needed_by;
	p->name = p->buf;
	p->runtime_loaded = runtime;
	strcpy(p->name, pathname);
	/* Add a shortname only if name arg was not an explicit pathname. */
	if (pathname != name) p->shortname = strrchr(p->name, '/')+1;
	if (p->tls.image) {
		p->tls_id = ++tls_cnt;
		tls_align = MAXP2(tls_align, p->tls.align);
#ifdef TLS_ABOVE_TP
		/* Offsets grow upward from the thread pointer; align the
		 * module's block to the image's alignment residue. */
		p->tls.offset = tls_offset + ( (p->tls.align-1) &
			(-tls_offset + (uintptr_t)p->tls.image) );
		tls_offset = p->tls.offset + p->tls.size;
#else
		/* Offsets grow downward from the thread pointer. */
		tls_offset += p->tls.size + p->tls.align - 1;
		tls_offset -= (tls_offset + (uintptr_t)p->tls.image)
			& (p->tls.align-1);
		p->tls.offset = tls_offset;
#endif
		/* new_dtv/new_tls point into the extra space reserved in
		 * this same allocation, after the name string. */
		p->new_dtv = (void *)(-sizeof(size_t) &
			(uintptr_t)(p->name+strlen(p->name)+sizeof(size_t)));
		p->new_tls = (void *)(p->new_dtv + n_th*(tls_cnt+1));
		if (tls_tail) tls_tail->next = &p->tls;
		else libc.tls_head = &p->tls;
		tls_tail = &p->tls;
	}

	tail->next = p;
	p->prev = tail;
	tail = p;

	if (DL_FDPIC) makefuncdescs(p);

	if (ldd_mode) dprintf(1, "\t%s => %s (%p)\n", name, pathname, p->base);

	return p;
}
1155 | ||
f41256b6 DG |
/* Populate p->deps with p's DT_NEEDED libraries (loading them if
 * necessary) and set p->ndeps_direct. No-op if already populated.
 * On allocation or load failure, error() is reported; at runtime the
 * failure propagates via longjmp, otherwise error() is expected to be
 * fatal (see the !runtime paths elsewhere in this file). */
static void load_direct_deps(struct dso *p)
{
	size_t i, cnt=0;

	if (p->deps) return;
	/* For head, all preloads are direct pseudo-dependencies.
	 * Count and include them now to avoid realloc later. */
	if (p==head) for (struct dso *q=p->next; q; q=q->next)
		cnt++;
	for (i=0; p->dynv[i]; i+=2)
		if (p->dynv[i] == DT_NEEDED) cnt++;
	/* Use builtin buffer for apps with no external deps, to
	 * preserve property of no runtime failure paths. */
	p->deps = (p==head && cnt<2) ? builtin_deps :
		calloc(cnt+1, sizeof *p->deps);
	if (!p->deps) {
		error("Error loading dependencies for %s", p->name);
		if (runtime) longjmp(*rtld_fail, 1);
	}
	cnt=0;
	if (p==head) for (struct dso *q=p->next; q; q=q->next)
		p->deps[cnt++] = q;
	for (i=0; p->dynv[i]; i+=2) {
		if (p->dynv[i] != DT_NEEDED) continue;
		struct dso *dep = load_library(p->strings + p->dynv[i+1], p);
		if (!dep) {
			error("Error loading shared library %s: %m (needed by %s)",
				p->strings + p->dynv[i+1], p->name);
			if (runtime) longjmp(*rtld_fail, 1);
			continue;
		}
		p->deps[cnt++] = dep;
	}
	p->deps[cnt] = 0;
	p->ndeps_direct = cnt;
}
1192 | ||
320054e8 DG |
1193 | static void load_deps(struct dso *p) |
1194 | { | |
f41256b6 DG |
1195 | if (p->deps) return; |
1196 | for (; p; p=p->next) | |
1197 | load_direct_deps(p); | |
1198 | } | |
1199 | ||
/* Extend p->deps from the direct-dependency list to the full
 * breadth-first transitive closure, deduplicated via the per-dso
 * mark flag. Idempotent: guarded by p->bfs_built. */
static void extend_bfs_deps(struct dso *p)
{
	size_t i, j, cnt, ndeps_all;
	struct dso **tmp;

	/* Can't use realloc if the original p->deps was allocated at
	 * program entry and malloc has been replaced, or if it's
	 * the builtin non-allocated trivial main program deps array. */
	int no_realloc = (__malloc_replaced && !p->runtime_loaded)
		|| p->deps == builtin_deps;

	if (p->bfs_built) return;
	ndeps_all = p->ndeps_direct;

	/* Mark existing (direct) deps so they won't be duplicated. */
	for (i=0; p->deps[i]; i++)
		p->deps[i]->mark = 1;

	/* For each dependency already in the list, copy its list of direct
	 * dependencies to the list, excluding any items already in the
	 * list. Note that the list this loop iterates over will grow during
	 * the loop, but since duplicates are excluded, growth is bounded. */
	for (i=0; p->deps[i]; i++) {
		struct dso *dep = p->deps[i];
		for (j=cnt=0; j<dep->ndeps_direct; j++)
			if (!dep->deps[j]->mark) cnt++;
		tmp = no_realloc ?
			malloc(sizeof(*tmp) * (ndeps_all+cnt+1)) :
			realloc(p->deps, sizeof(*tmp) * (ndeps_all+cnt+1));
		if (!tmp) {
			error("Error recording dependencies for %s", p->name);
			if (runtime) longjmp(*rtld_fail, 1);
			continue;
		}
		if (no_realloc) {
			/* One-time migration off the unreallocatable
			 * buffer; subsequent growth can use realloc. */
			memcpy(tmp, p->deps, sizeof(*tmp) * (ndeps_all+1));
			no_realloc = 0;
		}
		p->deps = tmp;
		for (j=0; j<dep->ndeps_direct; j++) {
			if (dep->deps[j]->mark) continue;
			dep->deps[j]->mark = 1;
			p->deps[ndeps_all++] = dep->deps[j];
		}
		p->deps[ndeps_all] = 0;
	}
	p->bfs_built = 1;
	/* Clear all marks globally so the flag is reusable. */
	for (p=head; p; p=p->next)
		p->mark = 0;
}
1250 | ||
/* Load each library named in s, a list separated by whitespace and/or
 * colons (LD_PRELOAD syntax). The string is modified in place only
 * transiently: each name is NUL-terminated for the load_library call
 * and the original byte is then restored. */
static void load_preload(char *s)
{
	char *end;
	for (end=s; *end; s=end) {
		/* Skip leading separators. */
		while (*s && (isspace(*s) || *s==':')) s++;
		/* Scan to the end of the current name token. */
		end = s;
		while (*end && !isspace(*end) && *end!=':') end++;
		int saved = *end;
		*end = 0;
		load_library(s, 0);
		*end = saved;
	}
}
1264 | ||
1265 | static void add_syms(struct dso *p) | |
1266 | { | |
1267 | if (!p->syms_next && syms_tail != p) { | |
1268 | syms_tail->syms_next = p; | |
1269 | syms_tail = p; | |
1270 | } | |
1271 | } | |
1272 | ||
1273 | static void revert_syms(struct dso *old_tail) | |
1274 | { | |
1275 | struct dso *p, *next; | |
1276 | /* Chop off the tail of the list of dsos that participate in | |
1277 | * the global symbol table, reverting them to RTLD_LOCAL. */ | |
1278 | for (p=old_tail; p; p=next) { | |
1279 | next = p->syms_next; | |
1280 | p->syms_next = 0; | |
1281 | } | |
1282 | syms_tail = old_tail; | |
1283 | } | |
1284 | ||
/* MIPS-specific GOT processing: relocate the local GOT entries by the
 * load base (except for ldso itself, whose entries were handled during
 * bootstrap), then synthesize R_MIPS_JUMP_SLOT relocations for the
 * global GOT entries, which the MIPS ABI implies rather than emitting
 * explicit relocation records. */
static void do_mips_relocs(struct dso *p, size_t *got)
{
	size_t i, j, rel[2];
	unsigned char *base = p->base;
	i=0; search_vec(p->dynv, &i, DT_MIPS_LOCAL_GOTNO);
	if (p==&ldso) {
		got += i;
	} else {
		while (i--) *got++ += (size_t)base;
	}
	j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM);
	i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO);
	Sym *sym = p->syms + j;
	rel[0] = (unsigned char *)got - base;
	/* One synthetic relocation per global symbol from GOTSYM up. */
	for (i-=j; i; i--, sym++, rel[0]+=sizeof(size_t)) {
		rel[1] = R_INFO(sym-p->syms, R_MIPS_JUMP_SLOT);
		do_relocs(p, rel, sizeof rel, 2);
	}
}
1304 | ||
/* Process all relocation types (PLT/JMPREL, REL, RELA) for p and every
 * dso after it in the list, then apply RELRO protection. Already-
 * relocated dsos are skipped, so re-running over the whole list is
 * safe. */
static void reloc_all(struct dso *p)
{
	size_t dyn[DYN_CNT];
	for (; p; p=p->next) {
		if (p->relocated) continue;
		decode_vec(p->dynv, dyn, DYN_CNT);
		if (NEED_MIPS_GOT_RELOCS)
			do_mips_relocs(p, laddr(p, dyn[DT_PLTGOT]));
		do_relocs(p, laddr(p, dyn[DT_JMPREL]), dyn[DT_PLTRELSZ],
			2+(dyn[DT_PLTREL]==DT_RELA));
		do_relocs(p, laddr(p, dyn[DT_REL]), dyn[DT_RELSZ], 2);
		do_relocs(p, laddr(p, dyn[DT_RELA]), dyn[DT_RELASZ], 3);

		/* RELRO is skipped while head==&ldso (ldso's own early
		 * self-relocation pass); ENOSYS (no mprotect) is not an
		 * error. */
		if (head != &ldso && p->relro_start != p->relro_end &&
		    mprotect(laddr(p, p->relro_start), p->relro_end-p->relro_start, PROT_READ)
		    && errno != ENOSYS) {
			error("Error relocating %s: RELRO protection failed: %m",
				p->name);
			if (runtime) longjmp(*rtld_fail, 1);
		}

		p->relocated = 1;
	}
}
1329 | ||
/* Fill in mapping metadata for a dso that was mapped by the kernel
 * (the main app or ldso itself) rather than by map_library: locate
 * its dynamic section, RELRO range, stack-size request, and overall
 * map extent from the program headers. */
static void kernel_mapped_dso(struct dso *p)
{
	size_t min_addr = -1, max_addr = 0, cnt;
	Phdr *ph = p->phdr;
	for (cnt = p->phnum; cnt--; ph = (void *)((char *)ph + p->phentsize)) {
		if (ph->p_type == PT_DYNAMIC) {
			p->dynv = laddr(p, ph->p_vaddr);
		} else if (ph->p_type == PT_GNU_RELRO) {
			p->relro_start = ph->p_vaddr & -PAGE_SIZE;
			p->relro_end = (ph->p_vaddr + ph->p_memsz) & -PAGE_SIZE;
		} else if (ph->p_type == PT_GNU_STACK) {
			/* Honor the binary's requested default stack size
			 * (clamped to DEFAULT_STACK_MAX) at startup only. */
			if (!runtime && ph->p_memsz > __default_stacksize) {
				__default_stacksize =
					ph->p_memsz < DEFAULT_STACK_MAX ?
					ph->p_memsz : DEFAULT_STACK_MAX;
			}
		}
		if (ph->p_type != PT_LOAD) continue;
		if (ph->p_vaddr < min_addr)
			min_addr = ph->p_vaddr;
		if (ph->p_vaddr+ph->p_memsz > max_addr)
			max_addr = ph->p_vaddr+ph->p_memsz;
	}
	/* Round the extent outward to whole pages. */
	min_addr &= -PAGE_SIZE;
	max_addr = (max_addr + PAGE_SIZE-1) & -PAGE_SIZE;
	p->map = p->base + min_addr;
	p->map_len = max_addr - min_addr;
	p->kernel_mapped = 1;
}
1359 | ||
/* Run destructors (DT_FINI_ARRAY in reverse order, then legacy
 * DT_FINI) for every constructed dso, in the order recorded on
 * fini_head. Called at process exit. */
void __libc_exit_fini()
{
	struct dso *p;
	size_t dyn[DYN_CNT];
	int self = __pthread_self()->tid;

	/* Take both locks before setting shutting_down, so that
	 * either lock is sufficient to read its value. The lock
	 * order matches that in dlopen to avoid deadlock. */
	pthread_rwlock_wrlock(&lock);
	pthread_mutex_lock(&init_fini_lock);
	shutting_down = 1;
	pthread_rwlock_unlock(&lock);
	for (p=fini_head; p; p=p->fini_next) {
		/* Wait out any other thread currently running this dso's
		 * constructors (ctor_visitor holds that thread's tid). */
		while (p->ctor_visitor && p->ctor_visitor!=self)
			pthread_cond_wait(&ctor_cond, &init_fini_lock);
		if (!p->constructed) continue;
		decode_vec(p->dynv, dyn, DYN_CNT);
		if (dyn[0] & (1<<DT_FINI_ARRAY)) {
			size_t n = dyn[DT_FINI_ARRAYSZ]/sizeof(size_t);
			size_t *fn = (size_t *)laddr(p, dyn[DT_FINI_ARRAY])+n;
			/* Destructors run in reverse array order. */
			while (n--) ((void (*)(void))*--fn)();
		}
#ifndef NO_LEGACY_INITFINI
		if ((dyn[0] & (1<<DT_FINI)) && dyn[DT_FINI])
			fpaddr(p, dyn[DT_FINI])();
#endif
	}
}
1389 | ||
/* Build and return a NULL-terminated array of dsos in the order their
 * constructors should run: dependencies before dependents (pseudo-DFS
 * postorder, ignoring cycles). Returns 0 in ldd mode or on allocation
 * failure. */
static struct dso **queue_ctors(struct dso *dso)
{
	size_t cnt, qpos, spos, i;
	struct dso *p, **queue, **stack;

	if (ldd_mode) return 0;

	/* Bound on queue size is the total number of indirect deps.
	 * If a bfs deps list was built, we can use it. Otherwise,
	 * bound by the total number of DSOs, which is always safe and
	 * is reasonable we use it (for main app at startup). */
	if (dso->bfs_built) {
		for (cnt=0; dso->deps[cnt]; cnt++)
			dso->deps[cnt]->mark = 0;
		cnt++; /* self, not included in deps */
	} else {
		for (cnt=0, p=head; p; cnt++, p=p->next)
			p->mark = 0;
	}
	cnt++; /* termination slot */
	if (dso==head && cnt <= countof(builtin_ctor_queue))
		queue = builtin_ctor_queue;
	else
		queue = calloc(cnt, sizeof *queue);

	if (!queue) {
		error("Error allocating constructor queue: %m\n");
		if (runtime) longjmp(*rtld_fail, 1);
		return 0;
	}

	/* Opposite ends of the allocated buffer serve as an output queue
	 * and a working stack. Setup initial stack with just the argument
	 * dso and initial queue empty... */
	stack = queue;
	qpos = 0;
	spos = cnt;
	stack[--spos] = dso;
	dso->next_dep = 0;
	dso->mark = 1;

	/* Then perform pseudo-DFS sort, but ignoring circular deps. */
	while (spos<cnt) {
		p = stack[spos++];
		while (p->next_dep < p->ndeps_direct) {
			if (p->deps[p->next_dep]->mark) {
				p->next_dep++;
			} else {
				/* Descend: push current node back, resume
				 * it after the child is fully processed. */
				stack[--spos] = p;
				p = p->deps[p->next_dep];
				p->next_dep = 0;
				p->mark = 1;
			}
		}
		queue[qpos++] = p;
	}
	queue[qpos] = 0;
	/* Clear marks so the flag can be reused by later operations. */
	for (i=0; i<qpos; i++) queue[i]->mark = 0;

	return queue;
}
1451 | ||
/* Run constructors (legacy DT_INIT then DT_INIT_ARRAY) for each dso in
 * queue, in order. ctor_visitor/ctor_cond serialize against other
 * threads constructing the same dso, while the lock is dropped around
 * the actual constructor calls so they may call dlopen etc. */
static void do_init_fini(struct dso **queue)
{
	struct dso *p;
	size_t dyn[DYN_CNT], i;
	int self = __pthread_self()->tid;

	pthread_mutex_lock(&init_fini_lock);
	for (i=0; (p=queue[i]); i++) {
		/* Wait if another thread is constructing this dso, or if
		 * process shutdown has begun. The self-tid check permits
		 * re-entry by the same thread (e.g. via recursive ctors). */
		while ((p->ctor_visitor && p->ctor_visitor!=self) || shutting_down)
			pthread_cond_wait(&ctor_cond, &init_fini_lock);
		if (p->ctor_visitor || p->constructed)
			continue;
		p->ctor_visitor = self;

		decode_vec(p->dynv, dyn, DYN_CNT);
		/* Register for destructor processing before running ctors,
		 * so fini order is fixed even if a ctor never returns. */
		if (dyn[0] & ((1<<DT_FINI) | (1<<DT_FINI_ARRAY))) {
			p->fini_next = fini_head;
			fini_head = p;
		}

		pthread_mutex_unlock(&init_fini_lock);

#ifndef NO_LEGACY_INITFINI
		if ((dyn[0] & (1<<DT_INIT)) && dyn[DT_INIT])
			fpaddr(p, dyn[DT_INIT])();
#endif
		if (dyn[0] & (1<<DT_INIT_ARRAY)) {
			size_t n = dyn[DT_INIT_ARRAYSZ]/sizeof(size_t);
			size_t *fn = laddr(p, dyn[DT_INIT_ARRAY]);
			while (n--) ((void (*)(void))*fn++)();
		}

		pthread_mutex_lock(&init_fini_lock);
		p->ctor_visitor = 0;
		p->constructed = 1;
		pthread_cond_broadcast(&ctor_cond);
	}
	pthread_mutex_unlock(&init_fini_lock);
}
1491 | ||
/* Entry point for running the main program's constructors at startup,
 * then releasing the startup constructor queue. The queue is only
 * freed if malloc has not been interposed (the queue may have been
 * allocated by the internal malloc) and is not the static builtin. */
void __libc_start_init(void)
{
	do_init_fini(main_ctor_queue);
	if (!__malloc_replaced && main_ctor_queue != builtin_ctor_queue)
		free(main_ctor_queue);
	main_ctor_queue = 0;
}
1499 | ||
/* Empty hook for debuggers: a breakpoint is set on this function (via
 * the r_debug protocol's bp pointer elsewhere in this file) so the
 * debugger is notified when the dso list changes. Must not be
 * optimized away or merged. */
static void dl_debug_state(void)
{
}

weak_alias(dl_debug_state, _dl_debug_state);
1505 | ||
/* No-op in the dynamic-linked build: TLS setup is performed by the
 * ldso stages themselves (see __dls2b's __init_tp/__copy_tls call).
 * Presumably this overrides the static-linking implementation —
 * NOTE(review): confirm against the static __init_tls definition. */
void __init_tls(size_t *auxv)
{
}
1509 | ||
320054e8 DG |
1510 | static void update_tls_size() |
1511 | { | |
1512 | libc.tls_cnt = tls_cnt; | |
1513 | libc.tls_align = tls_align; | |
1514 | libc.tls_size = ALIGN( | |
1515 | (1+tls_cnt) * sizeof(void *) + | |
1516 | tls_offset + | |
1517 | sizeof(struct pthread) + | |
1518 | tls_align * 2, | |
1519 | tls_align); | |
1520 | } | |
1521 | ||
f41256b6 DG |
/* After dlopen of a TLS-bearing library, install enlarged DTVs and
 * fresh TLS blocks (pre-reserved in the dso's allocation by
 * load_library) into every existing thread. Runs with application
 * signals blocked and the thread list locked. */
static void install_new_tls(void)
{
	sigset_t set;
	pthread_t self = __pthread_self(), td;
	/* tls_tail belongs to the most recently loaded TLS dso, whose
	 * allocation carries the reserved new_dtv space. */
	struct dso *dtv_provider = container_of(tls_tail, struct dso, tls);
	uintptr_t (*newdtv)[tls_cnt+1] = (void *)dtv_provider->new_dtv;
	struct dso *p;
	size_t i, j;
	size_t old_cnt = self->dtv[0];

	__block_app_sigs(&set);
	__tl_lock();
	/* Copy existing dtv contents from all existing threads. */
	for (i=0, td=self; !i || td!=self; i++, td=td->next) {
		memcpy(newdtv+i, td->dtv,
			(old_cnt+1)*sizeof(uintptr_t));
		newdtv[i][0] = tls_cnt;
	}
	/* Install new dtls into the enlarged, uninstalled dtv copies. */
	for (p=head; ; p=p->next) {
		if (p->tls_id <= old_cnt) continue;
		unsigned char *mem = p->new_tls;
		for (j=0; j<i; j++) {
			/* Align each thread's block to match the image's
			 * alignment residue, then seed it from the image. */
			unsigned char *new = mem;
			new += ((uintptr_t)p->tls.image - (uintptr_t)mem)
				& (p->tls.align-1);
			memcpy(new, p->tls.image, p->tls.len);
			newdtv[j][p->tls_id] =
				(uintptr_t)new + DTP_OFFSET;
			mem += p->tls.size + p->tls.align;
		}
		if (p->tls_id == tls_cnt) break;
	}

	/* Broadcast barrier to ensure contents of new dtv is visible
	 * if the new dtv pointer is. The __membarrier function has a
	 * fallback emulation using signals for kernels that lack the
	 * feature at the syscall level. */

	__membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0);

	/* Install new dtv for each thread. */
	for (j=0, td=self; !j || td!=self; j++, td=td->next) {
		td->dtv = td->dtv_copy = newdtv[j];
	}

	__tl_unlock();
	__restore_sigs(&set);
}
1571 | ||
320054e8 DG |
1572 | /* Stage 1 of the dynamic linker is defined in dlstart.c. It calls the |
1573 | * following stage 2 and stage 3 functions via primitive symbolic lookup | |
1574 | * since it does not have access to their addresses to begin with. */ | |
1575 | ||
1576 | /* Stage 2 of the dynamic linker is called after relative relocations | |
1577 | * have been processed. It can make function calls to static functions | |
1578 | * and access string literals and static data, but cannot use extern | |
1579 | * symbols. Its job is to perform symbolic relocations on the dynamic | |
1580 | * linker itself, but some of the relocations performed may need to be | |
1581 | * replaced later due to copy relocations in the main program. */ | |
1582 | ||
/* Stage 2: perform symbolic relocations on ldso itself. Per the
 * comment block above this function in the file, only static
 * functions, string literals, and static data may be used here —
 * no extern symbols. */
hidden void __dls2(unsigned char *base, size_t *sp)
{
	if (DL_FDPIC) {
		/* FDPIC entry passes load maps on the stack below sp. */
		void *p1 = (void *)sp[-2];
		void *p2 = (void *)sp[-1];
		if (!p1) {
			/* Invoked as the kernel's interpreter: recover the
			 * base address from the aux vector past envp. */
			size_t *auxv, aux[AUX_CNT];
			for (auxv=sp+1+*sp+1; *auxv; auxv++);
			auxv++;
			decode_vec(auxv, aux, AUX_CNT);
			if (aux[AT_BASE]) ldso.base = (void *)aux[AT_BASE];
			else ldso.base = (void *)(aux[AT_PHDR] & -4096);
		}
		app_loadmap = p2 ? p1 : 0;
		ldso.loadmap = p2 ? p2 : p1;
		ldso.base = laddr(&ldso, 0);
	} else {
		ldso.base = base;
	}
	Ehdr *ehdr = (void *)ldso.base;
	ldso.name = ldso.shortname = "libc.so";
	ldso.phnum = ehdr->e_phnum;
	ldso.phdr = laddr(&ldso, ehdr->e_phoff);
	ldso.phentsize = ehdr->e_phentsize;
	kernel_mapped_dso(&ldso);
	decode_dyn(&ldso);

	if (DL_FDPIC) makefuncdescs(&ldso);

	/* Prepare storage for to save clobbered REL addends so they
	 * can be reused in stage 3. There should be very few. If
	 * something goes wrong and there are a huge number, abort
	 * instead of risking stack overflow. */
	size_t dyn[DYN_CNT];
	decode_vec(ldso.dynv, dyn, DYN_CNT);
	size_t *rel = laddr(&ldso, dyn[DT_REL]);
	size_t rel_size = dyn[DT_RELSZ];
	size_t symbolic_rel_cnt = 0;
	apply_addends_to = rel;
	for (; rel_size; rel+=2, rel_size-=2*sizeof(size_t))
		if (!IS_RELATIVE(rel[1], ldso.syms)) symbolic_rel_cnt++;
	if (symbolic_rel_cnt >= ADDEND_LIMIT) a_crash();
	size_t addends[symbolic_rel_cnt+1];
	saved_addends = addends;

	head = &ldso;
	reloc_all(&ldso);

	/* Cleared so stage 3 re-relocates ldso (addends were saved). */
	ldso.relocated = 0;

	/* Call dynamic linker stage-2b, __dls2b, looking it up
	 * symbolically as a barrier against moving the address
	 * load across the above relocation processing. */
	struct symdef dls2b_def = find_sym(&ldso, "__dls2b", 0);
	if (DL_FDPIC) ((stage3_func)&ldso.funcdescs[dls2b_def.sym-ldso.syms])(sp);
	else ((stage3_func)laddr(&ldso, dls2b_def.sym->st_value))(sp);
}
1640 | ||
1641 | /* Stage 2b sets up a valid thread pointer, which requires relocations | |
1642 | * completed in stage 2, and on which stage 3 is permitted to depend. | |
1643 | * This is done as a separate stage, with symbolic lookup as a barrier, | |
1644 | * so that loads of the thread pointer and &errno can be pure/const and | |
1645 | * thereby hoistable. */ | |
1646 | ||
/* Stage 2b: establish a valid thread pointer, then chain to stage 3
 * via symbolic lookup (the lookup acts as a compiler/linker barrier,
 * per the comment block above this function in the file). */
void __dls2b(size_t *sp)
{
	/* Setup early thread pointer in builtin_tls for ldso/libc itself to
	 * use during dynamic linking. If possible it will also serve as the
	 * thread pointer at runtime. */
	libc.tls_size = sizeof builtin_tls;
	libc.tls_align = tls_align;
	if (__init_tp(__copy_tls((void *)builtin_tls)) < 0) {
		/* No recovery possible without a thread pointer. */
		a_crash();
	}

	struct symdef dls3_def = find_sym(&ldso, "__dls3", 0);
	if (DL_FDPIC) ((stage3_func)&ldso.funcdescs[dls3_def.sym-ldso.syms])(sp);
	else ((stage3_func)laddr(&ldso, dls3_def.sym->st_value))(sp);
}
1662 | ||
1663 | /* Stage 3 of the dynamic linker is called with the dynamic linker/libc | |
1664 | * fully functional. Its job is to load (if not already loaded) and | |
1665 | * process dependencies and relocations for the main application and | |
1666 | * transfer control to its entry point. */ | |
1667 | ||
d4db3fa2 | 1668 | void __dls3(size_t *sp) |
320054e8 DG |
1669 | { |
1670 | static struct dso app, vdso; | |
1671 | size_t aux[AUX_CNT], *auxv; | |
1672 | size_t i; | |
1673 | char *env_preload=0; | |
1674 | char *replace_argv0=0; | |
1675 | size_t vdso_base; | |
1676 | int argc = *sp; | |
1677 | char **argv = (void *)(sp+1); | |
1678 | char **argv_orig = argv; | |
1679 | char **envp = argv+argc+1; | |
1680 | ||
1681 | /* Find aux vector just past environ[] and use it to initialize | |
1682 | * global data that may be needed before we can make syscalls. */ | |
1683 | __environ = envp; | |
1684 | for (i=argc+1; argv[i]; i++); | |
1685 | libc.auxv = auxv = (void *)(argv+i+1); | |
1686 | decode_vec(auxv, aux, AUX_CNT); | |
1687 | __hwcap = aux[AT_HWCAP]; | |
d4db3fa2 DG |
1688 | search_vec(auxv, &__sysinfo, AT_SYSINFO); |
1689 | __pthread_self()->sysinfo = __sysinfo; | |
320054e8 DG |
1690 | libc.page_size = aux[AT_PAGESZ]; |
1691 | libc.secure = ((aux[0]&0x7800)!=0x7800 || aux[AT_UID]!=aux[AT_EUID] | |
1692 | || aux[AT_GID]!=aux[AT_EGID] || aux[AT_SECURE]); | |
1693 | ||
1694 | /* Only trust user/env if kernel says we're not suid/sgid */ | |
1695 | if (!libc.secure) { | |
1696 | env_path = getenv("LD_LIBRARY_PATH"); | |
1697 | env_preload = getenv("LD_PRELOAD"); | |
1698 | } | |
1699 | ||
1700 | /* If the main program was already loaded by the kernel, | |
1701 | * AT_PHDR will point to some location other than the dynamic | |
1702 | * linker's program headers. */ | |
1703 | if (aux[AT_PHDR] != (size_t)ldso.phdr) { | |
1704 | size_t interp_off = 0; | |
1705 | size_t tls_image = 0; | |
1706 | /* Find load address of the main program, via AT_PHDR vs PT_PHDR. */ | |
1707 | Phdr *phdr = app.phdr = (void *)aux[AT_PHDR]; | |
1708 | app.phnum = aux[AT_PHNUM]; | |
1709 | app.phentsize = aux[AT_PHENT]; | |
1710 | for (i=aux[AT_PHNUM]; i; i--, phdr=(void *)((char *)phdr + aux[AT_PHENT])) { | |
1711 | if (phdr->p_type == PT_PHDR) | |
1712 | app.base = (void *)(aux[AT_PHDR] - phdr->p_vaddr); | |
1713 | else if (phdr->p_type == PT_INTERP) | |
1714 | interp_off = (size_t)phdr->p_vaddr; | |
1715 | else if (phdr->p_type == PT_TLS) { | |
1716 | tls_image = phdr->p_vaddr; | |
1717 | app.tls.len = phdr->p_filesz; | |
1718 | app.tls.size = phdr->p_memsz; | |
1719 | app.tls.align = phdr->p_align; | |
1720 | } | |
1721 | } | |
1722 | if (DL_FDPIC) app.loadmap = app_loadmap; | |
1723 | if (app.tls.size) app.tls.image = laddr(&app, tls_image); | |
1724 | if (interp_off) ldso.name = laddr(&app, interp_off); | |
1725 | if ((aux[0] & (1UL<<AT_EXECFN)) | |
1726 | && strncmp((char *)aux[AT_EXECFN], "/proc/", 6)) | |
1727 | app.name = (char *)aux[AT_EXECFN]; | |
1728 | else | |
1729 | app.name = argv[0]; | |
1730 | kernel_mapped_dso(&app); | |
1731 | } else { | |
1732 | int fd; | |
1733 | char *ldname = argv[0]; | |
1734 | size_t l = strlen(ldname); | |
1735 | if (l >= 3 && !strcmp(ldname+l-3, "ldd")) ldd_mode = 1; | |
1736 | argv++; | |
1737 | while (argv[0] && argv[0][0]=='-' && argv[0][1]=='-') { | |
1738 | char *opt = argv[0]+2; | |
1739 | *argv++ = (void *)-1; | |
1740 | if (!*opt) { | |
1741 | break; | |
1742 | } else if (!memcmp(opt, "list", 5)) { | |
1743 | ldd_mode = 1; | |
1744 | } else if (!memcmp(opt, "library-path", 12)) { | |
1745 | if (opt[12]=='=') env_path = opt+13; | |
1746 | else if (opt[12]) *argv = 0; | |
1747 | else if (*argv) env_path = *argv++; | |
1748 | } else if (!memcmp(opt, "preload", 7)) { | |
1749 | if (opt[7]=='=') env_preload = opt+8; | |
1750 | else if (opt[7]) *argv = 0; | |
1751 | else if (*argv) env_preload = *argv++; | |
1752 | } else if (!memcmp(opt, "argv0", 5)) { | |
1753 | if (opt[5]=='=') replace_argv0 = opt+6; | |
1754 | else if (opt[5]) *argv = 0; | |
1755 | else if (*argv) replace_argv0 = *argv++; | |
1756 | } else { | |
1757 | argv[0] = 0; | |
1758 | } | |
1759 | } | |
1760 | argv[-1] = (void *)(argc - (argv-argv_orig)); | |
1761 | if (!argv[0]) { | |
1762 | dprintf(2, "musl libc (" LDSO_ARCH ")\n" | |
1763 | "Version %s\n" | |
1764 | "Dynamic Program Loader\n" | |
1765 | "Usage: %s [options] [--] pathname%s\n", | |
1766 | __libc_version, ldname, | |
1767 | ldd_mode ? "" : " [args]"); | |
1768 | _exit(1); | |
1769 | } | |
1770 | fd = open(argv[0], O_RDONLY); | |
1771 | if (fd < 0) { | |
1772 | dprintf(2, "%s: cannot load %s: %s\n", ldname, argv[0], strerror(errno)); | |
1773 | _exit(1); | |
1774 | } | |
1775 | Ehdr *ehdr = (void *)map_library(fd, &app); | |
1776 | if (!ehdr) { | |
1777 | dprintf(2, "%s: %s: Not a valid dynamic program\n", ldname, argv[0]); | |
1778 | _exit(1); | |
1779 | } | |
1780 | close(fd); | |
1781 | ldso.name = ldname; | |
1782 | app.name = argv[0]; | |
1783 | aux[AT_ENTRY] = (size_t)laddr(&app, ehdr->e_entry); | |
1784 | /* Find the name that would have been used for the dynamic | |
1785 | * linker had ldd not taken its place. */ | |
1786 | if (ldd_mode) { | |
1787 | for (i=0; i<app.phnum; i++) { | |
1788 | if (app.phdr[i].p_type == PT_INTERP) | |
1789 | ldso.name = laddr(&app, app.phdr[i].p_vaddr); | |
1790 | } | |
1791 | dprintf(1, "\t%s (%p)\n", ldso.name, ldso.base); | |
1792 | } | |
1793 | } | |
1794 | if (app.tls.size) { | |
1795 | libc.tls_head = tls_tail = &app.tls; | |
1796 | app.tls_id = tls_cnt = 1; | |
1797 | #ifdef TLS_ABOVE_TP | |
1798 | app.tls.offset = GAP_ABOVE_TP; | |
d4db3fa2 DG |
1799 | app.tls.offset += (-GAP_ABOVE_TP + (uintptr_t)app.tls.image) |
1800 | & (app.tls.align-1); | |
1801 | tls_offset = app.tls.offset + app.tls.size; | |
320054e8 DG |
1802 | #else |
1803 | tls_offset = app.tls.offset = app.tls.size | |
1804 | + ( -((uintptr_t)app.tls.image + app.tls.size) | |
1805 | & (app.tls.align-1) ); | |
1806 | #endif | |
1807 | tls_align = MAXP2(tls_align, app.tls.align); | |
1808 | } | |
1809 | decode_dyn(&app); | |
1810 | if (DL_FDPIC) { | |
1811 | makefuncdescs(&app); | |
1812 | if (!app.loadmap) { | |
1813 | app.loadmap = (void *)&app_dummy_loadmap; | |
1814 | app.loadmap->nsegs = 1; | |
1815 | app.loadmap->segs[0].addr = (size_t)app.map; | |
1816 | app.loadmap->segs[0].p_vaddr = (size_t)app.map | |
1817 | - (size_t)app.base; | |
1818 | app.loadmap->segs[0].p_memsz = app.map_len; | |
1819 | } | |
1820 | argv[-3] = (void *)app.loadmap; | |
1821 | } | |
1822 | ||
1823 | /* Initial dso chain consists only of the app. */ | |
1824 | head = tail = syms_tail = &app; | |
1825 | ||
1826 | /* Donate unused parts of app and library mapping to malloc */ | |
1827 | reclaim_gaps(&app); | |
1828 | reclaim_gaps(&ldso); | |
1829 | ||
1830 | /* Load preload/needed libraries, add symbols to global namespace. */ | |
f41256b6 | 1831 | ldso.deps = (struct dso **)no_deps; |
320054e8 DG |
1832 | if (env_preload) load_preload(env_preload); |
1833 | load_deps(&app); | |
1834 | for (struct dso *p=head; p; p=p->next) | |
1835 | add_syms(p); | |
1836 | ||
1837 | /* Attach to vdso, if provided by the kernel, last so that it does | |
1838 | * not become part of the global namespace. */ | |
1839 | if (search_vec(auxv, &vdso_base, AT_SYSINFO_EHDR) && vdso_base) { | |
1840 | Ehdr *ehdr = (void *)vdso_base; | |
1841 | Phdr *phdr = vdso.phdr = (void *)(vdso_base + ehdr->e_phoff); | |
1842 | vdso.phnum = ehdr->e_phnum; | |
1843 | vdso.phentsize = ehdr->e_phentsize; | |
1844 | for (i=ehdr->e_phnum; i; i--, phdr=(void *)((char *)phdr + ehdr->e_phentsize)) { | |
1845 | if (phdr->p_type == PT_DYNAMIC) | |
1846 | vdso.dynv = (void *)(vdso_base + phdr->p_offset); | |
1847 | if (phdr->p_type == PT_LOAD) | |
1848 | vdso.base = (void *)(vdso_base - phdr->p_vaddr + phdr->p_offset); | |
1849 | } | |
1850 | vdso.name = ""; | |
1851 | vdso.shortname = "linux-gate.so.1"; | |
1852 | vdso.relocated = 1; | |
f41256b6 | 1853 | vdso.deps = (struct dso **)no_deps; |
320054e8 DG |
1854 | decode_dyn(&vdso); |
1855 | vdso.prev = tail; | |
1856 | tail->next = &vdso; | |
1857 | tail = &vdso; | |
1858 | } | |
1859 | ||
1860 | for (i=0; app.dynv[i]; i+=2) { | |
1861 | if (!DT_DEBUG_INDIRECT && app.dynv[i]==DT_DEBUG) | |
1862 | app.dynv[i+1] = (size_t)&debug; | |
1863 | if (DT_DEBUG_INDIRECT && app.dynv[i]==DT_DEBUG_INDIRECT) { | |
1864 | size_t *ptr = (size_t *) app.dynv[i+1]; | |
1865 | *ptr = (size_t)&debug; | |
1866 | } | |
1867 | } | |
1868 | ||
f41256b6 DG |
1869 | /* This must be done before final relocations, since it calls |
1870 | * malloc, which may be provided by the application. Calling any | |
1871 | * application code prior to the jump to its entry point is not | |
1872 | * valid in our model and does not work with FDPIC, where there | |
1873 | * are additional relocation-like fixups that only the entry point | |
1874 | * code can see to perform. */ | |
1875 | main_ctor_queue = queue_ctors(&app); | |
1876 | ||
320054e8 DG |
 1877 | /* The main program must be relocated LAST since it may contain |
1878 | * copy relocations which depend on libraries' relocations. */ | |
1879 | reloc_all(app.next); | |
1880 | reloc_all(&app); | |
1881 | ||
1882 | update_tls_size(); | |
1883 | if (libc.tls_size > sizeof builtin_tls || tls_align > MIN_TLS_ALIGN) { | |
1884 | void *initial_tls = calloc(libc.tls_size, 1); | |
1885 | if (!initial_tls) { | |
1886 | dprintf(2, "%s: Error getting %zu bytes thread-local storage: %m\n", | |
1887 | argv[0], libc.tls_size); | |
1888 | _exit(127); | |
1889 | } | |
1890 | if (__init_tp(__copy_tls(initial_tls)) < 0) { | |
1891 | a_crash(); | |
1892 | } | |
1893 | } else { | |
1894 | size_t tmp_tls_size = libc.tls_size; | |
1895 | pthread_t self = __pthread_self(); | |
1896 | /* Temporarily set the tls size to the full size of | |
1897 | * builtin_tls so that __copy_tls will use the same layout | |
1898 | * as it did for before. Then check, just to be safe. */ | |
1899 | libc.tls_size = sizeof builtin_tls; | |
1900 | if (__copy_tls((void*)builtin_tls) != self) a_crash(); | |
1901 | libc.tls_size = tmp_tls_size; | |
1902 | } | |
1903 | static_tls_cnt = tls_cnt; | |
1904 | ||
1905 | if (ldso_fail) _exit(127); | |
1906 | if (ldd_mode) _exit(0); | |
1907 | ||
1908 | /* Determine if malloc was interposed by a replacement implementation | |
1909 | * so that calloc and the memalign family can harden against the | |
1910 | * possibility of incomplete replacement. */ | |
1911 | if (find_sym(head, "malloc", 1).dso != &ldso) | |
1912 | __malloc_replaced = 1; | |
1913 | ||
1914 | /* Switch to runtime mode: any further failures in the dynamic | |
1915 | * linker are a reportable failure rather than a fatal startup | |
1916 | * error. */ | |
1917 | runtime = 1; | |
1918 | ||
1919 | debug.ver = 1; | |
1920 | debug.bp = dl_debug_state; | |
1921 | debug.head = head; | |
1922 | debug.base = ldso.base; | |
1923 | debug.state = 0; | |
1924 | _dl_debug_state(); | |
1925 | ||
1926 | if (replace_argv0) argv[0] = replace_argv0; | |
1927 | ||
1928 | errno = 0; | |
1929 | ||
1930 | CRTJMP((void *)aux[AT_ENTRY], argv-1); | |
1931 | for(;;); | |
1932 | } | |
1933 | ||
1934 | static void prepare_lazy(struct dso *p) | |
1935 | { | |
1936 | size_t dyn[DYN_CNT], n, flags1=0; | |
1937 | decode_vec(p->dynv, dyn, DYN_CNT); | |
1938 | search_vec(p->dynv, &flags1, DT_FLAGS_1); | |
1939 | if (dyn[DT_BIND_NOW] || (dyn[DT_FLAGS] & DF_BIND_NOW) || (flags1 & DF_1_NOW)) | |
1940 | return; | |
1941 | n = dyn[DT_RELSZ]/2 + dyn[DT_RELASZ]/3 + dyn[DT_PLTRELSZ]/2 + 1; | |
1942 | if (NEED_MIPS_GOT_RELOCS) { | |
1943 | size_t j=0; search_vec(p->dynv, &j, DT_MIPS_GOTSYM); | |
1944 | size_t i=0; search_vec(p->dynv, &i, DT_MIPS_SYMTABNO); | |
1945 | n += i-j; | |
1946 | } | |
1947 | p->lazy = calloc(n, 3*sizeof(size_t)); | |
1948 | if (!p->lazy) { | |
1949 | error("Error preparing lazy relocation for %s: %m", p->name); | |
1950 | longjmp(*rtld_fail, 1); | |
1951 | } | |
1952 | p->lazy_next = lazy_head; | |
1953 | lazy_head = p; | |
1954 | } | |
1955 | ||
1956 | void *dlopen(const char *file, int mode) | |
1957 | { | |
1958 | struct dso *volatile p, *orig_tail, *orig_syms_tail, *orig_lazy_head, *next; | |
1959 | struct tls_module *orig_tls_tail; | |
1960 | size_t orig_tls_cnt, orig_tls_offset, orig_tls_align; | |
1961 | size_t i; | |
1962 | int cs; | |
1963 | jmp_buf jb; | |
f41256b6 | 1964 | struct dso **volatile ctor_queue = 0; |
320054e8 DG |
1965 | |
1966 | if (!file) return head; | |
1967 | ||
1968 | pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &cs); | |
1969 | pthread_rwlock_wrlock(&lock); | |
1970 | __inhibit_ptc(); | |
1971 | ||
1972 | p = 0; | |
f41256b6 DG |
1973 | if (shutting_down) { |
1974 | error("Cannot dlopen while program is exiting."); | |
1975 | goto end; | |
1976 | } | |
320054e8 DG |
1977 | orig_tls_tail = tls_tail; |
1978 | orig_tls_cnt = tls_cnt; | |
1979 | orig_tls_offset = tls_offset; | |
1980 | orig_tls_align = tls_align; | |
1981 | orig_lazy_head = lazy_head; | |
1982 | orig_syms_tail = syms_tail; | |
1983 | orig_tail = tail; | |
1984 | noload = mode & RTLD_NOLOAD; | |
1985 | ||
1986 | rtld_fail = &jb; | |
1987 | if (setjmp(*rtld_fail)) { | |
1988 | /* Clean up anything new that was (partially) loaded */ | |
1989 | revert_syms(orig_syms_tail); | |
1990 | for (p=orig_tail->next; p; p=next) { | |
1991 | next = p->next; | |
1992 | while (p->td_index) { | |
1993 | void *tmp = p->td_index->next; | |
1994 | free(p->td_index); | |
1995 | p->td_index = tmp; | |
1996 | } | |
1997 | free(p->funcdescs); | |
1998 | if (p->rpath != p->rpath_orig) | |
1999 | free(p->rpath); | |
f41256b6 | 2000 | free(p->deps); |
320054e8 DG |
2001 | unmap_library(p); |
2002 | free(p); | |
2003 | } | |
f41256b6 DG |
2004 | free(ctor_queue); |
2005 | ctor_queue = 0; | |
320054e8 DG |
2006 | if (!orig_tls_tail) libc.tls_head = 0; |
2007 | tls_tail = orig_tls_tail; | |
2008 | if (tls_tail) tls_tail->next = 0; | |
2009 | tls_cnt = orig_tls_cnt; | |
2010 | tls_offset = orig_tls_offset; | |
2011 | tls_align = orig_tls_align; | |
2012 | lazy_head = orig_lazy_head; | |
2013 | tail = orig_tail; | |
2014 | tail->next = 0; | |
2015 | p = 0; | |
2016 | goto end; | |
2017 | } else p = load_library(file, head); | |
2018 | ||
2019 | if (!p) { | |
2020 | error(noload ? | |
2021 | "Library %s is not already loaded" : | |
2022 | "Error loading shared library %s: %m", | |
2023 | file); | |
2024 | goto end; | |
2025 | } | |
2026 | ||
2027 | /* First load handling */ | |
f41256b6 DG |
2028 | load_deps(p); |
2029 | extend_bfs_deps(p); | |
2030 | pthread_mutex_lock(&init_fini_lock); | |
2031 | if (!p->constructed) ctor_queue = queue_ctors(p); | |
2032 | pthread_mutex_unlock(&init_fini_lock); | |
2033 | if (!p->relocated && (mode & RTLD_LAZY)) { | |
2034 | prepare_lazy(p); | |
2035 | for (i=0; p->deps[i]; i++) | |
2036 | if (!p->deps[i]->relocated) | |
2037 | prepare_lazy(p->deps[i]); | |
320054e8 | 2038 | } |
f41256b6 | 2039 | if (!p->relocated || (mode & RTLD_GLOBAL)) { |
320054e8 DG |
2040 | /* Make new symbols global, at least temporarily, so we can do |
2041 | * relocations. If not RTLD_GLOBAL, this is reverted below. */ | |
2042 | add_syms(p); | |
2043 | for (i=0; p->deps[i]; i++) | |
2044 | add_syms(p->deps[i]); | |
2045 | } | |
f41256b6 | 2046 | if (!p->relocated) { |
320054e8 DG |
2047 | reloc_all(p); |
2048 | } | |
2049 | ||
2050 | /* If RTLD_GLOBAL was not specified, undo any new additions | |
2051 | * to the global symbol table. This is a nop if the library was | |
2052 | * previously loaded and already global. */ | |
2053 | if (!(mode & RTLD_GLOBAL)) | |
2054 | revert_syms(orig_syms_tail); | |
2055 | ||
2056 | /* Processing of deferred lazy relocations must not happen until | |
2057 | * the new libraries are committed; otherwise we could end up with | |
2058 | * relocations resolved to symbol definitions that get removed. */ | |
2059 | redo_lazy_relocs(); | |
2060 | ||
2061 | update_tls_size(); | |
f41256b6 DG |
2062 | if (tls_cnt != orig_tls_cnt) |
2063 | install_new_tls(); | |
320054e8 DG |
2064 | _dl_debug_state(); |
2065 | orig_tail = tail; | |
2066 | end: | |
2067 | __release_ptc(); | |
2068 | if (p) gencnt++; | |
2069 | pthread_rwlock_unlock(&lock); | |
f41256b6 DG |
2070 | if (ctor_queue) { |
2071 | do_init_fini(ctor_queue); | |
2072 | free(ctor_queue); | |
2073 | } | |
320054e8 DG |
2074 | pthread_setcancelstate(cs, 0); |
2075 | return p; | |
2076 | } | |
2077 | ||
2078 | hidden int __dl_invalid_handle(void *h) | |
2079 | { | |
2080 | struct dso *p; | |
2081 | for (p=head; p; p=p->next) if (h==p) return 0; | |
2082 | error("Invalid library handle %p", (void *)h); | |
2083 | return 1; | |
2084 | } | |
2085 | ||
2086 | static void *addr2dso(size_t a) | |
2087 | { | |
2088 | struct dso *p; | |
2089 | size_t i; | |
2090 | if (DL_FDPIC) for (p=head; p; p=p->next) { | |
2091 | i = count_syms(p); | |
2092 | if (a-(size_t)p->funcdescs < i*sizeof(*p->funcdescs)) | |
2093 | return p; | |
2094 | } | |
2095 | for (p=head; p; p=p->next) { | |
2096 | if (DL_FDPIC && p->loadmap) { | |
2097 | for (i=0; i<p->loadmap->nsegs; i++) { | |
2098 | if (a-p->loadmap->segs[i].p_vaddr | |
2099 | < p->loadmap->segs[i].p_memsz) | |
2100 | return p; | |
2101 | } | |
2102 | } else { | |
2103 | Phdr *ph = p->phdr; | |
2104 | size_t phcnt = p->phnum; | |
2105 | size_t entsz = p->phentsize; | |
2106 | size_t base = (size_t)p->base; | |
2107 | for (; phcnt--; ph=(void *)((char *)ph+entsz)) { | |
2108 | if (ph->p_type != PT_LOAD) continue; | |
2109 | if (a-base-ph->p_vaddr < ph->p_memsz) | |
2110 | return p; | |
2111 | } | |
2112 | if (a-(size_t)p->map < p->map_len) | |
2113 | return 0; | |
2114 | } | |
2115 | } | |
2116 | return 0; | |
2117 | } | |
2118 | ||
2119 | static void *do_dlsym(struct dso *p, const char *s, void *ra) | |
2120 | { | |
2121 | size_t i; | |
2122 | uint32_t h = 0, gh = 0, *ght; | |
2123 | Sym *sym; | |
2124 | if (p == head || p == RTLD_DEFAULT || p == RTLD_NEXT) { | |
2125 | if (p == RTLD_DEFAULT) { | |
2126 | p = head; | |
2127 | } else if (p == RTLD_NEXT) { | |
2128 | p = addr2dso((size_t)ra); | |
2129 | if (!p) p=head; | |
2130 | p = p->next; | |
2131 | } | |
2132 | struct symdef def = find_sym(p, s, 0); | |
2133 | if (!def.sym) goto failed; | |
2134 | if ((def.sym->st_info&0xf) == STT_TLS) | |
2135 | return __tls_get_addr((tls_mod_off_t []){def.dso->tls_id, def.sym->st_value-DTP_OFFSET}); | |
2136 | if (DL_FDPIC && (def.sym->st_info&0xf) == STT_FUNC) | |
2137 | return def.dso->funcdescs + (def.sym - def.dso->syms); | |
2138 | return laddr(def.dso, def.sym->st_value); | |
2139 | } | |
2140 | if (__dl_invalid_handle(p)) | |
2141 | return 0; | |
2142 | if ((ght = p->ghashtab)) { | |
2143 | gh = gnu_hash(s); | |
2144 | sym = gnu_lookup(gh, ght, p, s); | |
2145 | } else { | |
2146 | h = sysv_hash(s); | |
2147 | sym = sysv_lookup(s, h, p); | |
2148 | } | |
2149 | if (sym && (sym->st_info&0xf) == STT_TLS) | |
2150 | return __tls_get_addr((tls_mod_off_t []){p->tls_id, sym->st_value-DTP_OFFSET}); | |
2151 | if (DL_FDPIC && sym && sym->st_shndx && (sym->st_info&0xf) == STT_FUNC) | |
2152 | return p->funcdescs + (sym - p->syms); | |
2153 | if (sym && sym->st_value && (1<<(sym->st_info&0xf) & OK_TYPES)) | |
2154 | return laddr(p, sym->st_value); | |
2155 | for (i=0; p->deps[i]; i++) { | |
2156 | if ((ght = p->deps[i]->ghashtab)) { | |
2157 | if (!gh) gh = gnu_hash(s); | |
2158 | sym = gnu_lookup(gh, ght, p->deps[i], s); | |
2159 | } else { | |
2160 | if (!h) h = sysv_hash(s); | |
2161 | sym = sysv_lookup(s, h, p->deps[i]); | |
2162 | } | |
2163 | if (sym && (sym->st_info&0xf) == STT_TLS) | |
2164 | return __tls_get_addr((tls_mod_off_t []){p->deps[i]->tls_id, sym->st_value-DTP_OFFSET}); | |
2165 | if (DL_FDPIC && sym && sym->st_shndx && (sym->st_info&0xf) == STT_FUNC) | |
2166 | return p->deps[i]->funcdescs + (sym - p->deps[i]->syms); | |
2167 | if (sym && sym->st_value && (1<<(sym->st_info&0xf) & OK_TYPES)) | |
2168 | return laddr(p->deps[i], sym->st_value); | |
2169 | } | |
2170 | failed: | |
2171 | error("Symbol not found: %s", s); | |
2172 | return 0; | |
2173 | } | |
2174 | ||
2175 | int dladdr(const void *addr_arg, Dl_info *info) | |
2176 | { | |
2177 | size_t addr = (size_t)addr_arg; | |
2178 | struct dso *p; | |
2179 | Sym *sym, *bestsym; | |
2180 | uint32_t nsym; | |
2181 | char *strings; | |
2182 | size_t best = 0; | |
2183 | size_t besterr = -1; | |
2184 | ||
2185 | pthread_rwlock_rdlock(&lock); | |
2186 | p = addr2dso(addr); | |
2187 | pthread_rwlock_unlock(&lock); | |
2188 | ||
2189 | if (!p) return 0; | |
2190 | ||
2191 | sym = p->syms; | |
2192 | strings = p->strings; | |
2193 | nsym = count_syms(p); | |
2194 | ||
2195 | if (DL_FDPIC) { | |
2196 | size_t idx = (addr-(size_t)p->funcdescs) | |
2197 | / sizeof(*p->funcdescs); | |
2198 | if (idx < nsym && (sym[idx].st_info&0xf) == STT_FUNC) { | |
2199 | best = (size_t)(p->funcdescs + idx); | |
2200 | bestsym = sym + idx; | |
2201 | besterr = 0; | |
2202 | } | |
2203 | } | |
2204 | ||
2205 | if (!best) for (; nsym; nsym--, sym++) { | |
2206 | if (sym->st_value | |
2207 | && (1<<(sym->st_info&0xf) & OK_TYPES) | |
2208 | && (1<<(sym->st_info>>4) & OK_BINDS)) { | |
2209 | size_t symaddr = (size_t)laddr(p, sym->st_value); | |
2210 | if (symaddr > addr || symaddr <= best) | |
2211 | continue; | |
2212 | best = symaddr; | |
2213 | bestsym = sym; | |
2214 | besterr = addr - symaddr; | |
2215 | if (addr == symaddr) | |
2216 | break; | |
2217 | } | |
2218 | } | |
2219 | ||
d4db3fa2 | 2220 | if (best && besterr > bestsym->st_size-1) { |
320054e8 DG |
2221 | best = 0; |
2222 | bestsym = 0; | |
2223 | } | |
2224 | ||
2225 | info->dli_fname = p->name; | |
2226 | info->dli_fbase = p->map; | |
2227 | ||
2228 | if (!best) { | |
2229 | info->dli_sname = 0; | |
2230 | info->dli_saddr = 0; | |
2231 | return 1; | |
2232 | } | |
2233 | ||
2234 | if (DL_FDPIC && (bestsym->st_info&0xf) == STT_FUNC) | |
2235 | best = (size_t)(p->funcdescs + (bestsym - p->syms)); | |
2236 | info->dli_sname = strings + bestsym->st_name; | |
2237 | info->dli_saddr = (void *)best; | |
2238 | ||
2239 | return 1; | |
2240 | } | |
2241 | ||
2242 | hidden void *__dlsym(void *restrict p, const char *restrict s, void *restrict ra) | |
2243 | { | |
2244 | void *res; | |
2245 | pthread_rwlock_rdlock(&lock); | |
2246 | res = do_dlsym(p, s, ra); | |
2247 | pthread_rwlock_unlock(&lock); | |
2248 | return res; | |
2249 | } | |
2250 | ||
2251 | int dl_iterate_phdr(int(*callback)(struct dl_phdr_info *info, size_t size, void *data), void *data) | |
2252 | { | |
2253 | struct dso *current; | |
2254 | struct dl_phdr_info info; | |
2255 | int ret = 0; | |
2256 | for(current = head; current;) { | |
2257 | info.dlpi_addr = (uintptr_t)current->base; | |
2258 | info.dlpi_name = current->name; | |
2259 | info.dlpi_phdr = current->phdr; | |
2260 | info.dlpi_phnum = current->phnum; | |
2261 | info.dlpi_adds = gencnt; | |
2262 | info.dlpi_subs = 0; | |
2263 | info.dlpi_tls_modid = current->tls_id; | |
2264 | info.dlpi_tls_data = current->tls.image; | |
2265 | ||
2266 | ret = (callback)(&info, sizeof (info), data); | |
2267 | ||
2268 | if (ret != 0) break; | |
2269 | ||
2270 | pthread_rwlock_rdlock(&lock); | |
2271 | current = current->next; | |
2272 | pthread_rwlock_unlock(&lock); | |
2273 | } | |
2274 | return ret; | |
2275 | } | |
2276 | ||
2277 | static void error(const char *fmt, ...) | |
2278 | { | |
2279 | va_list ap; | |
2280 | va_start(ap, fmt); | |
2281 | if (!runtime) { | |
2282 | vdprintf(2, fmt, ap); | |
2283 | dprintf(2, "\n"); | |
2284 | ldso_fail = 1; | |
2285 | va_end(ap); | |
2286 | return; | |
2287 | } | |
2288 | __dl_vseterr(fmt, ap); | |
2289 | va_end(ap); | |
2290 | } |