/* SPDX-License-Identifier: GPL-2.0 */
/*
 * This file is included twice from vdso2c.c. It generates code for 32-bit
 * and 64-bit vDSOs. We need both for 64-bit builds, since 32-bit vDSOs
 * are built for 32-bit userspace.
 */

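/*
 * Rough sketch of the double inclusion, for orientation only -- see
 * vdso2c.c for the real macro setup (the GET_LE()/BITSFUNC() helper
 * definitions are elided here):
 *
 *	#define ELF_BITS 64
 *	#include "vdso2c.h"
 *	#undef ELF_BITS
 *
 *	#define ELF_BITS 32
 *	#include "vdso2c.h"
 *	#undef ELF_BITS
 */
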
static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
			 void *stripped_addr, size_t stripped_len,
			 FILE *outfile, const char *name)
{
	int found_load = 0;
	unsigned long load_size = -1;  /* Work around bogus warning */
	unsigned long mapping_size;
	ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
	int i;
	unsigned long j;
	ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
		*alt_sec = NULL;
	ELF(Dyn) *dyn = 0, *dyn_end = 0;
	const char *secstrings;
	INT_BITS syms[NSYMS] = {};

	ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));

	if (GET_LE(&hdr->e_type) != ET_DYN)
		fail("input is not a shared object\n");

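	/*
	 * The kernel maps the vDSO image as-is, so the input must consist of
	 * exactly one PT_LOAD segment that starts at offset/vaddr 0 and has
	 * no uninitialized tail (memsz == filesz).
	 */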
	/* Walk the segment table. */
	for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
		if (GET_LE(&pt[i].p_type) == PT_LOAD) {
			if (found_load)
				fail("multiple PT_LOAD segs\n");

			if (GET_LE(&pt[i].p_offset) != 0 ||
			    GET_LE(&pt[i].p_vaddr) != 0)
				fail("PT_LOAD in wrong place\n");

			if (GET_LE(&pt[i].p_memsz) != GET_LE(&pt[i].p_filesz))
				fail("cannot handle memsz != filesz\n");

			load_size = GET_LE(&pt[i].p_memsz);
			found_load = 1;
		} else if (GET_LE(&pt[i].p_type) == PT_DYNAMIC) {
			dyn = raw_addr + GET_LE(&pt[i].p_offset);
			dyn_end = raw_addr + GET_LE(&pt[i].p_offset) +
				GET_LE(&pt[i].p_memsz);
		}
	}
	if (!found_load)
		fail("no PT_LOAD seg\n");

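	/* The stripped image must still cover the entire PT_LOAD segment. */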
	if (stripped_len < load_size)
		fail("stripped input is too short\n");

	if (!dyn)
		fail("input has no PT_DYNAMIC section -- your toolchain is buggy\n");

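	/*
	 * The vDSO is mapped without a dynamic loader to process
	 * relocations, so reject any image that requires them.
	 */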
	/* Walk the dynamic table */
	for (i = 0; dyn + i < dyn_end &&
		     GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
		typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
		if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
		    tag == DT_RELENT || tag == DT_TEXTREL)
			fail("vdso image contains dynamic relocations\n");
	}

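	/*
	 * Locate the symbol table and, if present, .altinstructions; section
	 * names come from the string table indexed by e_shstrndx.
	 */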
	/* Walk the section table */
	secstrings_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
		GET_LE(&hdr->e_shentsize) * GET_LE(&hdr->e_shstrndx);
	secstrings = raw_addr + GET_LE(&secstrings_hdr->sh_offset);
	for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
		ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) +
			GET_LE(&hdr->e_shentsize) * i;
		if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
			symtab_hdr = sh;

		if (!strcmp(secstrings + GET_LE(&sh->sh_name),
			    ".altinstructions"))
			alt_sec = sh;
	}

	if (!symtab_hdr)
		fail("no symbol table\n");

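	/* sh_link of the symbol table is the index of its string table. */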
	strtab_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
		GET_LE(&hdr->e_shentsize) * GET_LE(&symtab_hdr->sh_link);

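	/*
	 * Record the value of every symbol listed in required_syms[];
	 * defining one of them twice is an error.
	 */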
	/* Walk the symbol table */
	for (i = 0;
	     i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
	     i++) {
		int k;
		ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
			GET_LE(&symtab_hdr->sh_entsize) * i;
		const char *name = raw_addr + GET_LE(&strtab_hdr->sh_offset) +
			GET_LE(&sym->st_name);

		for (k = 0; k < NSYMS; k++) {
			if (!strcmp(name, required_syms[k].name)) {
				if (syms[k]) {
					fail("duplicate symbol %s\n",
					     required_syms[k].name);
				}

				/*
				 * Careful: we use negative addresses, but
				 * st_value is unsigned, so we rely
				 * on syms[k] being a signed type of the
				 * correct width.
				 */
				syms[k] = GET_LE(&sym->st_value);
			}
		}
	}

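	/*
	 * Each special data page symbol is a negative offset from the start
	 * of the vDSO text; if used, it must be page-aligned and must lie
	 * between vvar_start and the text.
	 */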
	/* Validate mapping addresses. */
	for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
		INT_BITS symval = syms[special_pages[i]];

		if (!symval)
			continue;  /* The mapping isn't used; ignore it. */

		if (symval % 4096)
			fail("%s must be a multiple of 4096\n",
			     required_syms[special_pages[i]].name);
		if (symval + 4096 < syms[sym_vvar_start])
			fail("%s underruns vvar_start\n",
			     required_syms[special_pages[i]].name);
		if (symval + 4096 > 0)
			fail("%s is on the wrong side of the vdso text\n",
			     required_syms[special_pages[i]].name);
	}
	if (syms[sym_vvar_start] % 4096)
		fail("vvar_start must be a multiple of 4096\n");

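	/* With no image name, emit the raw stripped image instead of C source. */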
	if (!name) {
		fwrite(stripped_addr, stripped_len, 1, outfile);
		return;
	}

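	/* Round the mapping size up to a whole number of pages. */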
	mapping_size = (stripped_len + 4095) / 4096 * 4096;

	fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
	fprintf(outfile, "#include <linux/linkage.h>\n");
	fprintf(outfile, "#include <asm/page_types.h>\n");
	fprintf(outfile, "#include <asm/vdso.h>\n");
	fprintf(outfile, "\n");
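	/*
	 * Emit the stripped image as a page-aligned byte array sized to
	 * mapping_size, so the unwritten tail pads out the last page.
	 */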
	fprintf(outfile,
		"static unsigned char raw_data[%lu] __ro_after_init __aligned(PAGE_SIZE) = {",
		mapping_size);
	for (j = 0; j < stripped_len; j++) {
		if (j % 10 == 0)
			fprintf(outfile, "\n\t");
		fprintf(outfile, "0x%02X, ",
			(int)((unsigned char *)stripped_addr)[j]);
	}
	fprintf(outfile, "\n};\n\n");

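	/* Emit the vdso_image descriptor that kernel code refers to by name. */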
160 fprintf(outfile, "const struct vdso_image %s = {\n", name);
161 fprintf(outfile, "\t.data = raw_data,\n");
162 fprintf(outfile, "\t.size = %lu,\n", mapping_size);
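	/* Offset and length of .altinstructions within the image. */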
	if (alt_sec) {
		fprintf(outfile, "\t.alt = %lu,\n",
			(unsigned long)GET_LE(&alt_sec->sh_offset));
		fprintf(outfile, "\t.alt_len = %lu,\n",
			(unsigned long)GET_LE(&alt_sec->sh_size));
	}
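	/*
	 * Emit the (possibly negative) offset of each exported required
	 * symbol that was found in the image.
	 */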
	for (i = 0; i < NSYMS; i++) {
		if (required_syms[i].export && syms[i])
			fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
				required_syms[i].name, (int64_t)syms[i]);
	}
	fprintf(outfile, "};\n");
}