/*
 * kaslr.c
 *
 * This contains the routines needed to generate a reasonable level of
 * entropy to choose a randomized kernel base address offset in support
 * of Kernel Address Space Layout Randomization (KASLR). Additionally
 * handles walking the physical memory maps (and tracking memory regions
 * to avoid) in order to select a physical memory location that can
 * contain the entire properly aligned running kernel image.
 *
 */

/*
 * isspace() in linux/ctype.h is expected by next_arg() to filter
 * out "space/lf/tab". boot/ctype.h conflicts with linux/ctype.h,
 * since isdigit() is implemented in both of them, so disable the
 * boot/ variant here.
 */
#define BOOT_CTYPE_H

/*
 * _ctype[] in lib/ctype.c is needed by isspace() of linux/ctype.h.
 * Both lib/ctype.c and lib/cmdline.c pull in EXPORT_SYMBOL, which is
 * meaningless in this environment and can cause compile errors in
 * some cases. So do not include linux/export.h and define
 * EXPORT_SYMBOL(sym) as empty.
 */
#define _LINUX_EXPORT_H
#define EXPORT_SYMBOL(sym)

#include "misc.h"
#include "error.h"
#include "../string.h"

#include <generated/compile.h>
#include <linux/module.h>
#include <linux/uts.h>
#include <linux/utsname.h>
#include <linux/ctype.h>
#include <generated/utsrelease.h>

/* Macros used by the included decompressor code below. */
#define STATIC
#include <linux/decompress/mm.h>

extern unsigned long get_cmd_line_ptr(void);

/* Simplified build-specific string for starting entropy. */
static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
		LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;

static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
		hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}

	return hash;
}
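
/*
 * Illustrative note (not in the original source): on a 64-bit build the
 * rotate above computes (hash << 57) | (hash >> 7), i.e. a right-rotate
 * by 7 bits. Rotating by an amount coprime to the word width before each
 * XOR spreads every input word across all bit positions of the hash over
 * successive iterations.
 */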

/* Attempt to create a simple but unpredictable starting entropy. */
static unsigned long get_boot_seed(void)
{
	unsigned long hash = 0;

	hash = rotate_xor(hash, build_str, sizeof(build_str));
	hash = rotate_xor(hash, boot_params, sizeof(*boot_params));

	return hash;
}

#define KASLR_COMPRESSED_BOOT
#include "../../lib/kaslr.c"
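
/*
 * Editorial note (hedged): with KASLR_COMPRESSED_BOOT defined, the
 * included arch/x86/lib/kaslr.c provides kaslr_get_random_long(), which
 * mixes the get_boot_seed() value above with hardware entropy sources
 * (RDRAND, RDTSC, the i8254 timer) where available.
 */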

struct mem_vector {
	unsigned long long start;
	unsigned long long size;
};

/* Only supporting at most 4 unusable memmap regions with kaslr */
#define MAX_MEMMAP_REGIONS 4

static bool memmap_too_large;

/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
unsigned long long mem_limit = ULLONG_MAX;

enum mem_avoid_index {
	MEM_AVOID_ZO_RANGE = 0,
	MEM_AVOID_INITRD,
	MEM_AVOID_CMDLINE,
	MEM_AVOID_BOOTPARAMS,
	MEM_AVOID_MEMMAP_BEGIN,
	MEM_AVOID_MEMMAP_END = MEM_AVOID_MEMMAP_BEGIN + MAX_MEMMAP_REGIONS - 1,
	MEM_AVOID_MAX,
};

static struct mem_vector mem_avoid[MEM_AVOID_MAX];

static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
{
	/* Item one is entirely before item two. */
	if (one->start + one->size <= two->start)
		return false;
	/* Item one is entirely after item two. */
	if (one->start >= two->start + two->size)
		return false;
	return true;
}
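
/*
 * Worked example (illustrative): one = [0x100000, +0x200000) and
 * two = [0x250000, +0x100000) overlap, since one extends to 0x300000,
 * past two's start. With two = [0x300000, +0x100000) the ranges merely
 * touch and mem_overlaps() returns false: end addresses are exclusive.
 */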

char *skip_spaces(const char *str)
{
	while (isspace(*str))
		++str;
	return (char *)str;
}
#include "../../../../lib/ctype.c"
#include "../../../../lib/cmdline.c"

static int
parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
{
	char *oldp;

	if (!p)
		return -EINVAL;

	/* We don't care about this option here */
	if (!strncmp(p, "exactmap", 8))
		return -EINVAL;

	oldp = p;
	*size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	switch (*p) {
	case '#':
	case '$':
	case '!':
		*start = memparse(p + 1, &p);
		return 0;
	case '@':
		/* memmap=nn@ss specifies usable region, should be skipped */
		*size = 0;
		/* Fall through */
	default:
		/*
		 * If no offset is given, only a size was specified, and
		 * memmap=nn[KMG] behaves like mem=nn[KMG]: it limits the
		 * maximum address the system can use. Regions above the
		 * limit must be avoided.
		 */
		*start = 0;
		return 0;
	}

	return -EINVAL;
}
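
/*
 * Example (illustrative): for "memmap=64M$0x10000000", memparse() first
 * consumes "64M" into *size, then the '$' case consumes "0x10000000"
 * into *start. A bare "memmap=1G" takes the default case and leaves
 * *start at 0, which mem_avoid_memmap() below treats as a memory limit
 * rather than a region to avoid.
 */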

static void mem_avoid_memmap(char *str)
{
	static int i;

	if (i >= MAX_MEMMAP_REGIONS)
		return;

	while (str && (i < MAX_MEMMAP_REGIONS)) {
		int rc;
		unsigned long long start, size;
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;

		rc = parse_memmap(str, &start, &size);
		if (rc < 0)
			break;
		str = k;

		if (start == 0) {
			/* Store the specified memory limit if size > 0 */
			if (size > 0)
				mem_limit = size;

			continue;
		}

		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].start = start;
		mem_avoid[MEM_AVOID_MEMMAP_BEGIN + i].size = size;
		i++;
	}

	/* More than MAX_MEMMAP_REGIONS memmaps, fail KASLR */
	if ((i >= MAX_MEMMAP_REGIONS) && str)
		memmap_too_large = true;
}

static int handle_mem_memmap(void)
{
	char *args = (char *)get_cmd_line_ptr();
	size_t len = strlen(args);
	char *tmp_cmdline;
	char *param, *val;
	u64 mem_size;

	if (!strstr(args, "memmap=") && !strstr(args, "mem="))
		return 0;

	tmp_cmdline = malloc(len + 1);
	if (!tmp_cmdline)
		error("Failed to allocate space for tmp_cmdline");

	memcpy(tmp_cmdline, args, len);
	tmp_cmdline[len] = 0;
	args = tmp_cmdline;

	/* Chew leading spaces */
	args = skip_spaces(args);

	while (*args) {
		args = next_arg(args, &param, &val);
		/* Stop at -- */
		if (!val && strcmp(param, "--") == 0) {
			warn("Only '--' specified in cmdline");
			free(tmp_cmdline);
			return -1;
		}

		if (!strcmp(param, "memmap")) {
			mem_avoid_memmap(val);
		} else if (!strcmp(param, "mem")) {
			char *p = val;

			if (!strcmp(p, "nopentium"))
				continue;
			mem_size = memparse(p, &p);
			if (mem_size == 0) {
				free(tmp_cmdline);
				return -EINVAL;
			}
			mem_limit = mem_size;
		}
	}

	free(tmp_cmdline);
	return 0;
}
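
/*
 * Example (illustrative): a command line of "mem=1G memmap=64M$0x10000000"
 * results in mem_limit = 1G and mem_avoid[MEM_AVOID_MEMMAP_BEGIN] =
 * { 0x10000000, 64M }. The copy into tmp_cmdline is required because
 * next_arg() writes NUL terminators into the string it parses.
 */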

/*
 * In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
 * The mem_avoid array is used to store the ranges that need to be avoided
 * when KASLR searches for an appropriate random address. We must avoid any
 * regions that are unsafe to overlap with during decompression, and other
 * things like the initrd, cmdline and boot_params. This comment seeks to
 * explain mem_avoid as clearly as possible since incorrect mem_avoid
 * memory ranges lead to really hard to debug boot failures.
 *
 * The initrd, cmdline, and boot_params are trivial to identify for
 * avoiding. They are MEM_AVOID_INITRD, MEM_AVOID_CMDLINE, and
 * MEM_AVOID_BOOTPARAMS respectively below.
 *
 * What is less obvious is how to avoid the range of memory that is used
 * during decompression (MEM_AVOID_ZO_RANGE below). This range must cover
 * the compressed kernel (ZO) and its run space, which is used to extract
 * the uncompressed kernel (VO) and relocs.
 *
 * ZO's full run size sits against the end of the decompression buffer, so
 * we can calculate where text, data, bss, etc of ZO are positioned more
 * easily.
 *
 * For additional background, the decompression calculations can be found
 * in header.S, and the memory diagram is based on the one found in misc.c.
 *
 * The following conditions are already enforced by the image layouts and
 * associated code:
 *  - input + input_size >= output + output_size
 *  - kernel_total_size <= init_size
 *  - kernel_total_size <= output_size (see Note below)
 *  - output + init_size >= output + output_size
 *
 * (Note that kernel_total_size and output_size have no fundamental
 * relationship, but output_size is passed to choose_random_location
 * as a maximum of the two. The diagram is showing a case where
 * kernel_total_size is larger than output_size, but this case is
 * handled by bumping output_size.)
 *
 * The above conditions can be illustrated by a diagram:
 *
 * 0   output            input    input+input_size          output+init_size
 * |     |                 |             |                         |
 * |     |                 |             |                         |
 * |-----|--------|--------|-------------|-----------|--|---------|
 *                |                      |           |
 *                |                      |           |
 *  output+init_size-ZO_INIT_SIZE  output+output_size  output+kernel_total_size
 *
 * [output, output+init_size) is the entire memory range used for
 * extracting the compressed image.
 *
 * [output, output+kernel_total_size) is the range needed for the
 * uncompressed kernel (VO) and its run size (bss, brk, etc).
 *
 * [output, output+output_size) is VO plus relocs (i.e. the entire
 * uncompressed payload contained by ZO). This is the area of the buffer
 * written to during decompression.
 *
 * [output+init_size-ZO_INIT_SIZE, output+init_size) is the worst-case
 * range of the copied ZO and decompression code. (i.e. the range
 * covered backwards of size ZO_INIT_SIZE, starting from output+init_size.)
 *
 * [input, input+input_size) is the original copied compressed image (ZO)
 * (i.e. it does not include its run size). This range must be avoided
 * because it contains the data used for decompression.
 *
 * [input+input_size, output+init_size) is [_text, _end) for ZO. This
 * range includes ZO's heap and stack, and must be avoided since it
 * performs the decompression.
 *
 * Since the above two ranges need to be avoided and they are adjacent,
 * they can be merged, resulting in: [input, output+init_size) which
 * becomes the MEM_AVOID_ZO_RANGE below.
 */
static void mem_avoid_init(unsigned long input, unsigned long input_size,
			   unsigned long output)
{
	unsigned long init_size = boot_params->hdr.init_size;
	u64 initrd_start, initrd_size;
	u64 cmd_line, cmd_line_size;
	char *ptr;

	/*
	 * Avoid the region that is unsafe to overlap during
	 * decompression.
	 */
	mem_avoid[MEM_AVOID_ZO_RANGE].start = input;
	mem_avoid[MEM_AVOID_ZO_RANGE].size = (output + init_size) - input;
	add_identity_map(mem_avoid[MEM_AVOID_ZO_RANGE].start,
			 mem_avoid[MEM_AVOID_ZO_RANGE].size);

	/* Avoid initrd. */
	initrd_start  = (u64)boot_params->ext_ramdisk_image << 32;
	initrd_start |= boot_params->hdr.ramdisk_image;
	initrd_size  = (u64)boot_params->ext_ramdisk_size << 32;
	initrd_size |= boot_params->hdr.ramdisk_size;
	mem_avoid[MEM_AVOID_INITRD].start = initrd_start;
	mem_avoid[MEM_AVOID_INITRD].size = initrd_size;
	/* No need to set mapping for initrd, it will be handled in VO. */

	/* Avoid kernel command line. */
	cmd_line  = (u64)boot_params->ext_cmd_line_ptr << 32;
	cmd_line |= boot_params->hdr.cmd_line_ptr;
	/* Calculate size of cmd_line. */
	ptr = (char *)(unsigned long)cmd_line;
	for (cmd_line_size = 0; ptr[cmd_line_size++]; )
		;
	mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
	mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
	add_identity_map(mem_avoid[MEM_AVOID_CMDLINE].start,
			 mem_avoid[MEM_AVOID_CMDLINE].size);

	/* Avoid boot parameters. */
	mem_avoid[MEM_AVOID_BOOTPARAMS].start = (unsigned long)boot_params;
	mem_avoid[MEM_AVOID_BOOTPARAMS].size = sizeof(*boot_params);
	add_identity_map(mem_avoid[MEM_AVOID_BOOTPARAMS].start,
			 mem_avoid[MEM_AVOID_BOOTPARAMS].size);

	/* We don't need to set a mapping for setup_data. */

	/* Mark the memmap regions we need to avoid */
	handle_mem_memmap();

#ifdef CONFIG_X86_VERBOSE_BOOTUP
	/* Make sure video RAM can be used. */
	add_identity_map(0, PMD_SIZE);
#endif
}
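
/*
 * Worked example (illustrative numbers only): with output = 0x1000000,
 * init_size = 0x600000 and ZO copied to input = 0x1400000, the merged
 * MEM_AVOID_ZO_RANGE is [0x1400000, 0x1600000), i.e. size
 * (output + init_size) - input = 0x200000. Note how the 64-bit initrd
 * and cmdline addresses are assembled above: the ext_* fields supply
 * bits 63:32 and the hdr.* fields bits 31:0.
 */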

/*
 * Does this memory vector overlap a known avoided area? If so, record the
 * overlap region with the lowest address.
 */
static bool mem_avoid_overlap(struct mem_vector *img,
			      struct mem_vector *overlap)
{
	int i;
	struct setup_data *ptr;
	unsigned long earliest = img->start + img->size;
	bool is_overlapping = false;

	for (i = 0; i < MEM_AVOID_MAX; i++) {
		if (mem_overlaps(img, &mem_avoid[i]) &&
		    mem_avoid[i].start < earliest) {
			*overlap = mem_avoid[i];
			earliest = overlap->start;
			is_overlapping = true;
		}
	}

	/* Avoid all entries in the setup_data linked list. */
	ptr = (struct setup_data *)(unsigned long)boot_params->hdr.setup_data;
	while (ptr) {
		struct mem_vector avoid;

		avoid.start = (unsigned long)ptr;
		avoid.size = sizeof(*ptr) + ptr->len;

		if (mem_overlaps(img, &avoid) && (avoid.start < earliest)) {
			*overlap = avoid;
			earliest = overlap->start;
			is_overlapping = true;
		}

		ptr = (struct setup_data *)(unsigned long)ptr->next;
	}

	return is_overlapping;
}
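
/*
 * Illustrative example: for img = [2M, +4M) with avoid ranges [3M, +1M)
 * and [5M, +1M), both overlap, and *overlap is set to [3M, +1M), the
 * one with the lowest start. process_e820_entry() uses that to keep
 * the largest usable prefix of img before the first collision.
 */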

struct slot_area {
	unsigned long addr;
	int num;
};

#define MAX_SLOT_AREA 100

static struct slot_area slot_areas[MAX_SLOT_AREA];

static unsigned long slot_max;

static unsigned long slot_area_index;

static void store_slot_info(struct mem_vector *region, unsigned long image_size)
{
	struct slot_area slot_area;

	if (slot_area_index == MAX_SLOT_AREA)
		return;

	slot_area.addr = region->start;
	slot_area.num = (region->size - image_size) /
			CONFIG_PHYSICAL_ALIGN + 1;

	if (slot_area.num > 0) {
		slot_areas[slot_area_index++] = slot_area;
		slot_max += slot_area.num;
	}
}
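
/*
 * Worked example (illustrative): with CONFIG_PHYSICAL_ALIGN = 2M, a 16M
 * region holding a 10M image yields (16M - 10M) / 2M + 1 = 4 slots; the
 * image may start at region->start plus 0M, 2M, 4M or 6M.
 */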

static unsigned long slots_fetch_random(void)
{
	unsigned long slot;
	int i;

	/* Handle case of no slots stored. */
	if (slot_max == 0)
		return 0;

	slot = kaslr_get_random_long("Physical") % slot_max;

	for (i = 0; i < slot_area_index; i++) {
		if (slot >= slot_areas[i].num) {
			slot -= slot_areas[i].num;
			continue;
		}
		return slot_areas[i].addr + slot * CONFIG_PHYSICAL_ALIGN;
	}

	if (i == slot_area_index)
		debug_putstr("slots_fetch_random() failed!?\n");
	return 0;
}
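
/*
 * Illustrative example: with slot_areas = { {addr = 16M, num = 3},
 * {addr = 64M, num = 2} } and slot_max = 5, a draw of slot = 4 skips
 * the first area (4 - 3 = 1) and returns 64M + 1 * CONFIG_PHYSICAL_ALIGN.
 */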

static void process_e820_entry(struct mem_vector *entry,
			       unsigned long minimum,
			       unsigned long image_size)
{
	struct mem_vector region, overlap;
	unsigned long start_orig, end;
	struct mem_vector cur_entry;

	/* On 32-bit, ignore entries entirely above our maximum. */
	if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
		return;

	/* Ignore entries entirely below our minimum. */
	if (entry->start + entry->size < minimum)
		return;

	/* Ignore entries above the memory limit. */
	end = min(entry->size + entry->start, mem_limit);
	if (entry->start >= end)
		return;
	cur_entry.start = entry->start;
	cur_entry.size = end - entry->start;

	region.start = cur_entry.start;
	region.size = cur_entry.size;

	/* Give up if slot area array is full. */
	while (slot_area_index < MAX_SLOT_AREA) {
		start_orig = region.start;

		/* Potentially raise address to minimum location. */
		if (region.start < minimum)
			region.start = minimum;

		/* Potentially raise address to meet alignment needs. */
		region.start = ALIGN(region.start, CONFIG_PHYSICAL_ALIGN);

		/* Did we raise the address above this e820 region? */
		if (region.start > cur_entry.start + cur_entry.size)
			return;

		/* Reduce size by any delta from the original address. */
		region.size -= region.start - start_orig;

		/* On 32-bit, reduce region size to fit within max size. */
		if (IS_ENABLED(CONFIG_X86_32) &&
		    region.start + region.size > KERNEL_IMAGE_SIZE)
			region.size = KERNEL_IMAGE_SIZE - region.start;

		/* Return if region can't contain decompressed kernel */
		if (region.size < image_size)
			return;

		/* If nothing overlaps, store the region and return. */
		if (!mem_avoid_overlap(&region, &overlap)) {
			store_slot_info(&region, image_size);
			return;
		}

		/* Store beginning of region if it holds at least image_size. */
		if (overlap.start > region.start + image_size) {
			struct mem_vector beginning;

			beginning.start = region.start;
			beginning.size = overlap.start - region.start;
			store_slot_info(&beginning, image_size);
		}

		/* Return if overlap extends to or past end of region. */
		if (overlap.start + overlap.size >= region.start + region.size)
			return;

		/* Clip off the overlapping region and start over. */
		region.size -= overlap.start - region.start + overlap.size;
		region.start = overlap.start + overlap.size;
	}
}
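
/*
 * Illustrative walk-through: for an e820 region [1M, +64M), minimum = 16M
 * and a single avoid range [20M, +4M), the loop aligns up to 16M, stores
 * the slots in [16M, 20M) (when image_size fits there), then clips the
 * region to [24M, +41M) and iterates again, so every maximal gap between
 * avoid ranges gets recorded.
 */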

static void process_e820_entries(unsigned long minimum,
				 unsigned long image_size)
{
	int i;
	struct mem_vector region;
	struct boot_e820_entry *entry;

	/* Verify potential e820 positions, appending to slots list. */
	for (i = 0; i < boot_params->e820_entries; i++) {
		entry = &boot_params->e820_table[i];
		/* Skip non-RAM entries. */
		if (entry->type != E820_TYPE_RAM)
			continue;
		region.start = entry->addr;
		region.size = entry->size;
		process_e820_entry(&region, minimum, image_size);
		if (slot_area_index == MAX_SLOT_AREA) {
			debug_putstr("Aborted e820 scan (slot_areas full)!\n");
			break;
		}
	}
}

static unsigned long find_random_phys_addr(unsigned long minimum,
					   unsigned long image_size)
{
	/* Check if we had too many memmaps. */
	if (memmap_too_large) {
		debug_putstr("Aborted e820 scan (more than 4 memmap= args)!\n");
		return 0;
	}

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);

	process_e820_entries(minimum, image_size);
	return slots_fetch_random();
}

static unsigned long find_random_virt_addr(unsigned long minimum,
					   unsigned long image_size)
{
	unsigned long slots, random_addr;

	/* Make sure minimum is aligned. */
	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
	/* Align image_size for easy slot calculations. */
	image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);

	/*
	 * How many CONFIG_PHYSICAL_ALIGN-sized slots can hold image_size
	 * within the range of minimum to KERNEL_IMAGE_SIZE?
	 */
	slots = (KERNEL_IMAGE_SIZE - minimum - image_size) /
		 CONFIG_PHYSICAL_ALIGN + 1;

	random_addr = kaslr_get_random_long("Virtual") % slots;

	return random_addr * CONFIG_PHYSICAL_ALIGN + minimum;
}
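
/*
 * Worked example (illustrative): with KERNEL_IMAGE_SIZE = 1G, minimum =
 * 16M, image_size aligned up to 24M and CONFIG_PHYSICAL_ALIGN = 2M,
 * slots = (1024M - 16M - 24M) / 2M + 1 = 493; a draw of n then maps to
 * n * 2M + 16M.
 */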

/*
 * Since this function examines addresses much more numerically,
 * it takes the input and output pointers as 'unsigned long'.
 */
void choose_random_location(unsigned long input,
			    unsigned long input_size,
			    unsigned long *output,
			    unsigned long output_size,
			    unsigned long *virt_addr)
{
	unsigned long random_addr, min_addr;

	if (cmdline_find_option_bool("nokaslr")) {
		warn("KASLR disabled: 'nokaslr' on cmdline.");
		return;
	}

	boot_params->hdr.loadflags |= KASLR_FLAG;

	/* Prepare to add new identity pagetables on demand. */
	initialize_identity_maps();

	/* Record the various known unsafe memory ranges. */
	mem_avoid_init(input, input_size, *output);

	/*
	 * Low end of the randomization range should be the
	 * smaller of 512M or the initial kernel image
	 * location:
	 */
	min_addr = min(*output, 512UL << 20);

	/* Walk e820 and find a random address. */
	random_addr = find_random_phys_addr(min_addr, output_size);
	if (!random_addr) {
		warn("Physical KASLR disabled: no suitable memory region!");
	} else {
		/* Update the new physical address location. */
		if (*output != random_addr) {
			add_identity_map(random_addr, output_size);
			*output = random_addr;
		}

		/*
		 * This loads the identity mapping page table. It should
		 * only be done if a new physical address was found for
		 * the kernel; otherwise we keep the old page table so
		 * behaviour matches the "nokaslr" case.
		 */
		finalize_identity_maps();
	}

	/* Pick random virtual address starting from LOAD_PHYSICAL_ADDR. */
	if (IS_ENABLED(CONFIG_X86_64))
		random_addr = find_random_virt_addr(LOAD_PHYSICAL_ADDR, output_size);
	*virt_addr = random_addr;
}