/*
 * arch/sh/kernel/setup.c
 *
 * This file handles the architecture-dependent parts of initialization
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 */
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/root_dev.h>
#include <linux/utsname.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/pfn.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/crash_dump.h>
#include <linux/mmzone.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/elf.h>
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/setup.h>
#include <asm/clock.h>
#include <asm/mmu_context.h>

/*
 * Initialize loops_per_jiffy to 10000000 (1000 MIPS).
 * This value is used at the very early stages of serial setup,
 * before the delay loop has been calibrated; erring on the large
 * side is harmless.
 */
struct sh_cpuinfo cpu_data[NR_CPUS] __read_mostly = {
        [0] = {
                .type                   = CPU_SH_NONE,
                .loops_per_jiffy        = 10000000,
        },
};
EXPORT_SYMBOL(cpu_data);

/*
 * The machine vector. First entry in .machvec.init, or clobbered by
 * sh_mv= on the command line, prior to .machvec.init teardown.
 */
struct sh_machine_vector sh_mv = { .mv_name = "generic", };
EXPORT_SYMBOL(sh_mv);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

extern int root_mountflags;

#define RAMDISK_IMAGE_START_MASK        0x07FF
#define RAMDISK_PROMPT_FLAG             0x8000
#define RAMDISK_LOAD_FLAG               0x4000
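
/*
 * RAMDISK_FLAGS packs three fields into one word: bits 0-10 give the
 * starting block of the ramdisk image, bit 14 requests loading, and
 * bit 15 requests a prompt. e.g. 0xC000 means "prompt, then load from
 * block 0", while 0x4010 means "load from block 16 without prompting".
 */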

static char __initdata command_line[COMMAND_LINE_SIZE] = { 0, };

static struct resource code_resource = {
        .name = "Kernel code",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource data_resource = {
        .name = "Kernel data",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource bss_resource = {
        .name = "Kernel bss",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
};

unsigned long memory_start;
EXPORT_SYMBOL(memory_start);
unsigned long memory_end = 0;
EXPORT_SYMBOL(memory_end);

static struct resource mem_resources[MAX_NUMNODES];

int l1i_cache_shape, l1d_cache_shape, l2_cache_shape;

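/*
 * "mem=" limits the amount of memory the kernel manages. memparse()
 * accepts the usual K/M/G suffixes, so e.g. "mem=32M" caps the kernel
 * at 32MiB; sizes above CONFIG_MEMORY_SIZE are rejected below.
 */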
static int __init early_parse_mem(char *p)
{
        unsigned long size;

        memory_start = (unsigned long)__va(__MEMORY_START);
        size = memparse(p, &p);

        if (size > __MEMORY_SIZE) {
                printk(KERN_ERR
                        "Using mem= to increase the size of kernel memory "
                        "is not allowed.\n"
                        "  Recompile the kernel with the correct value for "
                        "CONFIG_MEMORY_SIZE.\n");
                return 0;
        }

        memory_end = memory_start + size;

        return 0;
}
early_param("mem", early_parse_mem);

/*
 * Register fully available low RAM pages with the bootmem allocator.
 */
static void __init register_bootmem_low_pages(void)
{
        unsigned long curr_pfn, last_pfn, pages;

        /*
         * We are rounding up the start address of usable memory:
         */
        curr_pfn = PFN_UP(__MEMORY_START);

        /*
         * ... and at the end of the usable range downwards:
         */
        last_pfn = PFN_DOWN(__pa(memory_end));

        if (last_pfn > max_low_pfn)
                last_pfn = max_low_pfn;

        pages = last_pfn - curr_pfn;
        free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
}

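/*
 * "crashkernel=size[@offset]" (e.g. "crashkernel=64M@16M") reserves a
 * region for a kdump capture kernel. With no offset given, the region
 * is allocated from bootmem; with one, that exact range is reserved.
 */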
#ifdef CONFIG_KEXEC
static void __init reserve_crashkernel(void)
{
        unsigned long long free_mem;
        unsigned long long crash_size, crash_base;
        void *vp;
        int ret;

        free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;

        ret = parse_crashkernel(boot_command_line, free_mem,
                                &crash_size, &crash_base);
        if (ret == 0 && crash_size) {
                if (crash_base <= 0) {
                        vp = alloc_bootmem_nopanic(crash_size);
                        if (!vp) {
                                printk(KERN_INFO "crashkernel allocation "
                                       "failed\n");
                                return;
                        }
                        crash_base = __pa(vp);
                } else if (reserve_bootmem(crash_base, crash_size,
                                           BOOTMEM_EXCLUSIVE) < 0) {
                        printk(KERN_INFO "crashkernel reservation failed - "
                               "memory is in use\n");
                        return;
                }

                printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
                       "for crashkernel (System RAM: %ldMB)\n",
                       (unsigned long)(crash_size >> 20),
                       (unsigned long)(crash_base >> 20),
                       (unsigned long)(free_mem >> 20));
                crashk_res.start = crash_base;
                crashk_res.end   = crash_base + crash_size - 1;
                insert_resource(&iomem_resource, &crashk_res);
        }
}
#else
static inline void __init reserve_crashkernel(void)
{}
#endif

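/*
 * loops_per_jiffy is preset from the CPU clock rather than measured:
 * lpj = (rate / 2) / HZ. e.g. a 400MHz cpu_clk with HZ=100 gives
 * lpj = 2000000, which the printout below reports as 400.00 BogoMIPS.
 */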
void __cpuinit calibrate_delay(void)
{
        struct clk *clk = clk_get(NULL, "cpu_clk");

        if (IS_ERR(clk))
                panic("Need a sane CPU clock definition!");

        loops_per_jiffy = (clk_get_rate(clk) >> 1) / HZ;

        printk(KERN_INFO "Calibrating delay loop (skipped)... "
                         "%lu.%02lu BogoMIPS PRESET (lpj=%lu)\n",
                         loops_per_jiffy/(500000/HZ),
                         (loops_per_jiffy/(5000/HZ)) % 100,
                         loops_per_jiffy);
}

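/*
 * Each node's RAM is published as a "System RAM" entry under
 * iomem_resource, with the kernel text/data/bss resources nested
 * inside whichever region happens to contain them, so the layout
 * shows up in /proc/iomem.
 */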
void __init __add_active_range(unsigned int nid, unsigned long start_pfn,
                               unsigned long end_pfn)
{
        struct resource *res = &mem_resources[nid];

        WARN_ON(res->name); /* max one active range per node for now */

        res->name = "System RAM";
        res->start = start_pfn << PAGE_SHIFT;
        res->end = (end_pfn << PAGE_SHIFT) - 1;
        res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
        if (request_resource(&iomem_resource, res)) {
                pr_err("unable to request memory_resource 0x%lx 0x%lx\n",
                       start_pfn, end_pfn);
                return;
        }

        /*
         * We don't know which RAM region contains kernel data,
         * so we try it repeatedly and let the resource manager
         * test it.
         */
        request_resource(res, &code_resource);
        request_resource(res, &data_resource);
        request_resource(res, &bss_resource);

        add_active_range(nid, start_pfn, end_pfn);
}

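/*
 * Early physical layout, as the reservations below assume it: the zero
 * page area sits at __MEMORY_START, the kernel image follows from
 * __MEMORY_START + CONFIG_ZERO_PAGE_OFFSET, and the bootmem bitmap is
 * placed just past the kernel at PFN_PHYS(free_pfn).
 */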
void __init setup_bootmem_allocator(unsigned long free_pfn)
{
        unsigned long bootmap_size;

        /*
         * Find a proper area for the bootmem bitmap. After this
         * bootstrap step all allocations (until the page allocator
         * is up) must be done via bootmem_alloc().
         */
        bootmap_size = init_bootmem_node(NODE_DATA(0), free_pfn,
                                         min_low_pfn, max_low_pfn);

        __add_active_range(0, min_low_pfn, max_low_pfn);
        register_bootmem_low_pages();

        node_set_online(0);

        /*
         * Reserve the kernel text and the bootmem bitmap. We do this
         * in two steps (the first step was init_bootmem()), because
         * this catches the (definitely buggy) case of us accidentally
         * initializing the bootmem allocator with an invalid RAM area.
         */
        reserve_bootmem(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
                        (PFN_PHYS(free_pfn) + bootmap_size + PAGE_SIZE - 1) -
                        (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET),
                        BOOTMEM_DEFAULT);

        /*
         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
         */
        if (CONFIG_ZERO_PAGE_OFFSET != 0)
                reserve_bootmem(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET,
                                BOOTMEM_DEFAULT);

        sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_BLK_DEV_INITRD
        ROOT_DEV = Root_RAM0;

        if (LOADER_TYPE && INITRD_START) {
                unsigned long initrd_start_phys = INITRD_START + __MEMORY_START;

                if (initrd_start_phys + INITRD_SIZE <= PFN_PHYS(max_low_pfn)) {
                        reserve_bootmem(initrd_start_phys, INITRD_SIZE,
                                        BOOTMEM_DEFAULT);
                        initrd_start = (unsigned long)__va(initrd_start_phys);
                        initrd_end = initrd_start + INITRD_SIZE;
                } else {
                        printk("initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               initrd_start_phys + INITRD_SIZE,
                               (unsigned long)PFN_PHYS(max_low_pfn));
                        initrd_start = 0;
                }
        }
#endif

        reserve_crashkernel();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
static void __init setup_memory(void)
{
        unsigned long start_pfn;

        /*
         * Partially used pages are not usable - thus
         * we are rounding upwards:
         */
        start_pfn = PFN_UP(__pa(_end));
        setup_bootmem_allocator(start_pfn);
}
#else
extern void __init setup_memory(void);
#endif

/*
 * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by
 * is_kdump_kernel() to determine if we are booting after a panic. Hence
 * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE.
 */
#ifdef CONFIG_CRASH_DUMP
/* elfcorehdr= specifies the location of elf core header
 * stored by the crashed kernel.
 */
static int __init parse_elfcorehdr(char *arg)
{
        if (!arg)
                return -EINVAL;
        elfcorehdr_addr = memparse(arg, &arg);
        return 0;
}
early_param("elfcorehdr", parse_elfcorehdr);
#endif

void __init __attribute__ ((weak)) plat_early_device_setup(void)
{
}

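/*
 * Rough order of business below: turn the MMU on, decode the boot
 * params, pick a command line, size memory (min_low_pfn/max_low_pfn),
 * hand it to bootmem, run the board's mv_setup() hook, then bring up
 * paging (and, on SMP, the other CPUs' plumbing).
 */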
void __init setup_arch(char **cmdline_p)
{
        enable_mmu();

        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

        printk(KERN_NOTICE "Boot params:\n"
               "... MOUNT_ROOT_RDONLY - %08lx\n"
               "... RAMDISK_FLAGS     - %08lx\n"
               "... ORIG_ROOT_DEV     - %08lx\n"
               "... LOADER_TYPE       - %08lx\n"
               "... INITRD_START      - %08lx\n"
               "... INITRD_SIZE       - %08lx\n",
               MOUNT_ROOT_RDONLY, RAMDISK_FLAGS,
               ORIG_ROOT_DEV, LOADER_TYPE,
               INITRD_START, INITRD_SIZE);

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

        if (!MOUNT_ROOT_RDONLY)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code = (unsigned long) _etext;
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = (unsigned long) _end;

        code_resource.start = virt_to_phys(_text);
        code_resource.end = virt_to_phys(_etext)-1;
        data_resource.start = virt_to_phys(_etext);
        data_resource.end = virt_to_phys(_edata)-1;
        bss_resource.start = virt_to_phys(__bss_start);
        bss_resource.end = virt_to_phys(_ebss)-1;

        memory_start = (unsigned long)__va(__MEMORY_START);
        if (!memory_end)
                memory_end = memory_start + __MEMORY_SIZE;

#ifdef CONFIG_CMDLINE_BOOL
        strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
#else
        strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
#endif

        /* Save unparsed command line copy for /proc/cmdline */
        memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;

        parse_early_param();

        plat_early_device_setup();

        sh_mv_setup();

        /*
         * Find the highest page frame number we have available
         */
        max_pfn = PFN_DOWN(__pa(memory_end));

        /*
         * Determine low and high memory ranges:
         */
        max_low_pfn = max_pfn;
        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

        nodes_clear(node_online_map);

        /* Setup bootmem with available RAM */
        setup_memory();
        sparse_init();

#ifdef CONFIG_DUMMY_CONSOLE
        conswitchp = &dummy_con;
#endif

        /* Perform the machine specific initialisation */
        if (likely(sh_mv.mv_setup))
                sh_mv.mv_setup(cmdline_p);

        paging_init();

#ifdef CONFIG_SMP
        plat_smp_setup();
#endif
}

/* processor boot mode configuration */
int generic_mode_pins(void)
{
        pr_warning("generic_mode_pins(): missing mode pin configuration\n");
        return 0;
}

int test_mode_pin(int pin)
{
        return sh_mv.mv_mode_pins() & pin;
}

static const char *cpu_name[] = {
        [CPU_SH7201]    = "SH7201",
        [CPU_SH7203]    = "SH7203",     [CPU_SH7263]    = "SH7263",
        [CPU_SH7206]    = "SH7206",     [CPU_SH7619]    = "SH7619",
        [CPU_SH7705]    = "SH7705",     [CPU_SH7706]    = "SH7706",
        [CPU_SH7707]    = "SH7707",     [CPU_SH7708]    = "SH7708",
        [CPU_SH7709]    = "SH7709",     [CPU_SH7710]    = "SH7710",
        [CPU_SH7712]    = "SH7712",     [CPU_SH7720]    = "SH7720",
        [CPU_SH7721]    = "SH7721",     [CPU_SH7729]    = "SH7729",
        [CPU_SH7750]    = "SH7750",     [CPU_SH7750S]   = "SH7750S",
        [CPU_SH7750R]   = "SH7750R",    [CPU_SH7751]    = "SH7751",
        [CPU_SH7751R]   = "SH7751R",    [CPU_SH7760]    = "SH7760",
        [CPU_SH4_202]   = "SH4-202",    [CPU_SH4_501]   = "SH4-501",
        [CPU_SH7763]    = "SH7763",     [CPU_SH7770]    = "SH7770",
        [CPU_SH7780]    = "SH7780",     [CPU_SH7781]    = "SH7781",
        [CPU_SH7343]    = "SH7343",     [CPU_SH7785]    = "SH7785",
        [CPU_SH7786]    = "SH7786",
        [CPU_SH7722]    = "SH7722",     [CPU_SHX3]      = "SH-X3",
        [CPU_SH5_101]   = "SH5-101",    [CPU_SH5_103]   = "SH5-103",
        [CPU_MXG]       = "MX-G",       [CPU_SH7723]    = "SH7723",
        [CPU_SH7366]    = "SH7366",     [CPU_SH7724]    = "SH7724",
        [CPU_SH_NONE]   = "Unknown"
};

const char *get_cpu_subtype(struct sh_cpuinfo *c)
{
        return cpu_name[c->type];
}
EXPORT_SYMBOL(get_cpu_subtype);

#ifdef CONFIG_PROC_FS
/* Symbolic CPU flags, keep in sync with asm/cpu-features.h */
static const char *cpu_flags[] = {
        "none", "fpu", "p2flush", "mmuassoc", "dsp", "perfctr",
        "ptea", "llsc", "l2", "op32", "pteaex", NULL
};

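/*
 * Flag bit i of c->flags maps to cpu_flags[i + 1], since index 0 is
 * the "none" placeholder printed when no flags are set; the trailing
 * NULL entry terminates the scan.
 */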
static void show_cpuflags(struct seq_file *m, struct sh_cpuinfo *c)
{
        unsigned long i;

        seq_printf(m, "cpu flags\t:");

        if (!c->flags) {
                seq_printf(m, " %s\n", cpu_flags[0]);
                return;
        }

        for (i = 0; cpu_flags[i]; i++)
                if ((c->flags & (1 << i)))
                        seq_printf(m, " %s", cpu_flags[i+1]);

        seq_printf(m, "\n");
}

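/*
 * Total cache size is ways * sets * line size. e.g. a 4-way cache
 * with 256 sets of 32-byte lines reports "32KiB (4-way)".
 */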
static void show_cacheinfo(struct seq_file *m, const char *type,
                           struct cache_info info)
{
        unsigned int cache_size;

        cache_size = info.ways * info.sets * info.linesz;

        seq_printf(m, "%s size\t: %2dKiB (%d-way)\n",
                   type, cache_size >> 10, info.ways);
}

/*
 * Get CPU information for use by the procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct sh_cpuinfo *c = v;
        unsigned int cpu = c - cpu_data;

        if (!cpu_online(cpu))
                return 0;

        if (cpu == 0)
                seq_printf(m, "machine\t\t: %s\n", get_system_type());

        seq_printf(m, "processor\t: %d\n", cpu);
        seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
        seq_printf(m, "cpu type\t: %s\n", get_cpu_subtype(c));
        if (c->cut_major == -1)
                seq_printf(m, "cut\t\t: unknown\n");
        else if (c->cut_minor == -1)
                seq_printf(m, "cut\t\t: %d.x\n", c->cut_major);
        else
                seq_printf(m, "cut\t\t: %d.%d\n", c->cut_major, c->cut_minor);

        show_cpuflags(m, c);

        seq_printf(m, "cache type\t: ");

        /*
         * Check what type of cache we have: both the unified cache of
         * the SH-2 and SH-3 and the Harvard-style split cache of the
         * SH-4 are supported.
         */
        if (c->icache.flags & SH_CACHE_COMBINED) {
                seq_printf(m, "unified\n");
                show_cacheinfo(m, "cache", c->icache);
        } else {
                seq_printf(m, "split (harvard)\n");
                show_cacheinfo(m, "icache", c->icache);
                show_cacheinfo(m, "dcache", c->dcache);
        }

        /* Optional secondary cache */
        if (c->flags & CPU_HAS_L2_CACHE)
                show_cacheinfo(m, "scache", c->scache);

        seq_printf(m, "bogomips\t: %lu.%02lu\n",
                   c->loops_per_jiffy/(500000/HZ),
                   (c->loops_per_jiffy/(5000/HZ)) % 100);

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
        .start  = c_start,
        .next   = c_next,
        .stop   = c_stop,
        .show   = show_cpuinfo,
};
#endif /* CONFIG_PROC_FS */

struct dentry *sh_debugfs_root;

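/*
 * debugfs_create_dir() returns NULL on allocation failure but an
 * ERR_PTR when debugfs itself is not compiled in, hence the two
 * separate checks below.
 */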
static int __init sh_debugfs_init(void)
{
        sh_debugfs_root = debugfs_create_dir("sh", NULL);
        if (!sh_debugfs_root)
                return -ENOMEM;
        if (IS_ERR(sh_debugfs_root))
                return PTR_ERR(sh_debugfs_root);

        return 0;
}
arch_initcall(sh_debugfs_init);