1 /*
2 * File: arch/blackfin/kernel/setup.c
3 * Based on:
4 * Author:
5 *
6 * Created:
7 * Description:
8 *
9 * Modified:
10 * Copyright 2004-2006 Analog Devices Inc.
11 *
12 * Bugs: Enter bugs at http://blackfin.uclinux.org/
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, see the file COPYING, or write
26 * to the Free Software Foundation, Inc.,
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */
29
30 #include <linux/delay.h>
31 #include <linux/console.h>
32 #include <linux/bootmem.h>
33 #include <linux/seq_file.h>
34 #include <linux/cpu.h>
35 #include <linux/module.h>
36 #include <linux/tty.h>
37 #include <linux/pfn.h>
38
39 #include <linux/ext2_fs.h>
40 #include <linux/cramfs_fs.h>
41 #include <linux/romfs_fs.h>
42
43 #include <asm/cplb.h>
44 #include <asm/cacheflush.h>
45 #include <asm/blackfin.h>
46 #include <asm/cplbinit.h>
47 #include <asm/div64.h>
48 #include <asm/fixed_code.h>
49 #include <asm/early_printk.h>
50
51 u16 _bfin_swrst;
52
53 unsigned long memory_start, memory_end, physical_mem_end;
54 unsigned long reserved_mem_dcache_on;
55 unsigned long reserved_mem_icache_on;
56 EXPORT_SYMBOL(memory_start);
57 EXPORT_SYMBOL(memory_end);
58 EXPORT_SYMBOL(physical_mem_end);
59 EXPORT_SYMBOL(_ramend);
60
61 #ifdef CONFIG_MTD_UCLINUX
62 unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
63 unsigned long _ebss;
64 EXPORT_SYMBOL(memory_mtd_end);
65 EXPORT_SYMBOL(memory_mtd_start);
66 EXPORT_SYMBOL(mtd_size);
67 #endif
68
69 char __initdata command_line[COMMAND_LINE_SIZE];
70
71 /* boot memmap, for parsing "memmap=" */
72 #define BFIN_MEMMAP_MAX 128 /* number of entries in bfin_memmap */
73 #define BFIN_MEMMAP_RAM 1
74 #define BFIN_MEMMAP_RESERVED 2
75 struct bfin_memmap {
76 int nr_map;
77 struct bfin_memmap_entry {
78 unsigned long long addr; /* start of memory segment */
79 unsigned long long size;
80 unsigned long type;
81 } map[BFIN_MEMMAP_MAX];
82 } bfin_memmap __initdata;
83
84 /* for memmap sanitization */
85 struct change_member {
86 struct bfin_memmap_entry *pentry; /* pointer to original entry */
87 unsigned long long addr; /* address for this change point */
88 };
89 static struct change_member change_point_list[2*BFIN_MEMMAP_MAX] __initdata;
90 static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
91 static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
92 static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;
93
94 void __init bf53x_cache_init(void)
95 {
96 #if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
97 generate_cpl_tables();
98 #endif
99
100 #ifdef CONFIG_BFIN_ICACHE
101 bfin_icache_init();
102 printk(KERN_INFO "Instruction Cache Enabled\n");
103 #endif
104
105 #ifdef CONFIG_BFIN_DCACHE
106 bfin_dcache_init();
107 printk(KERN_INFO "Data Cache Enabled"
108 # if defined CONFIG_BFIN_WB
109 " (write-back)"
110 # elif defined CONFIG_BFIN_WT
111 " (write-through)"
112 # endif
113 "\n");
114 #endif
115 }
116
117 void __init bf53x_relocate_l1_mem(void)
118 {
119 unsigned long l1_code_length;
120 unsigned long l1_data_a_length;
121 unsigned long l1_data_b_length;
122
123 l1_code_length = _etext_l1 - _stext_l1;
124 if (l1_code_length > L1_CODE_LENGTH)
125 l1_code_length = L1_CODE_LENGTH;
126 /* We cannot complain yet, as printk is not available.
127 * But we can continue booting and complain later!
128 */
129
130 /* Copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
131 dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
132
133 l1_data_a_length = _ebss_l1 - _sdata_l1;
134 if (l1_data_a_length > L1_DATA_A_LENGTH)
135 l1_data_a_length = L1_DATA_A_LENGTH;
136
137 /* Copy _sdata_l1 to _ebss_l1 to L1 data bank A SRAM */
138 dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
139
140 l1_data_b_length = _ebss_b_l1 - _sdata_b_l1;
141 if (l1_data_b_length > L1_DATA_B_LENGTH)
142 l1_data_b_length = L1_DATA_B_LENGTH;
143
144 /* Copy _sdata_b_l1 to _ebss_b_l1 to L1 data bank B SRAM */
145 dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
146 l1_data_a_length, l1_data_b_length);
147
148 }
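/*
 * Note on the copy offsets used above: this code assumes the three L1
 * images are stored back-to-back at _l1_lma_start in the kernel image,
 * in the order code, data bank A, data bank B, i.e. (sketch only):
 *
 *   _l1_lma_start                                      -> _stext_l1   (L1 code)
 *   _l1_lma_start + l1_code_length                     -> _sdata_l1   (L1 data A)
 *   _l1_lma_start + l1_code_length + l1_data_a_length  -> _sdata_b_l1 (L1 data B)
 */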
149
150 /* add_memory_region to memmap */
151 static void __init add_memory_region(unsigned long long start,
152 unsigned long long size, int type)
153 {
154 int i;
155
156 i = bfin_memmap.nr_map;
157
158 if (i == BFIN_MEMMAP_MAX) {
159 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
160 return;
161 }
162
163 bfin_memmap.map[i].addr = start;
164 bfin_memmap.map[i].size = size;
165 bfin_memmap.map[i].type = type;
166 bfin_memmap.nr_map++;
167 }
168
169 /*
170 * Sanitize the boot memmap, removing overlaps.
171 */
172 static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
173 {
174 struct change_member *change_tmp;
175 unsigned long current_type, last_type;
176 unsigned long long last_addr;
177 int chgidx, still_changing;
178 int overlap_entries;
179 int new_entry;
180 int old_nr, new_nr, chg_nr;
181 int i;
182
183 /*
184 Visually we're performing the following (1,2,3,4 = memory types)
185
186 Sample memory map (w/overlaps):
187 ____22__________________
188 ______________________4_
189 ____1111________________
190 _44_____________________
191 11111111________________
192 ____________________33__
193 ___________44___________
194 __________33333_________
195 ______________22________
196 ___________________2222_
197 _________111111111______
198 _____________________11_
199 _________________4______
200
201 Sanitized equivalent (no overlap):
202 1_______________________
203 _44_____________________
204 ___1____________________
205 ____22__________________
206 ______11________________
207 _________1______________
208 __________3_____________
209 ___________44___________
210 _____________33_________
211 _______________2________
212 ________________1_______
213 _________________4______
214 ___________________2____
215 ____________________33__
216 ______________________4_
217 */
218 /* if there's only one memory region, don't bother */
219 if (*pnr_map < 2)
220 return -1;
221
222 old_nr = *pnr_map;
223
224 /* bail out if we find any unreasonable addresses in memmap */
225 for (i = 0; i < old_nr; i++)
226 if (map[i].addr + map[i].size < map[i].addr)
227 return -1;
228
229 /* create pointers for initial change-point information (for sorting) */
230 for (i = 0; i < 2*old_nr; i++)
231 change_point[i] = &change_point_list[i];
232
233 /* record all known change-points (starting and ending addresses),
234 omitting those that are for empty memory regions */
235 chgidx = 0;
236 for (i = 0; i < old_nr; i++) {
237 if (map[i].size != 0) {
238 change_point[chgidx]->addr = map[i].addr;
239 change_point[chgidx++]->pentry = &map[i];
240 change_point[chgidx]->addr = map[i].addr + map[i].size;
241 change_point[chgidx++]->pentry = &map[i];
242 }
243 }
244 chg_nr = chgidx; /* true number of change-points */
245
246 /* sort change-point list by memory addresses (low -> high) */
247 still_changing = 1;
248 while (still_changing) {
249 still_changing = 0;
250 for (i = 1; i < chg_nr; i++) {
251 /* if <current_addr> > <last_addr>, swap */
252 /* or, if current=<start_addr> & last=<end_addr>, swap */
253 if ((change_point[i]->addr < change_point[i-1]->addr) ||
254 ((change_point[i]->addr == change_point[i-1]->addr) &&
255 (change_point[i]->addr == change_point[i]->pentry->addr) &&
256 (change_point[i-1]->addr != change_point[i-1]->pentry->addr))
257 ) {
258 change_tmp = change_point[i];
259 change_point[i] = change_point[i-1];
260 change_point[i-1] = change_tmp;
261 still_changing = 1;
262 }
263 }
264 }
265
266 /* create a new memmap, removing overlaps */
267 overlap_entries = 0; /* number of entries in the overlap table */
268 new_entry = 0; /* index for creating new memmap entries */
269 last_type = 0; /* start with undefined memory type */
270 last_addr = 0; /* start with 0 as last starting address */
271 /* loop through change-points, determining the effect on the new memmap */
272 for (chgidx = 0; chgidx < chg_nr; chgidx++) {
273 /* keep track of all overlapping memmap entries */
274 if (change_point[chgidx]->addr == change_point[chgidx]->pentry->addr) {
275 /* add map entry to overlap list (> 1 entry implies an overlap) */
276 overlap_list[overlap_entries++] = change_point[chgidx]->pentry;
277 } else {
278 /* remove entry from list (order independent, so swap with last) */
279 for (i = 0; i < overlap_entries; i++) {
280 if (overlap_list[i] == change_point[chgidx]->pentry)
281 overlap_list[i] = overlap_list[overlap_entries-1];
282 }
283 overlap_entries--;
284 }
285 /* if there are overlapping entries, decide which "type" to use */
286 /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
287 current_type = 0;
288 for (i = 0; i < overlap_entries; i++)
289 if (overlap_list[i]->type > current_type)
290 current_type = overlap_list[i]->type;
291 /* continue building up new memmap based on this information */
292 if (current_type != last_type) {
293 if (last_type != 0) {
294 new_map[new_entry].size =
295 change_point[chgidx]->addr - last_addr;
296 /* move forward only if the new size was non-zero */
297 if (new_map[new_entry].size != 0)
298 if (++new_entry >= BFIN_MEMMAP_MAX)
299 break; /* no more space left for new entries */
300 }
301 if (current_type != 0) {
302 new_map[new_entry].addr = change_point[chgidx]->addr;
303 new_map[new_entry].type = current_type;
304 last_addr = change_point[chgidx]->addr;
305 }
306 last_type = current_type;
307 }
308 }
309 new_nr = new_entry; /* retain count for new entries */
310
311 /* copy new mapping into original location */
312 memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
313 *pnr_map = new_nr;
314
315 return 0;
316 }
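/*
 * Worked example for sanitize_memmap() (hypothetical regions): given the
 * overlapping input entries
 *
 *   { addr = 0x00000000, size = 0x04000000, type = BFIN_MEMMAP_RAM      }
 *   { addr = 0x03c00000, size = 0x00800000, type = BFIN_MEMMAP_RESERVED }
 *
 * the higher type wins inside the overlap, so the map is rewritten as
 *
 *   { addr = 0x00000000, size = 0x03c00000, type = BFIN_MEMMAP_RAM      }
 *   { addr = 0x03c00000, size = 0x00800000, type = BFIN_MEMMAP_RESERVED }
 */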
317
318 static void __init print_memory_map(char *who)
319 {
320 int i;
321
322 for (i = 0; i < bfin_memmap.nr_map; i++) {
323 printk(KERN_DEBUG " %s: %016Lx - %016Lx ", who,
324 bfin_memmap.map[i].addr,
325 bfin_memmap.map[i].addr + bfin_memmap.map[i].size);
326 switch (bfin_memmap.map[i].type) {
327 case BFIN_MEMMAP_RAM:
328 printk("(usable)\n");
329 break;
330 case BFIN_MEMMAP_RESERVED:
331 printk("(reserved)\n");
332 break;
333 default: printk("type %lu\n", bfin_memmap.map[i].type);
334 break;
335 }
336 }
337 }
338
339 static __init int parse_memmap(char *arg)
340 {
341 unsigned long long start_at, mem_size;
342
343 if (!arg)
344 return -EINVAL;
345
346 mem_size = memparse(arg, &arg);
347 if (*arg == '@') {
348 start_at = memparse(arg+1, &arg);
349 add_memory_region(start_at, mem_size, BFIN_MEMMAP_RAM);
350 } else if (*arg == '$') {
351 start_at = memparse(arg+1, &arg);
352 add_memory_region(start_at, mem_size, BFIN_MEMMAP_RESERVED);
353 }
354
355 return 0;
356 }
357
358 /*
359 * Initial parsing of the command line. Currently, we support:
360 * - Controlling the linux memory size: mem=xxx[KMG]
361 * - Controlling the physical memory size: max_mem=xxx[KMG][$][#]
362 * $ -> reserved memory is dcacheable
363 * # -> reserved memory is icacheable
364 * - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
365 * @ from <start> to <start>+<mem>, type RAM
366 * $ from <start> to <start>+<mem>, type RESERVED
367 *
368 */
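/*
 * For example (values are hypothetical), booting with
 *
 *   mem=32M max_mem=64M$ memmap=8M$56M
 *
 * caps kernel-managed memory at 32MB, reports 64MB of physical memory
 * with the reserved part marked dcacheable, and adds an 8MB RESERVED
 * region starting at 56MB to the boot memmap.
 */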
369 static __init void parse_cmdline_early(char *cmdline_p)
370 {
371 char c = ' ', *to = cmdline_p;
372 unsigned int memsize;
373 for (;;) {
374 if (c == ' ') {
375 if (!memcmp(to, "mem=", 4)) {
376 to += 4;
377 memsize = memparse(to, &to);
378 if (memsize)
379 _ramend = memsize;
380
381 } else if (!memcmp(to, "max_mem=", 8)) {
382 to += 8;
383 memsize = memparse(to, &to);
384 if (memsize) {
385 physical_mem_end = memsize;
386 if (*to != ' ') {
387 if (*to == '$'
388 || *(to + 1) == '$')
389 reserved_mem_dcache_on =
390 1;
391 if (*to == '#'
392 || *(to + 1) == '#')
393 reserved_mem_icache_on =
394 1;
395 }
396 }
397 } else if (!memcmp(to, "earlyprintk=", 12)) {
398 to += 12;
399 setup_early_printk(to);
400 } else if (!memcmp(to, "memmap=", 7)) {
401 to += 7;
402 parse_memmap(to);
403 }
404 }
405 c = *(to++);
406 if (!c)
407 break;
408 }
409 }
410
411 /*
412 * Setup memory defaults from user config.
413 * The physical memory layout looks like:
414 *
415 * [_rambase, _ramstart]: kernel image
416 * [memory_start, memory_end]: dynamic memory managed by kernel
417 * [memory_end, _ramend]: reserved memory
418 * [memory_mtd_start (= memory_end),
419 * memory_mtd_start + mtd_size]: rootfs (if any)
420 * [_ramend - DMA_UNCACHED_REGION,
421 * _ramend]: uncached DMA region
422 * [_ramend, physical_mem_end]: memory not managed by kernel
423 *
424 */
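/*
 * Illustration with hypothetical sizes: if CONFIG_MEM_SIZE is 64 and
 * DMA_UNCACHED_REGION is 1MB, then _ramend is 64MB and memory_end starts
 * out at 63MB; with CONFIG_MTD_UCLINUX and a 4MB rootfs image, memory_end
 * drops to 59MB and the rootfs is copied to [59MB, 63MB], just below the
 * uncached DMA region.
 */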
425 static __init void memory_setup(void)
426 {
#ifdef CONFIG_MTD_UCLINUX
	unsigned long mtd_phys = 0;	/* physical start of the attached rootfs image */
#endif
427 _rambase = (unsigned long)_stext;
428 _ramstart = (unsigned long)__bss_stop;
429
430 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
431 console_init();
432 panic("DMA region exceeds memory limit: %lu.\n",
433 _ramend - _ramstart);
434 }
435 memory_end = _ramend - DMA_UNCACHED_REGION;
436
437 #ifdef CONFIG_MPU
438 /* Round up to multiple of 4MB. */
439 memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
440 #else
441 memory_start = PAGE_ALIGN(_ramstart);
442 #endif
443
444 #if defined(CONFIG_MTD_UCLINUX)
445 /* generic memory mapped MTD driver */
446 memory_mtd_end = memory_end;
447
448 mtd_phys = _ramstart;
449 mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 8)));
450
451 # if defined(CONFIG_EXT2_FS) || defined(CONFIG_EXT3_FS)
452 if (*((unsigned short *)(mtd_phys + 0x438)) == EXT2_SUPER_MAGIC)
453 mtd_size =
454 PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x404)) << 10);
455 # endif
456
457 # if defined(CONFIG_CRAMFS)
458 if (*((unsigned long *)(mtd_phys)) == CRAMFS_MAGIC)
459 mtd_size = PAGE_ALIGN(*((unsigned long *)(mtd_phys + 0x4)));
460 # endif
461
462 # if defined(CONFIG_ROMFS_FS)
463 if (((unsigned long *)mtd_phys)[0] == ROMSB_WORD0
464 && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1)
465 mtd_size =
466 PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
467 # if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
468 /* Due to a Hardware Anomaly we need to limit the size of usable
469 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
470 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
471 */
472 # if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
473 if (memory_end >= 56 * 1024 * 1024)
474 memory_end = 56 * 1024 * 1024;
475 # else
476 if (memory_end >= 60 * 1024 * 1024)
477 memory_end = 60 * 1024 * 1024;
478 # endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */
479 # endif /* ANOMALY_05000263 */
480 # endif /* CONFIG_ROMFS_FS */
481
482 memory_end -= mtd_size;
483
484 if (mtd_size == 0) {
485 console_init();
486 panic("Don't boot kernel without rootfs attached.\n");
487 }
488
489 /* Relocate the MTD image to the new memory_end, just below the uncached DMA region */
490 dma_memcpy((char *)memory_end, __bss_stop, mtd_size);
491
492 memory_mtd_start = memory_end;
493 _ebss = memory_mtd_start;	/* define _ebss for compatibility */
494 #endif /* CONFIG_MTD_UCLINUX */
495
496 #if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
497 /* Due to a Hardware Anomaly we need to limit the size of usable
498 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
499 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
500 */
501 #if (defined(CONFIG_DEBUG_HUNT_FOR_ZERO))
502 if (memory_end >= 56 * 1024 * 1024)
503 memory_end = 56 * 1024 * 1024;
504 #else
505 if (memory_end >= 60 * 1024 * 1024)
506 memory_end = 60 * 1024 * 1024;
507 #endif /* CONFIG_DEBUG_HUNT_FOR_ZERO */
508 printk(KERN_NOTICE "Warning: limiting memory to %liMB due to hardware anomaly 05000263\n", memory_end >> 20);
509 #endif /* ANOMALY_05000263 */
510
511 #ifdef CONFIG_MPU
512 page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
513 page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
514 #endif
515
516 #if !defined(CONFIG_MTD_UCLINUX)
517 /* In case there is no valid CPLB behind memory_end, make sure we don't get too close */
518 memory_end -= SIZE_4K;
519 #endif
520
521 init_mm.start_code = (unsigned long)_stext;
522 init_mm.end_code = (unsigned long)_etext;
523 init_mm.end_data = (unsigned long)_edata;
524 init_mm.brk = (unsigned long)0;
525
526 printk(KERN_INFO "Board Memory: %ldMB\n", physical_mem_end >> 20);
527 printk(KERN_INFO "Kernel Managed Memory: %ldMB\n", _ramend >> 20);
528
529 printk( KERN_INFO "Memory map:\n"
530 KERN_INFO " text = 0x%p-0x%p\n"
531 KERN_INFO " rodata = 0x%p-0x%p\n"
532 KERN_INFO " data = 0x%p-0x%p\n"
533 KERN_INFO " stack = 0x%p-0x%p\n"
534 KERN_INFO " init = 0x%p-0x%p\n"
535 KERN_INFO " bss = 0x%p-0x%p\n"
536 KERN_INFO " available = 0x%p-0x%p\n"
537 #ifdef CONFIG_MTD_UCLINUX
538 KERN_INFO " rootfs = 0x%p-0x%p\n"
539 #endif
540 #if DMA_UNCACHED_REGION > 0
541 KERN_INFO " DMA Zone = 0x%p-0x%p\n"
542 #endif
543 , _stext, _etext,
544 __start_rodata, __end_rodata,
545 _sdata, _edata,
546 (void *)&init_thread_union,
547 (void *)((int)(&init_thread_union) + 0x2000),
548 __init_begin, __init_end,
549 __bss_start, __bss_stop,
550 (void *)_ramstart, (void *)memory_end
551 #ifdef CONFIG_MTD_UCLINUX
552 , (void *)memory_mtd_start, (void *)(memory_mtd_start + mtd_size)
553 #endif
554 #if DMA_UNCACHED_REGION > 0
555 , (void *)(_ramend - DMA_UNCACHED_REGION), (void *)(_ramend)
556 #endif
557 );
558 }
559
560 static __init void setup_bootmem_allocator(void)
561 {
562 int bootmap_size;
563 int i;
564 unsigned long min_pfn, max_pfn;
565 unsigned long curr_pfn, last_pfn, size;
566
567 /* mark memory between memory_start and memory_end usable */
568 add_memory_region(memory_start,
569 memory_end - memory_start, BFIN_MEMMAP_RAM);
570 /* sanity check for overlap */
571 sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map);
572 print_memory_map("boot memmap");
573
574 min_pfn = PAGE_OFFSET >> PAGE_SHIFT;
575 max_pfn = memory_end >> PAGE_SHIFT;
576
577 /*
578 * give all the memory to the bootmem allocator, telling it to put the
579 * boot mem_map (bootmem bitmap) at the start of memory.
580 */
581 bootmap_size = init_bootmem_node(NODE_DATA(0),
582 memory_start >> PAGE_SHIFT, /* map goes here */
583 min_pfn, max_pfn);
584
585 /* register the memmap regions with the bootmem allocator */
586 for (i = 0; i < bfin_memmap.nr_map; i++) {
587 /*
588 * Reserve usable memory
589 */
590 if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM)
591 continue;
592 /*
593 * We are rounding up the start address of usable memory:
594 */
595 curr_pfn = PFN_UP(bfin_memmap.map[i].addr);
596 if (curr_pfn >= max_pfn)
597 continue;
598 /*
599 * ... and at the end of the usable range downwards:
600 */
601 last_pfn = PFN_DOWN(bfin_memmap.map[i].addr +
602 bfin_memmap.map[i].size);
603
604 if (last_pfn > max_pfn)
605 last_pfn = max_pfn;
606
607 /*
608 * .. finally, did all the rounding and playing
609 * around just make the area go away?
610 */
611 if (last_pfn <= curr_pfn)
612 continue;
613
614 size = last_pfn - curr_pfn;
615 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
616 }
617
618 /* reserve memory before memory_start, including bootmap */
619 reserve_bootmem(PAGE_OFFSET,
620 memory_start + bootmap_size + PAGE_SIZE - 1 - PAGE_OFFSET,
621 BOOTMEM_DEFAULT);
622 }
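/*
 * Net effect of the function above: every BFIN_MEMMAP_RAM page between
 * memory_start and memory_end is handed to the bootmem allocator as free,
 * while everything from PAGE_OFFSET up to memory_start plus the bootmem
 * bitmap itself stays reserved, so the kernel image and the bitmap are
 * never handed out as free pages.
 */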
623
624 void __init setup_arch(char **cmdline_p)
625 {
626 unsigned long l1_length, sclk, cclk;
630
631 #ifdef CONFIG_DUMMY_CONSOLE
632 conswitchp = &dummy_con;
633 #endif
634
635 #if defined(CONFIG_CMDLINE_BOOL)
636 strncpy(&command_line[0], CONFIG_CMDLINE, sizeof(command_line));
637 command_line[sizeof(command_line) - 1] = 0;
638 #endif
639
640 /* Keep a copy of command line */
641 *cmdline_p = &command_line[0];
642 memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
643 boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';
644
645 /* setup memory defaults from the user config */
646 physical_mem_end = 0;
647 _ramend = CONFIG_MEM_SIZE * 1024 * 1024;
648
649 memset(&bfin_memmap, 0, sizeof(bfin_memmap));
650
651 parse_cmdline_early(&command_line[0]);
652
653 if (physical_mem_end == 0)
654 physical_mem_end = _ramend;
655
656 memory_setup();
657
658 cclk = get_cclk();
659 sclk = get_sclk();
660
661 #if !defined(CONFIG_BFIN_KERNEL_CLOCK)
662 if (ANOMALY_05000273 && cclk == sclk)
663 panic("ANOMALY 05000273, SCLK can not be same as CCLK");
664 #endif
665
666 #ifdef BF561_FAMILY
667 if (ANOMALY_05000266) {
668 bfin_read_IMDMA_D0_IRQ_STATUS();
669 bfin_read_IMDMA_D1_IRQ_STATUS();
670 }
671 #endif
672 printk(KERN_INFO "Hardware Trace ");
673 if (bfin_read_TBUFCTL() & 0x1)
674 printk("Active ");
675 else
676 printk("Off ");
677 if (bfin_read_TBUFCTL() & 0x2)
678 printk("and Enabled\n");
679 else
680 printk("and Disabled\n");
681
682 #if defined(CONFIG_CHR_DEV_FLASH) || defined(CONFIG_BLK_DEV_FLASH)
683 /* we need to initialize the Flashrom device here since we might
684 * do things with flash early on in the boot
685 */
686 flash_probe();
687 #endif
688
689 _bfin_swrst = bfin_read_SWRST();
690
691 if (_bfin_swrst & RESET_DOUBLE)
692 printk(KERN_INFO "Recovering from Double Fault event\n");
693 else if (_bfin_swrst & RESET_WDOG)
694 printk(KERN_INFO "Recovering from Watchdog event\n");
695 else if (_bfin_swrst & RESET_SOFTWARE)
696 printk(KERN_NOTICE "Reset caused by Software reset\n");
697
698 printk(KERN_INFO "Blackfin support (C) 2004-2007 Analog Devices, Inc.\n");
699 if (bfin_compiled_revid() == 0xffff)
700 printk(KERN_INFO "Compiled for ADSP-%s Rev any\n", CPU);
701 else if (bfin_compiled_revid() == -1)
702 printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
703 else
704 printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());
705 if (bfin_revid() != bfin_compiled_revid()) {
706 if (bfin_compiled_revid() == -1)
707 printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
708 bfin_revid());
709 else if (bfin_compiled_revid() != 0xffff)
710 printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
711 bfin_compiled_revid(), bfin_revid());
712 }
713 if (bfin_revid() < SUPPORTED_REVID)
714 printk(KERN_ERR "Warning: Unsupported Chip Revision ADSP-%s Rev 0.%d detected\n",
715 CPU, bfin_revid());
716 printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
717
718 printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
719 cclk / 1000000, sclk / 1000000);
720
721 if (ANOMALY_05000273 && (cclk >> 1) <= sclk)
722 printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n");
723
724 setup_bootmem_allocator();
725
726 paging_init();
727
728 /* check the size of the l1 area */
729 l1_length = _etext_l1 - _stext_l1;
730 if (l1_length > L1_CODE_LENGTH)
731 panic("L1 code memory overflow\n");
732
733 l1_length = _ebss_l1 - _sdata_l1;
734 if (l1_length > L1_DATA_A_LENGTH)
735 panic("L1 data memory overflow\n");
736
737 /* Copy atomic sequences to their fixed location, and sanity check that
738 these locations are the ones that we advertise to userspace. */
739 memcpy((void *)FIXED_CODE_START, &fixed_code_start,
740 FIXED_CODE_END - FIXED_CODE_START);
741 BUG_ON((char *)&sigreturn_stub - (char *)&fixed_code_start
742 != SIGRETURN_STUB - FIXED_CODE_START);
743 BUG_ON((char *)&atomic_xchg32 - (char *)&fixed_code_start
744 != ATOMIC_XCHG32 - FIXED_CODE_START);
745 BUG_ON((char *)&atomic_cas32 - (char *)&fixed_code_start
746 != ATOMIC_CAS32 - FIXED_CODE_START);
747 BUG_ON((char *)&atomic_add32 - (char *)&fixed_code_start
748 != ATOMIC_ADD32 - FIXED_CODE_START);
749 BUG_ON((char *)&atomic_sub32 - (char *)&fixed_code_start
750 != ATOMIC_SUB32 - FIXED_CODE_START);
751 BUG_ON((char *)&atomic_ior32 - (char *)&fixed_code_start
752 != ATOMIC_IOR32 - FIXED_CODE_START);
753 BUG_ON((char *)&atomic_and32 - (char *)&fixed_code_start
754 != ATOMIC_AND32 - FIXED_CODE_START);
755 BUG_ON((char *)&atomic_xor32 - (char *)&fixed_code_start
756 != ATOMIC_XOR32 - FIXED_CODE_START);
757 BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
758 != SAFE_USER_INSTRUCTION - FIXED_CODE_START);
759
760 init_exception_vectors();
761 bf53x_cache_init();
762 }
763
764 static int __init topology_init(void)
765 {
766 #if defined (CONFIG_BF561)
767 static struct cpu cpu[2];
768 register_cpu(&cpu[0], 0);
769 register_cpu(&cpu[1], 1);
770 return 0;
771 #else
772 static struct cpu cpu[1];
773 return register_cpu(cpu, 0);
774 #endif
775 }
776
777 subsys_initcall(topology_init);
778
779 static u_long get_vco(void)
780 {
781 u_long msel;
782 u_long vco;
783
784 msel = (bfin_read_PLL_CTL() >> 9) & 0x3F;
785 if (0 == msel)
786 msel = 64;
787
788 vco = CONFIG_CLKIN_HZ;
789 vco >>= (1 & bfin_read_PLL_CTL()); /* DF bit */
790 vco = msel * vco;
791 return vco;
792 }
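/*
 * Hypothetical example of the calculation above: with a 25MHz CLKIN, the
 * DF bit clear and MSEL programmed to 20, get_vco() returns
 * 25MHz * 20 = 500MHz (an MSEL field of 0 is treated as 64).
 */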
793
794 /* Get the Core clock */
795 u_long get_cclk(void)
796 {
797 u_long csel, ssel;
798 if (bfin_read_PLL_STAT() & 0x1)
799 return CONFIG_CLKIN_HZ;
800
801 ssel = bfin_read_PLL_DIV();
802 csel = ((ssel >> 4) & 0x03);
803 ssel &= 0xf;
804 if (ssel && ssel < (1 << csel)) /* SCLK > CCLK */
805 return get_vco() / ssel;
806 return get_vco() >> csel;
807 }
808 EXPORT_SYMBOL(get_cclk);
809
810 /* Get the System clock */
811 u_long get_sclk(void)
812 {
813 u_long ssel;
814
815 if (bfin_read_PLL_STAT() & 0x1)
816 return CONFIG_CLKIN_HZ;
817
818 ssel = (bfin_read_PLL_DIV() & 0xf);
819 if (0 == ssel) {
820 printk(KERN_WARNING "Invalid System Clock\n");
821 ssel = 1;
822 }
823
824 return get_vco() / ssel;
825 }
826 EXPORT_SYMBOL(get_sclk);
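/*
 * Continuing the hypothetical 500MHz VCO example: with the PLL not
 * bypassed, CSEL = 0 and SSEL = 5, get_cclk() returns 500MHz / 1 = 500MHz
 * and get_sclk() returns 500MHz / 5 = 100MHz. If the PLL is bypassed
 * (PLL_STAT bit 0 set), both simply return CONFIG_CLKIN_HZ.
 */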
827
828 unsigned long sclk_to_usecs(unsigned long sclk)
829 {
830 u64 tmp = USEC_PER_SEC * (u64)sclk;
831 do_div(tmp, get_sclk());
832 return tmp;
833 }
834 EXPORT_SYMBOL(sclk_to_usecs);
835
836 unsigned long usecs_to_sclk(unsigned long usecs)
837 {
838 u64 tmp = get_sclk() * (u64)usecs;
839 do_div(tmp, USEC_PER_SEC);
840 return tmp;
841 }
842 EXPORT_SYMBOL(usecs_to_sclk);
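/*
 * Example for the two converters above, assuming the hypothetical 100MHz
 * SCLK: sclk_to_usecs(500) = 500 * 1000000 / 100000000 = 5us, and
 * usecs_to_sclk(5) = 5 * 100000000 / 1000000 = 500 system clock cycles.
 */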
843
844 /*
845 * Get CPU information for use by the procfs.
846 */
847 static int show_cpuinfo(struct seq_file *m, void *v)
848 {
849 char *cpu, *mmu, *fpu, *vendor, *cache;
850 uint32_t revid;
851
852 u_long cclk = 0, sclk = 0;
853 u_int dcache_size = 0, dsup_banks = 0;
854
855 cpu = CPU;
856 mmu = "none";
857 fpu = "none";
858 revid = bfin_revid();
859
860 cclk = get_cclk();
861 sclk = get_sclk();
862
863 switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
864 case 0xca:
865 vendor = "Analog Devices";
866 break;
867 default:
868 vendor = "unknown";
869 break;
870 }
871
872 seq_printf(m, "processor\t: %d\n"
873 "vendor_id\t: %s\n"
874 "cpu family\t: 0x%x\n"
875 "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK)\n"
876 "stepping\t: %d\n",
877 0,
878 vendor,
879 (bfin_read_CHIPID() & CHIPID_FAMILY),
880 cpu, cclk/1000000, sclk/1000000,
881 revid);
882
883 seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
884 cclk/1000000, cclk%1000000,
885 sclk/1000000, sclk%1000000);
886 seq_printf(m, "bogomips\t: %lu.%02lu\n"
887 "Calibration\t: %lu loops\n",
888 (loops_per_jiffy * HZ) / 500000,
889 ((loops_per_jiffy * HZ) / 5000) % 100,
890 (loops_per_jiffy * HZ));
891
892 /* Check cache configuration */
893 switch (bfin_read_DMEM_CONTROL() & (1 << DMC0_P | 1 << DMC1_P)) {
894 case ACACHE_BSRAM:
895 cache = "dbank-A/B\t: cache/sram";
896 dcache_size = 16;
897 dsup_banks = 1;
898 break;
899 case ACACHE_BCACHE:
900 cache = "dbank-A/B\t: cache/cache";
901 dcache_size = 32;
902 dsup_banks = 2;
903 break;
904 case ASRAM_BSRAM:
905 cache = "dbank-A/B\t: sram/sram";
906 dcache_size = 0;
907 dsup_banks = 0;
908 break;
909 default:
910 cache = "unknown";
911 dcache_size = 0;
912 dsup_banks = 0;
913 break;
914 }
915
916 /* Is it turned on? */
917 if (!((bfin_read_DMEM_CONTROL()) & (ENDCPLB | DMC_ENABLE)))
918 dcache_size = 0;
919
920 seq_printf(m, "cache size\t: %d KB(L1 icache) "
921 "%d KB(L1 dcache-%s) %d KB(L2 cache)\n",
922 BFIN_ICACHESIZE / 1024, dcache_size,
923 #if defined CONFIG_BFIN_WB
924 "wb"
925 #elif defined CONFIG_BFIN_WT
926 "wt"
927 #endif
928 "", 0);
929
930 seq_printf(m, "%s\n", cache);
931
932 seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
933 BFIN_ISUBBANKS, BFIN_IWAYS, BFIN_ILINES);
934 seq_printf(m,
935 "dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
936 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
937 BFIN_DLINES);
938 #ifdef CONFIG_BFIN_ICACHE_LOCK
939 switch (read_iloc()) {
940 case WAY0_L:
941 seq_printf(m, "Way0 Locked-Down\n");
942 break;
943 case WAY1_L:
944 seq_printf(m, "Way1 Locked-Down\n");
945 break;
946 case WAY01_L:
947 seq_printf(m, "Way0,Way1 Locked-Down\n");
948 break;
949 case WAY2_L:
950 seq_printf(m, "Way2 Locked-Down\n");
951 break;
952 case WAY02_L:
953 seq_printf(m, "Way0,Way2 Locked-Down\n");
954 break;
955 case WAY12_L:
956 seq_printf(m, "Way1,Way2 Locked-Down\n");
957 break;
958 case WAY012_L:
959 seq_printf(m, "Way0,Way1 & Way2 Locked-Down\n");
960 break;
961 case WAY3_L:
962 seq_printf(m, "Way3 Locked-Down\n");
963 break;
964 case WAY03_L:
965 seq_printf(m, "Way0,Way3 Locked-Down\n");
966 break;
967 case WAY13_L:
968 seq_printf(m, "Way1,Way3 Locked-Down\n");
969 break;
970 case WAY013_L:
971 seq_printf(m, "Way 0,Way1,Way3 Locked-Down\n");
972 break;
973 case WAY32_L:
974 seq_printf(m, "Way3,Way2 Locked-Down\n");
975 break;
976 case WAY320_L:
977 seq_printf(m, "Way3,Way2,Way0 Locked-Down\n");
978 break;
979 case WAY321_L:
980 seq_printf(m, "Way3,Way2,Way1 Locked-Down\n");
981 break;
982 case WAYALL_L:
983 seq_printf(m, "All Ways are locked\n");
984 break;
985 default:
986 seq_printf(m, "No Ways are locked\n");
987 }
988 #endif
989
990 seq_printf(m, "board name\t: %s\n", bfin_board_name);
991 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
992 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
993 seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n",
994 ((int)memory_end - (int)_stext) >> 10,
995 _stext,
996 (void *)memory_end);
997
998 return 0;
999 }
1000
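/*
 * show_cpuinfo() never looks at its 'v' argument, so c_start() below only
 * needs to return some non-NULL cookie for positions that correspond to a
 * CPU; the 0x12345678 constant is merely such a dummy token.
 */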
1001 static void *c_start(struct seq_file *m, loff_t *pos)
1002 {
1003 return *pos < NR_CPUS ? ((void *)0x12345678) : NULL;
1004 }
1005
1006 static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1007 {
1008 ++*pos;
1009 return c_start(m, pos);
1010 }
1011
1012 static void c_stop(struct seq_file *m, void *v)
1013 {
1014 }
1015
1016 const struct seq_operations cpuinfo_op = {
1017 .start = c_start,
1018 .next = c_next,
1019 .stop = c_stop,
1020 .show = show_cpuinfo,
1021 };
1022
1023 void __init cmdline_init(const char *r0)
1024 {
1025 if (r0)
1026 strncpy(command_line, r0, COMMAND_LINE_SIZE);
1027 }