arch/blackfin/kernel/cplb-nompu/cplbinit.c

/*
 * Blackfin CPLB initialization
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>

#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mem_map.h>

struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;
struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR;

int first_switched_icplb PDT_ATTR;
int first_switched_dcplb PDT_ATTR;

struct cplb_boundary dcplb_bounds[9] PDT_ATTR;
struct cplb_boundary icplb_bounds[9] PDT_ATTR;

int icplb_nr_bounds PDT_ATTR;
int dcplb_nr_bounds PDT_ATTR;

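/*
 * icplb_tbl/dcplb_tbl hold the boot-time CPLB entries loaded for each
 * core.  Entries below first_switched_icplb/first_switched_dcplb stay
 * locked in; the remaining slots are recycled by the CPLB miss handler
 * at run time.  The icplb_bounds/dcplb_bounds arrays describe, in
 * ascending address order, which CPLB attributes each region of the
 * address space gets when the miss handler installs a new entry.
 */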
void __init generate_cplb_tables_cpu(unsigned int cpu)
{
	int i_d, i_i;
	unsigned long addr;

	struct cplb_entry *d_tbl = dcplb_tbl[cpu];
	struct cplb_entry *i_tbl = icplb_tbl[cpu];

	printk(KERN_INFO "NOMPU: setting up cplb tables\n");

	i_d = i_i = 0;

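	/*
	 * When hunting for NULL pointer dereferences, give the first 1KB its
	 * own CPLB entry so that accesses to address 0 raise an exception
	 * instead of going unnoticed.
	 */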
#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
	/* Set up the zero page. */
	d_tbl[i_d].addr = 0;
	d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
	i_tbl[i_i].addr = 0;
	i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
#endif

	/* Cover kernel memory with 4M pages. */
	addr = 0;

	for (; addr < memory_start; addr += 4 * 1024 * 1024) {
		d_tbl[i_d].addr = addr;
		d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
		i_tbl[i_i].addr = addr;
		i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
	}

	/* Cover L1 memory. One 4M area for code and data each is enough. */
	if (cpu == 0) {
		if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
			d_tbl[i_d].addr = L1_DATA_A_START;
			d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
		}
		i_tbl[i_i].addr = L1_CODE_START;
		i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
	}
#ifdef CONFIG_SMP
	else {
		if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
			d_tbl[i_d].addr = COREB_L1_DATA_A_START;
			d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB;
		}
		i_tbl[i_i].addr = COREB_L1_CODE_START;
		i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB;
	}
#endif
	first_switched_dcplb = i_d;
	first_switched_icplb = i_i;

	BUG_ON(first_switched_dcplb > MAX_CPLBS);
	BUG_ON(first_switched_icplb > MAX_CPLBS);

	while (i_d < MAX_CPLBS)
		d_tbl[i_d++].data = 0;
	while (i_i < MAX_CPLBS)
		i_tbl[i_i++].data = 0;
}

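/*
 * Build the boundary tables shared by all cores.  Each entry gives the
 * end address of a region and the CPLB data word to use for it; the
 * CPLB miss handler walks these tables to decide how to map a faulting
 * address.  A .data value of 0 marks an addressing hole with no valid
 * mapping.
 */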
void __init generate_cplb_tables_all(void)
{
	unsigned long uncached_end;
	int i_d, i_i;

	i_d = 0;
	/* Normal RAM, including MTD FS. */
#ifdef CONFIG_MTD_UCLINUX
	uncached_end = memory_mtd_start + mtd_size;
#else
	uncached_end = memory_end;
#endif
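	/*
	 * uncached_end is the highest address the normal cacheable SDRAM
	 * mapping has to cover: the end of kernel memory, plus the in-RAM
	 * MTD filesystem image when one is used.
	 */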
	/*
	 * if DMA uncached is less than 1MB, mark the 1MB chunk as uncached
	 * so that we don't have to use 4kB pages and cause CPLB thrashing
	 */
	if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
	    ((_ramend - uncached_end) >= 1 * 1024 * 1024))
		dcplb_bounds[i_d].eaddr = uncached_end;
	else
		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024);
	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
	/* DMA uncached region. */
	if (DMA_UNCACHED_REGION) {
		dcplb_bounds[i_d].eaddr = _ramend;
		dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL;
	}
	if (_ramend != physical_mem_end) {
		/* Reserved memory. */
		dcplb_bounds[i_d].eaddr = physical_mem_end;
		dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ?
					    SDRAM_DGENERIC : SDRAM_DNON_CHBL);
	}
	/* Addressing hole up to the async bank. */
	dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE;
	dcplb_bounds[i_d++].data = 0;
	/* ASYNC banks. */
	dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
	dcplb_bounds[i_d++].data = SDRAM_EBIU;
	/* Addressing hole up to BootROM. */
	dcplb_bounds[i_d].eaddr = BOOT_ROM_START;
	dcplb_bounds[i_d++].data = 0;
	/* BootROM -- largest one should be less than 1 meg. */
	dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
	if (L2_LENGTH) {
		/* Addressing hole up to L2 SRAM. */
		dcplb_bounds[i_d].eaddr = L2_START;
		dcplb_bounds[i_d++].data = 0;
		/* L2 SRAM. */
		dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH;
		dcplb_bounds[i_d++].data = L2_DMEMORY;
	}
	dcplb_nr_bounds = i_d;
	BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds));

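	/*
	 * On the instruction side the DMA uncached region is left unmapped:
	 * nothing is ever executed from it.
	 */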
	i_i = 0;
	/* Normal RAM, including MTD FS. */
	icplb_bounds[i_i].eaddr = uncached_end;
	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
	/* DMA uncached region. */
	if (DMA_UNCACHED_REGION) {
		icplb_bounds[i_i].eaddr = _ramend;
		icplb_bounds[i_i++].data = 0;
	}
	if (_ramend != physical_mem_end) {
		/* Reserved memory. */
		icplb_bounds[i_i].eaddr = physical_mem_end;
		icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
					    SDRAM_IGENERIC : SDRAM_INON_CHBL);
	}
	/* Addressing hole up to the async bank. */
	icplb_bounds[i_i].eaddr = ASYNC_BANK0_BASE;
	icplb_bounds[i_i++].data = 0;
	/* ASYNC banks. */
	icplb_bounds[i_i].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE;
	icplb_bounds[i_i++].data = SDRAM_EBIU;
	/* Addressing hole up to BootROM. */
	icplb_bounds[i_i].eaddr = BOOT_ROM_START;
	icplb_bounds[i_i++].data = 0;
	/* BootROM -- largest one should be less than 1 meg. */
	icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024);
	icplb_bounds[i_i++].data = SDRAM_IGENERIC;

	if (L2_LENGTH) {
		/* Addressing hole up to L2 SRAM. */
		icplb_bounds[i_i].eaddr = L2_START;
		icplb_bounds[i_i++].data = 0;
		/* L2 SRAM. */
		icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH;
		icplb_bounds[i_i++].data = L2_IMEMORY;
	}
	icplb_nr_bounds = i_i;
	BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds));
}