1 /*
2 * arch/ppc/syslib/mv64x60.c
3 *
4 * Common routines for the Marvell/Galileo Discovery line of host bridges
5 * (gt64260, mv64360, mv64460, ...).
6 *
7 * Author: Mark A. Greer <mgreer@mvista.com>
8 *
9 * 2004 (c) MontaVista, Software, Inc. This file is licensed under
10 * the terms of the GNU General Public License version 2. This program
11 * is licensed "as is" without any warranty of any kind, whether express
12 * or implied.
13 */
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/pci.h>
17 #include <linux/slab.h>
18 #include <linux/module.h>
19 #include <linux/string.h>
20 #include <linux/spinlock.h>
21 #include <linux/mv643xx.h>
22
23 #include <asm/byteorder.h>
24 #include <asm/io.h>
25 #include <asm/irq.h>
26 #include <asm/uaccess.h>
27 #include <asm/machdep.h>
28 #include <asm/pci-bridge.h>
29 #include <asm/delay.h>
30 #include <asm/mv64x60.h>
31
32
33 u8 mv64x60_pci_exclude_bridge = 1;
34 DEFINE_SPINLOCK(mv64x60_lock);
35
36 static phys_addr_t mv64x60_bridge_pbase;
37 static void *mv64x60_bridge_vbase;
38 static u32 mv64x60_bridge_type = MV64x60_TYPE_INVALID;
39 static u32 mv64x60_bridge_rev;
40 #if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
41 static struct pci_controller sysfs_hose_a;
42 #endif
43
44 static u32 gt64260_translate_size(u32 base, u32 size, u32 num_bits);
45 static u32 gt64260_untranslate_size(u32 base, u32 size, u32 num_bits);
46 static void gt64260_set_pci2mem_window(struct pci_controller *hose, u32 bus,
47 u32 window, u32 base);
48 static void gt64260_set_pci2regs_window(struct mv64x60_handle *bh,
49 struct pci_controller *hose, u32 bus, u32 base);
50 static u32 gt64260_is_enabled_32bit(struct mv64x60_handle *bh, u32 window);
51 static void gt64260_enable_window_32bit(struct mv64x60_handle *bh, u32 window);
52 static void gt64260_disable_window_32bit(struct mv64x60_handle *bh, u32 window);
53 static void gt64260_enable_window_64bit(struct mv64x60_handle *bh, u32 window);
54 static void gt64260_disable_window_64bit(struct mv64x60_handle *bh, u32 window);
55 static void gt64260_disable_all_windows(struct mv64x60_handle *bh,
56 struct mv64x60_setup_info *si);
57 static void gt64260a_chip_specific_init(struct mv64x60_handle *bh,
58 struct mv64x60_setup_info *si);
59 static void gt64260b_chip_specific_init(struct mv64x60_handle *bh,
60 struct mv64x60_setup_info *si);
61
62 static u32 mv64360_translate_size(u32 base, u32 size, u32 num_bits);
63 static u32 mv64360_untranslate_size(u32 base, u32 size, u32 num_bits);
64 static void mv64360_set_pci2mem_window(struct pci_controller *hose, u32 bus,
65 u32 window, u32 base);
66 static void mv64360_set_pci2regs_window(struct mv64x60_handle *bh,
67 struct pci_controller *hose, u32 bus, u32 base);
68 static u32 mv64360_is_enabled_32bit(struct mv64x60_handle *bh, u32 window);
69 static void mv64360_enable_window_32bit(struct mv64x60_handle *bh, u32 window);
70 static void mv64360_disable_window_32bit(struct mv64x60_handle *bh, u32 window);
71 static void mv64360_enable_window_64bit(struct mv64x60_handle *bh, u32 window);
72 static void mv64360_disable_window_64bit(struct mv64x60_handle *bh, u32 window);
73 static void mv64360_disable_all_windows(struct mv64x60_handle *bh,
74 struct mv64x60_setup_info *si);
75 static void mv64360_config_io2mem_windows(struct mv64x60_handle *bh,
76 struct mv64x60_setup_info *si,
77 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2]);
78 static void mv64360_set_mpsc2regs_window(struct mv64x60_handle *bh, u32 base);
79 static void mv64360_chip_specific_init(struct mv64x60_handle *bh,
80 struct mv64x60_setup_info *si);
81 static void mv64460_chip_specific_init(struct mv64x60_handle *bh,
82 struct mv64x60_setup_info *si);
83
84
85 /*
86 * Define tables that have the chip-specific info for each type of
87 * Marvell bridge chip.
88 */
89 static struct mv64x60_chip_info gt64260a_ci __initdata = { /* GT64260A */
90 .translate_size = gt64260_translate_size,
91 .untranslate_size = gt64260_untranslate_size,
92 .set_pci2mem_window = gt64260_set_pci2mem_window,
93 .set_pci2regs_window = gt64260_set_pci2regs_window,
94 .is_enabled_32bit = gt64260_is_enabled_32bit,
95 .enable_window_32bit = gt64260_enable_window_32bit,
96 .disable_window_32bit = gt64260_disable_window_32bit,
97 .enable_window_64bit = gt64260_enable_window_64bit,
98 .disable_window_64bit = gt64260_disable_window_64bit,
99 .disable_all_windows = gt64260_disable_all_windows,
100 .chip_specific_init = gt64260a_chip_specific_init,
101 .window_tab_32bit = gt64260_32bit_windows,
102 .window_tab_64bit = gt64260_64bit_windows,
103 };
104
105 static struct mv64x60_chip_info gt64260b_ci __initdata = { /* GT64260B */
106 .translate_size = gt64260_translate_size,
107 .untranslate_size = gt64260_untranslate_size,
108 .set_pci2mem_window = gt64260_set_pci2mem_window,
109 .set_pci2regs_window = gt64260_set_pci2regs_window,
110 .is_enabled_32bit = gt64260_is_enabled_32bit,
111 .enable_window_32bit = gt64260_enable_window_32bit,
112 .disable_window_32bit = gt64260_disable_window_32bit,
113 .enable_window_64bit = gt64260_enable_window_64bit,
114 .disable_window_64bit = gt64260_disable_window_64bit,
115 .disable_all_windows = gt64260_disable_all_windows,
116 .chip_specific_init = gt64260b_chip_specific_init,
117 .window_tab_32bit = gt64260_32bit_windows,
118 .window_tab_64bit = gt64260_64bit_windows,
119 };
120
121 static struct mv64x60_chip_info mv64360_ci __initdata = { /* MV64360 */
122 .translate_size = mv64360_translate_size,
123 .untranslate_size = mv64360_untranslate_size,
124 .set_pci2mem_window = mv64360_set_pci2mem_window,
125 .set_pci2regs_window = mv64360_set_pci2regs_window,
126 .is_enabled_32bit = mv64360_is_enabled_32bit,
127 .enable_window_32bit = mv64360_enable_window_32bit,
128 .disable_window_32bit = mv64360_disable_window_32bit,
129 .enable_window_64bit = mv64360_enable_window_64bit,
130 .disable_window_64bit = mv64360_disable_window_64bit,
131 .disable_all_windows = mv64360_disable_all_windows,
132 .config_io2mem_windows = mv64360_config_io2mem_windows,
133 .set_mpsc2regs_window = mv64360_set_mpsc2regs_window,
134 .chip_specific_init = mv64360_chip_specific_init,
135 .window_tab_32bit = mv64360_32bit_windows,
136 .window_tab_64bit = mv64360_64bit_windows,
137 };
138
139 static struct mv64x60_chip_info mv64460_ci __initdata = { /* MV64460 */
140 .translate_size = mv64360_translate_size,
141 .untranslate_size = mv64360_untranslate_size,
142 .set_pci2mem_window = mv64360_set_pci2mem_window,
143 .set_pci2regs_window = mv64360_set_pci2regs_window,
144 .is_enabled_32bit = mv64360_is_enabled_32bit,
145 .enable_window_32bit = mv64360_enable_window_32bit,
146 .disable_window_32bit = mv64360_disable_window_32bit,
147 .enable_window_64bit = mv64360_enable_window_64bit,
148 .disable_window_64bit = mv64360_disable_window_64bit,
149 .disable_all_windows = mv64360_disable_all_windows,
150 .config_io2mem_windows = mv64360_config_io2mem_windows,
151 .set_mpsc2regs_window = mv64360_set_mpsc2regs_window,
152 .chip_specific_init = mv64460_chip_specific_init,
153 .window_tab_32bit = mv64360_32bit_windows,
154 .window_tab_64bit = mv64360_64bit_windows,
155 };
156
157 /*
158 *****************************************************************************
159 *
160 * Platform Device Definitions
161 *
162 *****************************************************************************
163 */
164 #ifdef CONFIG_SERIAL_MPSC
165 static struct mpsc_shared_pdata mv64x60_mpsc_shared_pdata = {
166 .mrr_val = 0x3ffffe38,
167 .rcrr_val = 0,
168 .tcrr_val = 0,
169 .intr_cause_val = 0,
170 .intr_mask_val = 0,
171 };
172
173 static struct resource mv64x60_mpsc_shared_resources[] = {
174 /* Do not change the order of the IORESOURCE_MEM resources */
175 [0] = {
176 .name = "mpsc routing base",
177 .start = MV64x60_MPSC_ROUTING_OFFSET,
178 .end = MV64x60_MPSC_ROUTING_OFFSET +
179 MPSC_ROUTING_REG_BLOCK_SIZE - 1,
180 .flags = IORESOURCE_MEM,
181 },
182 [1] = {
183 .name = "sdma intr base",
184 .start = MV64x60_SDMA_INTR_OFFSET,
185 .end = MV64x60_SDMA_INTR_OFFSET +
186 MPSC_SDMA_INTR_REG_BLOCK_SIZE - 1,
187 .flags = IORESOURCE_MEM,
188 },
189 };
190
191 static struct platform_device mpsc_shared_device = { /* Shared device */
192 .name = MPSC_SHARED_NAME,
193 .id = 0,
194 .num_resources = ARRAY_SIZE(mv64x60_mpsc_shared_resources),
195 .resource = mv64x60_mpsc_shared_resources,
196 .dev = {
197 .platform_data = &mv64x60_mpsc_shared_pdata,
198 },
199 };
200
201 static struct mpsc_pdata mv64x60_mpsc0_pdata = {
202 .mirror_regs = 0,
203 .cache_mgmt = 0,
204 .max_idle = 0,
205 .default_baud = 9600,
206 .default_bits = 8,
207 .default_parity = 'n',
208 .default_flow = 'n',
209 .chr_1_val = 0x00000000,
210 .chr_2_val = 0x00000000,
211 .chr_10_val = 0x00000003,
212 .mpcr_val = 0,
213 .bcr_val = 0,
214 .brg_can_tune = 0,
215 .brg_clk_src = 8, /* Default to TCLK */
216 .brg_clk_freq = 100000000, /* Default to 100 MHz */
217 };
218
219 static struct resource mv64x60_mpsc0_resources[] = {
220 /* Do not change the order of the IORESOURCE_MEM resources */
221 [0] = {
222 .name = "mpsc 0 base",
223 .start = MV64x60_MPSC_0_OFFSET,
224 .end = MV64x60_MPSC_0_OFFSET + MPSC_REG_BLOCK_SIZE - 1,
225 .flags = IORESOURCE_MEM,
226 },
227 [1] = {
228 .name = "sdma 0 base",
229 .start = MV64x60_SDMA_0_OFFSET,
230 .end = MV64x60_SDMA_0_OFFSET + MPSC_SDMA_REG_BLOCK_SIZE - 1,
231 .flags = IORESOURCE_MEM,
232 },
233 [2] = {
234 .name = "brg 0 base",
235 .start = MV64x60_BRG_0_OFFSET,
236 .end = MV64x60_BRG_0_OFFSET + MPSC_BRG_REG_BLOCK_SIZE - 1,
237 .flags = IORESOURCE_MEM,
238 },
239 [3] = {
240 .name = "sdma 0 irq",
241 .start = MV64x60_IRQ_SDMA_0,
242 .end = MV64x60_IRQ_SDMA_0,
243 .flags = IORESOURCE_IRQ,
244 },
245 };
246
247 static struct platform_device mpsc0_device = {
248 .name = MPSC_CTLR_NAME,
249 .id = 0,
250 .num_resources = ARRAY_SIZE(mv64x60_mpsc0_resources),
251 .resource = mv64x60_mpsc0_resources,
252 .dev = {
253 .platform_data = &mv64x60_mpsc0_pdata,
254 },
255 };
256
257 static struct mpsc_pdata mv64x60_mpsc1_pdata = {
258 .mirror_regs = 0,
259 .cache_mgmt = 0,
260 .max_idle = 0,
261 .default_baud = 9600,
262 .default_bits = 8,
263 .default_parity = 'n',
264 .default_flow = 'n',
265 .chr_1_val = 0x00000000,
267 .chr_2_val = 0x00000000,
268 .chr_10_val = 0x00000003,
269 .mpcr_val = 0,
270 .bcr_val = 0,
271 .brg_can_tune = 0,
272 .brg_clk_src = 8, /* Default to TCLK */
273 .brg_clk_freq = 100000000, /* Default to 100 MHz */
274 };
275
276 static struct resource mv64x60_mpsc1_resources[] = {
277 /* Do not change the order of the IORESOURCE_MEM resources */
278 [0] = {
279 .name = "mpsc 1 base",
280 .start = MV64x60_MPSC_1_OFFSET,
281 .end = MV64x60_MPSC_1_OFFSET + MPSC_REG_BLOCK_SIZE - 1,
282 .flags = IORESOURCE_MEM,
283 },
284 [1] = {
285 .name = "sdma 1 base",
286 .start = MV64x60_SDMA_1_OFFSET,
287 .end = MV64x60_SDMA_1_OFFSET + MPSC_SDMA_REG_BLOCK_SIZE - 1,
288 .flags = IORESOURCE_MEM,
289 },
290 [2] = {
291 .name = "brg 1 base",
292 .start = MV64x60_BRG_1_OFFSET,
293 .end = MV64x60_BRG_1_OFFSET + MPSC_BRG_REG_BLOCK_SIZE - 1,
294 .flags = IORESOURCE_MEM,
295 },
296 [3] = {
297 .name = "sdma 1 irq",
298 .start = MV64360_IRQ_SDMA_1,
299 .end = MV64360_IRQ_SDMA_1,
300 .flags = IORESOURCE_IRQ,
301 },
302 };
303
304 static struct platform_device mpsc1_device = {
305 .name = MPSC_CTLR_NAME,
306 .id = 1,
307 .num_resources = ARRAY_SIZE(mv64x60_mpsc1_resources),
308 .resource = mv64x60_mpsc1_resources,
309 .dev = {
310 .platform_data = &mv64x60_mpsc1_pdata,
311 },
312 };
313 #endif
314
315 #ifdef CONFIG_MV643XX_ETH
316 static struct resource mv64x60_eth_shared_resources[] = {
317 [0] = {
318 .name = "ethernet shared base",
319 .start = MV643XX_ETH_SHARED_REGS,
320 .end = MV643XX_ETH_SHARED_REGS +
321 MV643XX_ETH_SHARED_REGS_SIZE - 1,
322 .flags = IORESOURCE_MEM,
323 },
324 };
325
326 static struct platform_device mv64x60_eth_shared_device = {
327 .name = MV643XX_ETH_SHARED_NAME,
328 .id = 0,
329 .num_resources = ARRAY_SIZE(mv64x60_eth_shared_resources),
330 .resource = mv64x60_eth_shared_resources,
331 };
332
333 #ifdef CONFIG_MV643XX_ETH_0
334 static struct resource mv64x60_eth0_resources[] = {
335 [0] = {
336 .name = "eth0 irq",
337 .start = MV64x60_IRQ_ETH_0,
338 .end = MV64x60_IRQ_ETH_0,
339 .flags = IORESOURCE_IRQ,
340 },
341 };
342
343 static struct mv643xx_eth_platform_data eth0_pd;
344
345 static struct platform_device eth0_device = {
346 .name = MV643XX_ETH_NAME,
347 .id = 0,
348 .num_resources = ARRAY_SIZE(mv64x60_eth0_resources),
349 .resource = mv64x60_eth0_resources,
350 .dev = {
351 .platform_data = &eth0_pd,
352 },
353 };
354 #endif
355
356 #ifdef CONFIG_MV643XX_ETH_1
357 static struct resource mv64x60_eth1_resources[] = {
358 [0] = {
359 .name = "eth1 irq",
360 .start = MV64x60_IRQ_ETH_1,
361 .end = MV64x60_IRQ_ETH_1,
362 .flags = IORESOURCE_IRQ,
363 },
364 };
365
366 static struct mv643xx_eth_platform_data eth1_pd;
367
368 static struct platform_device eth1_device = {
369 .name = MV643XX_ETH_NAME,
370 .id = 1,
371 .num_resources = ARRAY_SIZE(mv64x60_eth1_resources),
372 .resource = mv64x60_eth1_resources,
373 .dev = {
374 .platform_data = &eth1_pd,
375 },
376 };
377 #endif
378
379 #ifdef CONFIG_MV643XX_ETH_2
380 static struct resource mv64x60_eth2_resources[] = {
381 [0] = {
382 .name = "eth2 irq",
383 .start = MV64x60_IRQ_ETH_2,
384 .end = MV64x60_IRQ_ETH_2,
385 .flags = IORESOURCE_IRQ,
386 },
387 };
388
389 static struct mv643xx_eth_platform_data eth2_pd;
390
391 static struct platform_device eth2_device = {
392 .name = MV643XX_ETH_NAME,
393 .id = 2,
394 .num_resources = ARRAY_SIZE(mv64x60_eth2_resources),
395 .resource = mv64x60_eth2_resources,
396 .dev = {
397 .platform_data = &eth2_pd,
398 },
399 };
400 #endif
401 #endif
402
403 #ifdef CONFIG_I2C_MV64XXX
404 static struct mv64xxx_i2c_pdata mv64xxx_i2c_pdata = {
405 .freq_m = 8,
406 .freq_n = 3,
407 .timeout = 1000, /* Default timeout of 1 second */
408 .retries = 1,
409 };
410
411 static struct resource mv64xxx_i2c_resources[] = {
412 /* Do not change the order of the IORESOURCE_MEM resources */
413 [0] = {
414 .name = "mv64xxx i2c base",
415 .start = MV64XXX_I2C_OFFSET,
416 .end = MV64XXX_I2C_OFFSET + MV64XXX_I2C_REG_BLOCK_SIZE - 1,
417 .flags = IORESOURCE_MEM,
418 },
419 [1] = {
420 .name = "mv64xxx i2c irq",
421 .start = MV64x60_IRQ_I2C,
422 .end = MV64x60_IRQ_I2C,
423 .flags = IORESOURCE_IRQ,
424 },
425 };
426
427 static struct platform_device i2c_device = {
428 .name = MV64XXX_I2C_CTLR_NAME,
429 .id = 0,
430 .num_resources = ARRAY_SIZE(mv64xxx_i2c_resources),
431 .resource = mv64xxx_i2c_resources,
432 .dev = {
433 .platform_data = &mv64xxx_i2c_pdata,
434 },
435 };
436 #endif
437
438 #if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
439 static struct mv64xxx_pdata mv64xxx_pdata = {
440 .hs_reg_valid = 0,
441 };
442
443 static struct platform_device mv64xxx_device = { /* general mv64x60 stuff */
444 .name = MV64XXX_DEV_NAME,
445 .id = 0,
446 .dev = {
447 .platform_data = &mv64xxx_pdata,
448 },
449 };
450 #endif
451
452 static struct platform_device *mv64x60_pd_devs[] __initdata = {
453 #ifdef CONFIG_SERIAL_MPSC
454 &mpsc_shared_device,
455 &mpsc0_device,
456 &mpsc1_device,
457 #endif
458 #ifdef CONFIG_MV643XX_ETH
459 &mv64x60_eth_shared_device,
460 #endif
461 #ifdef CONFIG_MV643XX_ETH_0
462 &eth0_device,
463 #endif
464 #ifdef CONFIG_MV643XX_ETH_1
465 &eth1_device,
466 #endif
467 #ifdef CONFIG_MV643XX_ETH_2
468 &eth2_device,
469 #endif
470 #ifdef CONFIG_I2C_MV64XXX
471 &i2c_device,
472 #endif
473 #if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
474 &mv64xxx_device,
475 #endif
476 };
477
478 /*
479 *****************************************************************************
480 *
481 * Bridge Initialization Routines
482 *
483 *****************************************************************************
484 */
485 /*
486 * mv64x60_init()
487 *
488 * Initialize the bridge based on the settings passed in via 'si'. The bridge
489 * handle, 'bh', will be set so that it can be used to make subsequent
490 * calls to routines in this file.
491 */
492 int __init
493 mv64x60_init(struct mv64x60_handle *bh, struct mv64x60_setup_info *si)
494 {
495 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2];
496
497 if (ppc_md.progress)
498 ppc_md.progress("mv64x60 initialization", 0x0);
499
500 spin_lock_init(&mv64x60_lock);
501 mv64x60_early_init(bh, si);
502
503 if (mv64x60_get_type(bh) || mv64x60_setup_for_chip(bh)) {
504 iounmap(bh->v_base);
505 bh->v_base = 0;
506 if (ppc_md.progress)
507 ppc_md.progress("mv64x60_init: Can't determine chip",0);
508 return -1;
509 }
510
511 bh->ci->disable_all_windows(bh, si);
512 mv64x60_get_mem_windows(bh, mem_windows);
513 mv64x60_config_cpu2mem_windows(bh, si, mem_windows);
514
515 if (bh->ci->config_io2mem_windows)
516 bh->ci->config_io2mem_windows(bh, si, mem_windows);
517 if (bh->ci->set_mpsc2regs_window)
518 bh->ci->set_mpsc2regs_window(bh, si->phys_reg_base);
519
520 if (si->pci_1.enable_bus) {
521 bh->io_base_b = (u32)ioremap(si->pci_1.pci_io.cpu_base,
522 si->pci_1.pci_io.size);
523 isa_io_base = bh->io_base_b;
524 }
525
526 if (si->pci_0.enable_bus) {
527 bh->io_base_a = (u32)ioremap(si->pci_0.pci_io.cpu_base,
528 si->pci_0.pci_io.size);
529 isa_io_base = bh->io_base_a;
530
531 mv64x60_alloc_hose(bh, MV64x60_PCI0_CONFIG_ADDR,
532 MV64x60_PCI0_CONFIG_DATA, &bh->hose_a);
533 mv64x60_config_resources(bh->hose_a, &si->pci_0, bh->io_base_a);
534 mv64x60_config_pci_params(bh->hose_a, &si->pci_0);
535
536 mv64x60_config_cpu2pci_windows(bh, &si->pci_0, 0);
537 mv64x60_config_pci2mem_windows(bh, bh->hose_a, &si->pci_0, 0,
538 mem_windows);
539 bh->ci->set_pci2regs_window(bh, bh->hose_a, 0,
540 si->phys_reg_base);
541 }
542
543 if (si->pci_1.enable_bus) {
544 mv64x60_alloc_hose(bh, MV64x60_PCI1_CONFIG_ADDR,
545 MV64x60_PCI1_CONFIG_DATA, &bh->hose_b);
546 mv64x60_config_resources(bh->hose_b, &si->pci_1, bh->io_base_b);
547 mv64x60_config_pci_params(bh->hose_b, &si->pci_1);
548
549 mv64x60_config_cpu2pci_windows(bh, &si->pci_1, 1);
550 mv64x60_config_pci2mem_windows(bh, bh->hose_b, &si->pci_1, 1,
551 mem_windows);
552 bh->ci->set_pci2regs_window(bh, bh->hose_b, 1,
553 si->phys_reg_base);
554 }
555
556 bh->ci->chip_specific_init(bh, si);
557 mv64x60_pd_fixup(bh, mv64x60_pd_devs, ARRAY_SIZE(mv64x60_pd_devs));
558
559 return 0;
560 }
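
/*
 * Usage sketch (added for illustration, not part of the original file):
 * a board port's setup code would typically zero a struct
 * mv64x60_setup_info, fill in at least 'phys_reg_base' plus the per-bus
 * pci_0/pci_1 and window option fields consumed above, and then call
 * mv64x60_init().  The addresses below are made-up placeholders.
 *
 *	static struct mv64x60_handle bh;
 *	static struct mv64x60_setup_info si;
 *
 *	memset(&si, 0, sizeof(si));
 *	si.phys_reg_base = 0xf1000000;
 *	si.pci_0.enable_bus = 1;
 *	si.pci_0.pci_io.cpu_base = 0xa0000000;
 *	si.pci_0.pci_io.size = 0x01000000;
 *	if (mv64x60_init(&bh, &si))
 *		printk(KERN_WARNING "mv64x60 bridge init failed\n");
 */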
561
562 /*
563 * mv64x60_early_init()
564 *
565 * Do some bridge work that must take place before we start messing with
566 * the bridge for real.
567 */
568 void __init
569 mv64x60_early_init(struct mv64x60_handle *bh, struct mv64x60_setup_info *si)
570 {
571 struct pci_controller hose_a, hose_b;
572
573 memset(bh, 0, sizeof(*bh));
574
575 bh->p_base = si->phys_reg_base;
576 bh->v_base = ioremap(bh->p_base, MV64x60_INTERNAL_SPACE_SIZE);
577
578 mv64x60_bridge_pbase = bh->p_base;
579 mv64x60_bridge_vbase = bh->v_base;
580
581 /* Assuming pci mode [reserved] bits 4:5 on 64260 are 0 */
582 bh->pci_mode_a = mv64x60_read(bh, MV64x60_PCI0_MODE) &
583 MV64x60_PCIMODE_MASK;
584 bh->pci_mode_b = mv64x60_read(bh, MV64x60_PCI1_MODE) &
585 MV64x60_PCIMODE_MASK;
586
587 /* Need temporary hose structs to call mv64x60_set_bus() */
588 memset(&hose_a, 0, sizeof(hose_a));
589 memset(&hose_b, 0, sizeof(hose_b));
590 setup_indirect_pci_nomap(&hose_a, bh->v_base + MV64x60_PCI0_CONFIG_ADDR,
591 bh->v_base + MV64x60_PCI0_CONFIG_DATA);
592 setup_indirect_pci_nomap(&hose_b, bh->v_base + MV64x60_PCI1_CONFIG_ADDR,
593 bh->v_base + MV64x60_PCI1_CONFIG_DATA);
594 bh->hose_a = &hose_a;
595 bh->hose_b = &hose_b;
596
597 #if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
598 /* Save a copy of hose_a for sysfs functions -- hack */
599 memcpy(&sysfs_hose_a, &hose_a, sizeof(hose_a));
600 #endif
601
602 mv64x60_set_bus(bh, 0, 0);
603 mv64x60_set_bus(bh, 1, 0);
604
605 bh->hose_a = NULL;
606 bh->hose_b = NULL;
607
608 /* Clear bit 0 of PCI addr decode control so PCI->CPU remap 1:1 */
609 mv64x60_clr_bits(bh, MV64x60_PCI0_PCI_DECODE_CNTL, 0x00000001);
610 mv64x60_clr_bits(bh, MV64x60_PCI1_PCI_DECODE_CNTL, 0x00000001);
611
612 /* Bit 12 MUST be 0; set bit 27--don't auto-update cpu remap regs */
613 mv64x60_clr_bits(bh, MV64x60_CPU_CONFIG, (1<<12));
614 mv64x60_set_bits(bh, MV64x60_CPU_CONFIG, (1<<27));
615
616 mv64x60_set_bits(bh, MV64x60_PCI0_TO_RETRY, 0xffff);
617 mv64x60_set_bits(bh, MV64x60_PCI1_TO_RETRY, 0xffff);
618 }
619
620 /*
621 *****************************************************************************
622 *
623 * Window Config Routines
624 *
625 *****************************************************************************
626 */
627 /*
628 * mv64x60_get_32bit_window()
629 *
630 * Determine the base address and size of a 32-bit window on the bridge.
631 */
632 void __init
633 mv64x60_get_32bit_window(struct mv64x60_handle *bh, u32 window,
634 u32 *base, u32 *size)
635 {
636 u32 val, base_reg, size_reg, base_bits, size_bits;
637 u32 (*get_from_field)(u32 val, u32 num_bits);
638
639 base_reg = bh->ci->window_tab_32bit[window].base_reg;
640
641 if (base_reg != 0) {
642 size_reg = bh->ci->window_tab_32bit[window].size_reg;
643 base_bits = bh->ci->window_tab_32bit[window].base_bits;
644 size_bits = bh->ci->window_tab_32bit[window].size_bits;
645 get_from_field= bh->ci->window_tab_32bit[window].get_from_field;
646
647 val = mv64x60_read(bh, base_reg);
648 *base = get_from_field(val, base_bits);
649
650 if (size_reg != 0) {
651 val = mv64x60_read(bh, size_reg);
652 val = get_from_field(val, size_bits);
653 *size = bh->ci->untranslate_size(*base, val, size_bits);
654 } else
655 *size = 0;
656 } else {
657 *base = 0;
658 *size = 0;
659 }
660
661 pr_debug("get 32bit window: %d, base: 0x%x, size: 0x%x\n",
662 window, *base, *size);
663 }
664
665 /*
666 * mv64x60_set_32bit_window()
667 *
668 * Set the base address and size of a 32-bit window on the bridge.
669 */
670 void __init
671 mv64x60_set_32bit_window(struct mv64x60_handle *bh, u32 window,
672 u32 base, u32 size, u32 other_bits)
673 {
674 u32 val, base_reg, size_reg, base_bits, size_bits;
675 u32 (*map_to_field)(u32 val, u32 num_bits);
676
677 pr_debug("set 32bit window: %d, base: 0x%x, size: 0x%x, other: 0x%x\n",
678 window, base, size, other_bits);
679
680 base_reg = bh->ci->window_tab_32bit[window].base_reg;
681
682 if (base_reg != 0) {
683 size_reg = bh->ci->window_tab_32bit[window].size_reg;
684 base_bits = bh->ci->window_tab_32bit[window].base_bits;
685 size_bits = bh->ci->window_tab_32bit[window].size_bits;
686 map_to_field = bh->ci->window_tab_32bit[window].map_to_field;
687
688 val = map_to_field(base, base_bits) | other_bits;
689 mv64x60_write(bh, base_reg, val);
690
691 if (size_reg != 0) {
692 val = bh->ci->translate_size(base, size, size_bits);
693 val = map_to_field(val, size_bits);
694 mv64x60_write(bh, size_reg, val);
695 }
696
697 (void)mv64x60_read(bh, base_reg); /* Flush FIFO */
698 }
699 }
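
/*
 * Calling-pattern sketch (added for illustration, not part of the
 * original file): the usual idiom elsewhere in this file is to program a
 * window and then enable it through the chip-specific ops, e.g.
 *
 *	mv64x60_set_32bit_window(bh, MV64x60_CPU2DEV_0_WIN,
 *		0xfc000000, 0x02000000, 0);
 *	bh->ci->enable_window_32bit(bh, MV64x60_CPU2DEV_0_WIN);
 *
 * The base/size values are placeholders; real values come from the
 * board's mv64x60_setup_info.
 */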
700
701 /*
702 * mv64x60_get_64bit_window()
703 *
704 * Determine the base address and size of a 64-bit window on the bridge.
705 */
706 void __init
707 mv64x60_get_64bit_window(struct mv64x60_handle *bh, u32 window,
708 u32 *base_hi, u32 *base_lo, u32 *size)
709 {
710 u32 val, base_lo_reg, size_reg, base_lo_bits, size_bits;
711 u32 (*get_from_field)(u32 val, u32 num_bits);
712
713 base_lo_reg = bh->ci->window_tab_64bit[window].base_lo_reg;
714
715 if (base_lo_reg != 0) {
716 size_reg = bh->ci->window_tab_64bit[window].size_reg;
717 base_lo_bits = bh->ci->window_tab_64bit[window].base_lo_bits;
718 size_bits = bh->ci->window_tab_64bit[window].size_bits;
719 get_from_field= bh->ci->window_tab_64bit[window].get_from_field;
720
721 *base_hi = mv64x60_read(bh,
722 bh->ci->window_tab_64bit[window].base_hi_reg);
723
724 val = mv64x60_read(bh, base_lo_reg);
725 *base_lo = get_from_field(val, base_lo_bits);
726
727 if (size_reg != 0) {
728 val = mv64x60_read(bh, size_reg);
729 val = get_from_field(val, size_bits);
730 *size = bh->ci->untranslate_size(*base_lo, val,
731 size_bits);
732 } else
733 *size = 0;
734 } else {
735 *base_hi = 0;
736 *base_lo = 0;
737 *size = 0;
738 }
739
740 pr_debug("get 64bit window: %d, base hi: 0x%x, base lo: 0x%x, "
741 "size: 0x%x\n", window, *base_hi, *base_lo, *size);
742 }
743
744 /*
745 * mv64x60_set_64bit_window()
746 *
747 * Set the base address and size of a 64-bit window on the bridge.
748 */
749 void __init
750 mv64x60_set_64bit_window(struct mv64x60_handle *bh, u32 window,
751 u32 base_hi, u32 base_lo, u32 size, u32 other_bits)
752 {
753 u32 val, base_lo_reg, size_reg, base_lo_bits, size_bits;
754 u32 (*map_to_field)(u32 val, u32 num_bits);
755
756 pr_debug("set 64bit window: %d, base hi: 0x%x, base lo: 0x%x, "
757 "size: 0x%x, other: 0x%x\n",
758 window, base_hi, base_lo, size, other_bits);
759
760 base_lo_reg = bh->ci->window_tab_64bit[window].base_lo_reg;
761
762 if (base_lo_reg != 0) {
763 size_reg = bh->ci->window_tab_64bit[window].size_reg;
764 base_lo_bits = bh->ci->window_tab_64bit[window].base_lo_bits;
765 size_bits = bh->ci->window_tab_64bit[window].size_bits;
766 map_to_field = bh->ci->window_tab_64bit[window].map_to_field;
767
768 mv64x60_write(bh, bh->ci->window_tab_64bit[window].base_hi_reg,
769 base_hi);
770
771 val = map_to_field(base_lo, base_lo_bits) | other_bits;
772 mv64x60_write(bh, base_lo_reg, val);
773
774 if (size_reg != 0) {
775 val = bh->ci->translate_size(base_lo, size, size_bits);
776 val = map_to_field(val, size_bits);
777 mv64x60_write(bh, size_reg, val);
778 }
779
780 (void)mv64x60_read(bh, base_lo_reg); /* Flush FIFO */
781 }
782 }
783
784 /*
785 * mv64x60_mask()
786 *
787 * Take the high-order 'num_bits' of 'val' & mask off low bits.
788 */
789 u32 __init
790 mv64x60_mask(u32 val, u32 num_bits)
791 {
792 return val & (0xffffffff << (32 - num_bits));
793 }
794
795 /*
796 * mv64x60_shift_left()
797 *
798 * Take the low-order 'num_bits' of 'val', shift left to align at bit 31 (MSB).
799 */
800 u32 __init
801 mv64x60_shift_left(u32 val, u32 num_bits)
802 {
803 return val << (32 - num_bits);
804 }
805
806 /*
807 * mv64x60_shift_right()
808 *
809 * Take the high-order 'num_bits' of 'val', shift right to align at bit 0 (LSB).
810 */
811 u32 __init
812 mv64x60_shift_right(u32 val, u32 num_bits)
813 {
814 return val >> (32 - num_bits);
815 }
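
/*
 * Worked examples (added for illustration, not in the original source),
 * using num_bits = 20, i.e. the low 12 bits are don't-care bits:
 *
 *	mv64x60_mask(0x12345678, 20)        == 0x12345000
 *	mv64x60_shift_left(0x00012345, 20)  == 0x12345000
 *	mv64x60_shift_right(0x12345678, 20) == 0x00012345
 *
 * These are the kind of conversions the chips' window tables hook up as
 * 'map_to_field'/'get_from_field': shift_left expands a register field
 * into a full address, shift_right extracts the field back out, and mask
 * rounds an address down to the field's granularity.
 */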
816
817 /*
818 *****************************************************************************
819 *
820 * Chip Identification Routines
821 *
822 *****************************************************************************
823 */
824 /*
825 * mv64x60_get_type()
826 *
827 * Determine the type of bridge chip we have.
828 */
829 int __init
830 mv64x60_get_type(struct mv64x60_handle *bh)
831 {
832 struct pci_controller hose;
833 u16 val;
834 u8 save_exclude;
835
836 memset(&hose, 0, sizeof(hose));
837 setup_indirect_pci_nomap(&hose, bh->v_base + MV64x60_PCI0_CONFIG_ADDR,
838 bh->v_base + MV64x60_PCI0_CONFIG_DATA);
839
840 save_exclude = mv64x60_pci_exclude_bridge;
841 mv64x60_pci_exclude_bridge = 0;
842 /* Sanity check of bridge's Vendor ID */
843 early_read_config_word(&hose, 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID, &val);
844
845 if (val != PCI_VENDOR_ID_MARVELL) {
846 mv64x60_pci_exclude_bridge = save_exclude;
847 return -1;
848 }
849
850 /* Get the revision of the chip */
851 early_read_config_word(&hose, 0, PCI_DEVFN(0, 0), PCI_CLASS_REVISION,
852 &val);
853 bh->rev = (u32)(val & 0xff);
854
855 /* Figure out the type of Marvell bridge it is */
856 early_read_config_word(&hose, 0, PCI_DEVFN(0, 0), PCI_DEVICE_ID, &val);
857 mv64x60_pci_exclude_bridge = save_exclude;
858
859 switch (val) {
860 case PCI_DEVICE_ID_MARVELL_GT64260:
861 switch (bh->rev) {
862 case GT64260_REV_A:
863 bh->type = MV64x60_TYPE_GT64260A;
864 break;
865
866 default:
867 printk(KERN_WARNING "Unsupported GT64260 rev %04x\n",
868 bh->rev);
869 /* Assume it's similar to a 'B' rev and fall through */
870 case GT64260_REV_B:
871 bh->type = MV64x60_TYPE_GT64260B;
872 break;
873 }
874 break;
875
876 case PCI_DEVICE_ID_MARVELL_MV64360:
877 /* Marvell won't tell me how to distinguish a 64361 & 64362 */
878 bh->type = MV64x60_TYPE_MV64360;
879 break;
880
881 case PCI_DEVICE_ID_MARVELL_MV64460:
882 bh->type = MV64x60_TYPE_MV64460;
883 break;
884
885 default:
886 printk(KERN_ERR "Unknown Marvell bridge type %04x\n", val);
887 return -1;
888 }
889
890 /* Hang onto bridge type & rev for PIC code */
891 mv64x60_bridge_type = bh->type;
892 mv64x60_bridge_rev = bh->rev;
893
894 return 0;
895 }
896
897 /*
898 * mv64x60_setup_for_chip()
899 *
900 * Set 'bh' to use the proper set of routines for the bridge chip that we have.
901 */
902 int __init
903 mv64x60_setup_for_chip(struct mv64x60_handle *bh)
904 {
905 int rc = 0;
906
907 /* Set up chip-specific info based on the chip/bridge type */
908 switch(bh->type) {
909 case MV64x60_TYPE_GT64260A:
910 bh->ci = &gt64260a_ci;
911 break;
912
913 case MV64x60_TYPE_GT64260B:
914 bh->ci = &gt64260b_ci;
915 break;
916
917 case MV64x60_TYPE_MV64360:
918 bh->ci = &mv64360_ci;
919 break;
920
921 case MV64x60_TYPE_MV64460:
922 bh->ci = &mv64460_ci;
923 break;
924
925 case MV64x60_TYPE_INVALID:
926 default:
927 if (ppc_md.progress)
928 ppc_md.progress("mv64x60: Unsupported bridge", 0x0);
929 printk(KERN_ERR "mv64x60: Unsupported bridge\n");
930 rc = -1;
931 }
932
933 return rc;
934 }
935
936 /*
937 * mv64x60_get_bridge_vbase()
938 *
939 * Return the virtual address of the bridge's registers.
940 */
941 void *
942 mv64x60_get_bridge_vbase(void)
943 {
944 return mv64x60_bridge_vbase;
945 }
946
947 /*
948 * mv64x60_get_bridge_type()
949 *
950 * Return the type of bridge on the platform.
951 */
952 u32
953 mv64x60_get_bridge_type(void)
954 {
955 return mv64x60_bridge_type;
956 }
957
958 /*
959 * mv64x60_get_bridge_rev()
960 *
961 * Return the revision of the bridge on the platform.
962 */
963 u32
964 mv64x60_get_bridge_rev(void)
965 {
966 return mv64x60_bridge_rev;
967 }
968
969 /*
970 *****************************************************************************
971 *
972 * System Memory Window Related Routines
973 *
974 *****************************************************************************
975 */
976 /*
977 * mv64x60_get_mem_size()
978 *
979 * Calculate the amount of memory that the memory controller is set up for.
980 * This should only be used by board-specific code if there is no other
981 * way to determine the amount of memory in the system.
982 */
983 u32 __init
984 mv64x60_get_mem_size(u32 bridge_base, u32 chip_type)
985 {
986 struct mv64x60_handle bh;
987 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2];
988 u32 rc = 0;
989
990 memset(&bh, 0, sizeof(bh));
991
992 bh.type = chip_type;
993 bh.v_base = (void *)bridge_base;
994
995 if (!mv64x60_setup_for_chip(&bh)) {
996 mv64x60_get_mem_windows(&bh, mem_windows);
997 rc = mv64x60_calc_mem_size(&bh, mem_windows);
998 }
999
1000 return rc;
1001 }
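
/*
 * Usage sketch (added for illustration, not in the original source): a
 * board port that can't learn the memory size from firmware might do
 * something like the following, where 'bridge_vbase' is a hypothetical
 * variable holding the virtual address the bridge's registers are
 * mapped at:
 *
 *	mem_size = mv64x60_get_mem_size((u32)bridge_vbase,
 *		MV64x60_TYPE_MV64360);
 */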
1002
1003 /*
1004 * mv64x60_get_mem_windows()
1005 *
1006 * Get the values in the memory controller & return in the 'mem_windows' array.
1007 */
1008 void __init
1009 mv64x60_get_mem_windows(struct mv64x60_handle *bh,
1010 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
1011 {
1012 u32 i, win;
1013
1014 for (win=MV64x60_CPU2MEM_0_WIN,i=0;win<=MV64x60_CPU2MEM_3_WIN;win++,i++)
1015 if (bh->ci->is_enabled_32bit(bh, win))
1016 mv64x60_get_32bit_window(bh, win,
1017 &mem_windows[i][0], &mem_windows[i][1]);
1018 else {
1019 mem_windows[i][0] = 0;
1020 mem_windows[i][1] = 0;
1021 }
1022 }
1023
1024 /*
1025 * mv64x60_calc_mem_size()
1026 *
1027 * Using the memory controller register values in 'mem_windows', determine
1028 * how much memory it is set up for.
1029 */
1030 u32 __init
1031 mv64x60_calc_mem_size(struct mv64x60_handle *bh,
1032 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
1033 {
1034 u32 i, total = 0;
1035
1036 for (i=0; i<MV64x60_CPU2MEM_WINDOWS; i++)
1037 total += mem_windows[i][1];
1038
1039 return total;
1040 }
1041
1042 /*
1043 *****************************************************************************
1044 *
1045 * CPU->System MEM, PCI Config Routines
1046 *
1047 *****************************************************************************
1048 */
1049 /*
1050 * mv64x60_config_cpu2mem_windows()
1051 *
1052 * Configure CPU->Memory windows on the bridge.
1053 */
1054 static u32 prot_tab[] __initdata = {
1055 MV64x60_CPU_PROT_0_WIN, MV64x60_CPU_PROT_1_WIN,
1056 MV64x60_CPU_PROT_2_WIN, MV64x60_CPU_PROT_3_WIN
1057 };
1058
1059 static u32 cpu_snoop_tab[] __initdata = {
1060 MV64x60_CPU_SNOOP_0_WIN, MV64x60_CPU_SNOOP_1_WIN,
1061 MV64x60_CPU_SNOOP_2_WIN, MV64x60_CPU_SNOOP_3_WIN
1062 };
1063
1064 void __init
1065 mv64x60_config_cpu2mem_windows(struct mv64x60_handle *bh,
1066 struct mv64x60_setup_info *si,
1067 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
1068 {
1069 u32 i, win;
1070
1071 /* Set CPU protection & snoop windows */
1072 for (win=MV64x60_CPU2MEM_0_WIN,i=0;win<=MV64x60_CPU2MEM_3_WIN;win++,i++)
1073 if (bh->ci->is_enabled_32bit(bh, win)) {
1074 mv64x60_set_32bit_window(bh, prot_tab[i],
1075 mem_windows[i][0], mem_windows[i][1],
1076 si->cpu_prot_options[i]);
1077 bh->ci->enable_window_32bit(bh, prot_tab[i]);
1078
1079 if (bh->ci->window_tab_32bit[cpu_snoop_tab[i]].
1080 base_reg != 0) {
1081 mv64x60_set_32bit_window(bh, cpu_snoop_tab[i],
1082 mem_windows[i][0], mem_windows[i][1],
1083 si->cpu_snoop_options[i]);
1084 bh->ci->enable_window_32bit(bh,
1085 cpu_snoop_tab[i]);
1086 }
1087
1088 }
1089 }
1090
1091 /*
1092 * mv64x60_config_cpu2pci_windows()
1093 *
1094 * Configure the CPU->PCI windows for one of the PCI buses.
1095 */
1096 static u32 win_tab[2][4] __initdata = {
1097 { MV64x60_CPU2PCI0_IO_WIN, MV64x60_CPU2PCI0_MEM_0_WIN,
1098 MV64x60_CPU2PCI0_MEM_1_WIN, MV64x60_CPU2PCI0_MEM_2_WIN },
1099 { MV64x60_CPU2PCI1_IO_WIN, MV64x60_CPU2PCI1_MEM_0_WIN,
1100 MV64x60_CPU2PCI1_MEM_1_WIN, MV64x60_CPU2PCI1_MEM_2_WIN },
1101 };
1102
1103 static u32 remap_tab[2][4] __initdata = {
1104 { MV64x60_CPU2PCI0_IO_REMAP_WIN, MV64x60_CPU2PCI0_MEM_0_REMAP_WIN,
1105 MV64x60_CPU2PCI0_MEM_1_REMAP_WIN, MV64x60_CPU2PCI0_MEM_2_REMAP_WIN },
1106 { MV64x60_CPU2PCI1_IO_REMAP_WIN, MV64x60_CPU2PCI1_MEM_0_REMAP_WIN,
1107 MV64x60_CPU2PCI1_MEM_1_REMAP_WIN, MV64x60_CPU2PCI1_MEM_2_REMAP_WIN }
1108 };
1109
1110 void __init
1111 mv64x60_config_cpu2pci_windows(struct mv64x60_handle *bh,
1112 struct mv64x60_pci_info *pi, u32 bus)
1113 {
1114 int i;
1115
1116 if (pi->pci_io.size > 0) {
1117 mv64x60_set_32bit_window(bh, win_tab[bus][0],
1118 pi->pci_io.cpu_base, pi->pci_io.size, pi->pci_io.swap);
1119 mv64x60_set_32bit_window(bh, remap_tab[bus][0],
1120 pi->pci_io.pci_base_lo, 0, 0);
1121 bh->ci->enable_window_32bit(bh, win_tab[bus][0]);
1122 } else /* Actually, the window should already be disabled */
1123 bh->ci->disable_window_32bit(bh, win_tab[bus][0]);
1124
1125 for (i=0; i<3; i++)
1126 if (pi->pci_mem[i].size > 0) {
1127 mv64x60_set_32bit_window(bh, win_tab[bus][i+1],
1128 pi->pci_mem[i].cpu_base, pi->pci_mem[i].size,
1129 pi->pci_mem[i].swap);
1130 mv64x60_set_64bit_window(bh, remap_tab[bus][i+1],
1131 pi->pci_mem[i].pci_base_hi,
1132 pi->pci_mem[i].pci_base_lo, 0, 0);
1133 bh->ci->enable_window_32bit(bh, win_tab[bus][i+1]);
1134 } else /* Actually, the window should already be disabled */
1135 bh->ci->disable_window_32bit(bh, win_tab[bus][i+1]);
1136 }
1137
1138 /*
1139 *****************************************************************************
1140 *
1141 * PCI->System MEM Config Routines
1142 *
1143 *****************************************************************************
1144 */
1145 /*
1146 * mv64x60_config_pci2mem_windows()
1147 *
1148 * Configure the PCI->Memory windows on the bridge.
1149 */
1150 static u32 pci_acc_tab[2][4] __initdata = {
1151 { MV64x60_PCI02MEM_ACC_CNTL_0_WIN, MV64x60_PCI02MEM_ACC_CNTL_1_WIN,
1152 MV64x60_PCI02MEM_ACC_CNTL_2_WIN, MV64x60_PCI02MEM_ACC_CNTL_3_WIN },
1153 { MV64x60_PCI12MEM_ACC_CNTL_0_WIN, MV64x60_PCI12MEM_ACC_CNTL_1_WIN,
1154 MV64x60_PCI12MEM_ACC_CNTL_2_WIN, MV64x60_PCI12MEM_ACC_CNTL_3_WIN }
1155 };
1156
1157 static u32 pci_snoop_tab[2][4] __initdata = {
1158 { MV64x60_PCI02MEM_SNOOP_0_WIN, MV64x60_PCI02MEM_SNOOP_1_WIN,
1159 MV64x60_PCI02MEM_SNOOP_2_WIN, MV64x60_PCI02MEM_SNOOP_3_WIN },
1160 { MV64x60_PCI12MEM_SNOOP_0_WIN, MV64x60_PCI12MEM_SNOOP_1_WIN,
1161 MV64x60_PCI12MEM_SNOOP_2_WIN, MV64x60_PCI12MEM_SNOOP_3_WIN }
1162 };
1163
1164 static u32 pci_size_tab[2][4] __initdata = {
1165 { MV64x60_PCI0_MEM_0_SIZE, MV64x60_PCI0_MEM_1_SIZE,
1166 MV64x60_PCI0_MEM_2_SIZE, MV64x60_PCI0_MEM_3_SIZE },
1167 { MV64x60_PCI1_MEM_0_SIZE, MV64x60_PCI1_MEM_1_SIZE,
1168 MV64x60_PCI1_MEM_2_SIZE, MV64x60_PCI1_MEM_3_SIZE }
1169 };
1170
1171 void __init
1172 mv64x60_config_pci2mem_windows(struct mv64x60_handle *bh,
1173 struct pci_controller *hose, struct mv64x60_pci_info *pi,
1174 u32 bus, u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
1175 {
1176 u32 i, win;
1177
1178 /*
1179 * Set the access control, snoop, BAR size, and window base addresses.
1180 * The PCI->MEM window base addresses will exactly match the
1181 * CPU->MEM window base addresses.
1182 */
1183 for (win=MV64x60_CPU2MEM_0_WIN,i=0;win<=MV64x60_CPU2MEM_3_WIN;win++,i++)
1184 if (bh->ci->is_enabled_32bit(bh, win)) {
1185 mv64x60_set_64bit_window(bh,
1186 pci_acc_tab[bus][i], 0,
1187 mem_windows[i][0], mem_windows[i][1],
1188 pi->acc_cntl_options[i]);
1189 bh->ci->enable_window_64bit(bh, pci_acc_tab[bus][i]);
1190
1191 if (bh->ci->window_tab_64bit[
1192 pci_snoop_tab[bus][i]].base_lo_reg != 0) {
1193
1194 mv64x60_set_64bit_window(bh,
1195 pci_snoop_tab[bus][i], 0,
1196 mem_windows[i][0], mem_windows[i][1],
1197 pi->snoop_options[i]);
1198 bh->ci->enable_window_64bit(bh,
1199 pci_snoop_tab[bus][i]);
1200 }
1201
1202 bh->ci->set_pci2mem_window(hose, bus, i,
1203 mem_windows[i][0]);
1204 mv64x60_write(bh, pci_size_tab[bus][i],
1205 mv64x60_mask(mem_windows[i][1] - 1, 20));
1206
1207 /* Enable the window */
1208 mv64x60_clr_bits(bh, ((bus == 0) ?
1209 MV64x60_PCI0_BAR_ENABLE :
1210 MV64x60_PCI1_BAR_ENABLE), (1 << i));
1211 }
1212 }
1213
1214 /*
1215 *****************************************************************************
1216 *
1217 * Hose & Resource Alloc/Init Routines
1218 *
1219 *****************************************************************************
1220 */
1221 /*
1222 * mv64x60_alloc_hose()
1223 *
1224 * Allocate a PCI hose structure for one of the bridge's PCI buses.
1225 */
1226 void __init
1227 mv64x60_alloc_hose(struct mv64x60_handle *bh, u32 cfg_addr, u32 cfg_data,
1228 struct pci_controller **hose)
1229 {
1230 *hose = pcibios_alloc_controller();
1231 setup_indirect_pci_nomap(*hose, bh->v_base + cfg_addr,
1232 bh->v_base + cfg_data);
1233 }
1234
1235 /*
1236 * mv64x60_config_resources()
1237 *
1238 * Calculate the offsets, etc. for the hose structures to reflect all of
1239 * the address remapping that happens as you go from CPU->PCI and PCI->MEM.
1240 */
1241 void __init
1242 mv64x60_config_resources(struct pci_controller *hose,
1243 struct mv64x60_pci_info *pi, u32 io_base)
1244 {
1245 int i;
1246 /* 2 hoses; 4 resources/hose; string <= 64 bytes */
1247 static char s[2][4][64];
1248
1249 if (pi->pci_io.size != 0) {
1250 sprintf(s[hose->index][0], "PCI hose %d I/O Space",
1251 hose->index);
1252 pci_init_resource(&hose->io_resource, io_base - isa_io_base,
1253 io_base - isa_io_base + pi->pci_io.size - 1,
1254 IORESOURCE_IO, s[hose->index][0]);
1255 hose->io_space.start = pi->pci_io.pci_base_lo;
1256 hose->io_space.end = pi->pci_io.pci_base_lo + pi->pci_io.size-1;
1257 hose->io_base_phys = pi->pci_io.cpu_base;
1258 hose->io_base_virt = (void *)isa_io_base;
1259 }
1260
1261 for (i=0; i<3; i++)
1262 if (pi->pci_mem[i].size != 0) {
1263 sprintf(s[hose->index][i+1], "PCI hose %d MEM Space %d",
1264 hose->index, i);
1265 pci_init_resource(&hose->mem_resources[i],
1266 pi->pci_mem[i].cpu_base,
1267 pi->pci_mem[i].cpu_base + pi->pci_mem[i].size-1,
1268 IORESOURCE_MEM, s[hose->index][i+1]);
1269 }
1270
1271 hose->mem_space.end = pi->pci_mem[0].pci_base_lo +
1272 pi->pci_mem[0].size - 1;
1273 hose->pci_mem_offset = pi->pci_mem[0].cpu_base -
1274 pi->pci_mem[0].pci_base_lo;
1275 }
1276
1277 /*
1278 * mv64x60_config_pci_params()
1279 *
1280 * Configure a hose's PCI config space parameters.
1281 */
1282 void __init
1283 mv64x60_config_pci_params(struct pci_controller *hose,
1284 struct mv64x60_pci_info *pi)
1285 {
1286 u32 devfn;
1287 u16 u16_val;
1288 u8 save_exclude;
1289
1290 devfn = PCI_DEVFN(0,0);
1291
1292 save_exclude = mv64x60_pci_exclude_bridge;
1293 mv64x60_pci_exclude_bridge = 0;
1294
1295 /* Set class code to indicate host bridge */
1296 u16_val = PCI_CLASS_BRIDGE_HOST; /* 0x0600 (host bridge) */
1297 early_write_config_word(hose, 0, devfn, PCI_CLASS_DEVICE, u16_val);
1298
1299 /* Enable bridge to be PCI master & respond to PCI MEM cycles */
1300 early_read_config_word(hose, 0, devfn, PCI_COMMAND, &u16_val);
1301 u16_val &= ~(PCI_COMMAND_IO | PCI_COMMAND_INVALIDATE |
1302 PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK);
1303 u16_val |= pi->pci_cmd_bits | PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
1304 early_write_config_word(hose, 0, devfn, PCI_COMMAND, u16_val);
1305
1306 /* Set latency timer, cache line size, clear BIST */
1307 u16_val = (pi->latency_timer << 8) | (L1_CACHE_LINE_SIZE >> 2);
1308 early_write_config_word(hose, 0, devfn, PCI_CACHE_LINE_SIZE, u16_val);
1309
1310 mv64x60_pci_exclude_bridge = save_exclude;
1311 }
1312
1313 /*
1314 *****************************************************************************
1315 *
1316 * PCI Related Routine
1317 *
1318 *****************************************************************************
1319 */
1320 /*
1321 * mv64x60_set_bus()
1322 *
1323 * Set the bus number for the hose directly under the bridge.
1324 */
1325 void __init
1326 mv64x60_set_bus(struct mv64x60_handle *bh, u32 bus, u32 child_bus)
1327 {
1328 struct pci_controller *hose;
1329 u32 pci_mode, p2p_cfg, pci_cfg_offset, val;
1330 u8 save_exclude;
1331
1332 if (bus == 0) {
1333 pci_mode = bh->pci_mode_a;
1334 p2p_cfg = MV64x60_PCI0_P2P_CONFIG;
1335 pci_cfg_offset = 0x64;
1336 hose = bh->hose_a;
1337 } else {
1338 pci_mode = bh->pci_mode_b;
1339 p2p_cfg = MV64x60_PCI1_P2P_CONFIG;
1340 pci_cfg_offset = 0xe4;
1341 hose = bh->hose_b;
1342 }
1343
1344 child_bus &= 0xff;
1345 val = mv64x60_read(bh, p2p_cfg);
1346
1347 if (pci_mode == MV64x60_PCIMODE_CONVENTIONAL) {
1348 val &= 0xe0000000; /* Force dev num to 0, turn off P2P bridge */
1349 val |= (child_bus << 16) | 0xff;
1350 mv64x60_write(bh, p2p_cfg, val);
1351 (void)mv64x60_read(bh, p2p_cfg); /* Flush FIFO */
1352 } else { /* PCI-X */
1353 /*
1354 * Need to use the current bus/dev number (that's in the
1355 * P2P CONFIG reg) to access the bridge's pci config space.
1356 */
1357 save_exclude = mv64x60_pci_exclude_bridge;
1358 mv64x60_pci_exclude_bridge = 0;
1359 early_write_config_dword(hose, (val & 0x00ff0000) >> 16,
1360 PCI_DEVFN(((val & 0x1f000000) >> 24), 0),
1361 pci_cfg_offset, child_bus << 8);
1362 mv64x60_pci_exclude_bridge = save_exclude;
1363 }
1364 }
1365
1366 /*
1367 * mv64x60_pci_exclude_device()
1368 *
1369 * This routine is used to make the bridge not appear when the
1370 * PCI subsystem is accessing PCI devices (in PCI config space).
1371 */
1372 int
1373 mv64x60_pci_exclude_device(u8 bus, u8 devfn)
1374 {
1375 struct pci_controller *hose;
1376
1377 hose = pci_bus_to_hose(bus);
1378
1379 /* Skip slot 0 on both hoses */
1380 if ((mv64x60_pci_exclude_bridge == 1) && (PCI_SLOT(devfn) == 0) &&
1381 (hose->first_busno == bus))
1382
1383 return PCIBIOS_DEVICE_NOT_FOUND;
1384 else
1385 return PCIBIOS_SUCCESSFUL;
1386 } /* mv64x60_pci_exclude_device() */
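
/*
 * Hook-up sketch (added for illustration, not in the original source):
 * board ports typically wire this routine into the PCI config access
 * path so the host bridge itself is skipped during bus scans, e.g.
 * (assuming the arch/ppc ppc_md.pci_exclude_device hook):
 *
 *	ppc_md.pci_exclude_device = mv64x60_pci_exclude_device;
 */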
1387
1388 /*
1389 *****************************************************************************
1390 *
1391 * Platform Device Routines
1392 *
1393 *****************************************************************************
1394 */
1395
1396 /*
1397 * mv64x60_pd_fixup()
1398 *
1399 * Add the physical base address of the bridge's registers to each device's
1400 * MEM resources so drivers can ioremap() them.
1401 */
1402 void __init
1403 mv64x60_pd_fixup(struct mv64x60_handle *bh, struct platform_device *pd_devs[],
1404 u32 entries)
1405 {
1406 struct resource *r;
1407 u32 i, j;
1408
1409 for (i=0; i<entries; i++) {
1410 j = 0;
1411
1412 while ((r = platform_get_resource(pd_devs[i],IORESOURCE_MEM,j))
1413 != NULL) {
1414
1415 r->start += bh->p_base;
1416 r->end += bh->p_base;
1417 j++;
1418 }
1419 }
1420 }
1421
1422 /*
1423 * mv64x60_add_pds()
1424 *
1425 * Add the mv64x60 platform devices to the list of platform devices.
1426 */
1427 static int __init
1428 mv64x60_add_pds(void)
1429 {
1430 return platform_add_devices(mv64x60_pd_devs,
1431 ARRAY_SIZE(mv64x60_pd_devs));
1432 }
1433 arch_initcall(mv64x60_add_pds);
1434
1435 /*
1436 *****************************************************************************
1437 *
1438 * GT64260-Specific Routines
1439 *
1440 *****************************************************************************
1441 */
1442 /*
1443 * gt64260_translate_size()
1444 *
1445 * On the GT64260, the size register is really the "top" address of the window.
1446 */
1447 static u32 __init
1448 gt64260_translate_size(u32 base, u32 size, u32 num_bits)
1449 {
1450 return base + mv64x60_mask(size - 1, num_bits);
1451 }
1452
1453 /*
1454 * gt64260_untranslate_size()
1455 *
1456 * Translate the top address of a window into a window size.
1457 */
1458 static u32 __init
1459 gt64260_untranslate_size(u32 base, u32 size, u32 num_bits)
1460 {
1461 if (size >= base)
1462 size = size - base + (1 << (32 - num_bits));
1463 else
1464 size = 0;
1465
1466 return size;
1467 }
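
/*
 * Worked example (added for illustration, not in the original source),
 * assuming a 12-bit field, i.e. 1 MB (1 << 20) granularity:
 *
 *	gt64260_translate_size(0x80000000, 0x10000000, 12)   == 0x8ff00000
 *	gt64260_untranslate_size(0x80000000, 0x8ff00000, 12)  == 0x10000000
 *
 * i.e. a 256 MB window based at 0x80000000 gets a "top" value of
 * 0x8ff00000, and untranslating that top value recovers the 256 MB size.
 */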
1468
1469 /*
1470 * gt64260_set_pci2mem_window()
1471 *
1472 * The PCI->MEM window registers actually live in PCI config space, so they
1473 * must be set via the correct config space BARs.
1474 */
1475 static u32 gt64260_reg_addrs[2][4] __initdata = {
1476 { 0x10, 0x14, 0x18, 0x1c }, { 0x90, 0x94, 0x98, 0x9c }
1477 };
1478
1479 static void __init
1480 gt64260_set_pci2mem_window(struct pci_controller *hose, u32 bus, u32 window,
1481 u32 base)
1482 {
1483 u8 save_exclude;
1484
1485 pr_debug("set pci->mem window: %d, hose: %d, base: 0x%x\n", window,
1486 hose->index, base);
1487
1488 save_exclude = mv64x60_pci_exclude_bridge;
1489 mv64x60_pci_exclude_bridge = 0;
1490 early_write_config_dword(hose, 0, PCI_DEVFN(0, 0),
1491 gt64260_reg_addrs[bus][window], mv64x60_mask(base, 20) | 0x8);
1492 mv64x60_pci_exclude_bridge = save_exclude;
1493 }
1494
1495 /*
1496 * gt64260_set_pci2regs_window()
1497 *
1498 * Set where the bridge's registers appear in PCI MEM space.
1499 */
1500 static u32 gt64260_offset[2] __initdata = {0x20, 0xa0};
1501
1502 static void __init
1503 gt64260_set_pci2regs_window(struct mv64x60_handle *bh,
1504 struct pci_controller *hose, u32 bus, u32 base)
1505 {
1506 u8 save_exclude;
1507
1508 pr_debug("set pci->internal regs hose: %d, base: 0x%x\n", hose->index,
1509 base);
1510
1511 save_exclude = mv64x60_pci_exclude_bridge;
1512 mv64x60_pci_exclude_bridge = 0;
1513 early_write_config_dword(hose, 0, PCI_DEVFN(0,0), gt64260_offset[bus],
1514 (base << 16));
1515 mv64x60_pci_exclude_bridge = save_exclude;
1516 }
1517
1518 /*
1519 * gt64260_is_enabled_32bit()
1520 *
1521 * On a GT64260, a window is enabled iff its top address is >= its base
1522 * address.
1523 */
1524 static u32 __init
1525 gt64260_is_enabled_32bit(struct mv64x60_handle *bh, u32 window)
1526 {
1527 u32 rc = 0;
1528
1529 if ((gt64260_32bit_windows[window].base_reg != 0) &&
1530 (gt64260_32bit_windows[window].size_reg != 0) &&
1531 ((mv64x60_read(bh, gt64260_32bit_windows[window].size_reg) &
1532 ((1 << gt64260_32bit_windows[window].size_bits) - 1)) >=
1533 (mv64x60_read(bh, gt64260_32bit_windows[window].base_reg) &
1534 ((1 << gt64260_32bit_windows[window].base_bits) - 1))))
1535
1536 rc = 1;
1537
1538 return rc;
1539 }
1540
1541 /*
1542 * gt64260_enable_window_32bit()
1543 *
1544 * On the GT64260, a window is enabled iff the top address is >= the base
1545 * address of the window. Since the window has already been configured by
1546 * the time this routine is called, we have nothing to do here.
1547 */
1548 static void __init
1549 gt64260_enable_window_32bit(struct mv64x60_handle *bh, u32 window)
1550 {
1551 pr_debug("enable 32bit window: %d\n", window);
1552 }
1553
1554 /*
1555 * gt64260_disable_window_32bit()
1556 *
1557 * On a GT64260, you disable a window by setting its top address to be less
1558 * than its base address.
1559 */
1560 static void __init
1561 gt64260_disable_window_32bit(struct mv64x60_handle *bh, u32 window)
1562 {
1563 pr_debug("disable 32bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
1564 window, gt64260_32bit_windows[window].base_reg,
1565 gt64260_32bit_windows[window].size_reg);
1566
1567 if ((gt64260_32bit_windows[window].base_reg != 0) &&
1568 (gt64260_32bit_windows[window].size_reg != 0)) {
1569
1570 /* To disable, make bottom reg higher than top reg */
1571 mv64x60_write(bh, gt64260_32bit_windows[window].base_reg,0xfff);
1572 mv64x60_write(bh, gt64260_32bit_windows[window].size_reg, 0);
1573 }
1574 }
1575
1576 /*
1577 * gt64260_enable_window_64bit()
1578 *
1579 * On the GT64260, a window is enabled iff the top address is >= the base
1580 * address of the window. Since the window has already been configured by
1581 * the time this routine is called, we have nothing to do here.
1582 */
1583 static void __init
1584 gt64260_enable_window_64bit(struct mv64x60_handle *bh, u32 window)
1585 {
1586 pr_debug("enable 64bit window: %d\n", window);
1587 }
1588
1589 /*
1590 * gt64260_disable_window_64bit()
1591 *
1592 * On a GT64260, you disable a window by setting its top address to be less
1593 * than its base address.
1594 */
1595 static void __init
1596 gt64260_disable_window_64bit(struct mv64x60_handle *bh, u32 window)
1597 {
1598 pr_debug("disable 64bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
1599 window, gt64260_64bit_windows[window].base_lo_reg,
1600 gt64260_64bit_windows[window].size_reg);
1601
1602 if ((gt64260_64bit_windows[window].base_lo_reg != 0) &&
1603 (gt64260_64bit_windows[window].size_reg != 0)) {
1604
1605 /* To disable, make bottom reg higher than top reg */
1606 mv64x60_write(bh, gt64260_64bit_windows[window].base_lo_reg,
1607 0xfff);
1608 mv64x60_write(bh, gt64260_64bit_windows[window].base_hi_reg, 0);
1609 mv64x60_write(bh, gt64260_64bit_windows[window].size_reg, 0);
1610 }
1611 }
1612
1613 /*
1614 * gt64260_disable_all_windows()
1615 *
1616 * The GT64260 has several windows that aren't represented in the table of
1617 * windows at the top of this file. This routine turns all of them off
1618 * except for the memory controller windows, of course.
1619 */
1620 static void __init
1621 gt64260_disable_all_windows(struct mv64x60_handle *bh,
1622 struct mv64x60_setup_info *si)
1623 {
1624 u32 i, preserve;
1625
1626 /* Disable 32bit windows (don't disable cpu->mem windows) */
1627 for (i=MV64x60_CPU2DEV_0_WIN; i<MV64x60_32BIT_WIN_COUNT; i++) {
1628 if (i < 32)
1629 preserve = si->window_preserve_mask_32_lo & (1 << i);
1630 else
1631 preserve = si->window_preserve_mask_32_hi & (1<<(i-32));
1632
1633 if (!preserve)
1634 gt64260_disable_window_32bit(bh, i);
1635 }
1636
1637 /* Disable 64bit windows */
1638 for (i=0; i<MV64x60_64BIT_WIN_COUNT; i++)
1639 if (!(si->window_preserve_mask_64 & (1<<i)))
1640 gt64260_disable_window_64bit(bh, i);
1641
1642 /* Turn off cpu protection windows not in gt64260_32bit_windows[] */
1643 mv64x60_write(bh, GT64260_CPU_PROT_BASE_4, 0xfff);
1644 mv64x60_write(bh, GT64260_CPU_PROT_SIZE_4, 0);
1645 mv64x60_write(bh, GT64260_CPU_PROT_BASE_5, 0xfff);
1646 mv64x60_write(bh, GT64260_CPU_PROT_SIZE_5, 0);
1647 mv64x60_write(bh, GT64260_CPU_PROT_BASE_6, 0xfff);
1648 mv64x60_write(bh, GT64260_CPU_PROT_SIZE_6, 0);
1649 mv64x60_write(bh, GT64260_CPU_PROT_BASE_7, 0xfff);
1650 mv64x60_write(bh, GT64260_CPU_PROT_SIZE_7, 0);
1651
1652 /* Turn off PCI->MEM access cntl wins not in gt64260_64bit_windows[] */
1653 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_4_BASE_LO, 0xfff);
1654 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_4_BASE_HI, 0);
1655 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_4_SIZE, 0);
1656 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_5_BASE_LO, 0xfff);
1657 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_5_BASE_HI, 0);
1658 mv64x60_write(bh, MV64x60_PCI0_ACC_CNTL_5_SIZE, 0);
1659 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_6_BASE_LO, 0xfff);
1660 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_6_BASE_HI, 0);
1661 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_6_SIZE, 0);
1662 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_7_BASE_LO, 0xfff);
1663 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_7_BASE_HI, 0);
1664 mv64x60_write(bh, GT64260_PCI0_ACC_CNTL_7_SIZE, 0);
1665
1666 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_4_BASE_LO, 0xfff);
1667 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_4_BASE_HI, 0);
1668 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_4_SIZE, 0);
1669 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_5_BASE_LO, 0xfff);
1670 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_5_BASE_HI, 0);
1671 mv64x60_write(bh, MV64x60_PCI1_ACC_CNTL_5_SIZE, 0);
1672 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_6_BASE_LO, 0xfff);
1673 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_6_BASE_HI, 0);
1674 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_6_SIZE, 0);
1675 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_7_BASE_LO, 0xfff);
1676 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_7_BASE_HI, 0);
1677 mv64x60_write(bh, GT64260_PCI1_ACC_CNTL_7_SIZE, 0);
1678
1679 /* Disable all PCI-><whatever> windows */
1680 mv64x60_set_bits(bh, MV64x60_PCI0_BAR_ENABLE, 0x07fffdff);
1681 mv64x60_set_bits(bh, MV64x60_PCI1_BAR_ENABLE, 0x07fffdff);
1682
1683 /*
1684 * Some firmwares enable a bunch of intr sources
1685 * for the PCI INT output pins.
1686 */
1687 mv64x60_write(bh, GT64260_IC_CPU_INTR_MASK_LO, 0);
1688 mv64x60_write(bh, GT64260_IC_CPU_INTR_MASK_HI, 0);
1689 mv64x60_write(bh, GT64260_IC_PCI0_INTR_MASK_LO, 0);
1690 mv64x60_write(bh, GT64260_IC_PCI0_INTR_MASK_HI, 0);
1691 mv64x60_write(bh, GT64260_IC_PCI1_INTR_MASK_LO, 0);
1692 mv64x60_write(bh, GT64260_IC_PCI1_INTR_MASK_HI, 0);
1693 mv64x60_write(bh, GT64260_IC_CPU_INT_0_MASK, 0);
1694 mv64x60_write(bh, GT64260_IC_CPU_INT_1_MASK, 0);
1695 mv64x60_write(bh, GT64260_IC_CPU_INT_2_MASK, 0);
1696 mv64x60_write(bh, GT64260_IC_CPU_INT_3_MASK, 0);
1697 }
1698
1699 /*
1700 * gt64260a_chip_specific_init()
1701 *
1702 * Implement errata work arounds for the GT64260A.
1703 */
1704 static void __init
1705 gt64260a_chip_specific_init(struct mv64x60_handle *bh,
1706 struct mv64x60_setup_info *si)
1707 {
1708 #ifdef CONFIG_SERIAL_MPSC
1709 struct resource *r;
1710 #endif
1711 #if !defined(CONFIG_NOT_COHERENT_CACHE)
1712 u32 val;
1713 u8 save_exclude;
1714 #endif
1715
1716 if (si->pci_0.enable_bus)
1717 mv64x60_set_bits(bh, MV64x60_PCI0_CMD,
1718 ((1<<4) | (1<<5) | (1<<9) | (1<<13)));
1719
1720 if (si->pci_1.enable_bus)
1721 mv64x60_set_bits(bh, MV64x60_PCI1_CMD,
1722 ((1<<4) | (1<<5) | (1<<9) | (1<<13)));
1723
1724 /*
1725 * Dave Wilhardt found that bit 4 in the PCI Command registers must
1726 * be set if you are using cache coherency.
1727 */
1728 #if !defined(CONFIG_NOT_COHERENT_CACHE)
1729 /* Res #MEM-4 -- cpu read buffer to buffer 1 */
1730 if ((mv64x60_read(bh, MV64x60_CPU_MODE) & 0xf0) == 0x40)
1731 mv64x60_set_bits(bh, GT64260_SDRAM_CONFIG, (1<<26));
1732
1733 save_exclude = mv64x60_pci_exclude_bridge;
1734 mv64x60_pci_exclude_bridge = 0;
1735 if (si->pci_0.enable_bus) {
1736 early_read_config_dword(bh->hose_a, 0, PCI_DEVFN(0,0),
1737 PCI_COMMAND, &val);
1738 val |= PCI_COMMAND_INVALIDATE;
1739 early_write_config_dword(bh->hose_a, 0, PCI_DEVFN(0,0),
1740 PCI_COMMAND, val);
1741 }
1742
1743 if (si->pci_1.enable_bus) {
1744 early_read_config_dword(bh->hose_b, 0, PCI_DEVFN(0,0),
1745 PCI_COMMAND, &val);
1746 val |= PCI_COMMAND_INVALIDATE;
1747 early_write_config_dword(bh->hose_b, 0, PCI_DEVFN(0,0),
1748 PCI_COMMAND, val);
1749 }
1750 mv64x60_pci_exclude_bridge = save_exclude;
1751 #endif
1752
1753 /* Disable buffer/descriptor snooping */
1754 mv64x60_clr_bits(bh, 0xf280, (1<< 6) | (1<<14) | (1<<22) | (1<<30));
1755 mv64x60_clr_bits(bh, 0xf2c0, (1<< 6) | (1<<14) | (1<<22) | (1<<30));
1756
1757 #ifdef CONFIG_SERIAL_MPSC
1758 mv64x60_mpsc0_pdata.mirror_regs = 1;
1759 mv64x60_mpsc0_pdata.cache_mgmt = 1;
1760 mv64x60_mpsc1_pdata.mirror_regs = 1;
1761 mv64x60_mpsc1_pdata.cache_mgmt = 1;
1762
1763 if ((r = platform_get_resource(&mpsc1_device, IORESOURCE_IRQ, 0))
1764 != NULL) {
1765 r->start = MV64x60_IRQ_SDMA_0;
1766 r->end = MV64x60_IRQ_SDMA_0;
1767 }
1768 #endif
1769 }
1770
1771 /*
1772 * gt64260b_chip_specific_init()
1773 *
1774 * Implement errata work arounds for the GT64260B.
1775 */
1776 static void __init
1777 gt64260b_chip_specific_init(struct mv64x60_handle *bh,
1778 struct mv64x60_setup_info *si)
1779 {
1780 #ifdef CONFIG_SERIAL_MPSC
1781 struct resource *r;
1782 #endif
1783 #if !defined(CONFIG_NOT_COHERENT_CACHE)
1784 u32 val;
1785 u8 save_exclude;
1786 #endif
1787
1788 if (si->pci_0.enable_bus)
1789 mv64x60_set_bits(bh, MV64x60_PCI0_CMD,
1790 ((1<<4) | (1<<5) | (1<<9) | (1<<13)));
1791
1792 if (si->pci_1.enable_bus)
1793 mv64x60_set_bits(bh, MV64x60_PCI1_CMD,
1794 ((1<<4) | (1<<5) | (1<<9) | (1<<13)));
1795
1796 /*
1797 * Dave Wilhardt found that bit 4 (PCI_COMMAND_INVALIDATE) in the PCI Command
1798 * registers must be set if you are using cache coherency.
1799 */
1800 #if !defined(CONFIG_NOT_COHERENT_CACHE)
1801 mv64x60_set_bits(bh, GT64260_CPU_WB_PRIORITY_BUFFER_DEPTH, 0xf);
1802
1803 /* Res #MEM-4 -- cpu read buffer to buffer 1 */
1804 if ((mv64x60_read(bh, MV64x60_CPU_MODE) & 0xf0) == 0x40)
1805 mv64x60_set_bits(bh, GT64260_SDRAM_CONFIG, (1<<26));
1806
1807 save_exclude = mv64x60_pci_exclude_bridge;
1808 mv64x60_pci_exclude_bridge = 0;
1809 if (si->pci_0.enable_bus) {
1810 early_read_config_dword(bh->hose_a, 0, PCI_DEVFN(0,0),
1811 PCI_COMMAND, &val);
1812 val |= PCI_COMMAND_INVALIDATE;
1813 early_write_config_dword(bh->hose_a, 0, PCI_DEVFN(0,0),
1814 PCI_COMMAND, val);
1815 }
1816
1817 if (si->pci_1.enable_bus) {
1818 early_read_config_dword(bh->hose_b, 0, PCI_DEVFN(0,0),
1819 PCI_COMMAND, &val);
1820 val |= PCI_COMMAND_INVALIDATE;
1821 early_write_config_dword(bh->hose_b, 0, PCI_DEVFN(0,0),
1822 PCI_COMMAND, val);
1823 }
1824 mv64x60_pci_exclude_bridge = save_exclude;
1825 #endif
1826
1827 /* Disable buffer/descriptor snooping */
1828 mv64x60_clr_bits(bh, 0xf280, (1<< 6) | (1<<14) | (1<<22) | (1<<30));
1829 mv64x60_clr_bits(bh, 0xf2c0, (1<< 6) | (1<<14) | (1<<22) | (1<<30));
1830
1831 #ifdef CONFIG_SERIAL_MPSC
1832 /*
1833 * The 64260B is not supposed to have the bug where the MPSC & ENET
1834 * can't access cache coherent regions. However, testing has shown
1835 * that the MPSC, at least, still has this bug.
1836 */
1837 mv64x60_mpsc0_pdata.cache_mgmt = 1;
1838 mv64x60_mpsc1_pdata.cache_mgmt = 1;
1839
1840 if ((r = platform_get_resource(&mpsc1_device, IORESOURCE_IRQ, 0))
1841 != NULL) {
1842 r->start = MV64x60_IRQ_SDMA_0;
1843 r->end = MV64x60_IRQ_SDMA_0;
1844 }
1845 #endif
1846 }
1847
1848 /*
1849 *****************************************************************************
1850 *
1851 * MV64360-Specific Routines
1852 *
1853 *****************************************************************************
1854 */
1855 /*
1856 * mv64360_translate_size()
1857 *
1858 * On the MV64360, the size register is programmed much like the size you
1859 * read back from a PCI config space BAR: from LSB to MSB it is a run of
1860 * 1's followed by a run of 0's. In other words, "size - 1", assuming the
1861 * size is a power of 2.
1862 */
1863 static u32 __init
1864 mv64360_translate_size(u32 base_addr, u32 size, u32 num_bits)
1865 {
1866 return mv64x60_mask(size - 1, num_bits);
1867 }
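/*
 * Worked example, assuming mv64x60_mask() keeps only the top num_bits bits
 * of its argument: a 256 MB window (size = 0x10000000) with num_bits = 16
 * becomes size - 1 = 0x0fffffff, which is masked down to 0x0fff0000 before
 * being written to the size register.
 */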
1868
1869 /*
1870 * mv64360_untranslate_size()
1871 *
1872 * Translate the size register value of a window into a window size.
1873 */
1874 static u32 __init
1875 mv64360_untranslate_size(u32 base_addr, u32 size, u32 num_bits)
1876 {
1877 if (size > 0) {
1878 size >>= (32 - num_bits);
1879 size++;
1880 size <<= (32 - num_bits);
1881 }
1882
1883 return size;
1884 }
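/*
 * For example, with num_bits = 16 a size register value of 0x0fff0000
 * untranslates back to a 256 MB window: 0x0fff0000 >> 16 = 0x0fff,
 * + 1 = 0x1000, << 16 = 0x10000000.
 */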
1885
1886 /*
1887 * mv64360_set_pci2mem_window()
1888 *
1889 * The PCI->MEM window registers actually live in PCI config space, so they
1890 * are set by programming the corresponding config space BARs.
1891 */
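/*
 * [bus][window] lookup table: each entry names the PCI function and the
 * config space offsets of the 64-bit BAR pair (high/low dword) that backs
 * the given PCI->MEM window.
 */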
1892 static struct {
1893 u32 fcn;
1894 u32 base_hi_bar;
1895 u32 base_lo_bar;
1896 } mv64360_reg_addrs[2][4] __initdata = {
1897 {{ 0, 0x14, 0x10 }, { 0, 0x1c, 0x18 },
1898 { 1, 0x14, 0x10 }, { 1, 0x1c, 0x18 }},
1899 {{ 0, 0x94, 0x90 }, { 0, 0x9c, 0x98 },
1900 { 1, 0x94, 0x90 }, { 1, 0x9c, 0x98 }}
1901 };
1902
1903 static void __init
1904 mv64360_set_pci2mem_window(struct pci_controller *hose, u32 bus, u32 window,
1905 u32 base)
1906 {
1907 u8 save_exclude;
1908
1909 pr_debug("set pci->mem window: %d, hose: %d, base: 0x%x\n", window,
1910 hose->index, base);
1911
1912 save_exclude = mv64x60_pci_exclude_bridge;
1913 mv64x60_pci_exclude_bridge = 0;
1914 early_write_config_dword(hose, 0,
1915 PCI_DEVFN(0, mv64360_reg_addrs[bus][window].fcn),
1916 mv64360_reg_addrs[bus][window].base_hi_bar, 0);
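/* Low BAR dword: top 20 bits of the base; 0xc marks it prefetchable, 64-bit */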
1917 early_write_config_dword(hose, 0,
1918 PCI_DEVFN(0, mv64360_reg_addrs[bus][window].fcn),
1919 mv64360_reg_addrs[bus][window].base_lo_bar,
1920 mv64x60_mask(base,20) | 0xc);
1921 mv64x60_pci_exclude_bridge = save_exclude;
1922 }
1923
1924 /*
1925 * mv64360_set_pci2regs_window()
1926 *
1927 * Set where the bridge's registers appear in PCI MEM space.
1928 */
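/* Per-bus config space offsets of the BAR pair that maps the internal regs */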
1929 static u32 mv64360_offset[2][2] __initdata = {{0x20, 0x24}, {0xa0, 0xa4}};
1930
1931 static void __init
1932 mv64360_set_pci2regs_window(struct mv64x60_handle *bh,
1933 struct pci_controller *hose, u32 bus, u32 base)
1934 {
1935 u8 save_exclude;
1936
1937 pr_debug("set pci->internal regs hose: %d, base: 0x%x\n", hose->index,
1938 base);
1939
1940 save_exclude = mv64x60_pci_exclude_bridge;
1941 mv64x60_pci_exclude_bridge = 0;
1942 early_write_config_dword(hose, 0, PCI_DEVFN(0,0),
1943 mv64360_offset[bus][0], (base << 16));
1944 early_write_config_dword(hose, 0, PCI_DEVFN(0,0),
1945 mv64360_offset[bus][1], 0);
1946 mv64x60_pci_exclude_bridge = save_exclude;
1947 }
1948
1949 /*
1950 * mv64360_is_enabled_32bit()
1951 *
1952 * On a MV64360, a window is enabled by either clearing a bit in the
1953 * CPU BAR Enable reg or setting a bit in the window's base reg.
1954 * Note that this doesn't work for windows on the PCI slave side, but we
1955 * don't check those so it's okay.
1956 */
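/*
 * The window's 'extra' field encodes both the enable scheme (high bits,
 * selected with MV64x60_EXTRA_MASK) and the bit number to test (low bits).
 * The BAR Enable registers are active low (bit clear = window enabled),
 * while the CPU protection registers are active high.
 */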
1957 static u32 __init
1958 mv64360_is_enabled_32bit(struct mv64x60_handle *bh, u32 window)
1959 {
1960 u32 extra, rc = 0;
1961
1962 if (((mv64360_32bit_windows[window].base_reg != 0) &&
1963 (mv64360_32bit_windows[window].size_reg != 0)) ||
1964 (window == MV64x60_CPU2SRAM_WIN)) {
1965
1966 extra = mv64360_32bit_windows[window].extra;
1967
1968 switch (extra & MV64x60_EXTRA_MASK) {
1969 case MV64x60_EXTRA_CPUWIN_ENAB:
1970 rc = (mv64x60_read(bh, MV64360_CPU_BAR_ENABLE) &
1971 (1 << (extra & 0x1f))) == 0;
1972 break;
1973
1974 case MV64x60_EXTRA_CPUPROT_ENAB:
1975 rc = (mv64x60_read(bh,
1976 mv64360_32bit_windows[window].base_reg) &
1977 (1 << (extra & 0x1f))) != 0;
1978 break;
1979
1980 case MV64x60_EXTRA_ENET_ENAB:
1981 rc = (mv64x60_read(bh, MV64360_ENET2MEM_BAR_ENABLE) &
1982 (1 << (extra & 0x7))) == 0;
1983 break;
1984
1985 case MV64x60_EXTRA_MPSC_ENAB:
1986 rc = (mv64x60_read(bh, MV64360_MPSC2MEM_BAR_ENABLE) &
1987 (1 << (extra & 0x3))) == 0;
1988 break;
1989
1990 case MV64x60_EXTRA_IDMA_ENAB:
1991 rc = (mv64x60_read(bh, MV64360_IDMA2MEM_BAR_ENABLE) &
1992 (1 << (extra & 0x7))) == 0;
1993 break;
1994
1995 default:
1996 printk(KERN_ERR "mv64360_is_enabled: %s\n",
1997 "32bit table corrupted");
1998 }
1999 }
2000
2001 return rc;
2002 }
2003
2004 /*
2005 * mv64360_enable_window_32bit()
2006 *
2007 * On a MV64360, a window is enabled by either clearing a bit in the
2008 * CPU BAR Enable reg or setting a bit in the window's base reg.
2009 */
2010 static void __init
2011 mv64360_enable_window_32bit(struct mv64x60_handle *bh, u32 window)
2012 {
2013 u32 extra;
2014
2015 pr_debug("enable 32bit window: %d\n", window);
2016
2017 if (((mv64360_32bit_windows[window].base_reg != 0) &&
2018 (mv64360_32bit_windows[window].size_reg != 0)) ||
2019 (window == MV64x60_CPU2SRAM_WIN)) {
2020
2021 extra = mv64360_32bit_windows[window].extra;
2022
2023 switch (extra & MV64x60_EXTRA_MASK) {
2024 case MV64x60_EXTRA_CPUWIN_ENAB:
2025 mv64x60_clr_bits(bh, MV64360_CPU_BAR_ENABLE,
2026 (1 << (extra & 0x1f)));
2027 break;
2028
2029 case MV64x60_EXTRA_CPUPROT_ENAB:
2030 mv64x60_set_bits(bh,
2031 mv64360_32bit_windows[window].base_reg,
2032 (1 << (extra & 0x1f)));
2033 break;
2034
2035 case MV64x60_EXTRA_ENET_ENAB:
2036 mv64x60_clr_bits(bh, MV64360_ENET2MEM_BAR_ENABLE,
2037 (1 << (extra & 0x7)));
2038 break;
2039
2040 case MV64x60_EXTRA_MPSC_ENAB:
2041 mv64x60_clr_bits(bh, MV64360_MPSC2MEM_BAR_ENABLE,
2042 (1 << (extra & 0x3)));
2043 break;
2044
2045 case MV64x60_EXTRA_IDMA_ENAB:
2046 mv64x60_clr_bits(bh, MV64360_IDMA2MEM_BAR_ENABLE,
2047 (1 << (extra & 0x7)));
2048 break;
2049
2050 default:
2051 printk(KERN_ERR "mv64360_enable: %s\n",
2052 "32bit table corrupted");
2053 }
2054 }
2055 }
2056
2057 /*
2058 * mv64360_disable_window_32bit()
2059 *
2060 * On a MV64360, a window is disabled by either setting a bit in the
2061 * CPU BAR Enable reg or clearing a bit in the window's base reg.
2062 */
2063 static void __init
2064 mv64360_disable_window_32bit(struct mv64x60_handle *bh, u32 window)
2065 {
2066 u32 extra;
2067
2068 pr_debug("disable 32bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
2069 window, mv64360_32bit_windows[window].base_reg,
2070 mv64360_32bit_windows[window].size_reg);
2071
2072 if (((mv64360_32bit_windows[window].base_reg != 0) &&
2073 (mv64360_32bit_windows[window].size_reg != 0)) ||
2074 (window == MV64x60_CPU2SRAM_WIN)) {
2075
2076 extra = mv64360_32bit_windows[window].extra;
2077
2078 switch (extra & MV64x60_EXTRA_MASK) {
2079 case MV64x60_EXTRA_CPUWIN_ENAB:
2080 mv64x60_set_bits(bh, MV64360_CPU_BAR_ENABLE,
2081 (1 << (extra & 0x1f)));
2082 break;
2083
2084 case MV64x60_EXTRA_CPUPROT_ENAB:
2085 mv64x60_clr_bits(bh,
2086 mv64360_32bit_windows[window].base_reg,
2087 (1 << (extra & 0x1f)));
2088 break;
2089
2090 case MV64x60_EXTRA_ENET_ENAB:
2091 mv64x60_set_bits(bh, MV64360_ENET2MEM_BAR_ENABLE,
2092 (1 << (extra & 0x7)));
2093 break;
2094
2095 case MV64x60_EXTRA_MPSC_ENAB:
2096 mv64x60_set_bits(bh, MV64360_MPSC2MEM_BAR_ENABLE,
2097 (1 << (extra & 0x3)));
2098 break;
2099
2100 case MV64x60_EXTRA_IDMA_ENAB:
2101 mv64x60_set_bits(bh, MV64360_IDMA2MEM_BAR_ENABLE,
2102 (1 << (extra & 0x7)));
2103 break;
2104
2105 default:
2106 printk(KERN_ERR "mv64360_disable: %s\n",
2107 "32bit table corrupted");
2108 }
2109 }
2110 }
2111
2112 /*
2113 * mv64360_enable_window_64bit()
2114 *
2115 * On the MV64360, a 64-bit window is enabled by setting a bit in the window's
2116 * base reg.
2117 */
2118 static void __init
2119 mv64360_enable_window_64bit(struct mv64x60_handle *bh, u32 window)
2120 {
2121 pr_debug("enable 64bit window: %d\n", window);
2122
2123 if ((mv64360_64bit_windows[window].base_lo_reg != 0) &&
2124 (mv64360_64bit_windows[window].size_reg != 0)) {
2125
2126 if ((mv64360_64bit_windows[window].extra & MV64x60_EXTRA_MASK)
2127 == MV64x60_EXTRA_PCIACC_ENAB)
2128 mv64x60_set_bits(bh,
2129 mv64360_64bit_windows[window].base_lo_reg,
2130 (1 << (mv64360_64bit_windows[window].extra &
2131 0x1f)));
2132 else
2133 printk(KERN_ERR "mv64360_enable: %s\n",
2134 "64bit table corrupted");
2135 }
2136 }
2137
2138 /*
2139 * mv64360_disable_window_64bit()
2140 *
2141 * On a MV64360, a 64-bit window is disabled by clearing a bit in the window's
2142 * base reg.
2143 */
2144 static void __init
2145 mv64360_disable_window_64bit(struct mv64x60_handle *bh, u32 window)
2146 {
2147 pr_debug("disable 64bit window: %d, base_reg: 0x%x, size_reg: 0x%x\n",
2148 window, mv64360_64bit_windows[window].base_lo_reg,
2149 mv64360_64bit_windows[window].size_reg);
2150
2151 if ((mv64360_64bit_windows[window].base_lo_reg != 0) &&
2152 (mv64360_64bit_windows[window].size_reg != 0)) {
2153 if ((mv64360_64bit_windows[window].extra & MV64x60_EXTRA_MASK)
2154 == MV64x60_EXTRA_PCIACC_ENAB)
2155 mv64x60_clr_bits(bh,
2156 mv64360_64bit_windows[window].base_lo_reg,
2157 (1 << (mv64360_64bit_windows[window].extra &
2158 0x1f)));
2159 else
2160 printk(KERN_ERR "mv64360_disable: %s\n",
2161 "64bit table corrupted");
2162 }
2163 }
2164
2165 /*
2166 * mv64360_disable_all_windows()
2167 *
2168 * The MV64360 has a few windows that aren't represented in the table of
2169 * windows at the top of this file. This routine turns all of them off
2170 * except for the memory controller windows, of course.
2171 */
2172 static void __init
2173 mv64360_disable_all_windows(struct mv64x60_handle *bh,
2174 struct mv64x60_setup_info *si)
2175 {
2176 u32 preserve, i;
2177
2178 /* Disable 32bit windows (don't disable cpu->mem windows) */
2179 for (i=MV64x60_CPU2DEV_0_WIN; i<MV64x60_32BIT_WIN_COUNT; i++) {
2180 if (i < 32)
2181 preserve = si->window_preserve_mask_32_lo & (1 << i);
2182 else
2183 preserve = si->window_preserve_mask_32_hi & (1<<(i-32));
2184
2185 if (!preserve)
2186 mv64360_disable_window_32bit(bh, i);
2187 }
2188
2189 /* Disable 64bit windows */
2190 for (i=0; i<MV64x60_64BIT_WIN_COUNT; i++)
2191 if (!(si->window_preserve_mask_64 & (1<<i)))
2192 mv64360_disable_window_64bit(bh, i);
2193
2194 /* Turn off PCI->MEM access cntl wins not in mv64360_64bit_windows[] */
2195 mv64x60_clr_bits(bh, MV64x60_PCI0_ACC_CNTL_4_BASE_LO, (1<<0));
2196 mv64x60_clr_bits(bh, MV64x60_PCI0_ACC_CNTL_5_BASE_LO, (1<<0));
2197 mv64x60_clr_bits(bh, MV64x60_PCI1_ACC_CNTL_4_BASE_LO, (1<<0));
2198 mv64x60_clr_bits(bh, MV64x60_PCI1_ACC_CNTL_5_BASE_LO, (1<<0));
2199
2200 /* Disable all PCI-><whatever> windows */
2201 mv64x60_set_bits(bh, MV64x60_PCI0_BAR_ENABLE, 0x0000f9ff);
2202 mv64x60_set_bits(bh, MV64x60_PCI1_BAR_ENABLE, 0x0000f9ff);
2203 }
2204
2205 /*
2206 * mv64360_config_io2mem_windows()
2207 *
2208 * ENET, MPSC, and IDMA ctlrs on the MV64[34]60 have separate windows that
2209 * must be set up so that the respective ctlr can access system memory.
2210 */
2211 static u32 enet_tab[MV64x60_CPU2MEM_WINDOWS] __initdata = {
2212 MV64x60_ENET2MEM_0_WIN, MV64x60_ENET2MEM_1_WIN,
2213 MV64x60_ENET2MEM_2_WIN, MV64x60_ENET2MEM_3_WIN,
2214 };
2215
2216 static u32 mpsc_tab[MV64x60_CPU2MEM_WINDOWS] __initdata = {
2217 MV64x60_MPSC2MEM_0_WIN, MV64x60_MPSC2MEM_1_WIN,
2218 MV64x60_MPSC2MEM_2_WIN, MV64x60_MPSC2MEM_3_WIN,
2219 };
2220
2221 static u32 idma_tab[MV64x60_CPU2MEM_WINDOWS] __initdata = {
2222 MV64x60_IDMA2MEM_0_WIN, MV64x60_IDMA2MEM_1_WIN,
2223 MV64x60_IDMA2MEM_2_WIN, MV64x60_IDMA2MEM_3_WIN,
2224 };
2225
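/*
 * Active-low DRAM chip-select masks for the table below: clearing bit i
 * appears to select SDRAM bank/CS i; the value is shifted into the window
 * attribute field when the windows are programmed.
 */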
2226 static u32 dram_selects[MV64x60_CPU2MEM_WINDOWS] __initdata =
2227 { 0xe, 0xd, 0xb, 0x7 };
2228
2229 static void __init
2230 mv64360_config_io2mem_windows(struct mv64x60_handle *bh,
2231 struct mv64x60_setup_info *si,
2232 u32 mem_windows[MV64x60_CPU2MEM_WINDOWS][2])
2233 {
2234 u32 i, win;
2235
2236 pr_debug("config_io2mem_windows: enet, mpsc, idma -> system memory\n");
2237
2238 mv64x60_write(bh, MV64360_ENET2MEM_ACC_PROT_0, 0);
2239 mv64x60_write(bh, MV64360_ENET2MEM_ACC_PROT_1, 0);
2240 mv64x60_write(bh, MV64360_ENET2MEM_ACC_PROT_2, 0);
2241
2242 mv64x60_write(bh, MV64360_MPSC2MEM_ACC_PROT_0, 0);
2243 mv64x60_write(bh, MV64360_MPSC2MEM_ACC_PROT_1, 0);
2244
2245 mv64x60_write(bh, MV64360_IDMA2MEM_ACC_PROT_0, 0);
2246 mv64x60_write(bh, MV64360_IDMA2MEM_ACC_PROT_1, 0);
2247 mv64x60_write(bh, MV64360_IDMA2MEM_ACC_PROT_2, 0);
2248 mv64x60_write(bh, MV64360_IDMA2MEM_ACC_PROT_3, 0);
2249
2250 /* Assume the mem ctlr has no more windows than the embedded I/O ctlrs */
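/*
 * Each *2MEM_ACC_PROT register holds a 2-bit access code per window;
 * "0x3 << (i << 1)" below grants window i full read/write access.
 */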
2251 for (win=MV64x60_CPU2MEM_0_WIN,i=0;win<=MV64x60_CPU2MEM_3_WIN;win++,i++)
2252 if (bh->ci->is_enabled_32bit(bh, win)) {
2253 mv64x60_set_32bit_window(bh, enet_tab[i],
2254 mem_windows[i][0], mem_windows[i][1],
2255 (dram_selects[i] << 8) |
2256 (si->enet_options[i] & 0x3000));
2257 bh->ci->enable_window_32bit(bh, enet_tab[i]);
2258
2259 /* Give enet r/w access to memory region */
2260 mv64x60_set_bits(bh, MV64360_ENET2MEM_ACC_PROT_0,
2261 (0x3 << (i << 1)));
2262 mv64x60_set_bits(bh, MV64360_ENET2MEM_ACC_PROT_1,
2263 (0x3 << (i << 1)));
2264 mv64x60_set_bits(bh, MV64360_ENET2MEM_ACC_PROT_2,
2265 (0x3 << (i << 1)));
2266
2267 mv64x60_set_32bit_window(bh, mpsc_tab[i],
2268 mem_windows[i][0], mem_windows[i][1],
2269 (dram_selects[i] << 8) |
2270 (si->mpsc_options[i] & 0x3000));
2271 bh->ci->enable_window_32bit(bh, mpsc_tab[i]);
2272
2273 /* Give mpsc r/w access to memory region */
2274 mv64x60_set_bits(bh, MV64360_MPSC2MEM_ACC_PROT_0,
2275 (0x3 << (i << 1)));
2276 mv64x60_set_bits(bh, MV64360_MPSC2MEM_ACC_PROT_1,
2277 (0x3 << (i << 1)));
2278
2279 mv64x60_set_32bit_window(bh, idma_tab[i],
2280 mem_windows[i][0], mem_windows[i][1],
2281 (dram_selects[i] << 8) |
2282 (si->idma_options[i] & 0x3000));
2283 bh->ci->enable_window_32bit(bh, idma_tab[i]);
2284
2285 /* Give idma r/w access to memory region */
2286 mv64x60_set_bits(bh, MV64360_IDMA2MEM_ACC_PROT_0,
2287 (0x3 << (i << 1)));
2288 mv64x60_set_bits(bh, MV64360_IDMA2MEM_ACC_PROT_1,
2289 (0x3 << (i << 1)));
2290 mv64x60_set_bits(bh, MV64360_IDMA2MEM_ACC_PROT_2,
2291 (0x3 << (i << 1)));
2292 mv64x60_set_bits(bh, MV64360_IDMA2MEM_ACC_PROT_3,
2293 (0x3 << (i << 1)));
2294 }
2295 }
2296
2297 /*
2298 * mv64360_set_mpsc2regs_window()
2299 *
2300 * The MPSC has a window onto the bridge's internal registers. Call this
2301 * routine to move that window so it doesn't conflict with the windows that
2302 * map the MPSC to system memory.
2303 */
2304 static void __init
2305 mv64360_set_mpsc2regs_window(struct mv64x60_handle *bh, u32 base)
2306 {
2307 pr_debug("set mpsc->internal regs, base: 0x%x\n", base);
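/* Only the top 16 bits of the base are kept, so the window is 64 KB aligned */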
2308 mv64x60_write(bh, MV64360_MPSC2REGS_BASE, base & 0xffff0000);
2309 }
2310
2311 /*
2312 * mv64360_chip_specific_init()
2313 *
2314 * Implement errata workarounds for the MV64360.
2315 */
2316 static void __init
2317 mv64360_chip_specific_init(struct mv64x60_handle *bh,
2318 struct mv64x60_setup_info *si)
2319 {
2320 #if !defined(CONFIG_NOT_COHERENT_CACHE)
2321 mv64x60_set_bits(bh, MV64360_D_UNIT_CONTROL_HIGH, (1<<24));
2322 #endif
2323 #ifdef CONFIG_SERIAL_MPSC
2324 mv64x60_mpsc0_pdata.brg_can_tune = 1;
2325 mv64x60_mpsc0_pdata.cache_mgmt = 1;
2326 mv64x60_mpsc1_pdata.brg_can_tune = 1;
2327 mv64x60_mpsc1_pdata.cache_mgmt = 1;
2328 #endif
2329 }
2330
2331 /*
2332 * mv64460_chip_specific_init()
2333 *
2334 * Implement errata workarounds for the MV64460.
2335 */
2336 static void __init
2337 mv64460_chip_specific_init(struct mv64x60_handle *bh,
2338 struct mv64x60_setup_info *si)
2339 {
2340 #if !defined(CONFIG_NOT_COHERENT_CACHE)
2341 mv64x60_set_bits(bh, MV64360_D_UNIT_CONTROL_HIGH, (1<<24) | (1<<25));
2342 mv64x60_set_bits(bh, MV64460_D_UNIT_MMASK, (1<<1) | (1<<4));
2343 #endif
2344 #ifdef CONFIG_SERIAL_MPSC
2345 mv64x60_mpsc0_pdata.brg_can_tune = 1;
2346 mv64x60_mpsc0_pdata.cache_mgmt = 1;
2347 mv64x60_mpsc1_pdata.brg_can_tune = 1;
2348 mv64x60_mpsc1_pdata.cache_mgmt = 1;
2349 #endif
2350 }
2351
2352
2353 #if defined(CONFIG_SYSFS) && !defined(CONFIG_GT64260)
2354 /* Export the CompactPCI hotswap register via sysfs for ENUM# event monitoring */
2355 #define VAL_LEN_MAX 11 /* 32-bit hex or dec stringified number + '\n' */
2356
2357 DECLARE_MUTEX(mv64xxx_hs_lock);
2358
2359 static ssize_t
2360 mv64xxx_hs_reg_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
2361 {
2362 u32 v;
2363 u8 save_exclude;
2364
2365 if (off > 0)
2366 return 0;
2367 if (count < VAL_LEN_MAX)
2368 return -EINVAL;
2369
2370 if (down_interruptible(&mv64xxx_hs_lock))
2371 return -ERESTARTSYS;
2372 save_exclude = mv64x60_pci_exclude_bridge;
2373 mv64x60_pci_exclude_bridge = 0;
2374 early_read_config_dword(&sysfs_hose_a, 0, PCI_DEVFN(0, 0),
2375 MV64360_PCICFG_CPCI_HOTSWAP, &v);
2376 mv64x60_pci_exclude_bridge = save_exclude;
2377 up(&mv64xxx_hs_lock);
2378
2379 return sprintf(buf, "0x%08x\n", v);
2380 }
2381
2382 static ssize_t
2383 mv64xxx_hs_reg_write(struct kobject *kobj, char *buf, loff_t off, size_t count)
2384 {
2385 u32 v;
2386 u8 save_exclude;
2387
2388 if (off > 0)
2389 return 0;
2390 if (count <= 0)
2391 return -EINVAL;
2392
2393 if (sscanf(buf, "%i", &v) == 1) {
2394 if (down_interruptible(&mv64xxx_hs_lock))
2395 return -ERESTARTSYS;
2396 save_exclude = mv64x60_pci_exclude_bridge;
2397 mv64x60_pci_exclude_bridge = 0;
2398 early_write_config_dword(&sysfs_hose_a, 0, PCI_DEVFN(0, 0),
2399 MV64360_PCICFG_CPCI_HOTSWAP, v);
2400 mv64x60_pci_exclude_bridge = save_exclude;
2401 up(&mv64xxx_hs_lock);
2402 }
2403 else
2404 count = -EINVAL;
2405
2406 return count;
2407 }
2408
2409 static struct bin_attribute mv64xxx_hs_reg_attr = { /* Hotswap register */
2410 .attr = {
2411 .name = "hs_reg",
2412 .mode = S_IRUGO | S_IWUSR,
2413 .owner = THIS_MODULE,
2414 },
2415 .size = VAL_LEN_MAX,
2416 .read = mv64xxx_hs_reg_read,
2417 .write = mv64xxx_hs_reg_write,
2418 };
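/*
 * From user space the register is then accessible through this sysfs file,
 * e.g. (the exact path depends on how mv64xxx_device is registered):
 *
 *   cat /sys/devices/platform/<mv64xxx device>/hs_reg
 *   echo 0x<value> > /sys/devices/platform/<mv64xxx device>/hs_reg
 */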
2419
2420 /* Provide sysfs file indicating if this platform supports the hs_reg */
2421 static ssize_t
2422 mv64xxx_hs_reg_valid_show(struct device *dev, struct device_attribute *attr,
2423 char *buf)
2424 {
2425 struct platform_device *pdev;
2426 struct mv64xxx_pdata *pdp;
2427 u32 v;
2428
2429 pdev = container_of(dev, struct platform_device, dev);
2430 pdp = (struct mv64xxx_pdata *)pdev->dev.platform_data;
2431
2432 if (down_interruptible(&mv64xxx_hs_lock))
2433 return -ERESTARTSYS;
2434 v = pdp->hs_reg_valid;
2435 up(&mv64xxx_hs_lock);
2436
2437 return sprintf(buf, "%i\n", v);
2438 }
2439 static DEVICE_ATTR(hs_reg_valid, S_IRUGO, mv64xxx_hs_reg_valid_show, NULL);
2440
2441 static int __init
2442 mv64xxx_sysfs_init(void)
2443 {
2444 sysfs_create_bin_file(&mv64xxx_device.dev.kobj, &mv64xxx_hs_reg_attr);
2445 sysfs_create_file(&mv64xxx_device.dev.kobj, &dev_attr_hs_reg_valid.attr);
2446 return 0;
2447 }
2448 subsys_initcall(mv64xxx_sysfs_init);
2449 #endif