/* pci_fire.c: Sun4u platform PCI-E controller support.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/irq.h>

#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/irq.h>

#include "pci_impl.h"

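/* Register access helpers.  Both bypass the MMU and issue physical
 * loads/stores through ASI_PHYS_BYPASS_EC_E, so the "register" arguments
 * used throughout this file are raw physical addresses (pbm->pbm_regs or
 * pbm->controller_regs plus an offset), not ioremap()'d pointers.  A
 * minimal usage sketch, using an offset defined further below:
 *
 *	u64 ctrl = fire_read(pbm->pbm_regs + FIRE_IOMMU_CONTROL);
 *	fire_write(pbm->pbm_regs + FIRE_IOMMU_CONTROL, ctrl | 1UL);
 */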
#define fire_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define fire_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory")

static void __init pci_fire_scan_bus(struct pci_pbm_info *pbm)
{
	pbm->pci_bus = pci_scan_one_pbm(pbm);

	/* XXX register error interrupt handlers XXX */
}

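/* Offsets of the Fire IOMMU registers within the PBM register block;
 * pci_fire_pbm_iommu_init() records them in the generic struct iommu so
 * that the shared sparc64 IOMMU code can drive the unit.
 */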
#define FIRE_IOMMU_CONTROL	0x40000UL
#define FIRE_IOMMU_TSBBASE	0x40008UL
#define FIRE_IOMMU_FLUSH	0x40100UL
#define FIRE_IOMMU_FLUSHINV	0x40108UL

static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	u32 vdma[2], dma_mask;
	u64 control;
	int tsbsize, err;

	/* No virtual-dma property on these guys, use largest size. */
	vdma[0] = 0xc0000000; /* base */
	vdma[1] = 0x40000000; /* size */
	dma_mask = 0xffffffff;
	tsbsize = 128;

	/* Register addresses. */
	iommu->iommu_control  = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
	iommu->iommu_tsbbase  = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
	iommu->iommu_flush    = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
	iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;

	/* We use the main control/status register of FIRE as the write
	 * completion register.
	 */
	iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;

	/*
	 * Invalidate TLB Entries.
	 */
	fire_write(iommu->iommu_flushinv, ~(u64)0);

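	/* 128 * 8 * 1024 gives a 1MB TSB.  With 8-byte IOPTEs and 8KB IO
	 * pages (the usual sparc64 values, assumed here rather than stated
	 * in this file) that is 128K translations, exactly covering the
	 * 1GB (0x40000000) DVMA window chosen above.
	 */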
	err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask);
	if (err)
		return err;

	fire_write(iommu->iommu_tsbbase, __pa(iommu->page_table) | 0x7UL);

	control = fire_read(iommu->iommu_control);
	control |= (0x00000400 /* TSB cache snoop enable */ |
		    0x00000300 /* Cache mode */ |
		    0x00000002 /* Bypass enable */ |
		    0x00000001 /* Translation enable */);
	fire_write(iommu->iommu_control, control);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_msiq_entry {
	u64		word0;
#define MSIQ_WORD0_RESV			0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE		0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT	56
#define MSIQ_WORD0_LEN			0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT		46
#define MSIQ_WORD0_ADDR0		0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT		32
#define MSIQ_WORD0_RID			0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT		16
#define MSIQ_WORD0_DATA0		0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT		0

#define MSIQ_TYPE_MSG			0x6
#define MSIQ_TYPE_MSI32			0xb
#define MSIQ_TYPE_MSI64			0xf

	u64		word1;
#define MSIQ_WORD1_ADDR1		0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT		16
#define MSIQ_WORD1_DATA1		0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT		0

	u64		resv[6];
};

/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG	0x010000UL
#define EVENT_QUEUE_BASE_ADDR_ALL_ONES	0xfffc000000000000UL

#define EVENT_QUEUE_CONTROL_SET(EQ)	(0x011000UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_SET_OFLOW	0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_SET_EN	0x0000100000000000UL

#define EVENT_QUEUE_CONTROL_CLEAR(EQ)	(0x011200UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_CLEAR_OF	0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_E2I	0x0000800000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_DIS	0x0000100000000000UL

#define EVENT_QUEUE_STATE(EQ)		(0x011400UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_STATE_MASK		0x0000000000000007UL
#define EVENT_QUEUE_STATE_IDLE		0x0000000000000001UL
#define EVENT_QUEUE_STATE_ACTIVE	0x0000000000000002UL
#define EVENT_QUEUE_STATE_ERROR		0x0000000000000004UL

#define EVENT_QUEUE_TAIL(EQ)		(0x011600UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_TAIL_OFLOW		0x0200000000000000UL
#define EVENT_QUEUE_TAIL_VAL		0x000000000000007fUL

#define EVENT_QUEUE_HEAD(EQ)		(0x011800UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_HEAD_VAL		0x000000000000007fUL

#define MSI_MAP(MSI)			(0x020000UL + (MSI) * 0x8UL)
#define MSI_MAP_VALID			0x8000000000000000UL
#define MSI_MAP_EQWR_N			0x4000000000000000UL
#define MSI_MAP_EQNUM			0x000000000000003fUL

#define MSI_CLEAR(MSI)			(0x028000UL + (MSI) * 0x8UL)
#define MSI_CLEAR_EQWR_N		0x4000000000000000UL

#define IMONDO_DATA0			0x02C000UL
#define IMONDO_DATA0_DATA		0xffffffffffffffc0UL

#define IMONDO_DATA1			0x02C008UL
#define IMONDO_DATA1_DATA		0xffffffffffffffffUL

#define MSI_32BIT_ADDR			0x034000UL
#define MSI_32BIT_ADDR_VAL		0x00000000ffff0000UL

#define MSI_64BIT_ADDR			0x034008UL
#define MSI_64BIT_ADDR_VAL		0xffffffffffff0000UL

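/* MSI/MSI-X delivery model used below: hardware deposits one
 * pci_msiq_entry per message into a per-queue ring inside the 512KB
 * block allocated by pci_fire_msiq_alloc(), while software consumes
 * entries and advances the HEAD register (the driver only ever resets
 * TAIL, consistent with the hardware owning it).  The next three helpers
 * follow the get_head / dequeue_msi / set_head split of the
 * sparc64_msiq_ops interface they are registered in further down.
 */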
static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			     unsigned long *head)
{
	*head = fire_read(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
	return 0;
}

static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
				unsigned long *head, unsigned long *msi)
{
	unsigned long type_fmt, type, msi_num;
	struct pci_msiq_entry *base, *ep;

	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
	ep = &base[*head];

	if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
		return 0;

	type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
		    MSIQ_WORD0_FMT_TYPE_SHIFT);
	type = (type_fmt >> 3);
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
			  MSIQ_WORD0_DATA0_SHIFT);

	fire_write(pbm->pbm_regs + MSI_CLEAR(msi_num),
		   MSI_CLEAR_EQWR_N);

	/* Clear the entry. */
	ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;

	/* Go to next entry in ring. */
	(*head)++;
	if (*head >= pbm->msiq_ent_count)
		*head = 0;

	return 1;
}

static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			     unsigned long head)
{
	fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid), head);
	return 0;
}

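/* Binding an MSI to an event queue is a read-modify-write of that MSI's
 * mapping register: select the EQ number, clear any stale state through
 * the per-MSI CLEAR register, and only then set the VALID bit so the
 * mapping goes live fully programmed.
 */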
static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long msi, int is_msi64)
{
	u64 val;

	val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
	val &= ~(MSI_MAP_EQNUM);
	val |= msiqid;
	fire_write(pbm->pbm_regs + MSI_MAP(msi), val);

	fire_write(pbm->pbm_regs + MSI_CLEAR(msi),
		   MSI_CLEAR_EQWR_N);

	val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
	val |= MSI_MAP_VALID;
	fire_write(pbm->pbm_regs + MSI_MAP(msi), val);

	return 0;
}

static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long msiqid;
	u64 val;

	val = fire_read(pbm->pbm_regs + MSI_MAP(msi));
	msiqid = (val & MSI_MAP_EQNUM);

	val &= ~MSI_MAP_VALID;

	fire_write(pbm->pbm_regs + MSI_MAP(msi), val);

	return 0;
}

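/* Queue memory layout, as implied by the constants in this file: the
 * 512KB allocation below is carved into 8KB slices (the "* 8192" stride
 * in pci_fire_dequeue_msi()), each holding 128 pci_msiq_entry records of
 * 64 bytes, which matches the 0x7f HEAD/TAIL value masks; the 0x3f
 * MSI_MAP_EQNUM mask allows up to 64 such queues, and 64 * 8KB = 512KB.
 */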
static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long pages, order, i;

	order = get_order(512 * 1024);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	fire_write(pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG,
		   (EVENT_QUEUE_BASE_ADDR_ALL_ONES |
		    __pa(pbm->msi_queues)));

	fire_write(pbm->pbm_regs + IMONDO_DATA0,
		   pbm->portid << 6);
	fire_write(pbm->pbm_regs + IMONDO_DATA1, 0);

	fire_write(pbm->pbm_regs + MSI_32BIT_ADDR,
		   pbm->msi32_start);
	fire_write(pbm->pbm_regs + MSI_64BIT_ADDR,
		   pbm->msi64_start);

	for (i = 0; i < pbm->msiq_num; i++) {
		fire_write(pbm->pbm_regs + EVENT_QUEUE_HEAD(i), 0);
		fire_write(pbm->pbm_regs + EVENT_QUEUE_TAIL(i), 0);
	}

	return 0;
}

static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long pages, order;

	order = get_order(512 * 1024);
	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

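/* Each event queue signals through a normal Fire interrupt.  The helper
 * below locates the per-devino IMAP/ICLR register pair, enables the IMAP
 * entry with a fixed interrupt-controller selection (see the XXX note
 * about the four IRQ controllers), builds a virtual IRQ on top of it via
 * build_irq(), and finally enables the event queue itself.
 */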
static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
				   unsigned long msiqid,
				   unsigned long devino)
{
	unsigned long cregs = (unsigned long) pbm->pbm_regs;
	unsigned long imap_reg, iclr_reg, int_ctrlr;
	unsigned int virt_irq;
	int fixup;
	u64 val;

	imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
	iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));

	/* XXX iterate amongst the 4 IRQ controllers XXX */
	int_ctrlr = (1UL << 6);

	val = fire_read(imap_reg);
	val |= (1UL << 63) | int_ctrlr;
	fire_write(imap_reg, val);

	fixup = ((pbm->portid << 6) | devino) - int_ctrlr;

	virt_irq = build_irq(fixup, iclr_reg, imap_reg);
	if (!virt_irq)
		return -ENOMEM;

	fire_write(pbm->pbm_regs +
		   EVENT_QUEUE_CONTROL_SET(msiqid),
		   EVENT_QUEUE_CONTROL_SET_EN);

	return virt_irq;
}

static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
	.get_head	= pci_fire_get_head,
	.dequeue_msi	= pci_fire_dequeue_msi,
	.set_head	= pci_fire_set_head,
	.msi_setup	= pci_fire_msi_setup,
	.msi_teardown	= pci_fire_msi_teardown,
	.msiq_alloc	= pci_fire_msiq_alloc,
	.msiq_free	= pci_fire_msiq_free,
	.msiq_build_irq	= pci_fire_msiq_build_irq,
};

static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL	0x470010UL
#define FIRE_PARITY_ENAB	0x8000000000000000UL
#define FIRE_FATAL_RESET_CTL	0x471028UL
#define FIRE_FATAL_RESET_SPARE	0x0000000004000000UL
#define FIRE_FATAL_RESET_MB	0x0000000002000000UL
#define FIRE_FATAL_RESET_CPE	0x0000000000008000UL
#define FIRE_FATAL_RESET_APE	0x0000000000004000UL
#define FIRE_FATAL_RESET_PIO	0x0000000000000040UL
#define FIRE_FATAL_RESET_JW	0x0000000000000004UL
#define FIRE_FATAL_RESET_JI	0x0000000000000002UL
#define FIRE_FATAL_RESET_JR	0x0000000000000001UL
#define FIRE_CORE_INTR_ENABLE	0x471800UL

/* Based at pbm->pbm_regs */
#define FIRE_TLU_CTRL		0x80000UL
#define FIRE_TLU_CTRL_TIM	0x00000000da000000UL
#define FIRE_TLU_CTRL_QDET	0x0000000000000100UL
#define FIRE_TLU_CTRL_CFG	0x0000000000000001UL
#define FIRE_TLU_DEV_CTRL	0x90008UL
#define FIRE_TLU_LINK_CTRL	0x90020UL
#define FIRE_TLU_LINK_CTRL_CLK	0x0000000000000040UL
#define FIRE_LPU_RESET		0xe2008UL
#define FIRE_LPU_LLCFG		0xe2200UL
#define FIRE_LPU_LLCFG_VC0	0x0000000000000100UL
#define FIRE_LPU_FCTRL_UCTRL	0xe2240UL
#define FIRE_LPU_FCTRL_UCTRL_N	0x0000000000000002UL
#define FIRE_LPU_FCTRL_UCTRL_P	0x0000000000000001UL
#define FIRE_LPU_TXL_FIFOP	0xe2430UL
#define FIRE_LPU_LTSSM_CFG2	0xe2788UL
#define FIRE_LPU_LTSSM_CFG3	0xe2790UL
#define FIRE_LPU_LTSSM_CFG4	0xe2798UL
#define FIRE_LPU_LTSSM_CFG5	0xe27a0UL
#define FIRE_DMC_IENAB		0x31800UL
#define FIRE_DMC_DBG_SEL_A	0x53000UL
#define FIRE_DMC_DBG_SEL_B	0x53008UL
#define FIRE_PEC_IENAB		0x51800UL

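/* Bring-up sequence performed below: enable parity checking, arm the
 * fatal-reset sources, unmask the core interrupts, program the TLU
 * control/device/link registers, take the LPU out of reset and set its
 * flow-control and LTSSM configuration values, then unmask the DMC and
 * PEC interrupt enables.  (Reading TLU as the PCI-E transaction-layer
 * block and LPU as the link/phy block is an assumption based on the
 * register names, not something this file states.)
 */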
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
	u64 val;

	fire_write(pbm->controller_regs + FIRE_PARITY_CONTROL,
		   FIRE_PARITY_ENAB);

	fire_write(pbm->controller_regs + FIRE_FATAL_RESET_CTL,
		   (FIRE_FATAL_RESET_SPARE |
		    FIRE_FATAL_RESET_MB |
		    FIRE_FATAL_RESET_CPE |
		    FIRE_FATAL_RESET_APE |
		    FIRE_FATAL_RESET_PIO |
		    FIRE_FATAL_RESET_JW |
		    FIRE_FATAL_RESET_JI |
		    FIRE_FATAL_RESET_JR));

	fire_write(pbm->controller_regs + FIRE_CORE_INTR_ENABLE, ~(u64)0);

	val = fire_read(pbm->pbm_regs + FIRE_TLU_CTRL);
	val |= (FIRE_TLU_CTRL_TIM |
		FIRE_TLU_CTRL_QDET |
		FIRE_TLU_CTRL_CFG);
	fire_write(pbm->pbm_regs + FIRE_TLU_CTRL, val);
	fire_write(pbm->pbm_regs + FIRE_TLU_DEV_CTRL, 0);
	fire_write(pbm->pbm_regs + FIRE_TLU_LINK_CTRL,
		   FIRE_TLU_LINK_CTRL_CLK);

	fire_write(pbm->pbm_regs + FIRE_LPU_RESET, 0);
	fire_write(pbm->pbm_regs + FIRE_LPU_LLCFG,
		   FIRE_LPU_LLCFG_VC0);
	fire_write(pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL,
		   (FIRE_LPU_FCTRL_UCTRL_N |
		    FIRE_LPU_FCTRL_UCTRL_P));
	fire_write(pbm->pbm_regs + FIRE_LPU_TXL_FIFOP,
		   ((0xffff << 16) | (0x0000 << 0)));
	fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2, 3000000);
	fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3, 500000);
	fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4,
		   (2 << 16) | (140 << 8));
	fire_write(pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5, 0);

	fire_write(pbm->pbm_regs + FIRE_DMC_IENAB, ~(u64)0);
	fire_write(pbm->pbm_regs + FIRE_DMC_DBG_SEL_A, 0);
	fire_write(pbm->pbm_regs + FIRE_DMC_DBG_SEL_B, 0);

	fire_write(pbm->pbm_regs + FIRE_PEC_IENAB, ~(u64)0);
}

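/* One pci_controller_info covers one Fire chip; its two PBMs have
 * adjacent port ids (even for PBM A, odd for PBM B), which is what
 * portid_compare() further down keys on.  The "reg" property supplies
 * the PBM register block first, plus a second block from which
 * controller_regs is derived by backing off the 0x410000 offset that
 * pci_fire_pbm_iommu_init() uses as the write completion register.
 */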
static int __init pci_fire_pbm_init(struct pci_controller_info *p,
				    struct device_node *dp, u32 portid)
{
	const struct linux_prom64_registers *regs;
	struct pci_pbm_info *pbm;
	int err;

	if ((portid & 1) == 0)
		pbm = &p->pbm_A;
	else
		pbm = &p->pbm_B;

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	pbm->scan_bus = pci_fire_scan_bus;
	pbm->pci_ops = &sun4u_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->portid = portid;
	pbm->parent = p;
	pbm->prom_node = dp;
	pbm->name = dp->full_name;

	regs = of_get_property(dp, "reg", NULL);
	pbm->pbm_regs = regs[0].phys_addr;
	pbm->controller_regs = regs[1].phys_addr - 0x410000UL;

	printk("%s: SUN4U PCIE Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	pci_fire_hw_init(pbm);

	err = pci_fire_pbm_iommu_init(pbm);
	if (err)
		return err;

	pci_fire_msi_init(pbm);

	return 0;
}

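/* Two port ids that differ only in the low bit belong to the two PBMs
 * of the same Fire controller, so x == (y ^ 1) identifies a sibling.
 */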
static inline int portid_compare(u32 x, u32 y)
{
	if (x == (y ^ 1))
		return 1;
	return 0;
}

void __init fire_pci_init(struct device_node *dp, const char *model_name)
{
	struct pci_controller_info *p;
	u32 portid = of_getintprop_default(dp, "portid", 0xff);
	struct iommu *iommu;
	struct pci_pbm_info *pbm;

	for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
		if (portid_compare(pbm->portid, portid)) {
			if (pci_fire_pbm_init(pbm->parent, dp, portid))
				goto fatal_memory_error;
			return;
		}
	}

	p = kzalloc(sizeof(struct pci_controller_info), GFP_ATOMIC);
	if (!p)
		goto fatal_memory_error;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_A.iommu = iommu;

	iommu = kzalloc(sizeof(struct iommu), GFP_ATOMIC);
	if (!iommu)
		goto fatal_memory_error;

	p->pbm_B.iommu = iommu;

	if (pci_fire_pbm_init(p, dp, portid))
		goto fatal_memory_error;

	return;

fatal_memory_error:
	prom_printf("PCI_FIRE: Fatal memory allocation error.\n");
	prom_halt();
}