/*
 * PCI handling of I2O controller
 *
 * Copyright (C) 1999-2002 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * A lot of the I2O message side code from this is taken from the Red
 * Creek RCPCI45 adapter driver by Red Creek Communications
 *
 * Fixes/additions:
 *	Philipp Rumpf
 *	Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
 *	Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
 *	Deepak Saxena <deepak@plexity.net>
 *	Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
 *	Alan Cox <alan@redhat.com>:
 *		Ported to Linux 2.5.
 *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *		Minor fixes for 2.6.
 *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *		Support for sysfs included.
 */

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/i2o.h>
#include "core.h"

#define OSM_DESCRIPTION	"I2O-subsystem"

/* PCI device id table for all I2O controllers */
static struct pci_device_id __devinitdata i2o_pci_ids[] = {
	{PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, 0xffff00)},
	{PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511)},
	{.vendor = PCI_VENDOR_ID_INTEL, .device = 0x1962,
	 .subvendor = PCI_VENDOR_ID_PROMISE, .subdevice = PCI_ANY_ID},
	{0}
};

/**
 * i2o_pci_free - Frees the DMA memory for the I2O controller
 * @c: I2O controller to free
 *
 * Remove all allocated DMA memory and unmap the memory I/O regions. If
 * MTRR is enabled, the MTRR region is removed as well.
 */
static void i2o_pci_free(struct i2o_controller *c)
{
	struct device *dev;

	dev = &c->pdev->dev;

	i2o_dma_free(dev, &c->out_queue);
	i2o_dma_free(dev, &c->status_block);
	kfree(c->lct);
	i2o_dma_free(dev, &c->dlct);
	i2o_dma_free(dev, &c->hrt);
	i2o_dma_free(dev, &c->status);

	if (c->raptor && c->in_queue.virt)
		iounmap(c->in_queue.virt);

	if (c->base.virt)
		iounmap(c->base.virt);

	pci_release_regions(c->pdev);
}

/**
 * i2o_pci_alloc - Allocate DMA memory, map IO memory for I2O controller
 * @c: I2O controller
 *
 * Allocate DMA memory for a PCI (or in theory AGP) I2O controller. All
 * IO mappings are also done here. If MTRR is enabled, the memory
 * regions are also added here.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int __devinit i2o_pci_alloc(struct i2o_controller *c)
{
	struct pci_dev *pdev = c->pdev;
	struct device *dev = &pdev->dev;
	int i;

	if (pci_request_regions(pdev, OSM_DESCRIPTION)) {
		printk(KERN_ERR "%s: device already claimed\n", c->name);
		return -ENODEV;
	}

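	/*
	 * Scan the six standard BARs. The first memory BAR becomes the
	 * register window (c->base); controllers flagged as "raptor" expose
	 * a second memory BAR that is used as the inbound message queue.
	 */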
	for (i = 0; i < 6; i++) {
		/* Skip I/O spaces */
		if (!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
			if (!c->base.phys) {
				c->base.phys = pci_resource_start(pdev, i);
				c->base.len = pci_resource_len(pdev, i);

				/*
				 * If we know what card it is, set the size
				 * correctly. Code is taken from dpt_i2o.c
				 */
				if (pdev->device == 0xa501) {
					if (pdev->subsystem_device >= 0xc032 &&
					    pdev->subsystem_device <= 0xc03b) {
						if (c->base.len > 0x400000)
							c->base.len = 0x400000;
					} else {
						if (c->base.len > 0x100000)
							c->base.len = 0x100000;
					}
				}
				if (!c->raptor)
					break;
			} else {
				c->in_queue.phys = pci_resource_start(pdev, i);
				c->in_queue.len = pci_resource_len(pdev, i);
				break;
			}
		}
	}

	if (i == 6) {
		printk(KERN_ERR "%s: I2O controller has no memory regions"
		       " defined.\n", c->name);
		i2o_pci_free(c);
		return -EINVAL;
	}

	/* Map the I2O controller */
	if (c->raptor) {
		printk(KERN_INFO "%s: PCI I2O controller\n", c->name);
		printk(KERN_INFO " BAR0 at 0x%08lX size=%ld\n",
		       (unsigned long)c->base.phys, (unsigned long)c->base.len);
		printk(KERN_INFO " BAR1 at 0x%08lX size=%ld\n",
		       (unsigned long)c->in_queue.phys,
		       (unsigned long)c->in_queue.len);
	} else
		printk(KERN_INFO "%s: PCI I2O controller at %08lX size=%ld\n",
		       c->name, (unsigned long)c->base.phys,
		       (unsigned long)c->base.len);

	c->base.virt = ioremap_nocache(c->base.phys, c->base.len);
	if (!c->base.virt) {
		printk(KERN_ERR "%s: Unable to map controller.\n", c->name);
		i2o_pci_free(c);
		return -ENOMEM;
	}

	if (c->raptor) {
		c->in_queue.virt =
		    ioremap_nocache(c->in_queue.phys, c->in_queue.len);
		if (!c->in_queue.virt) {
			printk(KERN_ERR "%s: Unable to map controller.\n",
			       c->name);
			i2o_pci_free(c);
			return -ENOMEM;
		}
	} else
		c->in_queue = c->base;

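	/*
	 * The message unit registers (interrupt status and mask, inbound and
	 * outbound FIFO ports) sit at fixed offsets from the start of the
	 * mapped register window.
	 */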
	c->irq_status = c->base.virt + I2O_IRQ_STATUS;
	c->irq_mask = c->base.virt + I2O_IRQ_MASK;
	c->in_port = c->base.virt + I2O_IN_PORT;
	c->out_port = c->base.virt + I2O_OUT_PORT;

	/* Motorola/Freescale chip does not follow spec */
	if (pdev->vendor == PCI_VENDOR_ID_MOTOROLA && pdev->device == 0x18c0) {
		/* Check if CPU is enabled */
		if (be32_to_cpu(readl(c->base.virt + 0x10000)) & 0x10000000) {
			printk(KERN_INFO "%s: MPC82XX needs CPU running to "
			       "service I2O.\n", c->name);
			i2o_pci_free(c);
			return -ENODEV;
		} else {
			c->irq_status += I2O_MOTOROLA_PORT_OFFSET;
			c->irq_mask += I2O_MOTOROLA_PORT_OFFSET;
			c->in_port += I2O_MOTOROLA_PORT_OFFSET;
			c->out_port += I2O_MOTOROLA_PORT_OFFSET;
			printk(KERN_INFO "%s: MPC82XX workarounds activated.\n",
			       c->name);
		}
	}

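	/*
	 * Allocate the shared DMA buffers the IOP fills in: the 8-byte
	 * status word, the hardware resource table (HRT), the 8 KB buffer
	 * for logical configuration table reads (DLCT) and the status block.
	 */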
	if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) {
		i2o_pci_free(c);
		return -ENOMEM;
	}

	if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt), GFP_KERNEL)) {
		i2o_pci_free(c);
		return -ENOMEM;
	}

	if (i2o_dma_alloc(dev, &c->dlct, 8192, GFP_KERNEL)) {
		i2o_pci_free(c);
		return -ENOMEM;
	}

	if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block),
			  GFP_KERNEL)) {
		i2o_pci_free(c);
		return -ENOMEM;
	}

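	/*
	 * The outbound queue holds I2O_MAX_OUTBOUND_MSG_FRAMES reply frames
	 * of I2O_OUTBOUND_MSG_FRAME_SIZE 32-bit words each.
	 */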
	if (i2o_dma_alloc(dev, &c->out_queue,
			  I2O_MAX_OUTBOUND_MSG_FRAMES *
			  I2O_OUTBOUND_MSG_FRAME_SIZE * sizeof(u32),
			  GFP_KERNEL)) {
		i2o_pci_free(c);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, c);

	return 0;
}

/**
 * i2o_pci_interrupt - Interrupt handler for I2O controller
 * @irq: interrupt line
 * @dev_id: pointer to the I2O controller
 * @r: pointer to registers
 *
 * Handle an interrupt from a PCI based I2O controller. This turns out
 * to be rather simple. We keep the controller pointer in the cookie.
 */
static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
{
	struct i2o_controller *c = dev_id;
	u32 m;
	irqreturn_t rc = IRQ_NONE;

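	/*
	 * Drain the outbound post queue: each read of the out port returns
	 * the offset of a reply message frame, or I2O_QUEUE_EMPTY once the
	 * queue has been emptied.
	 */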
	while (readl(c->irq_status) & I2O_IRQ_OUTBOUND_POST) {
		m = readl(c->out_port);
		if (m == I2O_QUEUE_EMPTY) {
			/*
			 * Old 960 steppings had a bug in the I2O unit that
			 * caused the queue to appear empty when it wasn't.
			 */
			m = readl(c->out_port);
			if (unlikely(m == I2O_QUEUE_EMPTY))
				break;
		}

		/* dispatch it */
		if (i2o_driver_dispatch(c, m))
			/* flush it if result != 0 */
			i2o_flush_reply(c, m);

		rc = IRQ_HANDLED;
	}

	return rc;
}

/**
 * i2o_pci_irq_enable - Allocate interrupt for I2O controller
 * @c: I2O controller
 *
 * Allocate an interrupt for the I2O controller, and activate interrupts
 * on the I2O controller.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_pci_irq_enable(struct i2o_controller *c)
{
	struct pci_dev *pdev = c->pdev;
	int rc;

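	/* Mask all controller interrupts until the handler is installed. */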
	writel(0xffffffff, c->irq_mask);

	if (pdev->irq) {
		rc = request_irq(pdev->irq, i2o_pci_interrupt, SA_SHIRQ,
				 c->name, c);
		if (rc < 0) {
			printk(KERN_ERR "%s: unable to allocate interrupt %d.\n",
			       c->name, pdev->irq);
			return rc;
		}
	}

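	/* Unmask controller interrupts now that the handler is in place. */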
	writel(0x00000000, c->irq_mask);

	printk(KERN_INFO "%s: Installed at IRQ %d\n", c->name, pdev->irq);

	return 0;
}

/**
 * i2o_pci_irq_disable - Free interrupt for I2O controller
 * @c: I2O controller
 *
 * Disable interrupts on the I2O controller and then free the interrupt.
 */
static void i2o_pci_irq_disable(struct i2o_controller *c)
{
	writel(0xffffffff, c->irq_mask);

	if (c->pdev->irq > 0)
		free_irq(c->pdev->irq, c);
}

/**
 * i2o_pci_probe - Probe the PCI device for an I2O controller
 * @pdev: PCI device to test
 * @id: id which matched with the PCI device id table
 *
 * Probe the PCI device for any device which is a member of the
 * Intelligent I/O (I2O) class or an Adaptec Zero Channel Controller. We
 * attempt to set up each such device and register it with the core.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int __devinit i2o_pci_probe(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct i2o_controller *c;
	int rc;
	struct pci_dev *i960 = NULL;
	int enabled = pdev->is_enabled;

	printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");

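	/*
	 * The low byte of the PCI class code is the I2O programming
	 * interface; only message units covered by the I2O 1.5 spec are
	 * supported here.
	 */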
	if ((pdev->class & 0xff) > 1) {
		printk(KERN_WARNING "i2o: %s does not support I2O 1.5 "
		       "(skipping).\n", pci_name(pdev));
		return -ENODEV;
	}

	if (!enabled)
		if ((rc = pci_enable_device(pdev))) {
			printk(KERN_WARNING "i2o: couldn't enable device %s\n",
			       pci_name(pdev));
			return rc;
		}

	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
		printk(KERN_WARNING "i2o: no suitable DMA found for %s\n",
		       pci_name(pdev));
		rc = -ENODEV;
		goto disable;
	}

	pci_set_master(pdev);

	c = i2o_iop_alloc();
	if (IS_ERR(c)) {
		printk(KERN_ERR "i2o: couldn't allocate memory for %s\n",
		       pci_name(pdev));
		rc = PTR_ERR(c);
		goto disable;
	} else
		printk(KERN_INFO "%s: controller found (%s)\n", c->name,
		       pci_name(pdev));

	c->pdev = pdev;
	c->device.parent = &pdev->dev;

	/* Cards that fall apart if you hit them with large I/O loads... */
	if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) {
		c->short_req = 1;
		printk(KERN_INFO "%s: Symbios FC920 workarounds activated.\n",
		       c->name);
	}

	if (pdev->subsystem_vendor == PCI_VENDOR_ID_PROMISE) {
		/*
		 * Expose the chip behind the i960 for initialization,
		 * or it will fail.
		 */
		i960 = pci_find_slot(c->pdev->bus->number,
				     PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0));

		if (i960)
			pci_write_config_word(i960, 0x42, 0);

		c->promise = 1;
		c->limit_sectors = 1;
	}

	if (pdev->subsystem_vendor == PCI_VENDOR_ID_DPT)
		c->adaptec = 1;

	/* Cards that go bananas if you quiesce them before you reset them. */
	if (pdev->vendor == PCI_VENDOR_ID_DPT) {
		c->no_quiesce = 1;
		if (pdev->device == 0xa511)
			c->raptor = 1;

		if (pdev->subsystem_device == 0xc05a) {
			c->limit_sectors = 1;
			printk(KERN_INFO
			       "%s: limit sectors per request to %d\n", c->name,
			       I2O_MAX_SECTORS_LIMITED);
		}
#ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
		if (sizeof(dma_addr_t) > 4) {
			if (pci_set_dma_mask(pdev, DMA_64BIT_MASK))
				printk(KERN_INFO "%s: 64-bit DMA unavailable\n",
				       c->name);
			else {
				c->pae_support = 1;
				printk(KERN_INFO "%s: using 64-bit DMA\n",
				       c->name);
			}
		}
#endif
	}

	if ((rc = i2o_pci_alloc(c))) {
		printk(KERN_ERR "%s: DMA / IO allocation for I2O controller "
		       "failed\n", c->name);
		goto free_controller;
	}

	if ((rc = i2o_pci_irq_enable(c))) {
		printk(KERN_ERR "%s: unable to enable interrupts for I2O "
		       "controller\n", c->name);
		goto free_pci;
	}

	if ((rc = i2o_iop_add(c)))
		goto uninstall;

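	/*
	 * Undo the configuration write above that exposed the controller
	 * behind the i960 bridge during setup.
	 */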
	if (i960)
		pci_write_config_word(i960, 0x42, 0x03ff);

	return 0;

uninstall:
	i2o_pci_irq_disable(c);

free_pci:
	i2o_pci_free(c);

free_controller:
	i2o_iop_free(c);

disable:
	if (!enabled)
		pci_disable_device(pdev);

	return rc;
}

/**
 * i2o_pci_remove - Removes an I2O controller from the system
 * @pdev: I2O controller which should be removed
 *
 * Reset the I2O controller, disable interrupts and remove all allocated
 * resources.
 */
static void __devexit i2o_pci_remove(struct pci_dev *pdev)
{
	struct i2o_controller *c;
	c = pci_get_drvdata(pdev);

	i2o_iop_remove(c);
	i2o_pci_irq_disable(c);
	i2o_pci_free(c);

	pci_disable_device(pdev);

	printk(KERN_INFO "%s: Controller removed.\n", c->name);

	put_device(&c->device);
}

/* PCI driver for I2O controller */
static struct pci_driver i2o_pci_driver = {
	.name = "PCI_I2O",
	.id_table = i2o_pci_ids,
	.probe = i2o_pci_probe,
	.remove = __devexit_p(i2o_pci_remove),
};

/**
 * i2o_pci_init - registers I2O PCI driver in PCI subsystem
 *
 * Returns 0 on success or negative error code on failure.
 */
int __init i2o_pci_init(void)
{
	return pci_register_driver(&i2o_pci_driver);
}

/**
 * i2o_pci_exit - unregisters I2O PCI driver from PCI subsystem
 */
void __exit i2o_pci_exit(void)
{
	pci_unregister_driver(&i2o_pci_driver);
}

MODULE_DEVICE_TABLE(pci, i2o_pci_ids);