drivers/staging/vme/bridges/vme_ca91cx42.c
1 /*
2 * Support for the Tundra Universe I/II VME-PCI Bridge Chips
3 *
4 * Author: Martyn Welch <martyn.welch@gefanuc.com>
5 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * Derived from ca91c042.c by Michael Wyrick
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18 #include <linux/module.h>
19 #include <linux/mm.h>
20 #include <linux/types.h>
21 #include <linux/errno.h>
22 #include <linux/pci.h>
23 #include <linux/dma-mapping.h>
24 #include <linux/poll.h>
25 #include <linux/interrupt.h>
26 #include <linux/spinlock.h>
27 #include <linux/sched.h>
28 #include <asm/time.h>
29 #include <asm/io.h>
30 #include <asm/uaccess.h>
31
32 #include "../vme.h"
33 #include "../vme_bridge.h"
34 #include "vme_ca91cx42.h"
35
36 static int __init ca91cx42_init(void);
37 static int ca91cx42_probe(struct pci_dev *, const struct pci_device_id *);
38 static void ca91cx42_remove(struct pci_dev *);
39 static void __exit ca91cx42_exit(void);
40
41 struct vme_bridge *ca91cx42_bridge;
42 wait_queue_head_t dma_queue;
43 wait_queue_head_t iack_queue;
44 wait_queue_head_t lm_queue;
45 wait_queue_head_t mbox_queue;
46
47 void (*lm_callback[4])(int); /* Called in interrupt handler, be careful! */
48 void *crcsr_kernel;
49 dma_addr_t crcsr_bus;
50
51 struct mutex vme_rmw; /* Only one RMW cycle at a time */
52 struct mutex vme_int; /*
53 * Only one VME interrupt can be
54 * generated at a time, provide locking
55 */
56
57 static char driver_name[] = "vme_ca91cx42";
58
59 static struct pci_device_id ca91cx42_ids[] = {
60 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_CA91C142) },
61 { },
62 };
63
64 static struct pci_driver ca91cx42_driver = {
65 .name = driver_name,
66 .id_table = ca91cx42_ids,
67 .probe = ca91cx42_probe,
68 .remove = ca91cx42_remove,
69 };
70
71 static u32 ca91cx42_DMA_irqhandler(void)
72 {
73 wake_up(&dma_queue);
74
75 return CA91CX42_LINT_DMA;
76 }
77
78 static u32 ca91cx42_LM_irqhandler(u32 stat)
79 {
80 int i;
81 u32 serviced = 0;
82
83 for (i = 0; i < 4; i++) {
84 if (stat & CA91CX42_LINT_LM[i]) {
85 /* We only enable interrupts if the callback is set */
86 lm_callback[i](i);
87 serviced |= CA91CX42_LINT_LM[i];
88 }
89 }
90
91 return serviced;
92 }
93
94 /* XXX This needs to be split into 4 queues */
95 static u32 ca91cx42_MB_irqhandler(int mbox_mask)
96 {
97 wake_up(&mbox_queue);
98
99 return CA91CX42_LINT_MBOX;
100 }
101
102 static u32 ca91cx42_IACK_irqhandler(void)
103 {
104 wake_up(&iack_queue);
105
106 return CA91CX42_LINT_SW_IACK;
107 }
108
109 #if 0
110 int ca91cx42_bus_error_chk(int clrflag)
111 {
112 int tmp;
113 tmp = ioread32(ca91cx42_bridge->base + PCI_COMMAND);
114 if (tmp & 0x08000000) { /* S_TA is Set */
115 if (clrflag)
116 iowrite32(tmp | 0x08000000,
117 ca91cx42_bridge->base + PCI_COMMAND);
118 return 1;
119 }
120 return 0;
121 }
122 #endif
123
124 static u32 ca91cx42_VERR_irqhandler(void)
125 {
126 int val;
127
128 val = ioread32(ca91cx42_bridge->base + DGCS);
129
130 if (!(val & 0x00000800)) {
131 printk(KERN_ERR "ca91c042: ca91cx42_VERR_irqhandler DMA Read "
132 "Error DGCS=%08X\n", val);
133 }
134
135 return CA91CX42_LINT_VERR;
136 }
137
138 static u32 ca91cx42_LERR_irqhandler(void)
139 {
140 int val;
141
142 val = ioread32(ca91cx42_bridge->base + DGCS);
143
144 if (!(val & 0x00000800)) {
145 printk(KERN_ERR "ca91c042: ca91cx42_LERR_irqhandler DMA Read "
146 "Error DGCS=%08X\n", val);
147
148 }
149
150 return CA91CX42_LINT_LERR;
151 }
152
153
154 static u32 ca91cx42_VIRQ_irqhandler(int stat)
155 {
156 int vec, i, serviced = 0;
157
158 for (i = 7; i > 0; i--) {
159 if (stat & (1 << i)) {
160 vec = ioread32(ca91cx42_bridge->base +
161 CA91CX42_V_STATID[i]) & 0xff;
162
163 vme_irq_handler(ca91cx42_bridge, i, vec);
164
165 serviced |= (1 << i);
166 }
167 }
168
169 return serviced;
170 }
171
172 static irqreturn_t ca91cx42_irqhandler(int irq, void *dev_id)
173 {
174 u32 stat, enable, serviced = 0;
175
176 if (dev_id != ca91cx42_bridge->base)
177 return IRQ_NONE;
178
179 enable = ioread32(ca91cx42_bridge->base + LINT_EN);
180 stat = ioread32(ca91cx42_bridge->base + LINT_STAT);
181
182 /* Only look at unmasked interrupts */
183 stat &= enable;
184
185 if (unlikely(!stat))
186 return IRQ_NONE;
187
188 if (stat & CA91CX42_LINT_DMA)
189 serviced |= ca91cx42_DMA_irqhandler();
190 if (stat & (CA91CX42_LINT_LM0 | CA91CX42_LINT_LM1 | CA91CX42_LINT_LM2 |
191 CA91CX42_LINT_LM3))
192 serviced |= ca91cx42_LM_irqhandler(stat);
193 if (stat & CA91CX42_LINT_MBOX)
194 serviced |= ca91cx42_MB_irqhandler(stat);
195 if (stat & CA91CX42_LINT_SW_IACK)
196 serviced |= ca91cx42_IACK_irqhandler();
197 if (stat & CA91CX42_LINT_VERR)
198 serviced |= ca91cx42_VERR_irqhandler();
199 if (stat & CA91CX42_LINT_LERR)
200 serviced |= ca91cx42_LERR_irqhandler();
201 if (stat & (CA91CX42_LINT_VIRQ1 | CA91CX42_LINT_VIRQ2 |
202 CA91CX42_LINT_VIRQ3 | CA91CX42_LINT_VIRQ4 |
203 CA91CX42_LINT_VIRQ5 | CA91CX42_LINT_VIRQ6 |
204 CA91CX42_LINT_VIRQ7))
205 serviced |= ca91cx42_VIRQ_irqhandler(stat);
206
207 /* Clear serviced interrupts */
208 iowrite32(stat, ca91cx42_bridge->base + LINT_STAT);
209
210 return IRQ_HANDLED;
211 }
212
213 static int ca91cx42_irq_init(struct vme_bridge *bridge)
214 {
215 int result, tmp;
216 struct pci_dev *pdev;
217
218 /* Need pdev */
219 pdev = container_of(bridge->parent, struct pci_dev, dev);
220
221 /* Initialise list for VME bus errors */
222 INIT_LIST_HEAD(&(bridge->vme_errors));
223
224 mutex_init(&(bridge->irq_mtx));
225
226 /* Disable interrupts from PCI to VME */
227 iowrite32(0, bridge->base + VINT_EN);
228
229 /* Disable PCI interrupts */
230 iowrite32(0, bridge->base + LINT_EN);
231 /* Clear Any Pending PCI Interrupts */
232 iowrite32(0x00FFFFFF, bridge->base + LINT_STAT);
233
234 result = request_irq(pdev->irq, ca91cx42_irqhandler, IRQF_SHARED,
235 driver_name, pdev);
236 if (result) {
237 dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
238 pdev->irq);
239 return result;
240 }
241
242 /* Ensure all interrupts are mapped to PCI Interrupt 0 */
243 iowrite32(0, bridge->base + LINT_MAP0);
244 iowrite32(0, bridge->base + LINT_MAP1);
245 iowrite32(0, bridge->base + LINT_MAP2);
246
247 	/* Enable DMA, mailbox, IACK, VERR & LERR interrupts */
248 tmp = CA91CX42_LINT_MBOX3 | CA91CX42_LINT_MBOX2 | CA91CX42_LINT_MBOX1 |
249 CA91CX42_LINT_MBOX0 | CA91CX42_LINT_SW_IACK |
250 CA91CX42_LINT_VERR | CA91CX42_LINT_LERR | CA91CX42_LINT_DMA;
251
252 iowrite32(tmp, bridge->base + LINT_EN);
253
254 return 0;
255 }
256
257 static void ca91cx42_irq_exit(struct pci_dev *pdev)
258 {
259 /* Disable interrupts from PCI to VME */
260 iowrite32(0, ca91cx42_bridge->base + VINT_EN);
261
262 /* Disable PCI interrupts */
263 iowrite32(0, ca91cx42_bridge->base + LINT_EN);
264 /* Clear Any Pending PCI Interrupts */
265 iowrite32(0x00FFFFFF, ca91cx42_bridge->base + LINT_STAT);
266
267 free_irq(pdev->irq, pdev);
268 }
269
270 /*
271  * Set up a VME interrupt
272 */
273 void ca91cx42_irq_set(int level, int state, int sync)
274
275 {
276 struct pci_dev *pdev;
277 u32 tmp;
278
279 /* Enable IRQ level */
280 tmp = ioread32(ca91cx42_bridge->base + LINT_EN);
281
282 if (state == 0)
283 tmp &= ~CA91CX42_LINT_VIRQ[level];
284 else
285 tmp |= CA91CX42_LINT_VIRQ[level];
286
287 iowrite32(tmp, ca91cx42_bridge->base + LINT_EN);
288
289 if ((state == 0) && (sync != 0)) {
290 pdev = container_of(ca91cx42_bridge->parent, struct pci_dev,
291 dev);
292
293 synchronize_irq(pdev->irq);
294 }
295 }
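
/*
 * Usage sketch (illustrative, not called from this file): enable a VME
 * IRQ level before use and, on teardown, disable it with sync != 0 so the
 * call only returns once any in-flight handler has completed (this is the
 * synchronize_irq() guarantee above):
 *
 *	ca91cx42_irq_set(3, 1, 0);
 *	...
 *	ca91cx42_irq_set(3, 0, 1);
 */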
296
297 int ca91cx42_irq_generate(int level, int statid)
298 {
299 u32 tmp;
300
301 /* Universe can only generate even vectors */
302 if (statid & 1)
303 return -EINVAL;
304
305 mutex_lock(&(vme_int));
306
307 tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
308
309 /* Set Status/ID */
310 iowrite32(statid << 24, ca91cx42_bridge->base + STATID);
311
312 /* Assert VMEbus IRQ */
313 tmp = tmp | (1 << (level + 24));
314 iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);
315
316 /* Wait for IACK */
317 wait_event_interruptible(iack_queue, 0);
318
319 /* Return interrupt to low state */
320 tmp = ioread32(ca91cx42_bridge->base + VINT_EN);
321 tmp = tmp & ~(1 << (level + 24));
322 iowrite32(tmp, ca91cx42_bridge->base + VINT_EN);
323
324 mutex_unlock(&(vme_int));
325
326 return 0;
327 }
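
/*
 * Usage sketch (illustrative): the Universe can only present even
 * status/ID vectors, so a caller must pass an even statid:
 *
 *	if (ca91cx42_irq_generate(3, 0xaa))
 *		printk(KERN_ERR "Failed to generate VME IRQ\n");
 *
 * 0xaa is an arbitrary even example vector; an odd value such as 0xab
 * returns -EINVAL before the hardware is touched.
 */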
328
329 int ca91cx42_slave_set(struct vme_slave_resource *image, int enabled,
330 unsigned long long vme_base, unsigned long long size,
331 dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
332 {
333 unsigned int i, addr = 0, granularity = 0;
334 unsigned int temp_ctl = 0;
335 unsigned int vme_bound, pci_offset;
336
337 i = image->number;
338
339 switch (aspace) {
340 case VME_A16:
341 addr |= CA91CX42_VSI_CTL_VAS_A16;
342 break;
343 case VME_A24:
344 addr |= CA91CX42_VSI_CTL_VAS_A24;
345 break;
346 case VME_A32:
347 addr |= CA91CX42_VSI_CTL_VAS_A32;
348 break;
349 case VME_USER1:
350 addr |= CA91CX42_VSI_CTL_VAS_USER1;
351 break;
352 case VME_USER2:
353 addr |= CA91CX42_VSI_CTL_VAS_USER2;
354 break;
355 case VME_A64:
356 case VME_CRCSR:
357 case VME_USER3:
358 case VME_USER4:
359 default:
360 printk(KERN_ERR "Invalid address space\n");
361 return -EINVAL;
362 break;
363 }
364
365 	if ((i == 0) || (i == 4))
366 		granularity = 0x1000;
367 	else
368 		granularity = 0x10000;
369 
370 	/*
371 	 * Bound address is a valid address for the window, adjust
372 	 * accordingly
373 	 */
374 	vme_bound = vme_base + size - granularity;
375 	pci_offset = pci_base - vme_base;
376 
377 	/* XXX Need to check that vme_base, vme_bound and pci_offset aren't
378 	 * too big for registers
379 	 */
380
381 if (vme_base & (granularity - 1)) {
382 printk(KERN_ERR "Invalid VME base alignment\n");
383 return -EINVAL;
384 }
385 if (vme_bound & (granularity - 1)) {
386 printk(KERN_ERR "Invalid VME bound alignment\n");
387 return -EINVAL;
388 }
389 if (pci_offset & (granularity - 1)) {
390 printk(KERN_ERR "Invalid PCI Offset alignment\n");
391 return -EINVAL;
392 }
393
394 /* Disable while we are mucking around */
395 temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
396 temp_ctl &= ~CA91CX42_VSI_CTL_EN;
397 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
398
399 /* Setup mapping */
400 iowrite32(vme_base, ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
401 iowrite32(vme_bound, ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
402 iowrite32(pci_offset, ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);
403
404 /* XXX Prefetch stuff currently unsupported */
405 #if 0
406 if (vmeIn->wrPostEnable)
407 temp_ctl |= CA91CX42_VSI_CTL_PWEN;
408 if (vmeIn->prefetchEnable)
409 temp_ctl |= CA91CX42_VSI_CTL_PREN;
410 if (vmeIn->rmwLock)
411 temp_ctl |= CA91CX42_VSI_CTL_LLRMW;
412 if (vmeIn->data64BitCapable)
413 temp_ctl |= CA91CX42_VSI_CTL_LD64EN;
414 #endif
415
416 /* Setup address space */
417 temp_ctl &= ~CA91CX42_VSI_CTL_VAS_M;
418 temp_ctl |= addr;
419
420 /* Setup cycle types */
421 temp_ctl &= ~(CA91CX42_VSI_CTL_PGM_M | CA91CX42_VSI_CTL_SUPER_M);
422 if (cycle & VME_SUPER)
423 temp_ctl |= CA91CX42_VSI_CTL_SUPER_SUPR;
424 if (cycle & VME_USER)
425 temp_ctl |= CA91CX42_VSI_CTL_SUPER_NPRIV;
426 if (cycle & VME_PROG)
427 temp_ctl |= CA91CX42_VSI_CTL_PGM_PGM;
428 if (cycle & VME_DATA)
429 temp_ctl |= CA91CX42_VSI_CTL_PGM_DATA;
430
431 /* Write ctl reg without enable */
432 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
433
434 if (enabled)
435 temp_ctl |= CA91CX42_VSI_CTL_EN;
436
437 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
438
439 return 0;
440 }
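
/*
 * Worked example (illustrative): slave images 0 and 4 have 4 KB
 * granularity, the rest 64 KB. Exposing 1 MB of A24 space at 0x100000
 * through image 1 therefore needs vme_base, size and pci_base all 64 KB
 * aligned:
 *
 *	ca91cx42_slave_set(image, 1, 0x100000, 0x100000, pci_base,
 *		VME_A24, VME_USER | VME_DATA);
 *
 * which programs VSI_BS = 0x100000, VSI_BD = 0x1f0000 (base + size -
 * granularity) and VSI_TO = pci_base - 0x100000.
 */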
441
442 int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
443 unsigned long long *vme_base, unsigned long long *size,
444 dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
445 {
446 unsigned int i, granularity = 0, ctl = 0;
447 unsigned long long vme_bound, pci_offset;
448
449 i = image->number;
450
451 if ((i == 0) || (i == 4))
452 granularity = 0x1000;
453 else
454 granularity = 0x10000;
455
456 /* Read Registers */
457 ctl = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_CTL[i]);
458
459 *vme_base = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BS[i]);
460 vme_bound = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_BD[i]);
461 pci_offset = ioread32(ca91cx42_bridge->base + CA91CX42_VSI_TO[i]);
462
463 	*pci_base = (dma_addr_t)*vme_base + pci_offset;
464 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
465
466 *enabled = 0;
467 *aspace = 0;
468 *cycle = 0;
469
470 if (ctl & CA91CX42_VSI_CTL_EN)
471 *enabled = 1;
472
473 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A16)
474 *aspace = VME_A16;
475 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A24)
476 *aspace = VME_A24;
477 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_A32)
478 *aspace = VME_A32;
479 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER1)
480 *aspace = VME_USER1;
481 if ((ctl & CA91CX42_VSI_CTL_VAS_M) == CA91CX42_VSI_CTL_VAS_USER2)
482 *aspace = VME_USER2;
483
484 if (ctl & CA91CX42_VSI_CTL_SUPER_SUPR)
485 *cycle |= VME_SUPER;
486 if (ctl & CA91CX42_VSI_CTL_SUPER_NPRIV)
487 *cycle |= VME_USER;
488 if (ctl & CA91CX42_VSI_CTL_PGM_PGM)
489 *cycle |= VME_PROG;
490 if (ctl & CA91CX42_VSI_CTL_PGM_DATA)
491 *cycle |= VME_DATA;
492
493 return 0;
494 }
495
496 /*
497 * Allocate and map PCI Resource
498 */
499 static int ca91cx42_alloc_resource(struct vme_master_resource *image,
500 unsigned long long size)
501 {
502 unsigned long long existing_size;
503 int retval = 0;
504 struct pci_dev *pdev;
505
506 /* Find pci_dev container of dev */
507 if (ca91cx42_bridge->parent == NULL) {
508 printk(KERN_ERR "Dev entry NULL\n");
509 return -EINVAL;
510 }
511 pdev = container_of(ca91cx42_bridge->parent, struct pci_dev, dev);
512
513 existing_size = (unsigned long long)(image->pci_resource.end -
514 image->pci_resource.start);
515
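	/*
	 * Note: struct resource end addresses are inclusive, so an existing
	 * window of 'size' bytes satisfies end - start == size - 1.
	 */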
516 /* If the existing size is OK, return */
517 if (existing_size == (size - 1))
518 return 0;
519
520 if (existing_size != 0) {
521 iounmap(image->kern_base);
522 image->kern_base = NULL;
523 if (image->pci_resource.name != NULL)
524 kfree(image->pci_resource.name);
525 release_resource(&(image->pci_resource));
526 memset(&(image->pci_resource), 0, sizeof(struct resource));
527 }
528
529 if (image->pci_resource.name == NULL) {
530 image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
531 if (image->pci_resource.name == NULL) {
532 printk(KERN_ERR "Unable to allocate memory for resource"
533 " name\n");
534 retval = -ENOMEM;
535 goto err_name;
536 }
537 }
538
539 sprintf((char *)image->pci_resource.name, "%s.%d",
540 ca91cx42_bridge->name, image->number);
541
542 image->pci_resource.start = 0;
543 image->pci_resource.end = (unsigned long)size;
544 image->pci_resource.flags = IORESOURCE_MEM;
545
546 retval = pci_bus_alloc_resource(pdev->bus,
547 &(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
548 0, NULL, NULL);
549 if (retval) {
550 printk(KERN_ERR "Failed to allocate mem resource for "
551 "window %d size 0x%lx start 0x%lx\n",
552 image->number, (unsigned long)size,
553 (unsigned long)image->pci_resource.start);
554 goto err_resource;
555 }
556
557 image->kern_base = ioremap_nocache(
558 image->pci_resource.start, size);
559 if (image->kern_base == NULL) {
560 printk(KERN_ERR "Failed to remap resource\n");
561 retval = -ENOMEM;
562 goto err_remap;
563 }
564
565 return 0;
566
569 err_remap:
570 release_resource(&(image->pci_resource));
571 err_resource:
572 kfree(image->pci_resource.name);
573 memset(&(image->pci_resource), 0, sizeof(struct resource));
574 err_name:
575 return retval;
576 }
577
578 /*
579  * Free and unmap PCI Resource
580  */
581 static void ca91cx42_free_resource(struct vme_master_resource *image)
582 {
583 iounmap(image->kern_base);
584 image->kern_base = NULL;
585 release_resource(&(image->pci_resource));
586 kfree(image->pci_resource.name);
587 memset(&(image->pci_resource), 0, sizeof(struct resource));
588 }
589
590
591 int ca91cx42_master_set(struct vme_master_resource *image, int enabled,
592 unsigned long long vme_base, unsigned long long size,
593 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
594 {
595 int retval = 0;
596 unsigned int i;
597 unsigned int temp_ctl = 0;
598 unsigned long long pci_bound, vme_offset, pci_base;
599
600 /* Verify input data */
601 if (vme_base & 0xFFF) {
602 printk(KERN_ERR "Invalid VME Window alignment\n");
603 retval = -EINVAL;
604 goto err_window;
605 }
606 if (size & 0xFFF) {
607 		printk(KERN_ERR "Invalid VME Window size alignment\n");
608 retval = -EINVAL;
609 goto err_window;
610 }
611
612 spin_lock(&(image->lock));
613
614 /* XXX We should do this much later, so that we can exit without
615 * needing to redo the mapping...
616 */
617 /*
618 * Let's allocate the resource here rather than further up the stack as
619 	 * it avoids pushing loads of bus-dependent stuff up the stack
620 */
621 retval = ca91cx42_alloc_resource(image, size);
622 if (retval) {
623 spin_unlock(&(image->lock));
624 		printk(KERN_ERR "Unable to allocate PCI resource for "
625 			"window\n");
626 		goto err_res;
628 }
629
630 pci_base = (unsigned long long)image->pci_resource.start;
631
632 /*
633 * Bound address is a valid address for the window, adjust
634 * according to window granularity.
635 */
636 pci_bound = pci_base + (size - 0x1000);
637 vme_offset = vme_base - pci_base;
638
639 i = image->number;
640
641 /* Disable while we are mucking around */
642 temp_ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
643 temp_ctl &= ~CA91CX42_LSI_CTL_EN;
644 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
645
646 /* XXX Prefetch stuff currently unsupported */
647 #if 0
648 if (vmeOut->wrPostEnable)
649 temp_ctl |= 0x40000000;
650 #endif
651
652 /* Setup cycle types */
653 temp_ctl &= ~CA91CX42_LSI_CTL_VCT_M;
654 if (cycle & VME_BLT)
655 temp_ctl |= CA91CX42_LSI_CTL_VCT_BLT;
656 if (cycle & VME_MBLT)
657 temp_ctl |= CA91CX42_LSI_CTL_VCT_MBLT;
658
659 /* Setup data width */
660 temp_ctl &= ~CA91CX42_LSI_CTL_VDW_M;
661 switch (dwidth) {
662 case VME_D8:
663 temp_ctl |= CA91CX42_LSI_CTL_VDW_D8;
664 break;
665 case VME_D16:
666 temp_ctl |= CA91CX42_LSI_CTL_VDW_D16;
667 break;
668 case VME_D32:
669 temp_ctl |= CA91CX42_LSI_CTL_VDW_D32;
670 break;
671 case VME_D64:
672 temp_ctl |= CA91CX42_LSI_CTL_VDW_D64;
673 break;
674 default:
675 spin_unlock(&(image->lock));
676 printk(KERN_ERR "Invalid data width\n");
677 retval = -EINVAL;
678 goto err_dwidth;
679 break;
680 }
681
682 /* Setup address space */
683 temp_ctl &= ~CA91CX42_LSI_CTL_VAS_M;
684 switch (aspace) {
685 case VME_A16:
686 temp_ctl |= CA91CX42_LSI_CTL_VAS_A16;
687 break;
688 case VME_A24:
689 temp_ctl |= CA91CX42_LSI_CTL_VAS_A24;
690 break;
691 case VME_A32:
692 temp_ctl |= CA91CX42_LSI_CTL_VAS_A32;
693 break;
694 case VME_CRCSR:
695 temp_ctl |= CA91CX42_LSI_CTL_VAS_CRCSR;
696 break;
697 case VME_USER1:
698 temp_ctl |= CA91CX42_LSI_CTL_VAS_USER1;
699 break;
700 case VME_USER2:
701 temp_ctl |= CA91CX42_LSI_CTL_VAS_USER2;
702 break;
703 case VME_A64:
704 case VME_USER3:
705 case VME_USER4:
706 default:
707 spin_unlock(&(image->lock));
708 printk(KERN_ERR "Invalid address space\n");
709 retval = -EINVAL;
710 goto err_aspace;
711 break;
712 }
713
714 temp_ctl &= ~(CA91CX42_LSI_CTL_PGM_M | CA91CX42_LSI_CTL_SUPER_M);
715 if (cycle & VME_SUPER)
716 temp_ctl |= CA91CX42_LSI_CTL_SUPER_SUPR;
717 if (cycle & VME_PROG)
718 temp_ctl |= CA91CX42_LSI_CTL_PGM_PGM;
719
720 /* Setup mapping */
721 iowrite32(pci_base, ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
722 iowrite32(pci_bound, ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
723 iowrite32(vme_offset, ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
724
725 /* Write ctl reg without enable */
726 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
727
728 if (enabled)
729 temp_ctl |= CA91CX42_LSI_CTL_EN;
730
731 iowrite32(temp_ctl, ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
732
733 spin_unlock(&(image->lock));
734 return 0;
735
736 err_aspace:
737 err_dwidth:
738 ca91cx42_free_resource(image);
739 err_res:
740 err_window:
741 return retval;
742 }
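
/*
 * Usage sketch (illustrative): master windows use a fixed 4 KB
 * granularity, so vme_base and size must both be 4 KB aligned. Mapping
 * the whole 64 KB A16 space as D16 single cycles might look like:
 *
 *	retval = ca91cx42_master_set(image, 1, 0x0, 0x10000,
 *		VME_A16, VME_SCT | VME_USER | VME_DATA, VME_D16);
 *
 * The PCI side of the window is allocated internally through
 * ca91cx42_alloc_resource(), so the caller only supplies the VME view.
 */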
743
744 int __ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
745 unsigned long long *vme_base, unsigned long long *size,
746 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
747 {
748 unsigned int i, ctl;
749 unsigned long long pci_base, pci_bound, vme_offset;
750
751 i = image->number;
752
753 ctl = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_CTL[i]);
754
755 pci_base = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
756 vme_offset = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
757 pci_bound = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
758
759 *vme_base = pci_base + vme_offset;
760 *size = (pci_bound - pci_base) + 0x1000;
761
762 *enabled = 0;
763 *aspace = 0;
764 *cycle = 0;
765 *dwidth = 0;
766
767 if (ctl & CA91CX42_LSI_CTL_EN)
768 *enabled = 1;
769
770 /* Setup address space */
771 switch (ctl & CA91CX42_LSI_CTL_VAS_M) {
772 case CA91CX42_LSI_CTL_VAS_A16:
773 *aspace = VME_A16;
774 break;
775 case CA91CX42_LSI_CTL_VAS_A24:
776 *aspace = VME_A24;
777 break;
778 case CA91CX42_LSI_CTL_VAS_A32:
779 *aspace = VME_A32;
780 break;
781 case CA91CX42_LSI_CTL_VAS_CRCSR:
782 *aspace = VME_CRCSR;
783 break;
784 case CA91CX42_LSI_CTL_VAS_USER1:
785 *aspace = VME_USER1;
786 break;
787 case CA91CX42_LSI_CTL_VAS_USER2:
788 *aspace = VME_USER2;
789 break;
790 }
791
792 	/* XXX Not sure how to check for MBLT */
793 /* Setup cycle types */
794 if (ctl & CA91CX42_LSI_CTL_VCT_BLT)
795 *cycle |= VME_BLT;
796 else
797 *cycle |= VME_SCT;
798
799 if (ctl & CA91CX42_LSI_CTL_SUPER_SUPR)
800 *cycle |= VME_SUPER;
801 else
802 *cycle |= VME_USER;
803
804 	if (ctl & CA91CX42_LSI_CTL_PGM_PGM)
805 		*cycle |= VME_PROG;
806 	else
807 		*cycle |= VME_DATA;
808
809 /* Setup data width */
810 switch (ctl & CA91CX42_LSI_CTL_VDW_M) {
811 case CA91CX42_LSI_CTL_VDW_D8:
812 *dwidth = VME_D8;
813 break;
814 case CA91CX42_LSI_CTL_VDW_D16:
815 *dwidth = VME_D16;
816 break;
817 case CA91CX42_LSI_CTL_VDW_D32:
818 *dwidth = VME_D32;
819 break;
820 case CA91CX42_LSI_CTL_VDW_D64:
821 *dwidth = VME_D64;
822 break;
823 }
824
825 /* XXX Prefetch stuff currently unsupported */
826 #if 0
827 if (ctl & 0x40000000)
828 vmeOut->wrPostEnable = 1;
829 #endif
830
831 return 0;
832 }
833
834 int ca91cx42_master_get(struct vme_master_resource *image, int *enabled,
835 unsigned long long *vme_base, unsigned long long *size,
836 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
837 {
838 int retval;
839
840 spin_lock(&(image->lock));
841
842 retval = __ca91cx42_master_get(image, enabled, vme_base, size, aspace,
843 cycle, dwidth);
844
845 spin_unlock(&(image->lock));
846
847 return retval;
848 }
849
850 ssize_t ca91cx42_master_read(struct vme_master_resource *image, void *buf,
851 size_t count, loff_t offset)
852 {
853 int retval;
854
855 spin_lock(&(image->lock));
856
857 memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
858 retval = count;
859
860 spin_unlock(&(image->lock));
861
862 return retval;
863 }
864
865 ssize_t ca91cx42_master_write(struct vme_master_resource *image, void *buf,
866 size_t count, loff_t offset)
867 {
868 int retval = 0;
869
870 spin_lock(&(image->lock));
871
872 memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
873 retval = count;
874
875 spin_unlock(&(image->lock));
876
877 return retval;
878 }
879
880 int ca91cx42_slot_get(void)
881 {
882 u32 slot = 0;
883
884 slot = ioread32(ca91cx42_bridge->base + VCSR_BS);
885 slot = ((slot & CA91CX42_VCSR_BS_SLOT_M) >> 27);
886 return (int)slot;
887
888 }
889
890 static int __init ca91cx42_init(void)
891 {
892 return pci_register_driver(&ca91cx42_driver);
893 }
894
895 /*
896 * Configure CR/CSR space
897 *
898 * Access to the CR/CSR can be configured at power-up. The location of the
899  * CR/CSR registers in the CR/CSR address space is determined by the board's
900  * Auto-ID or Geographic address. This function ensures that the window is
901  * enabled at an offset consistent with the board's geographic address.
902 */
903 static int ca91cx42_crcsr_init(struct pci_dev *pdev)
904 {
905 unsigned int crcsr_addr;
906 int tmp, slot;
907
908 /* XXX We may need to set this somehow as the Universe II does not support
909 * geographical addressing.
910 */
911 #if 0
912 if (vme_slotnum != -1)
913 iowrite32(vme_slotnum << 27, ca91cx42_bridge->base + VCSR_BS);
914 #endif
915 slot = ca91cx42_slot_get();
916 	dev_info(&pdev->dev, "CR/CSR slot ID: %d\n", slot);
917 if (slot == 0) {
918 dev_err(&pdev->dev, "Slot number is unset, not configuring "
919 "CR/CSR space\n");
920 return -EINVAL;
921 }
922
923 /* Allocate mem for CR/CSR image */
924 crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
925 &crcsr_bus);
926 if (crcsr_kernel == NULL) {
927 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
928 "image\n");
929 return -ENOMEM;
930 }
931
932 memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
933
934 crcsr_addr = slot * (512 * 1024);
935 iowrite32(crcsr_bus - crcsr_addr, ca91cx42_bridge->base + VCSR_TO);
936
937 tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
938 tmp |= CA91CX42_VCSR_CTL_EN;
939 iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);
940
941 return 0;
942 }
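
/*
 * Worked example (illustrative): CR/CSR space allots 512 KB per slot, so
 * slot 3 owns the region starting at crcsr_addr = 3 * 0x80000 = 0x180000.
 * Writing (crcsr_bus - 0x180000) to VCSR_TO translates an incoming CR/CSR
 * access at VME address 0x180000 to the start of the DMA buffer at
 * crcsr_bus.
 */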
943
944 static void ca91cx42_crcsr_exit(struct pci_dev *pdev)
945 {
946 u32 tmp;
947
948 /* Turn off CR/CSR space */
949 tmp = ioread32(ca91cx42_bridge->base + VCSR_CTL);
950 tmp &= ~CA91CX42_VCSR_CTL_EN;
951 iowrite32(tmp, ca91cx42_bridge->base + VCSR_CTL);
952
953 /* Free image */
954 iowrite32(0, ca91cx42_bridge->base + VCSR_TO);
955
956 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
957 }
958
959 static int ca91cx42_probe(struct pci_dev *pdev, const struct pci_device_id *id)
960 {
961 int retval, i;
962 u32 data;
963 struct list_head *pos = NULL;
964 struct vme_master_resource *master_image;
965 struct vme_slave_resource *slave_image;
966 #if 0
967 struct vme_dma_resource *dma_ctrlr;
968 #endif
969 struct vme_lm_resource *lm;
970
971 /* We want to support more than one of each bridge so we need to
972 * dynamically allocate the bridge structure
973 */
974 ca91cx42_bridge = kmalloc(sizeof(struct vme_bridge), GFP_KERNEL);
975
976 if (ca91cx42_bridge == NULL) {
977 dev_err(&pdev->dev, "Failed to allocate memory for device "
978 "structure\n");
979 retval = -ENOMEM;
980 goto err_struct;
981 }
982
983 memset(ca91cx42_bridge, 0, sizeof(struct vme_bridge));
984
985 /* Enable the device */
986 retval = pci_enable_device(pdev);
987 if (retval) {
988 dev_err(&pdev->dev, "Unable to enable device\n");
989 goto err_enable;
990 }
991
992 /* Map Registers */
993 retval = pci_request_regions(pdev, driver_name);
994 if (retval) {
995 dev_err(&pdev->dev, "Unable to reserve resources\n");
996 goto err_resource;
997 }
998
999 /* map registers in BAR 0 */
1000 ca91cx42_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0),
1001 4096);
1002 if (!ca91cx42_bridge->base) {
1003 dev_err(&pdev->dev, "Unable to remap CRG region\n");
1004 retval = -EIO;
1005 goto err_remap;
1006 }
1007
1008 /* Check to see if the mapping worked out */
1009 data = ioread32(ca91cx42_bridge->base + CA91CX42_PCI_ID) & 0x0000FFFF;
1010 if (data != PCI_VENDOR_ID_TUNDRA) {
1011 dev_err(&pdev->dev, "PCI_ID check failed\n");
1012 retval = -EIO;
1013 goto err_test;
1014 }
1015
1016 /* Initialize wait queues & mutual exclusion flags */
1017 /* XXX These need to be moved to the vme_bridge structure */
1018 init_waitqueue_head(&dma_queue);
1019 init_waitqueue_head(&iack_queue);
1020 mutex_init(&(vme_int));
1021 mutex_init(&(vme_rmw));
1022
1023 ca91cx42_bridge->parent = &(pdev->dev);
1024 strcpy(ca91cx42_bridge->name, driver_name);
1025
1026 /* Setup IRQ */
1027 retval = ca91cx42_irq_init(ca91cx42_bridge);
1028 if (retval != 0) {
1029 dev_err(&pdev->dev, "Chip Initialization failed.\n");
1030 goto err_irq;
1031 }
1032
1033 /* Add master windows to list */
1034 INIT_LIST_HEAD(&(ca91cx42_bridge->master_resources));
1035 for (i = 0; i < CA91C142_MAX_MASTER; i++) {
1036 master_image = kmalloc(sizeof(struct vme_master_resource),
1037 GFP_KERNEL);
1038 if (master_image == NULL) {
1039 dev_err(&pdev->dev, "Failed to allocate memory for "
1040 "master resource structure\n");
1041 retval = -ENOMEM;
1042 goto err_master;
1043 }
1044 master_image->parent = ca91cx42_bridge;
1045 spin_lock_init(&(master_image->lock));
1046 master_image->locked = 0;
1047 master_image->number = i;
1048 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
1049 VME_CRCSR | VME_USER1 | VME_USER2;
1050 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1051 VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1052 master_image->width_attr = VME_D8 | VME_D16 | VME_D32 | VME_D64;
1053 memset(&(master_image->pci_resource), 0,
1054 sizeof(struct resource));
1055 master_image->kern_base = NULL;
1056 list_add_tail(&(master_image->list),
1057 &(ca91cx42_bridge->master_resources));
1058 }
1059
1060 /* Add slave windows to list */
1061 INIT_LIST_HEAD(&(ca91cx42_bridge->slave_resources));
1062 for (i = 0; i < CA91C142_MAX_SLAVE; i++) {
1063 slave_image = kmalloc(sizeof(struct vme_slave_resource),
1064 GFP_KERNEL);
1065 if (slave_image == NULL) {
1066 dev_err(&pdev->dev, "Failed to allocate memory for "
1067 "slave resource structure\n");
1068 retval = -ENOMEM;
1069 goto err_slave;
1070 }
1071 slave_image->parent = ca91cx42_bridge;
1072 mutex_init(&(slave_image->mtx));
1073 slave_image->locked = 0;
1074 slave_image->number = i;
1075 slave_image->address_attr = VME_A24 | VME_A32 | VME_USER1 |
1076 VME_USER2;
1077
1078 /* Only windows 0 and 4 support A16 */
1079 if (i == 0 || i == 4)
1080 slave_image->address_attr |= VME_A16;
1081
1082 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
1083 VME_SUPER | VME_USER | VME_PROG | VME_DATA;
1084 list_add_tail(&(slave_image->list),
1085 &(ca91cx42_bridge->slave_resources));
1086 }
	/* ca91cx42_remove() walks the DMA resource list, so initialise it here
	 * even though the DMA engines below are currently unsupported */
	INIT_LIST_HEAD(&(ca91cx42_bridge->dma_resources));
1087 #if 0
1088 	/* Add dma engines to list */
1090 for (i = 0; i < CA91C142_MAX_DMA; i++) {
1091 dma_ctrlr = kmalloc(sizeof(struct vme_dma_resource),
1092 GFP_KERNEL);
1093 if (dma_ctrlr == NULL) {
1094 dev_err(&pdev->dev, "Failed to allocate memory for "
1095 "dma resource structure\n");
1096 retval = -ENOMEM;
1097 goto err_dma;
1098 }
1099 dma_ctrlr->parent = ca91cx42_bridge;
1100 mutex_init(&(dma_ctrlr->mtx));
1101 dma_ctrlr->locked = 0;
1102 dma_ctrlr->number = i;
1103 INIT_LIST_HEAD(&(dma_ctrlr->pending));
1104 INIT_LIST_HEAD(&(dma_ctrlr->running));
1105 list_add_tail(&(dma_ctrlr->list),
1106 &(ca91cx42_bridge->dma_resources));
1107 }
1108 #endif
1109 /* Add location monitor to list */
1110 INIT_LIST_HEAD(&(ca91cx42_bridge->lm_resources));
1111 lm = kmalloc(sizeof(struct vme_lm_resource), GFP_KERNEL);
1112 if (lm == NULL) {
1113 dev_err(&pdev->dev, "Failed to allocate memory for "
1114 "location monitor resource structure\n");
1115 retval = -ENOMEM;
1116 goto err_lm;
1117 }
1118 lm->parent = ca91cx42_bridge;
1119 mutex_init(&(lm->mtx));
1120 lm->locked = 0;
1121 lm->number = 1;
1122 lm->monitors = 4;
1123 list_add_tail(&(lm->list), &(ca91cx42_bridge->lm_resources));
1124
1125 ca91cx42_bridge->slave_get = ca91cx42_slave_get;
1126 ca91cx42_bridge->slave_set = ca91cx42_slave_set;
1127 ca91cx42_bridge->master_get = ca91cx42_master_get;
1128 ca91cx42_bridge->master_set = ca91cx42_master_set;
1129 ca91cx42_bridge->master_read = ca91cx42_master_read;
1130 ca91cx42_bridge->master_write = ca91cx42_master_write;
1131 #if 0
1132 ca91cx42_bridge->master_rmw = ca91cx42_master_rmw;
1133 ca91cx42_bridge->dma_list_add = ca91cx42_dma_list_add;
1134 ca91cx42_bridge->dma_list_exec = ca91cx42_dma_list_exec;
1135 ca91cx42_bridge->dma_list_empty = ca91cx42_dma_list_empty;
1136 #endif
1137 ca91cx42_bridge->irq_set = ca91cx42_irq_set;
1138 ca91cx42_bridge->irq_generate = ca91cx42_irq_generate;
1139 #if 0
1140 ca91cx42_bridge->lm_set = ca91cx42_lm_set;
1141 ca91cx42_bridge->lm_get = ca91cx42_lm_get;
1142 ca91cx42_bridge->lm_attach = ca91cx42_lm_attach;
1143 ca91cx42_bridge->lm_detach = ca91cx42_lm_detach;
1144 #endif
1145 ca91cx42_bridge->slot_get = ca91cx42_slot_get;
1146
1147 data = ioread32(ca91cx42_bridge->base + MISC_CTL);
1148 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
1149 (data & CA91CX42_MISC_CTL_SYSCON) ? "" : " not");
1150 dev_info(&pdev->dev, "Slot ID is %d\n", ca91cx42_slot_get());
1151
1152 if (ca91cx42_crcsr_init(pdev)) {
1153 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
1154 retval = -EINVAL;
1155 #if 0
1156 goto err_crcsr;
1157 #endif
1158 }
1159
1160 	/* Need to save ca91cx42_bridge pointer locally in a linked list for use in
1161 * ca91cx42_remove()
1162 */
1163 retval = vme_register_bridge(ca91cx42_bridge);
1164 if (retval != 0) {
1165 dev_err(&pdev->dev, "Chip Registration failed.\n");
1166 goto err_reg;
1167 }
1168
1169 return 0;
1170
1172 err_reg:
1173 ca91cx42_crcsr_exit(pdev);
1174 #if 0
1175 err_crcsr:
1176 #endif
1177 err_lm:
1178 	/* resources are stored in a linked list */
1179 list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
1180 lm = list_entry(pos, struct vme_lm_resource, list);
1181 list_del(pos);
1182 kfree(lm);
1183 }
1184 #if 0
1185 err_dma:
1186 	/* resources are stored in a linked list */
1187 list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
1188 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1189 list_del(pos);
1190 kfree(dma_ctrlr);
1191 }
1192 #endif
1193 err_slave:
1194 	/* resources are stored in a linked list */
1195 list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
1196 slave_image = list_entry(pos, struct vme_slave_resource, list);
1197 list_del(pos);
1198 kfree(slave_image);
1199 }
1200 err_master:
1201 	/* resources are stored in a linked list */
1202 list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
1203 master_image = list_entry(pos, struct vme_master_resource,
1204 list);
1205 list_del(pos);
1206 kfree(master_image);
1207 }
1208
1209 ca91cx42_irq_exit(pdev);
1210 err_irq:
1211 err_test:
1212 iounmap(ca91cx42_bridge->base);
1213 err_remap:
1214 pci_release_regions(pdev);
1215 err_resource:
1216 pci_disable_device(pdev);
1217 err_enable:
1218 kfree(ca91cx42_bridge);
1219 err_struct:
1220 return retval;
1221
1222 }
1223
1224 void ca91cx42_remove(struct pci_dev *pdev)
1225 {
1226 struct list_head *pos = NULL;
1227 struct vme_master_resource *master_image;
1228 struct vme_slave_resource *slave_image;
1229 struct vme_dma_resource *dma_ctrlr;
1230 struct vme_lm_resource *lm;
1231
1232 /* Turn off Ints */
1233 iowrite32(0, ca91cx42_bridge->base + LINT_EN);
1234
1235 /* Turn off the windows */
1236 iowrite32(0x00800000, ca91cx42_bridge->base + LSI0_CTL);
1237 iowrite32(0x00800000, ca91cx42_bridge->base + LSI1_CTL);
1238 iowrite32(0x00800000, ca91cx42_bridge->base + LSI2_CTL);
1239 iowrite32(0x00800000, ca91cx42_bridge->base + LSI3_CTL);
1240 iowrite32(0x00800000, ca91cx42_bridge->base + LSI4_CTL);
1241 iowrite32(0x00800000, ca91cx42_bridge->base + LSI5_CTL);
1242 iowrite32(0x00800000, ca91cx42_bridge->base + LSI6_CTL);
1243 iowrite32(0x00800000, ca91cx42_bridge->base + LSI7_CTL);
1244 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI0_CTL);
1245 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI1_CTL);
1246 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI2_CTL);
1247 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI3_CTL);
1248 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI4_CTL);
1249 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI5_CTL);
1250 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI6_CTL);
1251 iowrite32(0x00F00000, ca91cx42_bridge->base + VSI7_CTL);
1252
1253 vme_unregister_bridge(ca91cx42_bridge);
1254 #if 0
1255 ca91cx42_crcsr_exit(pdev);
1256 #endif
1257 	/* resources are stored in a linked list */
1258 list_for_each(pos, &(ca91cx42_bridge->lm_resources)) {
1259 lm = list_entry(pos, struct vme_lm_resource, list);
1260 list_del(pos);
1261 kfree(lm);
1262 }
1263
1264 	/* resources are stored in a linked list */
1265 list_for_each(pos, &(ca91cx42_bridge->dma_resources)) {
1266 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
1267 list_del(pos);
1268 kfree(dma_ctrlr);
1269 }
1270
1271 	/* resources are stored in a linked list */
1272 list_for_each(pos, &(ca91cx42_bridge->slave_resources)) {
1273 slave_image = list_entry(pos, struct vme_slave_resource, list);
1274 list_del(pos);
1275 kfree(slave_image);
1276 }
1277
1278 	/* resources are stored in a linked list */
1279 list_for_each(pos, &(ca91cx42_bridge->master_resources)) {
1280 master_image = list_entry(pos, struct vme_master_resource,
1281 list);
1282 list_del(pos);
1283 kfree(master_image);
1284 }
1285
1286 ca91cx42_irq_exit(pdev);
1287
1288 iounmap(ca91cx42_bridge->base);
1289
1290 pci_release_regions(pdev);
1291
1292 pci_disable_device(pdev);
1293
1294 kfree(ca91cx42_bridge);
1295 }
1296
1297 static void __exit ca91cx42_exit(void)
1298 {
1299 pci_unregister_driver(&ca91cx42_driver);
1300 }
1301
1302 MODULE_DESCRIPTION("VME driver for the Tundra Universe II VME bridge");
1303 MODULE_LICENSE("GPL");
1304
1305 module_init(ca91cx42_init);
1306 module_exit(ca91cx42_exit);
1307
1308 /*----------------------------------------------------------------------------
1309 * STAGING
1310 *--------------------------------------------------------------------------*/
1311
1312 #if 0
1313 #define SWIZZLE(X) ( ((X & 0xFF000000) >> 24) | ((X & 0x00FF0000) >> 8) | ((X & 0x0000FF00) << 8) | ((X & 0x000000FF) << 24))
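
/*
 * SWIZZLE() reverses the byte order of a 32-bit word, e.g.
 * SWIZZLE(0x11223344) == 0x44332211, so the enable/compare/swap values
 * below are written to the SCYC registers byte-swapped relative to the
 * CPU's view.
 */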
1314
1315 int ca91cx42_master_rmw(vmeRmwCfg_t *vmeRmw)
1316 {
1317 int temp_ctl = 0;
1318 int tempBS = 0;
1319 int tempBD = 0;
1320 int tempTO = 0;
1321 int vmeBS = 0;
1322 int vmeBD = 0;
1323 int *rmw_pci_data_ptr = NULL;
1324 int *vaDataPtr = NULL;
1325 int i;
1326 vmeOutWindowCfg_t vmeOut;
1327 if (vmeRmw->maxAttempts < 1) {
1328 return -EINVAL;
1329 }
1330 if (vmeRmw->targetAddrU) {
1331 return -EINVAL;
1332 }
1333 /* Find the PCI address that maps to the desired VME address */
1334 for (i = 0; i < 8; i++) {
1335 temp_ctl = ioread32(ca91cx42_bridge->base +
1336 CA91CX42_LSI_CTL[i]);
1337 if ((temp_ctl & 0x80000000) == 0) {
1338 continue;
1339 }
1340 memset(&vmeOut, 0, sizeof(vmeOut));
1341 vmeOut.windowNbr = i;
1342 ca91cx42_get_out_bound(&vmeOut);
1343 if (vmeOut.addrSpace != vmeRmw->addrSpace) {
1344 continue;
1345 }
1346 tempBS = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BS[i]);
1347 tempBD = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_BD[i]);
1348 tempTO = ioread32(ca91cx42_bridge->base + CA91CX42_LSI_TO[i]);
1349 vmeBS = tempBS + tempTO;
1350 vmeBD = tempBD + tempTO;
1351 if ((vmeRmw->targetAddr >= vmeBS) &&
1352 (vmeRmw->targetAddr < vmeBD)) {
1353 rmw_pci_data_ptr =
1354 (int *)(tempBS + (vmeRmw->targetAddr - vmeBS));
1355 vaDataPtr =
1356 (int *)(out_image_va[i] +
1357 (vmeRmw->targetAddr - vmeBS));
1358 break;
1359 }
1360 }
1361
1362 /* If no window - fail. */
1363 if (rmw_pci_data_ptr == NULL) {
1364 return -EINVAL;
1365 }
1366 /* Setup the RMW registers. */
1367 iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
1368 iowrite32(SWIZZLE(vmeRmw->enableMask), ca91cx42_bridge->base + SCYC_EN);
1369 iowrite32(SWIZZLE(vmeRmw->compareData), ca91cx42_bridge->base +
1370 SCYC_CMP);
1371 iowrite32(SWIZZLE(vmeRmw->swapData), ca91cx42_bridge->base + SCYC_SWP);
1372 iowrite32((int)rmw_pci_data_ptr, ca91cx42_bridge->base + SCYC_ADDR);
1373 iowrite32(1, ca91cx42_bridge->base + SCYC_CTL);
1374
1375 /* Run the RMW cycle until either success or max attempts. */
1376 vmeRmw->numAttempts = 1;
1377 while (vmeRmw->numAttempts <= vmeRmw->maxAttempts) {
1378
1379 if ((ioread32(vaDataPtr) & vmeRmw->enableMask) ==
1380 (vmeRmw->swapData & vmeRmw->enableMask)) {
1381
1382 iowrite32(0, ca91cx42_bridge->base + SCYC_CTL);
1383 break;
1384
1385 }
1386 vmeRmw->numAttempts++;
1387 }
1388
1389 /* If no success, set num Attempts to be greater than max attempts */
1390 if (vmeRmw->numAttempts > vmeRmw->maxAttempts) {
1391 vmeRmw->numAttempts = vmeRmw->maxAttempts + 1;
1392 }
1393
1394 return 0;
1395 }
1396
1397 int uniSetupDctlReg(vmeDmaPacket_t * vmeDma, int *dctlregreturn)
1398 {
1399 unsigned int dctlreg = 0x80;
1400 struct vmeAttr *vmeAttr;
1401
1402 if (vmeDma->srcBus == VME_DMA_VME) {
1403 dctlreg = 0;
1404 vmeAttr = &vmeDma->srcVmeAttr;
1405 } else {
1406 dctlreg = 0x80000000;
1407 vmeAttr = &vmeDma->dstVmeAttr;
1408 }
1409
1410 switch (vmeAttr->maxDataWidth) {
1411 case VME_D8:
1412 break;
1413 case VME_D16:
1414 dctlreg |= 0x00400000;
1415 break;
1416 case VME_D32:
1417 dctlreg |= 0x00800000;
1418 break;
1419 case VME_D64:
1420 dctlreg |= 0x00C00000;
1421 break;
1422 }
1423
1424 switch (vmeAttr->addrSpace) {
1425 case VME_A16:
1426 break;
1427 case VME_A24:
1428 dctlreg |= 0x00010000;
1429 break;
1430 case VME_A32:
1431 dctlreg |= 0x00020000;
1432 break;
1433 case VME_USER1:
1434 dctlreg |= 0x00060000;
1435 break;
1436 case VME_USER2:
1437 dctlreg |= 0x00070000;
1438 break;
1439
1440 case VME_A64: /* not supported in Universe DMA */
1441 case VME_CRCSR:
1442 case VME_USER3:
1443 case VME_USER4:
1444 return -EINVAL;
1445 break;
1446 }
1447 if (vmeAttr->userAccessType == VME_PROG) {
1448 dctlreg |= 0x00004000;
1449 }
1450 if (vmeAttr->dataAccessType == VME_SUPER) {
1451 dctlreg |= 0x00001000;
1452 }
1453 if (vmeAttr->xferProtocol != VME_SCT) {
1454 dctlreg |= 0x00000100;
1455 }
1456 *dctlregreturn = dctlreg;
1457 return 0;
1458 }
1459
1460 unsigned int
1461 ca91cx42_start_dma(int channel, unsigned int dgcsreg, TDMA_Cmd_Packet *vmeLL)
1462 {
1463 unsigned int val;
1464
1465 /* Setup registers as needed for direct or chained. */
1466 if (dgcsreg & 0x8000000) {
1467 iowrite32(0, ca91cx42_bridge->base + DTBC);
1468 iowrite32((unsigned int)vmeLL, ca91cx42_bridge->base + DCPP);
1469 } else {
1470 #if 0
1471 printk(KERN_ERR "Starting: DGCS = %08x\n", dgcsreg);
1472 printk(KERN_ERR "Starting: DVA = %08x\n",
1473 ioread32(&vmeLL->dva));
1474 printk(KERN_ERR "Starting: DLV = %08x\n",
1475 ioread32(&vmeLL->dlv));
1476 printk(KERN_ERR "Starting: DTBC = %08x\n",
1477 ioread32(&vmeLL->dtbc));
1478 printk(KERN_ERR "Starting: DCTL = %08x\n",
1479 ioread32(&vmeLL->dctl));
1480 #endif
1481 /* Write registers */
1482 iowrite32(ioread32(&vmeLL->dva), ca91cx42_bridge->base + DVA);
1483 iowrite32(ioread32(&vmeLL->dlv), ca91cx42_bridge->base + DLA);
1484 iowrite32(ioread32(&vmeLL->dtbc), ca91cx42_bridge->base + DTBC);
1485 iowrite32(ioread32(&vmeLL->dctl), ca91cx42_bridge->base + DCTL);
1486 iowrite32(0, ca91cx42_bridge->base + DCPP);
1487 }
1488
1489 /* Start the operation */
1490 iowrite32(dgcsreg, ca91cx42_bridge->base + DGCS);
1491 val = get_tbl();
1492 iowrite32(dgcsreg | 0x8000000F, ca91cx42_bridge->base + DGCS);
1493 return val;
1494 }
1495
1496 TDMA_Cmd_Packet *ca91cx42_setup_dma(vmeDmaPacket_t * vmeDma)
1497 {
1498 vmeDmaPacket_t *vmeCur;
1499 int maxPerPage;
1500 int currentLLcount;
1501 TDMA_Cmd_Packet *startLL;
1502 TDMA_Cmd_Packet *currentLL;
1503 TDMA_Cmd_Packet *nextLL;
1504 unsigned int dctlreg = 0;
1505
1506 	maxPerPage = PAGE_SIZE / sizeof(TDMA_Cmd_Packet) - 1;
1507 startLL = (TDMA_Cmd_Packet *) __get_free_pages(GFP_KERNEL, 0);
1508 if (startLL == 0) {
1509 return startLL;
1510 }
1511 /* First allocate pages for descriptors and create linked list */
1512 vmeCur = vmeDma;
1513 currentLL = startLL;
1514 currentLLcount = 0;
1515 while (vmeCur != 0) {
1516 if (vmeCur->pNextPacket != 0) {
1517 currentLL->dcpp = (unsigned int)(currentLL + 1);
1518 currentLLcount++;
1519 if (currentLLcount >= maxPerPage) {
1520 currentLL->dcpp =
1521 __get_free_pages(GFP_KERNEL, 0);
1522 currentLLcount = 0;
1523 }
1524 currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
1525 } else {
1526 currentLL->dcpp = (unsigned int)0;
1527 }
1528 vmeCur = vmeCur->pNextPacket;
1529 }
1530
1531 /* Next fill in information for each descriptor */
1532 vmeCur = vmeDma;
1533 currentLL = startLL;
1534 while (vmeCur != 0) {
1535 if (vmeCur->srcBus == VME_DMA_VME) {
1536 iowrite32(vmeCur->srcAddr, &currentLL->dva);
1537 iowrite32(vmeCur->dstAddr, &currentLL->dlv);
1538 } else {
1539 iowrite32(vmeCur->srcAddr, &currentLL->dlv);
1540 iowrite32(vmeCur->dstAddr, &currentLL->dva);
1541 }
1542 uniSetupDctlReg(vmeCur, &dctlreg);
1543 iowrite32(dctlreg, &currentLL->dctl);
1544 iowrite32(vmeCur->byteCount, &currentLL->dtbc);
1545
1546 currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
1547 vmeCur = vmeCur->pNextPacket;
1548 }
1549
1550 /* Convert Links to PCI addresses. */
1551 currentLL = startLL;
1552 while (currentLL != 0) {
1553 nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
1554 if (nextLL == 0) {
1555 iowrite32(1, &currentLL->dcpp);
1556 } else {
1557 iowrite32((unsigned int)virt_to_bus(nextLL),
1558 &currentLL->dcpp);
1559 }
1560 currentLL = nextLL;
1561 }
1562
1563 /* Return pointer to descriptors list */
1564 return startLL;
1565 }
1566
1567 int ca91cx42_free_dma(TDMA_Cmd_Packet *startLL)
1568 {
1569 TDMA_Cmd_Packet *currentLL;
1570 TDMA_Cmd_Packet *prevLL;
1571 TDMA_Cmd_Packet *nextLL;
1572 unsigned int dcppreg;
1573
1574 /* Convert Links to virtual addresses. */
1575 currentLL = startLL;
1576 while (currentLL != 0) {
1577 dcppreg = ioread32(&currentLL->dcpp);
1578 dcppreg &= ~6;
1579 if (dcppreg & 1) {
1580 currentLL->dcpp = 0;
1581 } else {
1582 currentLL->dcpp = (unsigned int)bus_to_virt(dcppreg);
1583 }
1584 currentLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
1585 }
1586
1587 /* Free all pages associated with the descriptors. */
1588 currentLL = startLL;
1589 prevLL = currentLL;
1590 while (currentLL != 0) {
1591 nextLL = (TDMA_Cmd_Packet *) currentLL->dcpp;
1592 if (currentLL + 1 != nextLL) {
1593 free_pages((int)prevLL, 0);
1594 prevLL = nextLL;
1595 }
1596 currentLL = nextLL;
1597 }
1598
1599 /* Return pointer to descriptors list */
1600 return 0;
1601 }
1602
1603 int ca91cx42_do_dma(vmeDmaPacket_t *vmeDma)
1604 {
1605 unsigned int dgcsreg = 0;
1606 unsigned int dctlreg = 0;
1607 int val;
1608 int channel, x;
1609 vmeDmaPacket_t *curDma;
1610 TDMA_Cmd_Packet *dmaLL;
1611
1612 /* Sanity check the VME chain. */
1613 channel = vmeDma->channel_number;
1614 if (channel > 0) {
1615 return -EINVAL;
1616 }
1617 curDma = vmeDma;
1618 while (curDma != 0) {
1619 if (curDma->byteCount == 0) {
1620 return -EINVAL;
1621 }
1622 if (curDma->byteCount >= 0x1000000) {
1623 return -EINVAL;
1624 }
1625 if ((curDma->srcAddr & 7) != (curDma->dstAddr & 7)) {
1626 return -EINVAL;
1627 }
1628 switch (curDma->srcBus) {
1629 case VME_DMA_PCI:
1630 if (curDma->dstBus != VME_DMA_VME) {
1631 return -EINVAL;
1632 }
1633 break;
1634 case VME_DMA_VME:
1635 if (curDma->dstBus != VME_DMA_PCI) {
1636 return -EINVAL;
1637 }
1638 break;
1639 default:
1640 return -EINVAL;
1641 break;
1642 }
1643 if (uniSetupDctlReg(curDma, &dctlreg) < 0) {
1644 return -EINVAL;
1645 }
1646
1647 curDma = curDma->pNextPacket;
1648 if (curDma == vmeDma) { /* Endless Loop! */
1649 return -EINVAL;
1650 }
1651 }
1652
1653 /* calculate control register */
1654 if (vmeDma->pNextPacket != 0) {
1655 dgcsreg = 0x8000000;
1656 } else {
1657 dgcsreg = 0;
1658 }
1659
1660 for (x = 0; x < 8; x++) { /* vme block size */
1661 if ((256 << x) >= vmeDma->maxVmeBlockSize) {
1662 break;
1663 }
1664 }
1665 if (x == 8)
1666 x = 7;
1667 dgcsreg |= (x << 20);
1668
1669 if (vmeDma->vmeBackOffTimer) {
1670 for (x = 1; x < 8; x++) { /* vme timer */
1671 if ((16 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
1672 break;
1673 }
1674 }
1675 if (x == 8)
1676 x = 7;
1677 dgcsreg |= (x << 16);
1678 }
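	/*
	 * Worked example (illustrative): maxVmeBlockSize = 4096 selects the
	 * smallest x with (256 << x) >= 4096, i.e. x = 4, OR'd into DGCS at
	 * bit 20; vmeBackOffTimer = 64 selects the smallest x with
	 * (16 << (x - 1)) >= 64, i.e. x = 3, OR'd into DGCS at bit 16.
	 */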
1679 	/* Setup the dma chain */
1680 dmaLL = ca91cx42_setup_dma(vmeDma);
1681
1682 /* Start the DMA */
1683 if (dgcsreg & 0x8000000) {
1684 vmeDma->vmeDmaStartTick =
1685 ca91cx42_start_dma(channel, dgcsreg,
1686 (TDMA_Cmd_Packet *) virt_to_phys(dmaLL));
1687 } else {
1688 vmeDma->vmeDmaStartTick =
1689 ca91cx42_start_dma(channel, dgcsreg, dmaLL);
1690 }
1691
1692 wait_event_interruptible(dma_queue,
1693 ioread32(ca91cx42_bridge->base + DGCS) & 0x800);
1694
1695 val = ioread32(ca91cx42_bridge->base + DGCS);
1696 iowrite32(val | 0xF00, ca91cx42_bridge->base + DGCS);
1697
1698 vmeDma->vmeDmaStatus = 0;
1699
1700 if (!(val & 0x00000800)) {
1701 vmeDma->vmeDmaStatus = val & 0x700;
1702 		printk(KERN_ERR "ca91c042: DMA Error in ca91cx42_do_dma"
1703 			" DGCS=%08X\n", val);
1704 val = ioread32(ca91cx42_bridge->base + DCPP);
1705 printk(KERN_ERR "ca91c042: DCPP=%08X\n", val);
1706 val = ioread32(ca91cx42_bridge->base + DCTL);
1707 printk(KERN_ERR "ca91c042: DCTL=%08X\n", val);
1708 val = ioread32(ca91cx42_bridge->base + DTBC);
1709 printk(KERN_ERR "ca91c042: DTBC=%08X\n", val);
1710 val = ioread32(ca91cx42_bridge->base + DLA);
1711 printk(KERN_ERR "ca91c042: DLA=%08X\n", val);
1712 val = ioread32(ca91cx42_bridge->base + DVA);
1713 printk(KERN_ERR "ca91c042: DVA=%08X\n", val);
1714
1715 }
1716 /* Free the dma chain */
1717 ca91cx42_free_dma(dmaLL);
1718
1719 return 0;
1720 }
1721
1722 int ca91cx42_lm_set(vmeLmCfg_t *vmeLm)
1723 {
1724 int temp_ctl = 0;
1725
1726 if (vmeLm->addrU)
1727 return -EINVAL;
1728
1729 switch (vmeLm->addrSpace) {
1730 case VME_A64:
1731 case VME_USER3:
1732 case VME_USER4:
1733 return -EINVAL;
1734 case VME_A16:
1735 temp_ctl |= 0x00000;
1736 break;
1737 case VME_A24:
1738 temp_ctl |= 0x10000;
1739 break;
1740 case VME_A32:
1741 temp_ctl |= 0x20000;
1742 break;
1743 case VME_CRCSR:
1744 temp_ctl |= 0x50000;
1745 break;
1746 case VME_USER1:
1747 temp_ctl |= 0x60000;
1748 break;
1749 case VME_USER2:
1750 temp_ctl |= 0x70000;
1751 break;
1752 }
1753
1754 /* Disable while we are mucking around */
1755 iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);
1756
1757 iowrite32(vmeLm->addr, ca91cx42_bridge->base + LM_BS);
1758
1759 /* Setup CTL register. */
1760 if (vmeLm->userAccessType & VME_SUPER)
1761 temp_ctl |= 0x00200000;
1762 if (vmeLm->userAccessType & VME_USER)
1763 temp_ctl |= 0x00100000;
1764 if (vmeLm->dataAccessType & VME_PROG)
1765 temp_ctl |= 0x00800000;
1766 if (vmeLm->dataAccessType & VME_DATA)
1767 temp_ctl |= 0x00400000;
1768
1769
1770 /* Write ctl reg and enable */
1771 iowrite32(0x80000000 | temp_ctl, ca91cx42_bridge->base + LM_CTL);
1772 temp_ctl = ioread32(ca91cx42_bridge->base + LM_CTL);
1773
1774 return 0;
1775 }
1776
1777 int ca91cx42_wait_lm(vmeLmCfg_t *vmeLm)
1778 {
1779 unsigned long flags;
1780 unsigned int tmp;
1781
1782 spin_lock_irqsave(&lm_lock, flags);
1783 spin_unlock_irqrestore(&lm_lock, flags);
1784 if (tmp == 0) {
1785 if (vmeLm->lmWait < 10)
1786 vmeLm->lmWait = 10;
1787 interruptible_sleep_on_timeout(&lm_queue, vmeLm->lmWait);
1788 }
1789 iowrite32(0x00000000, ca91cx42_bridge->base + LM_CTL);
1790
1791 return 0;
1792 }
1793
1794
1795
1796 int ca91cx42_set_arbiter(vmeArbiterCfg_t *vmeArb)
1797 {
1798 int temp_ctl = 0;
1799 int vbto = 0;
1800
1801 temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
1802 temp_ctl &= 0x00FFFFFF;
1803
1804 if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
1805 vbto = 7;
1806 } else if (vmeArb->globalTimeoutTimer > 1024) {
1807 return -EINVAL;
1808 } else if (vmeArb->globalTimeoutTimer == 0) {
1809 vbto = 0;
1810 } else {
1811 vbto = 1;
1812 while ((16 * (1 << (vbto - 1))) < vmeArb->globalTimeoutTimer)
1813 vbto += 1;
1814 }
1815 temp_ctl |= (vbto << 28);
1816
1817 if (vmeArb->arbiterMode == VME_PRIORITY_MODE)
1818 temp_ctl |= 1 << 26;
1819
1820 if (vmeArb->arbiterTimeoutFlag)
1821 temp_ctl |= 2 << 24;
1822
1823 iowrite32(temp_ctl, ca91cx42_bridge->base + MISC_CTL);
1824 return 0;
1825 }
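
/*
 * Worked example (illustrative): globalTimeoutTimer = 100 selects the
 * smallest vbto with 16 * 2^(vbto - 1) >= 100, i.e. vbto = 4 (16, 32, 64,
 * 128), which is then shifted into MISC_CTL at bit 28.
 */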
1826
1827 int ca91cx42_get_arbiter(vmeArbiterCfg_t *vmeArb)
1828 {
1829 int temp_ctl = 0;
1830 int vbto = 0;
1831
1832 temp_ctl = ioread32(ca91cx42_bridge->base + MISC_CTL);
1833
1834 vbto = (temp_ctl >> 28) & 0xF;
1835 if (vbto != 0)
1836 vmeArb->globalTimeoutTimer = (16 * (1 << (vbto - 1)));
1837
1838 if (temp_ctl & (1 << 26))
1839 vmeArb->arbiterMode = VME_PRIORITY_MODE;
1840 else
1841 vmeArb->arbiterMode = VME_R_ROBIN_MODE;
1842
1843 if (temp_ctl & (3 << 24))
1844 vmeArb->arbiterTimeoutFlag = 1;
1845
1846 return 0;
1847 }
1848
1849 int ca91cx42_set_requestor(vmeRequesterCfg_t *vmeReq)
1850 {
1851 int temp_ctl = 0;
1852
1853 temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
1854 temp_ctl &= 0xFF0FFFFF;
1855
1856 if (vmeReq->releaseMode == 1)
1857 temp_ctl |= (1 << 20);
1858
1859 if (vmeReq->fairMode == 1)
1860 temp_ctl |= (1 << 21);
1861
1862 temp_ctl |= (vmeReq->requestLevel << 22);
1863
1864 iowrite32(temp_ctl, ca91cx42_bridge->base + MAST_CTL);
1865 return 0;
1866 }
1867
1868 int ca91cx42_get_requestor(vmeRequesterCfg_t *vmeReq)
1869 {
1870 int temp_ctl = 0;
1871
1872 temp_ctl = ioread32(ca91cx42_bridge->base + MAST_CTL);
1873
1874 if (temp_ctl & (1 << 20))
1875 vmeReq->releaseMode = 1;
1876
1877 if (temp_ctl & (1 << 21))
1878 vmeReq->fairMode = 1;
1879
1880 vmeReq->requestLevel = (temp_ctl & 0xC00000) >> 22;
1881
1882 return 0;
1883 }
1884
1885
1886 #endif