Staging: vme: Synchronize interrupts before removing callback
1 /*
2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
3 *
4 * Author: Martyn Welch <martyn.welch@gefanuc.com>
5 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16 #include <linux/version.h>
17 #include <linux/module.h>
18 #include <linux/moduleparam.h>
19 #include <linux/mm.h>
20 #include <linux/types.h>
21 #include <linux/errno.h>
22 #include <linux/proc_fs.h>
23 #include <linux/pci.h>
24 #include <linux/poll.h>
25 #include <linux/dma-mapping.h>
26 #include <linux/interrupt.h>
27 #include <linux/spinlock.h>
28 #include <asm/time.h>
29 #include <asm/io.h>
30 #include <asm/uaccess.h>
31
32 #include "../vme.h"
33 #include "../vme_bridge.h"
34 #include "vme_tsi148.h"
35
36 static int __init tsi148_init(void);
37 static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
38 static void tsi148_remove(struct pci_dev *);
39 static void __exit tsi148_exit(void);
40
41
42 int tsi148_slave_set(struct vme_slave_resource *, int, unsigned long long,
43 unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
44 int tsi148_slave_get(struct vme_slave_resource *, int *, unsigned long long *,
45 unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
46
47 int tsi148_master_get(struct vme_master_resource *, int *, unsigned long long *,
48 unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
49 int tsi148_master_set(struct vme_master_resource *, int, unsigned long long,
50 unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
51 ssize_t tsi148_master_read(struct vme_master_resource *, void *, size_t,
52 loff_t);
53 ssize_t tsi148_master_write(struct vme_master_resource *, void *, size_t,
54 loff_t);
55 unsigned int tsi148_master_rmw(struct vme_master_resource *, unsigned int,
56 unsigned int, unsigned int, loff_t);
57 int tsi148_dma_list_add (struct vme_dma_list *, struct vme_dma_attr *,
58 struct vme_dma_attr *, size_t);
59 int tsi148_dma_list_exec(struct vme_dma_list *);
60 int tsi148_dma_list_empty(struct vme_dma_list *);
61 int tsi148_generate_irq(int, int);
62 int tsi148_lm_set(unsigned long long, vme_address_t, vme_cycle_t);
63 int tsi148_lm_get(unsigned long long *, vme_address_t *, vme_cycle_t *);
64 int tsi148_lm_attach(int, void (*callback)(int));
65 int tsi148_lm_detach(int);
66 int tsi148_slot_get(void);
67
68 /* Module parameter */
69 int err_chk = 0;
70
71 /* XXX These should all be in a per device structure */
72 struct vme_bridge *tsi148_bridge;
73 wait_queue_head_t dma_queue[2];
74 wait_queue_head_t iack_queue;
75 void (*lm_callback[4])(int); /* Called in interrupt handler, be careful! */
76 void *crcsr_kernel;
77 dma_addr_t crcsr_bus;
78 struct vme_master_resource *flush_image;
79 struct semaphore vme_rmw; /* Only one RMW cycle at a time */
80 struct semaphore vme_int; /*
81 * Only one VME interrupt can be
82 * generated at a time, provide locking
83 */
84 struct semaphore vme_irq; /* Locking for VME irq callback configuration */
85 struct semaphore vme_lm; /* Locking for location monitor operations */
86
87
88 static char driver_name[] = "vme_tsi148";
89
90 static struct pci_device_id tsi148_ids[] = {
91 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
92 { },
93 };
94
95 static struct pci_driver tsi148_driver = {
96 .name = driver_name,
97 .id_table = tsi148_ids,
98 .probe = tsi148_probe,
99 .remove = tsi148_remove,
100 };
101
102 static void reg_join(unsigned int high, unsigned int low,
103 unsigned long long *variable)
104 {
105 *variable = (unsigned long long)high << 32;
106 *variable |= (unsigned long long)low;
107 }
108
109 static void reg_split(unsigned long long variable, unsigned int *high,
110 unsigned int *low)
111 {
112 *low = (unsigned int)variable & 0xFFFFFFFF;
113 *high = (unsigned int)(variable >> 32);
114 }
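/*
 * The two helpers above split and re-join the 64-bit values used for VME
 * addresses, bounds and offsets into the high/low 32-bit register pairs
 * expected by the TSI148. A minimal illustration (values arbitrary):
 *
 *	unsigned int hi, lo;
 *	unsigned long long out;
 *
 *	reg_split(0x0000000123456789ULL, &hi, &lo);	hi = 0x1, lo = 0x23456789
 *	reg_join(hi, lo, &out);				out = 0x123456789
 */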
115
116 /*
117 * Wakes up DMA queue.
118 */
119 static u32 tsi148_DMA_irqhandler(int channel_mask)
120 {
121 u32 serviced = 0;
122
123 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
124 wake_up(&dma_queue[0]);
125 serviced |= TSI148_LCSR_INTC_DMA0C;
126 }
127 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
128 wake_up(&dma_queue[1]);
129 serviced |= TSI148_LCSR_INTC_DMA1C;
130 }
131
132 return serviced;
133 }
134
135 /*
136  * Call the location monitor callbacks for any monitors that have triggered.
137 */
138 static u32 tsi148_LM_irqhandler(u32 stat)
139 {
140 int i;
141 u32 serviced = 0;
142
143 for (i = 0; i < 4; i++) {
144 if(stat & TSI148_LCSR_INTS_LMS[i]) {
145 /* We only enable interrupts if the callback is set */
146 lm_callback[i](i);
147 serviced |= TSI148_LCSR_INTC_LMC[i];
148 }
149 }
150
151 return serviced;
152 }
153
154 /*
155  * Handle mailbox interrupts, reporting the value received.
156  *
157  * XXX This functionality is not exposed up through the API.
158 */
159 static u32 tsi148_MB_irqhandler(u32 stat)
160 {
161 int i;
162 u32 val;
163 u32 serviced = 0;
164
165 for (i = 0; i < 4; i++) {
166 if(stat & TSI148_LCSR_INTS_MBS[i]) {
167 val = ioread32be(tsi148_bridge->base +
168 TSI148_GCSR_MBOX[i]);
169 printk("VME Mailbox %d received: 0x%x\n", i, val);
170 serviced |= TSI148_LCSR_INTC_MBC[i];
171 }
172 }
173
174 return serviced;
175 }
176
177 /*
178 * Display error & status message when PERR (PCI) exception interrupt occurs.
179 */
180 static u32 tsi148_PERR_irqhandler(void)
181 {
182 printk(KERN_ERR
183 "PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
184 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAU),
185 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAL),
186 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAT)
187 );
188 printk(KERN_ERR
189 "PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
190 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPXA),
191 ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPXS)
192 );
193
194 iowrite32be(TSI148_LCSR_EDPAT_EDPCL,
195 tsi148_bridge->base + TSI148_LCSR_EDPAT);
196
197 return TSI148_LCSR_INTC_PERRC;
198 }
199
200 /*
201 * Save address and status when VME error interrupt occurs.
202 */
203 static u32 tsi148_VERR_irqhandler(void)
204 {
205 unsigned int error_addr_high, error_addr_low;
206 unsigned long long error_addr;
207 u32 error_attrib;
208 struct vme_bus_error *error;
209
210 error_addr_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAU);
211 error_addr_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAL);
212 error_attrib = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAT);
213
214 reg_join(error_addr_high, error_addr_low, &error_addr);
215
216 /* Check for exception register overflow (we have lost error data) */
217 if(error_attrib & TSI148_LCSR_VEAT_VEOF) {
218 printk(KERN_ERR "VME Bus Exception Overflow Occurred\n");
219 }
220
221 error = (struct vme_bus_error *)kmalloc(sizeof (struct vme_bus_error),
222 GFP_ATOMIC);
223 if (error) {
224 error->address = error_addr;
225 error->attributes = error_attrib;
226 list_add_tail(&(error->list), &(tsi148_bridge->vme_errors));
227 } else {
228 printk(KERN_ERR
229 "Unable to alloc memory for VMEbus Error reporting\n");
230 printk(KERN_ERR
231 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
232 error_addr, error_attrib);
233 }
234
235 /* Clear Status */
236 iowrite32be(TSI148_LCSR_VEAT_VESCL,
237 tsi148_bridge->base + TSI148_LCSR_VEAT);
238
239 return TSI148_LCSR_INTC_VERRC;
240 }
241
242 /*
243 * Wake up IACK queue.
244 */
245 static u32 tsi148_IACK_irqhandler(void)
246 {
247 printk("tsi148_IACK_irqhandler\n");
248 wake_up(&iack_queue);
249
250 return TSI148_LCSR_INTC_IACKC;
251 }
252
253 /*
254  * Call the VME bus interrupt callback if one is provided.
255 */
256 static u32 tsi148_VIRQ_irqhandler(u32 stat)
257 {
258 int vec, i, serviced = 0;
259 void (*call)(int, int, void *);
260 void *priv_data;
261
262 for (i = 7; i > 0; i--) {
263 if (stat & (1 << i)) {
264 /*
265 * Note: Even though the registers are defined
266 * as 32-bits in the spec, we only want to issue
267 * 8-bit IACK cycles on the bus, read from offset
268 * 3.
269 */
270 vec = ioread8(tsi148_bridge->base +
271 TSI148_LCSR_VIACK[i] + 3);
272
273 call = tsi148_bridge->irq[i - 1].callback[vec].func;
274 priv_data =
275 tsi148_bridge->irq[i-1].callback[vec].priv_data;
276
277 if (call != NULL)
278 call(i, vec, priv_data);
279 else
280 				printk("Spurious VME interrupt, level:%x, "
281 "vector:%x\n", i, vec);
282
283 serviced |= (1 << i);
284 }
285 }
286
287 return serviced;
288 }
289
290 /*
291  * Top level interrupt handler. Calls the appropriate sub handler(s) for any
292  * active unmasked interrupts and then clears the serviced status bits.
293 */
294 static irqreturn_t tsi148_irqhandler(int irq, void *dev_id)
295 {
296 u32 stat, enable, serviced = 0;
297
298 /* Determine which interrupts are unmasked and set */
299 enable = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
300 stat = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTS);
301
302 /* Only look at unmasked interrupts */
303 stat &= enable;
304
305 if (unlikely(!stat)) {
306 return IRQ_NONE;
307 }
308
309 /* Call subhandlers as appropriate */
310 /* DMA irqs */
311 if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
312 serviced |= tsi148_DMA_irqhandler(stat);
313
314 /* Location monitor irqs */
315 if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
316 TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
317 serviced |= tsi148_LM_irqhandler(stat);
318
319 /* Mail box irqs */
320 if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
321 TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
322 serviced |= tsi148_MB_irqhandler(stat);
323
324 /* PCI bus error */
325 if (stat & TSI148_LCSR_INTS_PERRS)
326 serviced |= tsi148_PERR_irqhandler();
327
328 /* VME bus error */
329 if (stat & TSI148_LCSR_INTS_VERRS)
330 serviced |= tsi148_VERR_irqhandler();
331
332 /* IACK irq */
333 if (stat & TSI148_LCSR_INTS_IACKS)
334 serviced |= tsi148_IACK_irqhandler();
335
336 /* VME bus irqs */
337 if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
338 TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
339 TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
340 TSI148_LCSR_INTS_IRQ1S))
341 serviced |= tsi148_VIRQ_irqhandler(stat);
342
343 /* Clear serviced interrupts */
344 iowrite32be(serviced, tsi148_bridge->base + TSI148_LCSR_INTC);
345
346 return IRQ_HANDLED;
347 }
348
349 static int tsi148_irq_init(struct vme_bridge *bridge)
350 {
351 int result;
352 unsigned int tmp;
353 struct pci_dev *pdev;
354
355 /* Need pdev */
356 pdev = container_of(bridge->parent, struct pci_dev, dev);
357
358 /* Initialise list for VME bus errors */
359 INIT_LIST_HEAD(&(bridge->vme_errors));
360
361 result = request_irq(pdev->irq,
362 tsi148_irqhandler,
363 IRQF_SHARED,
364 driver_name, pdev);
365 if (result) {
366 dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
367 pdev->irq);
368 return result;
369 }
370
371 /* Enable and unmask interrupts */
372 tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
373 TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
374 TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
375 TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
376 TSI148_LCSR_INTEO_IACKEO;
377
378 /* XXX This leaves the following interrupts masked.
379 * TSI148_LCSR_INTEO_VIEEO
380 * TSI148_LCSR_INTEO_SYSFLEO
381 * TSI148_LCSR_INTEO_ACFLEO
382 */
383
384 /* Don't enable Location Monitor interrupts here - they will be
385 * enabled when the location monitors are properly configured and
386 * a callback has been attached.
387 * TSI148_LCSR_INTEO_LM0EO
388 * TSI148_LCSR_INTEO_LM1EO
389 * TSI148_LCSR_INTEO_LM2EO
390 * TSI148_LCSR_INTEO_LM3EO
391 */
392
393 	/* Don't enable VME interrupts until a handler has been attached,
394 	 * otherwise the bridge would respond to interrupts that nothing yet
395 	 * knows how to deal with.
396 * TSI148_LCSR_INTEO_IRQ7EO
397 * TSI148_LCSR_INTEO_IRQ6EO
398 * TSI148_LCSR_INTEO_IRQ5EO
399 * TSI148_LCSR_INTEO_IRQ4EO
400 * TSI148_LCSR_INTEO_IRQ3EO
401 * TSI148_LCSR_INTEO_IRQ2EO
402 * TSI148_LCSR_INTEO_IRQ1EO
403 */
404
405 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
406 iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);
407
408 return 0;
409 }
410
411 static void tsi148_irq_exit(struct pci_dev *pdev)
412 {
413 /* Turn off interrupts */
414 iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEO);
415 iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEN);
416
417 /* Clear all interrupts */
418 iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_INTC);
419
420 /* Detach interrupt handler */
421 free_irq(pdev->irq, pdev);
422 }
423
424 /*
425  * Check to see if an IACK has been received, return true (1) or false (0).
426 */
427 int tsi148_iack_received(void)
428 {
429 u32 tmp;
430
431 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR);
432
433 if (tmp & TSI148_LCSR_VICR_IRQS)
434 return 0;
435 else
436 return 1;
437 }
438
439 /*
440  * Set up a VME interrupt
441 */
442 int tsi148_request_irq(int level, int statid,
443 void (*callback)(int level, int vector, void *priv_data),
444 void *priv_data)
445 {
446 u32 tmp;
447
448 /* Get semaphore */
449 down(&(vme_irq));
450
451 if(tsi148_bridge->irq[level - 1].callback[statid].func) {
452 up(&(vme_irq));
453 printk("VME Interrupt already taken\n");
454 return -EBUSY;
455 }
456
457
458 tsi148_bridge->irq[level - 1].count++;
459 tsi148_bridge->irq[level - 1].callback[statid].priv_data = priv_data;
460 tsi148_bridge->irq[level - 1].callback[statid].func = callback;
461
462 /* Enable IRQ level */
463 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
464 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
465 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
466
467 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
468 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
469 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
470
471 /* Release semaphore */
472 up(&(vme_irq));
473
474 return 0;
475 }
476
477 /*
478 * Free VME interrupt
479 */
480 void tsi148_free_irq(int level, int statid)
481 {
482 u32 tmp;
483 struct pci_dev *pdev;
484
485 /* Get semaphore */
486 down(&(vme_irq));
487
488 tsi148_bridge->irq[level - 1].count--;
489
490 	/* Disable IRQ level if no more interrupts attached at this level */
491 if (tsi148_bridge->irq[level - 1].count == 0) {
492 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
493 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
494 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
495
496 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
497 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
498 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
499
500 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
501
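		/*
		 * Make sure any handler still running on another CPU has
		 * finished with the old callback before it is cleared below.
		 */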
502 synchronize_irq(pdev->irq);
503 }
504
505 tsi148_bridge->irq[level - 1].callback[statid].func = NULL;
506 tsi148_bridge->irq[level - 1].callback[statid].priv_data = NULL;
507
508 /* Release semaphore */
509 up(&(vme_irq));
510 }
511
512 /*
513 * Generate a VME bus interrupt at the requested level & vector. Wait for
514 * interrupt to be acked.
515 *
516  * Only one interrupt can be generated at a time - a semaphore enforces this.
517 */
518 int tsi148_generate_irq(int level, int statid)
519 {
520 u32 tmp;
521
522 /* Get semaphore */
523 down(&(vme_int));
524
525 /* Read VICR register */
526 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR);
527
528 /* Set Status/ID */
529 tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
530 (statid & TSI148_LCSR_VICR_STID_M);
531 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VICR);
532
533 /* Assert VMEbus IRQ */
534 tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
535 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VICR);
536
537 /* XXX Consider implementing a timeout? */
538 wait_event_interruptible(iack_queue, tsi148_iack_received());
539
540 /* Release semaphore */
541 up(&(vme_int));
542
543 return 0;
544 }
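/*
 * Illustrative only (not part of the driver): a user of this bridge that
 * wants to catch level 3 interrupts with status/ID 0x20, and to generate
 * one itself, might do something like the following. The callback name is
 * hypothetical.
 *
 *	static void my_handler(int level, int vector, void *priv_data)
 *	{
 *		printk("VME IRQ level %d, vector 0x%x\n", level, vector);
 *	}
 *
 *	tsi148_request_irq(3, 0x20, my_handler, NULL);
 *	tsi148_generate_irq(3, 0x20);
 *	...
 *	tsi148_free_irq(3, 0x20);
 */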
545
546 /*
547 * Find the first error in this address range
548 */
549 static struct vme_bus_error *tsi148_find_error(vme_address_t aspace,
550 unsigned long long address, size_t count)
551 {
552 struct list_head *err_pos;
553 struct vme_bus_error *vme_err, *valid = NULL;
554 unsigned long long bound;
555
556 bound = address + count;
557
558 /*
559 * XXX We are currently not looking at the address space when parsing
560 * for errors. This is because parsing the Address Modifier Codes
561 * is going to be quite resource intensive to do properly. We
562 * should be OK just looking at the addresses and this is certainly
563 * much better than what we had before.
564 */
565 err_pos = NULL;
566 /* Iterate through errors */
567 list_for_each(err_pos, &(tsi148_bridge->vme_errors)) {
568 vme_err = list_entry(err_pos, struct vme_bus_error, list);
569 if((vme_err->address >= address) && (vme_err->address < bound)){
570 valid = vme_err;
571 break;
572 }
573 }
574
575 return valid;
576 }
577
578 /*
579 * Clear errors in the provided address range.
580 */
581 static void tsi148_clear_errors(vme_address_t aspace,
582 unsigned long long address, size_t count)
583 {
584 struct list_head *err_pos, *temp;
585 struct vme_bus_error *vme_err;
586 unsigned long long bound;
587
588 bound = address + count;
589
590 /*
591 * XXX We are currently not looking at the address space when parsing
592 * for errors. This is because parsing the Address Modifier Codes
593 * is going to be quite resource intensive to do properly. We
594 * should be OK just looking at the addresses and this is certainly
595 * much better than what we had before.
596 */
597 err_pos = NULL;
598 /* Iterate through errors */
599 list_for_each_safe(err_pos, temp, &(tsi148_bridge->vme_errors)) {
600 vme_err = list_entry(err_pos, struct vme_bus_error, list);
601
602 if((vme_err->address >= address) && (vme_err->address < bound)){
603 list_del(err_pos);
604 kfree(vme_err);
605 }
606 }
607 }
608
609 /*
610 * Initialize a slave window with the requested attributes.
611 */
612 int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
613 unsigned long long vme_base, unsigned long long size,
614 dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
615 {
616 unsigned int i, addr = 0, granularity = 0;
617 unsigned int temp_ctl = 0;
618 unsigned int vme_base_low, vme_base_high;
619 unsigned int vme_bound_low, vme_bound_high;
620 unsigned int pci_offset_low, pci_offset_high;
621 unsigned long long vme_bound, pci_offset;
622
623 #if 0
624 printk("Set slave image %d to:\n", image->number);
625 printk("\tEnabled: %s\n", (enabled == 1)? "yes" : "no");
626 printk("\tVME Base:0x%llx\n", vme_base);
627 printk("\tWindow Size:0x%llx\n", size);
628 printk("\tPCI Base:0x%lx\n", (unsigned long)pci_base);
629 printk("\tAddress Space:0x%x\n", aspace);
630 printk("\tTransfer Cycle Properties:0x%x\n", cycle);
631 #endif
632
633 i = image->number;
634
635 switch (aspace) {
636 case VME_A16:
637 granularity = 0x10;
638 addr |= TSI148_LCSR_ITAT_AS_A16;
639 break;
640 case VME_A24:
641 granularity = 0x1000;
642 addr |= TSI148_LCSR_ITAT_AS_A24;
643 break;
644 case VME_A32:
645 granularity = 0x10000;
646 addr |= TSI148_LCSR_ITAT_AS_A32;
647 break;
648 case VME_A64:
649 granularity = 0x10000;
650 addr |= TSI148_LCSR_ITAT_AS_A64;
651 break;
652 case VME_CRCSR:
653 case VME_USER1:
654 case VME_USER2:
655 case VME_USER3:
656 case VME_USER4:
657 default:
658 printk("Invalid address space\n");
659 return -EINVAL;
660 break;
661 }
662
663 /* Convert 64-bit variables to 2x 32-bit variables */
664 reg_split(vme_base, &vme_base_high, &vme_base_low);
665
666 /*
667 * Bound address is a valid address for the window, adjust
668 * accordingly
669 */
670 vme_bound = vme_base + size - granularity;
671 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
672 pci_offset = (unsigned long long)pci_base - vme_base;
673 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
674
675 if (vme_base_low & (granularity - 1)) {
676 printk("Invalid VME base alignment\n");
677 return -EINVAL;
678 }
679 if (vme_bound_low & (granularity - 1)) {
680 printk("Invalid VME bound alignment\n");
681 return -EINVAL;
682 }
683 if (pci_offset_low & (granularity - 1)) {
684 printk("Invalid PCI Offset alignment\n");
685 return -EINVAL;
686 }
687
688 #if 0
689 printk("\tVME Bound:0x%llx\n", vme_bound);
690 printk("\tPCI Offset:0x%llx\n", pci_offset);
691 #endif
692
693 /* Disable while we are mucking around */
694 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
695 TSI148_LCSR_OFFSET_ITAT);
696 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
697 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
698 TSI148_LCSR_OFFSET_ITAT);
699
700 /* Setup mapping */
701 iowrite32be(vme_base_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
702 TSI148_LCSR_OFFSET_ITSAU);
703 iowrite32be(vme_base_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
704 TSI148_LCSR_OFFSET_ITSAL);
705 iowrite32be(vme_bound_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
706 TSI148_LCSR_OFFSET_ITEAU);
707 iowrite32be(vme_bound_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
708 TSI148_LCSR_OFFSET_ITEAL);
709 iowrite32be(pci_offset_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
710 TSI148_LCSR_OFFSET_ITOFU);
711 iowrite32be(pci_offset_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
712 TSI148_LCSR_OFFSET_ITOFL);
713
714 /* XXX Prefetch stuff currently unsupported */
715 #if 0
716
717 for (x = 0; x < 4; x++) {
718 if ((64 << x) >= vmeIn->prefetchSize) {
719 break;
720 }
721 }
722 if (x == 4)
723 x--;
724 temp_ctl |= (x << 16);
725
726 	if (vmeIn->prefetchThreshold)
728 temp_ctl |= 0x40000;
729 #endif
730
731 /* Setup 2eSST speeds */
732 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
733 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
734 case VME_2eSST160:
735 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
736 break;
737 case VME_2eSST267:
738 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
739 break;
740 case VME_2eSST320:
741 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
742 break;
743 }
744
745 /* Setup cycle types */
746 temp_ctl &= ~(0x1F << 7);
747 if (cycle & VME_BLT)
748 temp_ctl |= TSI148_LCSR_ITAT_BLT;
749 if (cycle & VME_MBLT)
750 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
751 if (cycle & VME_2eVME)
752 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
753 if (cycle & VME_2eSST)
754 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
755 if (cycle & VME_2eSSTB)
756 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
757
758 /* Setup address space */
759 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
760 temp_ctl |= addr;
761
762 temp_ctl &= ~0xF;
763 if (cycle & VME_SUPER)
764 temp_ctl |= TSI148_LCSR_ITAT_SUPR ;
765 if (cycle & VME_USER)
766 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
767 if (cycle & VME_PROG)
768 temp_ctl |= TSI148_LCSR_ITAT_PGM;
769 if (cycle & VME_DATA)
770 temp_ctl |= TSI148_LCSR_ITAT_DATA;
771
772 /* Write ctl reg without enable */
773 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
774 TSI148_LCSR_OFFSET_ITAT);
775
776 if (enabled)
777 temp_ctl |= TSI148_LCSR_ITAT_EN;
778
779 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
780 TSI148_LCSR_OFFSET_ITAT);
781
782 return 0;
783 }
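/*
 * Illustrative only: mapping a 64kB A24 slave window at VME address
 * 0x400000 onto a bus address obtained from a coherent allocation
 * (buf_bus is hypothetical) might look like:
 *
 *	tsi148_slave_set(image, 1, 0x400000, 0x10000, buf_bus,
 *		VME_A24, VME_USER | VME_DATA);
 *
 * The VME base, window size and bus address must all be aligned to the
 * granularity of the chosen address space (0x1000 for A24).
 */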
784
785 /*
786 * Get slave window configuration.
787 *
788 * XXX Prefetch currently unsupported.
789 */
790 int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
791 unsigned long long *vme_base, unsigned long long *size,
792 dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
793 {
794 unsigned int i, granularity = 0, ctl = 0;
795 unsigned int vme_base_low, vme_base_high;
796 unsigned int vme_bound_low, vme_bound_high;
797 unsigned int pci_offset_low, pci_offset_high;
798 unsigned long long vme_bound, pci_offset;
799
800
801 i = image->number;
802
803 /* Read registers */
804 ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
805 TSI148_LCSR_OFFSET_ITAT);
806
807 vme_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
808 TSI148_LCSR_OFFSET_ITSAU);
809 vme_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
810 TSI148_LCSR_OFFSET_ITSAL);
811 vme_bound_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
812 TSI148_LCSR_OFFSET_ITEAU);
813 vme_bound_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
814 TSI148_LCSR_OFFSET_ITEAL);
815 pci_offset_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
816 TSI148_LCSR_OFFSET_ITOFU);
817 pci_offset_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
818 TSI148_LCSR_OFFSET_ITOFL);
819
820 /* Convert 64-bit variables to 2x 32-bit variables */
821 reg_join(vme_base_high, vme_base_low, vme_base);
822 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
823 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
824
825 	*pci_base = (dma_addr_t)*vme_base + pci_offset;
826
827 *enabled = 0;
828 *aspace = 0;
829 *cycle = 0;
830
831 if (ctl & TSI148_LCSR_ITAT_EN)
832 *enabled = 1;
833
834 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
835 granularity = 0x10;
836 *aspace |= VME_A16;
837 }
838 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
839 granularity = 0x1000;
840 *aspace |= VME_A24;
841 }
842 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
843 granularity = 0x10000;
844 *aspace |= VME_A32;
845 }
846 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
847 granularity = 0x10000;
848 *aspace |= VME_A64;
849 }
850
851 /* Need granularity before we set the size */
852 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
853
854
855 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
856 *cycle |= VME_2eSST160;
857 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
858 *cycle |= VME_2eSST267;
859 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
860 *cycle |= VME_2eSST320;
861
862 if (ctl & TSI148_LCSR_ITAT_BLT)
863 *cycle |= VME_BLT;
864 if (ctl & TSI148_LCSR_ITAT_MBLT)
865 *cycle |= VME_MBLT;
866 if (ctl & TSI148_LCSR_ITAT_2eVME)
867 *cycle |= VME_2eVME;
868 if (ctl & TSI148_LCSR_ITAT_2eSST)
869 *cycle |= VME_2eSST;
870 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
871 *cycle |= VME_2eSSTB;
872
873 if (ctl & TSI148_LCSR_ITAT_SUPR)
874 *cycle |= VME_SUPER;
875 if (ctl & TSI148_LCSR_ITAT_NPRIV)
876 *cycle |= VME_USER;
877 if (ctl & TSI148_LCSR_ITAT_PGM)
878 *cycle |= VME_PROG;
879 if (ctl & TSI148_LCSR_ITAT_DATA)
880 *cycle |= VME_DATA;
881
882 return 0;
883 }
884
885 /*
886 * Allocate and map PCI Resource
887 */
888 static int tsi148_alloc_resource(struct vme_master_resource *image,
889 unsigned long long size)
890 {
891 unsigned long long existing_size;
892 int retval = 0;
893 struct pci_dev *pdev;
894
895 /* Find pci_dev container of dev */
896 if (tsi148_bridge->parent == NULL) {
897 printk("Dev entry NULL\n");
898 return -EINVAL;
899 }
900 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
901
902 existing_size = (unsigned long long)(image->pci_resource.end -
903 image->pci_resource.start);
904
905 /* If the existing size is OK, return */
906 if (existing_size == (size - 1))
907 return 0;
908
909 if (existing_size != 0) {
910 iounmap(image->kern_base);
911 image->kern_base = NULL;
912 if (image->pci_resource.name != NULL)
913 kfree(image->pci_resource.name);
914 release_resource(&(image->pci_resource));
915 memset(&(image->pci_resource), 0, sizeof(struct resource));
916 }
917
918 if (image->pci_resource.name == NULL) {
919 image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
920 if (image->pci_resource.name == NULL) {
921 printk(KERN_ERR "Unable to allocate memory for resource"
922 " name\n");
923 retval = -ENOMEM;
924 goto err_name;
925 }
926 }
927
928 sprintf((char *)image->pci_resource.name, "%s.%d", tsi148_bridge->name,
929 image->number);
930
931 image->pci_resource.start = 0;
932 image->pci_resource.end = (unsigned long)size;
933 image->pci_resource.flags = IORESOURCE_MEM;
934
935 retval = pci_bus_alloc_resource(pdev->bus,
936 &(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
937 0, NULL, NULL);
938 if (retval) {
939 printk(KERN_ERR "Failed to allocate mem resource for "
940 "window %d size 0x%lx start 0x%lx\n",
941 image->number, (unsigned long)size,
942 (unsigned long)image->pci_resource.start);
943 goto err_resource;
944 }
945
946 image->kern_base = ioremap_nocache(
947 image->pci_resource.start, size);
948 if (image->kern_base == NULL) {
949 printk(KERN_ERR "Failed to remap resource\n");
950 retval = -ENOMEM;
951 goto err_remap;
952 }
953
954 return 0;
955
956 iounmap(image->kern_base);
957 image->kern_base = NULL;
958 err_remap:
959 release_resource(&(image->pci_resource));
960 err_resource:
961 kfree(image->pci_resource.name);
962 memset(&(image->pci_resource), 0, sizeof(struct resource));
963 err_name:
964 return retval;
965 }
966
967 /*
968 * Free and unmap PCI Resource
969 */
970 static void tsi148_free_resource(struct vme_master_resource *image)
971 {
972 iounmap(image->kern_base);
973 image->kern_base = NULL;
974 release_resource(&(image->pci_resource));
975 kfree(image->pci_resource.name);
976 memset(&(image->pci_resource), 0, sizeof(struct resource));
977 }
978
979 /*
980 * Set the attributes of an outbound window.
981 */
982 int tsi148_master_set( struct vme_master_resource *image, int enabled,
983 unsigned long long vme_base, unsigned long long size,
984 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
985 {
986 int retval = 0;
987 unsigned int i;
988 unsigned int temp_ctl = 0;
989 unsigned int pci_base_low, pci_base_high;
990 unsigned int pci_bound_low, pci_bound_high;
991 unsigned int vme_offset_low, vme_offset_high;
992 unsigned long long pci_bound, vme_offset, pci_base;
993
994 /* Verify input data */
995 if (vme_base & 0xFFFF) {
996 printk("Invalid VME Window alignment\n");
997 retval = -EINVAL;
998 goto err_window;
999 }
1000 if (size < 0x10000) {
1001 printk("Invalid VME Window size\n");
1002 retval = -EINVAL;
1003 goto err_window;
1004 }
1005
1006 spin_lock(&(image->lock));
1007
1008 /* Let's allocate the resource here rather than further up the stack as
1009 	 * it avoids pushing loads of bus dependent stuff up the stack
1010 */
1011 retval = tsi148_alloc_resource(image, size);
1012 if (retval) {
1013 spin_unlock(&(image->lock));
1014 printk(KERN_ERR "Unable to allocate memory for resource "
1015 "name\n");
1016 retval = -ENOMEM;
1017 goto err_res;
1018 }
1019
1020 pci_base = (unsigned long long)image->pci_resource.start;
1021
1022
1023 /*
1024 * Bound address is a valid address for the window, adjust
1025 * according to window granularity.
1026 */
1027 pci_bound = pci_base + (size - 0x10000);
1028 vme_offset = vme_base - pci_base;
1029
1030 /* Convert 64-bit variables to 2x 32-bit variables */
1031 reg_split(pci_base, &pci_base_high, &pci_base_low);
1032 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
1033 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
1034
1035 if (pci_base_low & 0xFFFF) {
1036 spin_unlock(&(image->lock));
1037 printk("Invalid PCI base alignment\n");
1038 retval = -EINVAL;
1039 goto err_gran;
1040 }
1041 if (pci_bound_low & 0xFFFF) {
1042 spin_unlock(&(image->lock));
1043 printk("Invalid PCI bound alignment\n");
1044 retval = -EINVAL;
1045 goto err_gran;
1046 }
1047 if (vme_offset_low & 0xFFFF) {
1048 spin_unlock(&(image->lock));
1049 printk("Invalid VME Offset alignment\n");
1050 retval = -EINVAL;
1051 goto err_gran;
1052 }
1053
1054 i = image->number;
1055
1056 /* Disable while we are mucking around */
1057 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1058 TSI148_LCSR_OFFSET_OTAT);
1059 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
1060 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1061 TSI148_LCSR_OFFSET_OTAT);
1062
1063 /* XXX Prefetch stuff currently unsupported */
1064 #if 0
1065 if (vmeOut->prefetchEnable) {
1066 temp_ctl |= 0x40000;
1067 for (x = 0; x < 4; x++) {
1068 if ((2 << x) >= vmeOut->prefetchSize)
1069 break;
1070 }
1071 if (x == 4)
1072 x = 3;
1073 temp_ctl |= (x << 16);
1074 }
1075 #endif
1076
1077 /* Setup 2eSST speeds */
1078 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
1079 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1080 case VME_2eSST160:
1081 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
1082 break;
1083 case VME_2eSST267:
1084 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1085 break;
1086 case VME_2eSST320:
1087 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1088 break;
1089 }
1090
1091 /* Setup cycle types */
1092 if (cycle & VME_BLT) {
1093 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1094 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1095 }
1096 if (cycle & VME_MBLT) {
1097 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1098 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1099 }
1100 if (cycle & VME_2eVME) {
1101 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1102 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1103 }
1104 if (cycle & VME_2eSST) {
1105 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1106 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1107 }
1108 if (cycle & VME_2eSSTB) {
1109 printk("Currently not setting Broadcast Select Registers\n");
1110 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1111 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1112 }
1113
1114 /* Setup data width */
1115 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1116 switch (dwidth) {
1117 case VME_D16:
1118 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1119 break;
1120 case VME_D32:
1121 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1122 break;
1123 default:
1124 spin_unlock(&(image->lock));
1125 printk("Invalid data width\n");
1126 retval = -EINVAL;
1127 goto err_dwidth;
1128 }
1129
1130 /* Setup address space */
1131 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1132 switch (aspace) {
1133 case VME_A16:
1134 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1135 break;
1136 case VME_A24:
1137 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1138 break;
1139 case VME_A32:
1140 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1141 break;
1142 case VME_A64:
1143 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1144 break;
1145 case VME_CRCSR:
1146 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1147 break;
1148 case VME_USER1:
1149 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1150 break;
1151 case VME_USER2:
1152 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1153 break;
1154 case VME_USER3:
1155 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1156 break;
1157 case VME_USER4:
1158 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1159 break;
1160 default:
1161 spin_unlock(&(image->lock));
1162 printk("Invalid address space\n");
1163 retval = -EINVAL;
1164 goto err_aspace;
1165 break;
1166 }
1167
1168 temp_ctl &= ~(3<<4);
1169 if (cycle & VME_SUPER)
1170 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1171 if (cycle & VME_PROG)
1172 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1173
1174 /* Setup mapping */
1175 iowrite32be(pci_base_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1176 TSI148_LCSR_OFFSET_OTSAU);
1177 iowrite32be(pci_base_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1178 TSI148_LCSR_OFFSET_OTSAL);
1179 iowrite32be(pci_bound_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1180 TSI148_LCSR_OFFSET_OTEAU);
1181 iowrite32be(pci_bound_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1182 TSI148_LCSR_OFFSET_OTEAL);
1183 iowrite32be(vme_offset_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1184 TSI148_LCSR_OFFSET_OTOFU);
1185 iowrite32be(vme_offset_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1186 TSI148_LCSR_OFFSET_OTOFL);
1187
1188 /* XXX We need to deal with OTBS */
1189 #if 0
1190 iowrite32be(vmeOut->bcastSelect2esst, tsi148_bridge->base +
1191 TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTBS);
1192 #endif
1193
1194 /* Write ctl reg without enable */
1195 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1196 TSI148_LCSR_OFFSET_OTAT);
1197
1198 if (enabled)
1199 temp_ctl |= TSI148_LCSR_OTAT_EN;
1200
1201 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1202 TSI148_LCSR_OFFSET_OTAT);
1203
1204 spin_unlock(&(image->lock));
1205 return 0;
1206
1207 err_aspace:
1208 err_dwidth:
1209 err_gran:
1210 tsi148_free_resource(image);
1211 err_res:
1212 err_window:
1213 return retval;
1214
1215 }
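/*
 * Illustrative only: configuring an outbound image as an enabled 64kB A32
 * D32 window onto VME address 0x20000000 might look like:
 *
 *	tsi148_master_set(image, 1, 0x20000000, 0x10000,
 *		VME_A32, VME_SCT | VME_USER | VME_DATA, VME_D32);
 *
 * The VME base and window size must be multiples of 64kB, the granularity
 * enforced for outbound windows above.
 */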
1216
1217 /*
1218  * Get the attributes of an outbound window.
1219 *
1220 * XXX Not parsing prefetch information.
1221 */
1222 int __tsi148_master_get( struct vme_master_resource *image, int *enabled,
1223 unsigned long long *vme_base, unsigned long long *size,
1224 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1225 {
1226 unsigned int i, ctl;
1227 unsigned int pci_base_low, pci_base_high;
1228 unsigned int pci_bound_low, pci_bound_high;
1229 unsigned int vme_offset_low, vme_offset_high;
1230
1231 unsigned long long pci_base, pci_bound, vme_offset;
1232
1233 i = image->number;
1234
1235 ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1236 TSI148_LCSR_OFFSET_OTAT);
1237
1238 pci_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1239 TSI148_LCSR_OFFSET_OTSAU);
1240 pci_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1241 TSI148_LCSR_OFFSET_OTSAL);
1242 pci_bound_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1243 TSI148_LCSR_OFFSET_OTEAU);
1244 pci_bound_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1245 TSI148_LCSR_OFFSET_OTEAL);
1246 vme_offset_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1247 TSI148_LCSR_OFFSET_OTOFU);
1248 vme_offset_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1249 TSI148_LCSR_OFFSET_OTOFL);
1250
1251 /* Convert 64-bit variables to 2x 32-bit variables */
1252 reg_join(pci_base_high, pci_base_low, &pci_base);
1253 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1254 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1255
1256 *vme_base = pci_base + vme_offset;
1257 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1258
1259 *enabled = 0;
1260 *aspace = 0;
1261 *cycle = 0;
1262 *dwidth = 0;
1263
1264 if (ctl & TSI148_LCSR_OTAT_EN)
1265 *enabled = 1;
1266
1267 /* Setup address space */
1268 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1269 *aspace |= VME_A16;
1270 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1271 *aspace |= VME_A24;
1272 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1273 *aspace |= VME_A32;
1274 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1275 *aspace |= VME_A64;
1276 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1277 *aspace |= VME_CRCSR;
1278 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1279 *aspace |= VME_USER1;
1280 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1281 *aspace |= VME_USER2;
1282 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1283 *aspace |= VME_USER3;
1284 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1285 *aspace |= VME_USER4;
1286
1287 /* Setup 2eSST speeds */
1288 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1289 *cycle |= VME_2eSST160;
1290 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1291 *cycle |= VME_2eSST267;
1292 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1293 *cycle |= VME_2eSST320;
1294
1295 /* Setup cycle types */
1296 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_SCT)
1297 *cycle |= VME_SCT;
1298 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_BLT)
1299 *cycle |= VME_BLT;
1300 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_MBLT)
1301 *cycle |= VME_MBLT;
1302 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eVME)
1303 *cycle |= VME_2eVME;
1304 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSST)
1305 *cycle |= VME_2eSST;
1306 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSSTB)
1307 *cycle |= VME_2eSSTB;
1308
1309 if (ctl & TSI148_LCSR_OTAT_SUP)
1310 *cycle |= VME_SUPER;
1311 else
1312 *cycle |= VME_USER;
1313
1314 if (ctl & TSI148_LCSR_OTAT_PGM)
1315 *cycle |= VME_PROG;
1316 else
1317 *cycle |= VME_DATA;
1318
1319 /* Setup data width */
1320 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1321 *dwidth = VME_D16;
1322 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1323 *dwidth = VME_D32;
1324
1325 return 0;
1326 }
1327
1328
1329 int tsi148_master_get( struct vme_master_resource *image, int *enabled,
1330 unsigned long long *vme_base, unsigned long long *size,
1331 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1332 {
1333 int retval;
1334
1335 spin_lock(&(image->lock));
1336
1337 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1338 cycle, dwidth);
1339
1340 spin_unlock(&(image->lock));
1341
1342 return retval;
1343 }
1344
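/*
 * Read from a previously configured master window.
 *
 * When hardware error checking is enabled (err_chk module parameter), the
 * return value is the number of bytes transferred before the first VME bus
 * error recorded in the accessed range; otherwise the full count is
 * returned.
 */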
1345 ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1346 size_t count, loff_t offset)
1347 {
1348 int retval, enabled;
1349 unsigned long long vme_base, size;
1350 vme_address_t aspace;
1351 vme_cycle_t cycle;
1352 vme_width_t dwidth;
1353 struct vme_bus_error *vme_err = NULL;
1354
1355 spin_lock(&(image->lock));
1356
1357 memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
1358 retval = count;
1359
1360 if (!err_chk)
1361 goto skip_chk;
1362
1363 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1364 &dwidth);
1365
1366 vme_err = tsi148_find_error(aspace, vme_base + offset, count);
1367 if(vme_err != NULL) {
1368 dev_err(image->parent->parent, "First VME read error detected "
1369 "an at address 0x%llx\n", vme_err->address);
1370 retval = vme_err->address - (vme_base + offset);
1371 		/* Clear down saved errors in this address range */
1372 tsi148_clear_errors(aspace, vme_base + offset, count);
1373 }
1374
1375 skip_chk:
1376 spin_unlock(&(image->lock));
1377
1378 return retval;
1379 }
1380
1381
1382 /* XXX We need to change vme_master_resource->sem to a spinlock so that read
1383 * and write functions can be used in an interrupt context
1384 */
1385 ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1386 size_t count, loff_t offset)
1387 {
1388 int retval = 0, enabled;
1389 unsigned long long vme_base, size;
1390 vme_address_t aspace;
1391 vme_cycle_t cycle;
1392 vme_width_t dwidth;
1393
1394 struct vme_bus_error *vme_err = NULL;
1395
1396 spin_lock(&(image->lock));
1397
1398 memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
1399 retval = count;
1400
1401 /*
1402 * Writes are posted. We need to do a read on the VME bus to flush out
1403 	 * all of the writes before we check for errors. We can't guarantee
1404 	 * that reading the data we have just written is safe. It is believed
1405 	 * that there isn't any read/write re-ordering, so we can read any
1406 	 * location in VME space, so let's read the Device ID from the tsi148's
1407 * own registers as mapped into CR/CSR space.
1408 *
1409 * We check for saved errors in the written address range/space.
1410 */
1411
1412 if (!err_chk)
1413 goto skip_chk;
1414
1415 /*
1416 	 * Get window info first, to maximise the time that the buffers may
1417 	 * flush on their own.
1418 */
1419 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1420 &dwidth);
1421
1422 ioread16(flush_image->kern_base + 0x7F000);
1423
1424 vme_err = tsi148_find_error(aspace, vme_base + offset, count);
1425 if(vme_err != NULL) {
1426 printk("First VME write error detected an at address 0x%llx\n",
1427 vme_err->address);
1428 retval = vme_err->address - (vme_base + offset);
1429 		/* Clear down saved errors in this address range */
1430 tsi148_clear_errors(aspace, vme_base + offset, count);
1431 }
1432
1433 skip_chk:
1434 spin_unlock(&(image->lock));
1435
1436 return retval;
1437 }
1438
1439 /*
1440 * Perform an RMW cycle on the VME bus.
1441 *
1442 * Requires a previously configured master window, returns final value.
1443 */
1444 unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1445 unsigned int mask, unsigned int compare, unsigned int swap,
1446 loff_t offset)
1447 {
1448 unsigned long long pci_addr;
1449 unsigned int pci_addr_high, pci_addr_low;
1450 u32 tmp, result;
1451 int i;
1452
1453
1454 /* Find the PCI address that maps to the desired VME address */
1455 i = image->number;
1456
1457 /* Locking as we can only do one of these at a time */
1458 down(&(vme_rmw));
1459
1460 /* Lock image */
1461 spin_lock(&(image->lock));
1462
1463 pci_addr_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1464 TSI148_LCSR_OFFSET_OTSAU);
1465 pci_addr_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1466 TSI148_LCSR_OFFSET_OTSAL);
1467
1468 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1469 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1470
1471 /* Configure registers */
1472 iowrite32be(mask, tsi148_bridge->base + TSI148_LCSR_RMWEN);
1473 iowrite32be(compare, tsi148_bridge->base + TSI148_LCSR_RMWC);
1474 iowrite32be(swap, tsi148_bridge->base + TSI148_LCSR_RMWS);
1475 iowrite32be(pci_addr_high, tsi148_bridge->base + TSI148_LCSR_RMWAU);
1476 iowrite32be(pci_addr_low, tsi148_bridge->base + TSI148_LCSR_RMWAL);
1477
1478 /* Enable RMW */
1479 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1480 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1481 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1482
1483 /* Kick process off with a read to the required address. */
1484 result = ioread32be(image->kern_base + offset);
1485
1486 /* Disable RMW */
1487 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1488 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1489 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1490
1491 spin_unlock(&(image->lock));
1492
1493 up(&(vme_rmw));
1494
1495 return result;
1496 }
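/*
 * Illustrative only: with a master window already mapping the target VME
 * location, a caller could attempt an atomic test-and-set of bit 0 of the
 * word at offset 0 of the window with:
 *
 *	result = tsi148_master_rmw(image, 0x1, 0x0, 0x1, 0);
 *
 * The mask, compare and swap arguments are written to the RMWEN, RMWC and
 * RMWS registers respectively; see the TSI148 manual for the exact per-bit
 * compare/swap semantics.
 */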
1497
1498 static int tsi148_dma_set_vme_src_attributes (u32 *attr, vme_address_t aspace,
1499 vme_cycle_t cycle, vme_width_t dwidth)
1500 {
1501 /* Setup 2eSST speeds */
1502 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1503 case VME_2eSST160:
1504 *attr |= TSI148_LCSR_DSAT_2eSSTM_160;
1505 break;
1506 case VME_2eSST267:
1507 *attr |= TSI148_LCSR_DSAT_2eSSTM_267;
1508 break;
1509 case VME_2eSST320:
1510 *attr |= TSI148_LCSR_DSAT_2eSSTM_320;
1511 break;
1512 }
1513
1514 /* Setup cycle types */
1515 if (cycle & VME_SCT) {
1516 *attr |= TSI148_LCSR_DSAT_TM_SCT;
1517 }
1518 if (cycle & VME_BLT) {
1519 *attr |= TSI148_LCSR_DSAT_TM_BLT;
1520 }
1521 if (cycle & VME_MBLT) {
1522 *attr |= TSI148_LCSR_DSAT_TM_MBLT;
1523 }
1524 if (cycle & VME_2eVME) {
1525 *attr |= TSI148_LCSR_DSAT_TM_2eVME;
1526 }
1527 if (cycle & VME_2eSST) {
1528 *attr |= TSI148_LCSR_DSAT_TM_2eSST;
1529 }
1530 if (cycle & VME_2eSSTB) {
1531 printk("Currently not setting Broadcast Select Registers\n");
1532 *attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
1533 }
1534
1535 /* Setup data width */
1536 switch (dwidth) {
1537 case VME_D16:
1538 *attr |= TSI148_LCSR_DSAT_DBW_16;
1539 break;
1540 case VME_D32:
1541 *attr |= TSI148_LCSR_DSAT_DBW_32;
1542 break;
1543 default:
1544 printk("Invalid data width\n");
1545 return -EINVAL;
1546 }
1547
1548 /* Setup address space */
1549 switch (aspace) {
1550 case VME_A16:
1551 *attr |= TSI148_LCSR_DSAT_AMODE_A16;
1552 break;
1553 case VME_A24:
1554 *attr |= TSI148_LCSR_DSAT_AMODE_A24;
1555 break;
1556 case VME_A32:
1557 *attr |= TSI148_LCSR_DSAT_AMODE_A32;
1558 break;
1559 case VME_A64:
1560 *attr |= TSI148_LCSR_DSAT_AMODE_A64;
1561 break;
1562 case VME_CRCSR:
1563 *attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1564 break;
1565 case VME_USER1:
1566 *attr |= TSI148_LCSR_DSAT_AMODE_USER1;
1567 break;
1568 case VME_USER2:
1569 *attr |= TSI148_LCSR_DSAT_AMODE_USER2;
1570 break;
1571 case VME_USER3:
1572 *attr |= TSI148_LCSR_DSAT_AMODE_USER3;
1573 break;
1574 case VME_USER4:
1575 *attr |= TSI148_LCSR_DSAT_AMODE_USER4;
1576 break;
1577 default:
1578 printk("Invalid address space\n");
1579 return -EINVAL;
1580 break;
1581 }
1582
1583 if (cycle & VME_SUPER)
1584 *attr |= TSI148_LCSR_DSAT_SUP;
1585 if (cycle & VME_PROG)
1586 *attr |= TSI148_LCSR_DSAT_PGM;
1587
1588 return 0;
1589 }
1590
1591 static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
1592 vme_cycle_t cycle, vme_width_t dwidth)
1593 {
1594 /* Setup 2eSST speeds */
1595 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1596 case VME_2eSST160:
1597 *attr |= TSI148_LCSR_DDAT_2eSSTM_160;
1598 break;
1599 case VME_2eSST267:
1600 *attr |= TSI148_LCSR_DDAT_2eSSTM_267;
1601 break;
1602 case VME_2eSST320:
1603 *attr |= TSI148_LCSR_DDAT_2eSSTM_320;
1604 break;
1605 }
1606
1607 /* Setup cycle types */
1608 if (cycle & VME_SCT) {
1609 *attr |= TSI148_LCSR_DDAT_TM_SCT;
1610 }
1611 if (cycle & VME_BLT) {
1612 *attr |= TSI148_LCSR_DDAT_TM_BLT;
1613 }
1614 if (cycle & VME_MBLT) {
1615 *attr |= TSI148_LCSR_DDAT_TM_MBLT;
1616 }
1617 if (cycle & VME_2eVME) {
1618 *attr |= TSI148_LCSR_DDAT_TM_2eVME;
1619 }
1620 if (cycle & VME_2eSST) {
1621 *attr |= TSI148_LCSR_DDAT_TM_2eSST;
1622 }
1623 if (cycle & VME_2eSSTB) {
1624 printk("Currently not setting Broadcast Select Registers\n");
1625 *attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
1626 }
1627
1628 /* Setup data width */
1629 switch (dwidth) {
1630 case VME_D16:
1631 *attr |= TSI148_LCSR_DDAT_DBW_16;
1632 break;
1633 case VME_D32:
1634 *attr |= TSI148_LCSR_DDAT_DBW_32;
1635 break;
1636 default:
1637 printk("Invalid data width\n");
1638 return -EINVAL;
1639 }
1640
1641 /* Setup address space */
1642 switch (aspace) {
1643 case VME_A16:
1644 *attr |= TSI148_LCSR_DDAT_AMODE_A16;
1645 break;
1646 case VME_A24:
1647 *attr |= TSI148_LCSR_DDAT_AMODE_A24;
1648 break;
1649 case VME_A32:
1650 *attr |= TSI148_LCSR_DDAT_AMODE_A32;
1651 break;
1652 case VME_A64:
1653 *attr |= TSI148_LCSR_DDAT_AMODE_A64;
1654 break;
1655 case VME_CRCSR:
1656 *attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1657 break;
1658 case VME_USER1:
1659 *attr |= TSI148_LCSR_DDAT_AMODE_USER1;
1660 break;
1661 case VME_USER2:
1662 *attr |= TSI148_LCSR_DDAT_AMODE_USER2;
1663 break;
1664 case VME_USER3:
1665 *attr |= TSI148_LCSR_DDAT_AMODE_USER3;
1666 break;
1667 case VME_USER4:
1668 *attr |= TSI148_LCSR_DDAT_AMODE_USER4;
1669 break;
1670 default:
1671 printk("Invalid address space\n");
1672 return -EINVAL;
1673 break;
1674 }
1675
1676 if (cycle & VME_SUPER)
1677 *attr |= TSI148_LCSR_DDAT_SUP;
1678 if (cycle & VME_PROG)
1679 *attr |= TSI148_LCSR_DDAT_PGM;
1680
1681 return 0;
1682 }
1683
1684 /*
1685  * Add a linked-list descriptor to the DMA list
1686 *
1687 * XXX Need to handle 2eSST Broadcast select bits
1688 */
1689 int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
1690 struct vme_dma_attr *dest, size_t count)
1691 {
1692 struct tsi148_dma_entry *entry, *prev;
1693 u32 address_high, address_low;
1694 struct vme_dma_pattern *pattern_attr;
1695 struct vme_dma_pci *pci_attr;
1696 struct vme_dma_vme *vme_attr;
1697 dma_addr_t desc_ptr;
1698 int retval = 0;
1699
1700 /* XXX descriptor must be aligned on 64-bit boundaries */
1701 entry = (struct tsi148_dma_entry *)kmalloc(
1702 sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1703 if (entry == NULL) {
1704 printk("Failed to allocate memory for dma resource "
1705 "structure\n");
1706 retval = -ENOMEM;
1707 goto err_mem;
1708 }
1709
1710 /* Test descriptor alignment */
1711 if ((unsigned long)&(entry->descriptor) & 0x7) {
1712 printk("Descriptor not aligned to 8 byte boundary as "
1713 "required: %p\n", &(entry->descriptor));
1714 retval = -EINVAL;
1715 goto err_align;
1716 }
1717
1718 /* Given we are going to fill out the structure, we probably don't
1719 * need to zero it, but better safe than sorry for now.
1720 */
1721 memset(&(entry->descriptor), 0, sizeof(struct tsi148_dma_descriptor));
1722
1723 /* Fill out source part */
1724 switch (src->type) {
1725 case VME_DMA_PATTERN:
1726 pattern_attr = (struct vme_dma_pattern *)src->private;
1727
1728 entry->descriptor.dsal = pattern_attr->pattern;
1729 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
1730 /* Default behaviour is 32 bit pattern */
1731 if (pattern_attr->type & VME_DMA_PATTERN_BYTE) {
1732 entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
1733 }
1734 /* It seems that the default behaviour is to increment */
1735 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0) {
1736 entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
1737 }
1738 break;
1739 case VME_DMA_PCI:
1740 pci_attr = (struct vme_dma_pci *)src->private;
1741
1742 reg_split((unsigned long long)pci_attr->address, &address_high,
1743 &address_low);
1744 entry->descriptor.dsau = address_high;
1745 entry->descriptor.dsal = address_low;
1746 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
1747 break;
1748 case VME_DMA_VME:
1749 vme_attr = (struct vme_dma_vme *)src->private;
1750
1751 reg_split((unsigned long long)vme_attr->address, &address_high,
1752 &address_low);
1753 entry->descriptor.dsau = address_high;
1754 entry->descriptor.dsal = address_low;
1755 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
1756
1757 retval = tsi148_dma_set_vme_src_attributes(
1758 &(entry->descriptor.dsat), vme_attr->aspace,
1759 vme_attr->cycle, vme_attr->dwidth);
1760 if(retval < 0 )
1761 goto err_source;
1762 break;
1763 default:
1764 printk("Invalid source type\n");
1765 retval = -EINVAL;
1766 goto err_source;
1767 break;
1768 }
1769
1770 /* Assume last link - this will be over-written by adding another */
1771 entry->descriptor.dnlau = 0;
1772 entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;
1773
1774
1775 /* Fill out destination part */
1776 switch (dest->type) {
1777 case VME_DMA_PCI:
1778 pci_attr = (struct vme_dma_pci *)dest->private;
1779
1780 reg_split((unsigned long long)pci_attr->address, &address_high,
1781 &address_low);
1782 entry->descriptor.ddau = address_high;
1783 entry->descriptor.ddal = address_low;
1784 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
1785 break;
1786 case VME_DMA_VME:
1787 vme_attr = (struct vme_dma_vme *)dest->private;
1788
1789 reg_split((unsigned long long)vme_attr->address, &address_high,
1790 &address_low);
1791 entry->descriptor.ddau = address_high;
1792 entry->descriptor.ddal = address_low;
1793 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
1794
1795 retval = tsi148_dma_set_vme_dest_attributes(
1796 &(entry->descriptor.ddat), vme_attr->aspace,
1797 vme_attr->cycle, vme_attr->dwidth);
1798 if(retval < 0 )
1799 goto err_dest;
1800 break;
1801 default:
1802 printk("Invalid destination type\n");
1803 retval = -EINVAL;
1804 goto err_dest;
1805 break;
1806 }
1807
1808 /* Fill out count */
1809 entry->descriptor.dcnt = (u32)count;
1810
1811 /* Add to list */
1812 list_add_tail(&(entry->list), &(list->entries));
1813
1814 /* Fill out previous descriptors "Next Address" */
1815 if(entry->list.prev != &(list->entries)){
1816 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1817 list);
1818 /* We need the bus address for the pointer */
1819 desc_ptr = virt_to_bus(&(entry->descriptor));
1820 reg_split(desc_ptr, &(prev->descriptor.dnlau),
1821 &(prev->descriptor.dnlal));
1822 }
1823
1824 return 0;
1825
1826 err_dest:
1827 err_source:
1828 err_align:
1829 kfree(entry);
1830 err_mem:
1831 return retval;
1832 }
1833
1834 /*
1835  * Check whether the provided DMA channel is busy: returns 0 if busy, 1 if idle.
1836 */
1837 static int tsi148_dma_busy(int channel)
1838 {
1839 u32 tmp;
1840
1841 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
1842 TSI148_LCSR_OFFSET_DSTA);
1843
1844 if (tmp & TSI148_LCSR_DSTA_BSY)
1845 return 0;
1846 else
1847 return 1;
1848
1849 }
1850
1851 /*
1852 * Execute a previously generated link list
1853 *
1854 * XXX Need to provide control register configuration.
1855 */
1856 int tsi148_dma_list_exec(struct vme_dma_list *list)
1857 {
1858 struct vme_dma_resource *ctrlr;
1859 int channel, retval = 0;
1860 struct tsi148_dma_entry *entry;
1861 dma_addr_t bus_addr;
1862 u32 bus_addr_high, bus_addr_low;
1863 u32 val, dctlreg = 0;
1864 #if 0
1865 int x;
1866 #endif
1867
1868 ctrlr = list->parent;
1869
1870 down(&(ctrlr->sem));
1871
1872 channel = ctrlr->number;
1873
1874 if (! list_empty(&(ctrlr->running))) {
1875 /*
1876 * XXX We have an active DMA transfer and currently haven't
1877 * sorted out the mechanism for "pending" DMA transfers.
1878 * Return busy.
1879 */
1880 /* Need to add to pending here */
1881 up(&(ctrlr->sem));
1882 return -EBUSY;
1883 } else {
1884 list_add(&(list->list), &(ctrlr->running));
1885 }
1886 #if 0
1887 /* XXX Still todo */
1888 for (x = 0; x < 8; x++) { /* vme block size */
1889 if ((32 << x) >= vmeDma->maxVmeBlockSize) {
1890 break;
1891 }
1892 }
1893 if (x == 8)
1894 x = 7;
1895 dctlreg |= (x << 12);
1896
1897 for (x = 0; x < 8; x++) { /* pci block size */
1898 if ((32 << x) >= vmeDma->maxPciBlockSize) {
1899 break;
1900 }
1901 }
1902 if (x == 8)
1903 x = 7;
1904 dctlreg |= (x << 4);
1905
1906 if (vmeDma->vmeBackOffTimer) {
1907 for (x = 1; x < 8; x++) { /* vme timer */
1908 if ((1 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
1909 break;
1910 }
1911 }
1912 if (x == 8)
1913 x = 7;
1914 dctlreg |= (x << 8);
1915 }
1916
1917 if (vmeDma->pciBackOffTimer) {
1918 for (x = 1; x < 8; x++) { /* pci timer */
1919 if ((1 << (x - 1)) >= vmeDma->pciBackOffTimer) {
1920 break;
1921 }
1922 }
1923 if (x == 8)
1924 x = 7;
1925 dctlreg |= (x << 0);
1926 }
1927 #endif
1928
1929 /* Get first bus address and write into registers */
1930 entry = list_first_entry(&(list->entries), struct tsi148_dma_entry,
1931 list);
1932
1933 bus_addr = virt_to_bus(&(entry->descriptor));
1934
1935 up(&(ctrlr->sem));
1936
1937 reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
1938
1939 iowrite32be(bus_addr_high, tsi148_bridge->base +
1940 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1941 iowrite32be(bus_addr_low, tsi148_bridge->base +
1942 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1943
1944 /* Start the operation */
1945 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, tsi148_bridge->base +
1946 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1947
1948 wait_event_interruptible(dma_queue[channel], tsi148_dma_busy(channel));
1949 /*
1950 * Read status register, this register is valid until we kick off a
1951 * new transfer.
1952 */
1953 val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
1954 TSI148_LCSR_OFFSET_DSTA);
1955
1956 if (val & TSI148_LCSR_DSTA_VBE) {
1957 printk(KERN_ERR "tsi148: DMA Error. DSTA=%08X\n", val);
1958 retval = -EIO;
1959 }
1960
1961 /* Remove list from running list */
1962 down(&(ctrlr->sem));
1963 list_del(&(list->list));
1964 up(&(ctrlr->sem));
1965
1966 return retval;
1967 }
1968
1969 /*
1970 * Clean up a previously generated link list
1971 *
1972 * This is a separate function; do not assume that the chain cannot be reused.
1973 */
1974 int tsi148_dma_list_empty(struct vme_dma_list *list)
1975 {
1976 struct list_head *pos, *temp;
1977 struct tsi148_dma_entry *entry;
1978
1979 /* detach and free each entry */
1980 list_for_each_safe(pos, temp, &(list->entries)) {
1981 list_del(pos);
1982 entry = list_entry(pos, struct tsi148_dma_entry, list);
1983 kfree(entry);
1984 }
1985
1986 return (0);
1987 }
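
/*
 * Illustrative sketch, compiled out with #if 0 like the staging code at the
 * end of this file: how the list-mode DMA entry points above are expected to
 * be driven.  The struct vme_dma_list is assumed to have been obtained from
 * the VME core with its 'entries' list initialised; 'buf_bus' (a PCI bus
 * address from a coherent allocation) and the VME target address are
 * hypothetical values used only for illustration.
 */
#if 0
static int tsi148_dma_example(struct vme_dma_list *list, dma_addr_t buf_bus,
	size_t count)
{
	/* Source: PCI memory, described by a struct vme_dma_pci attribute */
	struct vme_dma_pci pci_src = { .address = buf_bus };
	struct vme_dma_attr src = { .type = VME_DMA_PCI, .private = &pci_src };

	/* Destination: A32 single-cycle D32 access on the VME bus */
	struct vme_dma_vme vme_dst = {
		.address = 0x20000000,	/* hypothetical VME address */
		.aspace = VME_A32,
		.cycle = VME_SCT,
		.dwidth = VME_D32,
	};
	struct vme_dma_attr dst = { .type = VME_DMA_VME, .private = &vme_dst };
	int retval;

	/* Queue one descriptor; further calls chain additional descriptors */
	retval = tsi148_dma_list_add(list, &src, &dst, count);
	if (retval)
		return retval;

	/* Kick off the chain and sleep until the channel goes idle */
	retval = tsi148_dma_list_exec(list);

	/* Free the descriptors once the chain is no longer needed */
	tsi148_dma_list_empty(list);

	return retval;
}
#endif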
1988
1989 /*
1990 * All 4 location monitors reside at the same base - this is therefore a
1991 * system-wide configuration.
1992 *
1993 * This does not enable the location monitor - that should be done when the
1994 * first callback is attached and disabled when the last callback is removed.
1995 */
1996 int tsi148_lm_set(unsigned long long lm_base, vme_address_t aspace,
1997 vme_cycle_t cycle)
1998 {
1999 u32 lm_base_high, lm_base_low, lm_ctl = 0;
2000 int i;
2001
2002 /* Get semaphore */
2003 down(&(vme_lm));
2004
2005 /* If we already have a callback attached, we can't move it! */
2006 for (i = 0; i < 4; i++) {
2007 if(lm_callback[i] != NULL) {
2008 up(&(vme_lm));
2009 printk(KERN_ERR "Location monitor callback attached, can't "
2010 "reset\n");
2011 return -EBUSY;
2012 }
2013 }
2014
2015 switch (aspace) {
2016 case VME_A16:
2017 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
2018 break;
2019 case VME_A24:
2020 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
2021 break;
2022 case VME_A32:
2023 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
2024 break;
2025 case VME_A64:
2026 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
2027 break;
2028 default:
2029 up(&(vme_lm));
2030 printk(KERN_ERR "Invalid address space\n");
2031 return -EINVAL;
2032 break;
2033 }
2034
2035 if (cycle & VME_SUPER)
2036 lm_ctl |= TSI148_LCSR_LMAT_SUPR ;
2037 if (cycle & VME_USER)
2038 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
2039 if (cycle & VME_PROG)
2040 lm_ctl |= TSI148_LCSR_LMAT_PGM;
2041 if (cycle & VME_DATA)
2042 lm_ctl |= TSI148_LCSR_LMAT_DATA;
2043
2044 reg_split(lm_base, &lm_base_high, &lm_base_low);
2045
2046 iowrite32be(lm_base_high, tsi148_bridge->base + TSI148_LCSR_LMBAU);
2047 iowrite32be(lm_base_low, tsi148_bridge->base + TSI148_LCSR_LMBAL);
2048 iowrite32be(lm_ctl, tsi148_bridge->base + TSI148_LCSR_LMAT);
2049
2050 up(&(vme_lm));
2051
2052 return 0;
2053 }
2054
2055 /* Get the configuration of the location monitor and return whether it is
2056 * enabled or disabled.
2057 */
2058 int tsi148_lm_get(unsigned long long *lm_base, vme_address_t *aspace,
2059 vme_cycle_t *cycle)
2060 {
2061 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
2062
2063 /* Get semaphore */
2064 down(&(vme_lm));
2065
2066 lm_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMBAU);
2067 lm_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMBAL);
2068 lm_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
2069
2070 reg_join(lm_base_high, lm_base_low, lm_base);
2071
2072 if (lm_ctl & TSI148_LCSR_LMAT_EN)
2073 enabled = 1;
2074
2075 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16) {
2076 *aspace |= VME_A16;
2077 }
2078 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24) {
2079 *aspace |= VME_A24;
2080 }
2081 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32) {
2082 *aspace |= VME_A32;
2083 }
2084 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64) {
2085 *aspace |= VME_A64;
2086 }
2087
2088 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2089 *cycle |= VME_SUPER;
2090 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2091 *cycle |= VME_USER;
2092 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2093 *cycle |= VME_PROG;
2094 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2095 *cycle |= VME_DATA;
2096
2097 up(&(vme_lm));
2098
2099 return enabled;
2100 }
2101
2102 /*
2103 * Attach a callback to a specific location monitor.
2104 *
2105 * The callback will be passed the number of the monitor that triggered.
2106 */
2107 int tsi148_lm_attach(int monitor, void (*callback)(int))
2108 {
2109 u32 lm_ctl, tmp;
2110
2111 /* Get semaphore */
2112 down(&(vme_lm));
2113
2114 /* Ensure that the location monitor is configured - need PGM or DATA */
2115 lm_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
2116 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2117 up(&(vme_lm));
2118 printk(KERN_ERR "Location monitor not properly configured\n");
2119 return -EINVAL;
2120 }
2121
2122 /* Check that a callback isn't already attached */
2123 if (lm_callback[monitor] != NULL) {
2124 up(&(vme_lm));
2125 printk(KERN_ERR "Existing callback attached\n");
2126 return -EBUSY;
2127 }
2128
2129 /* Attach callback */
2130 lm_callback[monitor] = callback;
2131
2132 /* Enable Location Monitor interrupt */
2133 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
2134 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2135 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
2136
2137 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
2138 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2139 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
2140
2141 /* Ensure that the global Location Monitor Enable bit is set */
2142 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2143 lm_ctl |= TSI148_LCSR_LMAT_EN;
2144 iowrite32be(lm_ctl, tsi148_bridge->base + TSI148_LCSR_LMAT);
2145 }
2146
2147 up(&(vme_lm));
2148
2149 return 0;
2150 }
2151
2152 /*
2153 * Detach a callback function from a specific location monitor.
2154 */
2155 int tsi148_lm_detach(int monitor)
2156 {
2157 u32 lm_en, tmp;
2158
2159 /* Get semaphore */
2160 down(&(vme_lm));
2161
2162 /* Disable Location Monitor and ensure previous interrupts are clear */
2163 lm_en = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
2164 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2165 iowrite32be(lm_en, tsi148_bridge->base + TSI148_LCSR_INTEN);
2166
2167 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
2168 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2169 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
2170
2171 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2172 tsi148_bridge->base + TSI148_LCSR_INTC);
2173
2174 /* Detach callback */
2175 lm_callback[monitor] = NULL;
2176
2177 /* If all location monitors are disabled, disable the global Location Monitor */
2178 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2179 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2180 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
2181 tmp &= ~TSI148_LCSR_LMAT_EN;
2182 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_LMAT);
2183 }
2184
2185 up(&(vme_lm));
2186
2187 return 0;
2188 }
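
/*
 * Illustrative sketch, compiled out with #if 0 like the staging code at the
 * end of this file: expected usage of the location monitor entry points
 * above.  The base address, address space and the 'example_lm_handler'
 * callback are hypothetical; the callback runs from the interrupt handler,
 * so it must not sleep.
 */
#if 0
static void example_lm_handler(int monitor)
{
	/* Keep this short - it is called in interrupt context */
	printk(KERN_DEBUG "Location monitor %d triggered\n", monitor);
}

static int example_lm_usage(void)
{
	int retval;

	/*
	 * Configure the (system-wide) location monitor base.  The cycle mask
	 * must include VME_PROG or VME_DATA, otherwise tsi148_lm_attach()
	 * will reject the attach.
	 */
	retval = tsi148_lm_set(0x1000, VME_A16, VME_USER | VME_DATA);
	if (retval)
		return retval;

	/* Attach to monitor 0; this also enables the LM interrupt sources */
	retval = tsi148_lm_attach(0, example_lm_handler);
	if (retval)
		return retval;

	/* ... location monitor 0 is now live ... */

	/* Detaching the last callback disables the global LM enable again */
	return tsi148_lm_detach(0);
}
#endif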
2189
2190 /*
2191 * Determine Geographical Addressing
2192 */
2193 int tsi148_slot_get(void)
2194 {
2195 u32 slot = 0;
2196
2197 slot = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
2198 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2199 return (int)slot;
2200 }
2201
2202 static int __init tsi148_init(void)
2203 {
2204 return pci_register_driver(&tsi148_driver);
2205 }
2206
2207 /*
2208 * Configure CR/CSR space
2209 *
2210 * Access to the CR/CSR can be configured at power-up. The location of the
2211 * CR/CSR registers in the CR/CSR address space is determined by the board's
2212 * Auto-ID or Geographic address. This function ensures that the window is
2213 * enabled at an offset consistent with the board's geographic address.
2214 *
2215 * Each board has a 512kB window, with the highest 4kB being used for the
2216 * board's registers; this means there is a fixed-length 508kB window which
2217 * must be mapped onto PCI memory.
2218 */
2219 static int tsi148_crcsr_init(struct pci_dev *pdev)
2220 {
2221 u32 cbar, crat, vstat;
2222 u32 crcsr_bus_high, crcsr_bus_low;
2223 int retval;
2224
2225 /* Allocate mem for CR/CSR image */
2226 crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2227 &crcsr_bus);
2228 if (crcsr_kernel == NULL) {
2229 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
2230 "image\n");
2231 return -ENOMEM;
2232 }
2233
2234 memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2235
2236 reg_split(crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2237
2238 iowrite32be(crcsr_bus_high, tsi148_bridge->base + TSI148_LCSR_CROU);
2239 iowrite32be(crcsr_bus_low, tsi148_bridge->base + TSI148_LCSR_CROL);
2240
2241 /* Ensure that the CR/CSR is configured at the correct offset */
2242 cbar = ioread32be(tsi148_bridge->base + TSI148_CBAR);
2243 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2244
2245 vstat = tsi148_slot_get();
2246
2247 if (cbar != vstat) {
2248 dev_info(&pdev->dev, "Setting CR/CSR offset\n");
2249 iowrite32be(vstat << 3, tsi148_bridge->base + TSI148_CBAR);
2250 }
2251 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", vstat);
2252
2253 crat = ioread32be(tsi148_bridge->base + TSI148_LCSR_CRAT);
2254 if (!(crat & TSI148_LCSR_CRAT_EN)) {
2255 dev_info(&pdev->dev, "Enabling CR/CSR space\n");
2256 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2257 tsi148_bridge->base + TSI148_LCSR_CRAT);
2258 } else
2259 dev_info(&pdev->dev, "CR/CSR already enabled\n");
2260
2261 /* If we want flushed, error-checked writes, set up a window
2262 * over the CR/CSR registers. We read from here to safely flush
2263 * through VME writes.
2264 */
2265 if(err_chk) {
2266 retval = tsi148_master_set(flush_image, 1, (vstat * 0x80000),
2267 0x80000, VME_CRCSR, VME_SCT, VME_D16);
2268 if (retval)
2269 dev_err(&pdev->dev, "Configuring flush image failed\n");
2270 }
2271
2272 return 0;
2273
2274 }
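
/*
 * Illustrative sketch, compiled out with #if 0 like the staging code at the
 * end of this file: how a board's geographic address maps onto CR/CSR space,
 * as assumed by tsi148_crcsr_init() above.  Each slot owns a 512kB (0x80000)
 * block of CR/CSR space and the top 4kB of that block holds the board's
 * registers.  The printed values are for illustration only.
 */
#if 0
static void example_crcsr_layout(void)
{
	int slot = tsi148_slot_get();
	unsigned long long base = (unsigned long long)slot * 0x80000;

	printk(KERN_DEBUG "Slot %d: CR/CSR window 0x%llx - 0x%llx, "
		"registers at 0x%llx\n",
		slot, base, base + 0x7ffff, base + 0x7f000);
}
#endif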
2275
2276 static void tsi148_crcsr_exit(struct pci_dev *pdev)
2277 {
2278 u32 crat;
2279
2280 /* Turn off CR/CSR space */
2281 crat = ioread32be(tsi148_bridge->base + TSI148_LCSR_CRAT);
2282 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2283 tsi148_bridge->base + TSI148_LCSR_CRAT);
2284
2285 /* Free image */
2286 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CROU);
2287 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CROL);
2288
2289 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
2290 }
2291
2292 static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2293 {
2294 int retval, i, master_num;
2295 u32 data;
2296 struct list_head *pos = NULL;
2297 struct vme_master_resource *master_image;
2298 struct vme_slave_resource *slave_image;
2299 struct vme_dma_resource *dma_ctrlr;
2300
2301 /* If we want to support more than one of each bridge, we need to
2302 * dynamically generate this so we get one per device
2303 */
2304 tsi148_bridge = (struct vme_bridge *)kmalloc(sizeof(struct vme_bridge),
2305 GFP_KERNEL);
2306 if (tsi148_bridge == NULL) {
2307 dev_err(&pdev->dev, "Failed to allocate memory for device "
2308 "structure\n");
2309 retval = -ENOMEM;
2310 goto err_struct;
2311 }
2312
2313 memset(tsi148_bridge, 0, sizeof(struct vme_bridge));
2314
2315 /* Enable the device */
2316 retval = pci_enable_device(pdev);
2317 if (retval) {
2318 dev_err(&pdev->dev, "Unable to enable device\n");
2319 goto err_enable;
2320 }
2321
2322 /* Map Registers */
2323 retval = pci_request_regions(pdev, driver_name);
2324 if (retval) {
2325 dev_err(&pdev->dev, "Unable to reserve resources\n");
2326 goto err_resource;
2327 }
2328
2329 /* map registers in BAR 0 */
2330 tsi148_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0), 4096);
2331 if (!tsi148_bridge->base) {
2332 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2333 retval = -EIO;
2334 goto err_remap;
2335 }
2336
2337 /* Check to see if the mapping worked out */
2338 data = ioread32(tsi148_bridge->base + TSI148_PCFS_ID) & 0x0000FFFF;
2339 if (data != PCI_VENDOR_ID_TUNDRA) {
2340 dev_err(&pdev->dev, "CRG region check failed\n");
2341 retval = -EIO;
2342 goto err_test;
2343 }
2344
2345 /* Initialize wait queues & mutual exclusion flags */
2346 /* XXX These need to be moved to the vme_bridge structure */
2347 init_waitqueue_head(&dma_queue[0]);
2348 init_waitqueue_head(&dma_queue[1]);
2349 init_waitqueue_head(&iack_queue);
2350 init_MUTEX(&(vme_int));
2351 init_MUTEX(&(vme_irq));
2352 init_MUTEX(&(vme_rmw));
2353 init_MUTEX(&(vme_lm));
2354
2355 tsi148_bridge->parent = &(pdev->dev);
2356 strcpy(tsi148_bridge->name, driver_name);
2357
2358 /* Setup IRQ */
2359 retval = tsi148_irq_init(tsi148_bridge);
2360 if (retval != 0) {
2361 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2362 goto err_irq;
2363 }
2364
2365 /* If we are going to flush writes, we need to read from the VME bus.
2366 * We need to do this safely, thus we read the devices own CR/CSR
2367 * register. To do this we must set up a window in CR/CSR space and
2368 * hence have one less master window resource available.
2369 */
2370 master_num = TSI148_MAX_MASTER;
2371 if(err_chk){
2372 master_num--;
2373 /* XXX */
2374 flush_image = (struct vme_master_resource *)kmalloc(
2375 sizeof(struct vme_master_resource), GFP_KERNEL);
2376 if (flush_image == NULL) {
2377 dev_err(&pdev->dev, "Failed to allocate memory for "
2378 "flush resource structure\n");
2379 retval = -ENOMEM;
2380 goto err_master;
2381 }
2382 flush_image->parent = tsi148_bridge;
2383 spin_lock_init(&(flush_image->lock));
2384 flush_image->locked = 1;
2385 flush_image->number = master_num;
2386 flush_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2387 VME_A64;
2388 flush_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2389 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2390 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2391 VME_PROG | VME_DATA;
2392 flush_image->width_attr = VME_D16 | VME_D32;
2393 memset(&(flush_image->pci_resource), 0,
2394 sizeof(struct resource));
2395 flush_image->kern_base = NULL;
2396 }
2397
2398 /* Add master windows to list */
2399 INIT_LIST_HEAD(&(tsi148_bridge->master_resources));
2400 for (i = 0; i < master_num; i++) {
2401 master_image = (struct vme_master_resource *)kmalloc(
2402 sizeof(struct vme_master_resource), GFP_KERNEL);
2403 if (master_image == NULL) {
2404 dev_err(&pdev->dev, "Failed to allocate memory for "
2405 "master resource structure\n");
2406 retval = -ENOMEM;
2407 goto err_master;
2408 }
2409 master_image->parent = tsi148_bridge;
2410 spin_lock_init(&(master_image->lock));
2411 master_image->locked = 0;
2412 master_image->number = i;
2413 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2414 VME_A64;
2415 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2416 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2417 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2418 VME_PROG | VME_DATA;
2419 master_image->width_attr = VME_D16 | VME_D32;
2420 memset(&(master_image->pci_resource), 0,
2421 sizeof(struct resource));
2422 master_image->kern_base = NULL;
2423 list_add_tail(&(master_image->list),
2424 &(tsi148_bridge->master_resources));
2425 }
2426
2427 /* Add slave windows to list */
2428 INIT_LIST_HEAD(&(tsi148_bridge->slave_resources));
2429 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2430 slave_image = (struct vme_slave_resource *)kmalloc(
2431 sizeof(struct vme_slave_resource), GFP_KERNEL);
2432 if (slave_image == NULL) {
2433 dev_err(&pdev->dev, "Failed to allocate memory for "
2434 "slave resource structure\n");
2435 retval = -ENOMEM;
2436 goto err_slave;
2437 }
2438 slave_image->parent = tsi148_bridge;
2439 init_MUTEX(&(slave_image->sem));
2440 slave_image->locked = 0;
2441 slave_image->number = i;
2442 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2443 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2444 VME_USER3 | VME_USER4;
2445 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2446 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2447 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2448 VME_PROG | VME_DATA;
2449 list_add_tail(&(slave_image->list),
2450 &(tsi148_bridge->slave_resources));
2451 }
2452
2453 /* Add dma engines to list */
2454 INIT_LIST_HEAD(&(tsi148_bridge->dma_resources));
2455 for (i = 0; i < TSI148_MAX_DMA; i++) {
2456 dma_ctrlr = (struct vme_dma_resource *)kmalloc(
2457 sizeof(struct vme_dma_resource), GFP_KERNEL);
2458 if (dma_ctrlr == NULL) {
2459 dev_err(&pdev->dev, "Failed to allocate memory for "
2460 "dma resource structure\n");
2461 retval = -ENOMEM;
2462 goto err_dma;
2463 }
2464 dma_ctrlr->parent = tsi148_bridge;
2465 init_MUTEX(&(dma_ctrlr->sem));
2466 dma_ctrlr->locked = 0;
2467 dma_ctrlr->number = i;
2468 INIT_LIST_HEAD(&(dma_ctrlr->pending));
2469 INIT_LIST_HEAD(&(dma_ctrlr->running));
2470 list_add_tail(&(dma_ctrlr->list),
2471 &(tsi148_bridge->dma_resources));
2472 }
2473
2474 tsi148_bridge->slave_get = tsi148_slave_get;
2475 tsi148_bridge->slave_set = tsi148_slave_set;
2476 tsi148_bridge->master_get = tsi148_master_get;
2477 tsi148_bridge->master_set = tsi148_master_set;
2478 tsi148_bridge->master_read = tsi148_master_read;
2479 tsi148_bridge->master_write = tsi148_master_write;
2480 tsi148_bridge->master_rmw = tsi148_master_rmw;
2481 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2482 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2483 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2484 tsi148_bridge->request_irq = tsi148_request_irq;
2485 tsi148_bridge->free_irq = tsi148_free_irq;
2486 tsi148_bridge->generate_irq = tsi148_generate_irq;
2487 tsi148_bridge->lm_set = tsi148_lm_set;
2488 tsi148_bridge->lm_get = tsi148_lm_get;
2489 tsi148_bridge->lm_attach = tsi148_lm_attach;
2490 tsi148_bridge->lm_detach = tsi148_lm_detach;
2491 tsi148_bridge->slot_get = tsi148_slot_get;
2492
2493 data = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
2494 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2495 (data & TSI148_LCSR_VSTAT_SCONS)? "" : " not");
2496 dev_info(&pdev->dev, "VME geographical address is %d\n",
2497 data & TSI148_LCSR_VSTAT_GA_M);
2498 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2499 err_chk ? "enabled" : "disabled");
2500
2501 retval = tsi148_crcsr_init(pdev);
2502 if (retval) {
2503 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2504 goto err_crcsr;
2505 }
2506
2507 /* Need to save the tsi148_bridge pointer locally in a linked list for use
2508 * in tsi148_remove()
2509 */
2510 retval = vme_register_bridge(tsi148_bridge);
2511 if (retval != 0) {
2512 dev_err(&pdev->dev, "Chip Registration failed.\n");
2513 goto err_reg;
2514 }
2515
2516 /* Clear VME bus "board fail", and "power-up reset" lines */
2517 data = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
2518 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2519 data |= TSI148_LCSR_VSTAT_CPURST;
2520 iowrite32be(data, tsi148_bridge->base + TSI148_LCSR_VSTAT);
2521
2522 return 0;
2523
2524 vme_unregister_bridge(tsi148_bridge);
2525 err_reg:
2526 tsi148_crcsr_exit(pdev);
2527 err_crcsr:
2528 err_dma:
2529 /* resources are stored in link list */
2530 list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2531 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2532 list_del(pos);
2533 kfree(dma_ctrlr);
2534 }
2535 err_slave:
2536 /* resources are stored in link list */
2537 list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2538 slave_image = list_entry(pos, struct vme_slave_resource, list);
2539 list_del(pos);
2540 kfree(slave_image);
2541 }
2542 err_master:
2543 /* resources are stored in link list */
2544 list_for_each(pos, &(tsi148_bridge->master_resources)) {
2545 master_image = list_entry(pos, struct vme_master_resource, list);
2546 list_del(pos);
2547 kfree(master_image);
2548 }
2549
2550 tsi148_irq_exit(pdev);
2551 err_irq:
2552 err_test:
2553 iounmap(tsi148_bridge->base);
2554 err_remap:
2555 pci_release_regions(pdev);
2556 err_resource:
2557 pci_disable_device(pdev);
2558 err_enable:
2559 kfree(tsi148_bridge);
2560 err_struct:
2561 return retval;
2562
2563 }
2564
2565 static void tsi148_remove(struct pci_dev *pdev)
2566 {
2567 struct list_head *pos = NULL;
2568 struct vme_master_resource *master_image;
2569 struct vme_slave_resource *slave_image;
2570 struct vme_dma_resource *dma_ctrlr;
2571 int i;
2572
2573 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2574
2575 /* XXX We need to find the pdev->dev in the list of vme_bridge->dev's */
2576
2577 /*
2578 * Shutdown all inbound and outbound windows.
2579 */
2580 for (i = 0; i < 8; i++) {
2581 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_IT[i] +
2582 TSI148_LCSR_OFFSET_ITAT);
2583 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_OT[i] +
2584 TSI148_LCSR_OFFSET_OTAT);
2585 }
2586
2587 /*
2588 * Shutdown Location monitor.
2589 */
2590 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_LMAT);
2591
2592 /*
2593 * Shutdown CRG map.
2594 */
2595 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CSRAT);
2596
2597 /*
2598 * Clear error status.
2599 */
2600 iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_EDPAT);
2601 iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_VEAT);
2602 iowrite32be(0x07000700, tsi148_bridge->base + TSI148_LCSR_PSTAT);
2603
2604 /*
2605 * Remove VIRQ interrupt (if any)
2606 */
2607 if (ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR) & 0x800) {
2608 iowrite32be(0x8000, tsi148_bridge->base + TSI148_LCSR_VICR);
2609 }
2610
2611 /*
2612 * Disable and clear all interrupts.
2613 */
2614 iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEO);
2615 iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_INTC);
2616 iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_INTEN);
2617
2618 /*
2619 * Map all Interrupts to PCI INTA
2620 */
2621 iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTM1);
2622 iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTM2);
2623
2624 tsi148_irq_exit(pdev);
2625
2626 vme_unregister_bridge(tsi148_bridge);
2627
2628 tsi148_crcsr_exit(pdev);
2629
2630 /* resources are stored in link list */
2631 list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2632 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2633 list_del(pos);
2634 kfree(dma_ctrlr);
2635 }
2636
2637 /* resources are stored in link list */
2638 list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2639 slave_image = list_entry(pos, struct vme_slave_resource, list);
2640 list_del(pos);
2641 kfree(slave_image);
2642 }
2643
2644 /* resources are stored in link list */
2645 list_for_each(pos, &(tsi148_bridge->master_resources)) {
2646 master_image = list_entry(pos, struct vme_master_resource, list);
2647 list_del(pos);
2648 kfree(master_image);
2649 }
2650
2653 iounmap(tsi148_bridge->base);
2654
2655 pci_release_regions(pdev);
2656
2657 pci_disable_device(pdev);
2658
2659 kfree(tsi148_bridge);
2660 }
2661
2662 static void __exit tsi148_exit(void)
2663 {
2664 pci_unregister_driver(&tsi148_driver);
2665
2666 printk(KERN_DEBUG "Driver removed.\n");
2667 }
2668
2669 MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
2670 module_param(err_chk, bool, 0);
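
/*
 * err_chk is read at module load time, e.g. (hypothetical invocation,
 * assuming the module is built as vme_tsi148):
 *   modprobe vme_tsi148 err_chk=1
 * Enabling it reserves one master window as a flush image over the board's
 * own CR/CSR space (see tsi148_crcsr_init()) so that posted VME writes can
 * be flushed and checked for errors.
 */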
2671
2672 MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
2673 MODULE_LICENSE("GPL");
2674
2675 module_init(tsi148_init);
2676 module_exit(tsi148_exit);
2677
2678 /*----------------------------------------------------------------------------
2679 * STAGING
2680 *--------------------------------------------------------------------------*/
2681
2682 #if 0
2683 /*
2684 * Direct Mode DMA transfer
2685 *
2686 * XXX Not looking at direct mode for now, we can always use link list mode
2687 * with a single entry.
2688 */
2689 int tsi148_dma_run(struct vme_dma_resource *resource, struct vme_dma_attr src,
2690 struct vme_dma_attr dest, size_t count)
2691 {
2692 u32 dctlreg = 0;
2693 unsigned int tmp;
2694 int val;
2695 int channel, x;
2696 struct vmeDmaPacket *cur_dma;
2697 struct tsi148_dma_descriptor *dmaLL;
2698
2699 /* direct mode */
2700 dctlreg = 0x800000;
2701
2702 for (x = 0; x < 8; x++) { /* vme block size */
2703 if ((32 << x) >= vmeDma->maxVmeBlockSize) {
2704 break;
2705 }
2706 }
2707 if (x == 8)
2708 x = 7;
2709 dctlreg |= (x << 12);
2710
2711 for (x = 0; x < 8; x++) { /* pci block size */
2712 if ((32 << x) >= vmeDma->maxPciBlockSize) {
2713 break;
2714 }
2715 }
2716 if (x == 8)
2717 x = 7;
2718 dctlreg |= (x << 4);
2719
2720 if (vmeDma->vmeBackOffTimer) {
2721 for (x = 1; x < 8; x++) { /* vme timer */
2722 if ((1 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
2723 break;
2724 }
2725 }
2726 if (x == 8)
2727 x = 7;
2728 dctlreg |= (x << 8);
2729 }
2730
2731 if (vmeDma->pciBackOffTimer) {
2732 for (x = 1; x < 8; x++) { /* pci timer */
2733 if ((1 << (x - 1)) >= vmeDma->pciBackOffTimer) {
2734 break;
2735 }
2736 }
2737 if (x == 8)
2738 x = 7;
2739 dctlreg |= (x << 0);
2740 }
2741
2742 /* Program registers for DMA transfer */
2743 iowrite32be(dmaLL->dsau, tsi148_bridge->base +
2744 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAU);
2745 iowrite32be(dmaLL->dsal, tsi148_bridge->base +
2746 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAL);
2747 iowrite32be(dmaLL->ddau, tsi148_bridge->base +
2748 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAU);
2749 iowrite32be(dmaLL->ddal, tsi148_bridge->base +
2750 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAL);
2751 iowrite32be(dmaLL->dsat, tsi148_bridge->base +
2752 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAT);
2753 iowrite32be(dmaLL->ddat, tsi148_bridge->base +
2754 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAT);
2755 iowrite32be(dmaLL->dcnt, tsi148_bridge->base +
2756 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCNT);
2757 iowrite32be(dmaLL->ddbs, tsi148_bridge->base +
2758 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDBS);
2759
2760 /* Start the operation */
2761 iowrite32be(dctlreg | 0x2000000, tsi148_bridge->base +
2762 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
2763
2764 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
2765 TSI148_LCSR_OFFSET_DSTA);
2766 wait_event_interruptible(dma_queue[channel], (tmp & 0x1000000) == 0);
2767
2768 /*
2769 * Read status register, we should probably do this in some error
2770 * handler rather than here so that we can be sure we haven't kicked off
2771 * another DMA transfer.
2772 */
2773 val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
2774 TSI148_LCSR_OFFSET_DSTA);
2775
2776 vmeDma->vmeDmaStatus = 0;
2777 if (val & 0x10000000) {
2778 printk(KERN_ERR
2779 "DMA Error in DMA_tempe_irqhandler DSTA=%08X\n",
2780 val);
2781 vmeDma->vmeDmaStatus = val;
2782
2783 }
2784 return (0);
2785 }
2786 #endif
2787
2788 #if 0
2789
2790 /* Global VME controller information */
2791 struct pci_dev *vme_pci_dev;
2792
2793 /*
2794 * Set the VME bus arbiter with the requested attributes
2795 */
2796 int tempe_set_arbiter(vmeArbiterCfg_t * vmeArb)
2797 {
2798 int temp_ctl = 0;
2799 int gto = 0;
2800
2801 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);
2802 temp_ctl &= 0xFFEFFF00;
2803
2804 if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
2805 gto = 8;
2806 } else if (vmeArb->globalTimeoutTimer > 2048) {
2807 return (-EINVAL);
2808 } else if (vmeArb->globalTimeoutTimer == 0) {
2809 gto = 0;
2810 } else {
2811 gto = 1;
2812 while ((16 * (1 << (gto - 1))) < vmeArb->globalTimeoutTimer) {
2813 gto += 1;
2814 }
2815 }
2816 temp_ctl |= gto;
2817
2818 if (vmeArb->arbiterMode != VME_PRIORITY_MODE) {
2819 temp_ctl |= 1 << 6;
2820 }
2821
2822 if (vmeArb->arbiterTimeoutFlag) {
2823 temp_ctl |= 1 << 7;
2824 }
2825
2826 if (vmeArb->noEarlyReleaseFlag) {
2827 temp_ctl |= 1 << 20;
2828 }
2829 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VCTRL);
2830
2831 return (0);
2832 }
2833
2834 /*
2835 * Return the attributes of the VME bus arbiter.
2836 */
2837 int tempe_get_arbiter(vmeArbiterCfg_t * vmeArb)
2838 {
2839 int temp_ctl = 0;
2840 int gto = 0;
2841
2842
2843 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);
2844
2845 gto = temp_ctl & 0xF;
2846 if (gto != 0) {
2847 vmeArb->globalTimeoutTimer = (16 * (1 << (gto - 1)));
2848 }
2849
2850 if (temp_ctl & (1 << 6)) {
2851 vmeArb->arbiterMode = VME_R_ROBIN_MODE;
2852 } else {
2853 vmeArb->arbiterMode = VME_PRIORITY_MODE;
2854 }
2855
2856 if (temp_ctl & (1 << 7)) {
2857 vmeArb->arbiterTimeoutFlag = 1;
2858 }
2859
2860 if (temp_ctl & (1 << 20)) {
2861 vmeArb->noEarlyReleaseFlag = 1;
2862 }
2863
2864 return (0);
2865 }
2866
2867 /*
2868 * Set the VME bus requestor with the requested attributes
2869 */
2870 int tempe_set_requestor(vmeRequesterCfg_t * vmeReq)
2871 {
2872 int temp_ctl = 0;
2873
2874 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
2875 temp_ctl &= 0xFFFF0000;
2876
2877 if (vmeReq->releaseMode == 1) {
2878 temp_ctl |= (1 << 3);
2879 }
2880
2881 if (vmeReq->fairMode == 1) {
2882 temp_ctl |= (1 << 2);
2883 }
2884
2885 temp_ctl |= (vmeReq->timeonTimeoutTimer & 7) << 8;
2886 temp_ctl |= (vmeReq->timeoffTimeoutTimer & 7) << 12;
2887 temp_ctl |= vmeReq->requestLevel;
2888
2889 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
2890 return (0);
2891 }
2892
2893 /*
2894 * Return the attributes of the VME bus requestor
2895 */
2896 int tempe_get_requestor(vmeRequesterCfg_t * vmeReq)
2897 {
2898 int temp_ctl = 0;
2899
2900 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
2901
2902 if (temp_ctl & 0x18) {
2903 vmeReq->releaseMode = 1;
2904 }
2905
2906 if (temp_ctl & (1 << 2)) {
2907 vmeReq->fairMode = 1;
2908 }
2909
2910 vmeReq->requestLevel = temp_ctl & 3;
2911 vmeReq->timeonTimeoutTimer = (temp_ctl >> 8) & 7;
2912 vmeReq->timeoffTimeoutTimer = (temp_ctl >> 12) & 7;
2913
2914 return (0);
2915 }
2916
2917
2918 #endif