/*
 * ohci1394.c - driver for OHCI 1394 boards
 * Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
 *                        Gord Peters <GordPeters@smarttech.com>
 *              2001      Ben Collins <bcollins@debian.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Things known to be working:
 * . Async Request Transmit
 * . Async Response Receive
 * . Async Request Receive
 * . Async Response Transmit
 * . Iso Receive
 * . DMA mmap for iso receive
 * . Config ROM generation
 *
 * Things implemented, but still in test phase:
 * . Iso Transmit
 * . Async Stream Packets Transmit (Receive done via Iso interface)
 *
 * Things not implemented:
 * . DMA error recovery
 *
 * Known bugs:
 * . devctl BUS_RESET arg confusion (reset type or root holdoff?)
 *   added LONG_RESET_ROOT and SHORT_RESET_ROOT for root holdoff --kk
 */

/*
 * Acknowledgments:
 *
 * Adam J Richter <adam@yggdrasil.com>
 *  . Use of pci_class to find device
 *
 * Emilie Chung <emilie.chung@axis.com>
 *  . Tip on Async Request Filter
 *
 * Pascal Drolet <pascal.drolet@informission.ca>
 *  . Various tips for optimization and functionalities
 *
 * Robert Ficklin <rficklin@westengineering.com>
 *  . Loop in irq_handler
 *
 * James Goodwin <jamesg@Filanet.com>
 *  . Various tips on initialization, self-id reception, etc.
 *
 * Albrecht Dress <ad@mpifr-bonn.mpg.de>
 *  . Apple PowerBook detection
 *
 * Daniel Kobras <daniel.kobras@student.uni-tuebingen.de>
 *  . Reset the board properly before leaving + misc cleanups
 *
 * Leon van Stuivenberg <leonvs@iae.nl>
 *  . Bug fixes
 *
 * Ben Collins <bcollins@debian.org>
 *  . Working big-endian support
 *  . Updated to 2.4.x module scheme (PCI as well)
 *  . Config ROM generation
 *
 * Manfred Weihs <weihs@ict.tuwien.ac.at>
 *  . Reworked code for initiating bus resets
 *    (long, short, with or without hold-off)
 *
 * Nandu Santhi <contactnandu@users.sourceforge.net>
 *  . Added support for nVidia nForce2 onboard Firewire chipset
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
#include <linux/delay.h>
#include <linux/spinlock.h>

#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/irq.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/init.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#endif

#include "csr1212.h"
#include "ieee1394.h"
#include "ieee1394_types.h"
#include "hosts.h"
#include "dma.h"
#include "iso.h"
#include "ieee1394_core.h"
#include "highlevel.h"
#include "ohci1394.h"

#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
#define OHCI1394_DEBUG
#endif

#ifdef DBGMSG
#undef DBGMSG
#endif

#ifdef OHCI1394_DEBUG
#define DBGMSG(fmt, args...) \
printk(KERN_INFO "%s: fw-host%d: " fmt "\n", OHCI1394_DRIVER_NAME, ohci->host->id, ## args)
#else
#define DBGMSG(fmt, args...)
#endif

#ifdef CONFIG_IEEE1394_OHCI_DMA_DEBUG
#define OHCI_DMA_ALLOC(fmt, args...) \
	HPSB_ERR("%s(%s)alloc(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		++global_outstanding_dmas, ## args)
#define OHCI_DMA_FREE(fmt, args...) \
	HPSB_ERR("%s(%s)free(%d): "fmt, OHCI1394_DRIVER_NAME, __FUNCTION__, \
		--global_outstanding_dmas, ## args)
static int global_outstanding_dmas = 0;
#else
#define OHCI_DMA_ALLOC(fmt, args...)
#define OHCI_DMA_FREE(fmt, args...)
#endif

/* print general (card independent) information */
#define PRINT_G(level, fmt, args...) \
printk(level "%s: " fmt "\n", OHCI1394_DRIVER_NAME, ## args)

/* print card specific information */
#define PRINT(level, fmt, args...) \
printk(level "%s: fw-host%d: " fmt "\n", OHCI1394_DRIVER_NAME, ohci->host->id, ## args)

static char version[] __devinitdata =
	"$Rev: 1250 $ Ben Collins <bcollins@debian.org>";

/* Module Parameters */
static int phys_dma = 1;
module_param(phys_dma, int, 0644);
MODULE_PARM_DESC(phys_dma, "Enable physical dma (default = 1).");

static void dma_trm_tasklet(unsigned long data);
static void dma_trm_reset(struct dma_trm_ctx *d);

static int alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int buf_size, int split_buf_size, int context_base);
static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d);
static void free_dma_rcv_ctx(struct dma_rcv_ctx *d);

static int alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
			     enum context_type type, int ctx, int num_desc,
			     int context_base);

static void ohci1394_pci_remove(struct pci_dev *pdev);

#ifndef __LITTLE_ENDIAN
static unsigned hdr_sizes[] =
{
	3,	/* TCODE_WRITEQ */
	4,	/* TCODE_WRITEB */
	3,	/* TCODE_WRITE_RESPONSE */
	0,	/* ??? */
	3,	/* TCODE_READQ */
	4,	/* TCODE_READB */
	3,	/* TCODE_READQ_RESPONSE */
	4,	/* TCODE_READB_RESPONSE */
	1,	/* TCODE_CYCLE_START (???) */
	4,	/* TCODE_LOCK_REQUEST */
	2,	/* TCODE_ISO_DATA */
	4,	/* TCODE_LOCK_RESPONSE */
};

/* Swap headers.  Note: check the tcode bounds before indexing into
 * hdr_sizes[], otherwise an out-of-range tcode reads past the array. */
static inline void packet_swab(quadlet_t *data, int tcode)
{
	size_t size;

	if (tcode > TCODE_LOCK_RESPONSE || hdr_sizes[tcode] == 0)
		return;

	size = hdr_sizes[tcode];
	while (size--)
		data[size] = swab32(data[size]);
}
#else
/* Don't waste cycles on same sex byte swaps */
#define packet_swab(w,x)
#endif /* !LITTLE_ENDIAN */
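
/*
 * Illustrative sketch (not part of the driver): hdr_sizes[] gives the
 * header length in quadlets per transaction code, so on a big-endian
 * host a block write request (TCODE_WRITEB, 4 quadlets) would be
 * swapped like this:
 *
 *	quadlet_t hdr[4] = { ... };		// hypothetical header
 *	packet_swab(hdr, TCODE_WRITEB);		// swabs hdr[0]..hdr[3]
 *
 * On little-endian hosts packet_swab() compiles away to nothing.
 */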

/***********************************
 * IEEE-1394 functionality section *
 ***********************************/

static u8 get_phy_reg(struct ti_ohci *ohci, u8 addr)
{
	int i;
	unsigned long flags;
	quadlet_t r;

	spin_lock_irqsave(&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;

		mdelay(1);
	}

	r = reg_read(ohci, OHCI1394_PhyControl);

	if (i >= OHCI_LOOP_COUNT)
		PRINT(KERN_ERR, "Get PHY Reg timeout [0x%08x/0x%08x/%d]",
		      r, r & 0x80000000, i);

	spin_unlock_irqrestore(&ohci->phy_reg_lock, flags);

	return (r & 0x00ff0000) >> 16;
}

static void set_phy_reg(struct ti_ohci *ohci, u8 addr, u8 data)
{
	int i;
	unsigned long flags;
	u32 r = 0;

	spin_lock_irqsave(&ohci->phy_reg_lock, flags);

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		r = reg_read(ohci, OHCI1394_PhyControl);
		if (!(r & 0x00004000))
			break;

		mdelay(1);
	}

	if (i == OHCI_LOOP_COUNT)
		PRINT(KERN_ERR, "Set PHY Reg timeout [0x%08x/0x%08x/%d]",
		      r, r & 0x00004000, i);

	spin_unlock_irqrestore(&ohci->phy_reg_lock, flags);

	return;
}

/* ORs our value into the current value */
static void set_phy_reg_mask(struct ti_ohci *ohci, u8 addr, u8 data)
{
	u8 old;

	old = get_phy_reg(ohci, addr);
	old |= data;
	set_phy_reg(ohci, addr, old);

	return;
}
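
/*
 * Usage sketch: the PHY helpers above implement the read-modify-write
 * sequence used throughout this file.  For example, a long bus reset
 * is initiated by setting the IBR bit (0x40) in PHY register 1:
 *
 *	set_phy_reg_mask(ohci, 1, 0x40);   // read reg 1, OR in IBR, write back
 *
 * This is equivalent to the explicit get_phy_reg()/set_phy_reg() pair
 * used in the LONG_RESET case of ohci_devctl() below.
 */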

static void handle_selfid(struct ti_ohci *ohci, struct hpsb_host *host,
			  int phyid, int isroot)
{
	quadlet_t *q = ohci->selfid_buf_cpu;
	quadlet_t self_id_count = reg_read(ohci, OHCI1394_SelfIDCount);
	size_t size;
	quadlet_t q0, q1;

	/* Check status of self-id reception */

	if (ohci->selfid_swap)
		q0 = le32_to_cpu(q[0]);
	else
		q0 = q[0];

	if ((self_id_count & 0x80000000) ||
	    ((self_id_count & 0x00FF0000) != (q0 & 0x00FF0000))) {
		PRINT(KERN_ERR,
		      "Error in reception of SelfID packets [0x%08x/0x%08x] (count: %d)",
		      self_id_count, q0, ohci->self_id_errors);

		/* Tip by James Goodwin <jamesg@Filanet.com>:
		 * We had an error, generate another bus reset in response. */
		if (ohci->self_id_errors < OHCI1394_MAX_SELF_ID_ERRORS) {
			set_phy_reg_mask(ohci, 1, 0x40);
			ohci->self_id_errors++;
		} else {
			PRINT(KERN_ERR,
319 "Too many errors on SelfID error reception, giving up!");
		}
		return;
	}

	/* SelfID Ok, reset error counter. */
	ohci->self_id_errors = 0;

	size = ((self_id_count & 0x00001FFC) >> 2) - 1;
	q++;

	while (size > 0) {
		if (ohci->selfid_swap) {
			q0 = le32_to_cpu(q[0]);
			q1 = le32_to_cpu(q[1]);
		} else {
			q0 = q[0];
			q1 = q[1];
		}

		if (q0 == ~q1) {
			DBGMSG("SelfID packet 0x%x received", q0);
			hpsb_selfid_received(host, cpu_to_be32(q0));
			if (((q0 & 0x3f000000) >> 24) == phyid)
				DBGMSG("SelfID for this node is 0x%08x", q0);
		} else {
			PRINT(KERN_ERR,
			      "SelfID is inconsistent [0x%08x/0x%08x]", q0, q1);
		}
		q += 2;
		size -= 2;
	}

	DBGMSG("SelfID complete");

	return;
}
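
/*
 * Note on the checks above: each self-ID quadlet is transmitted twice,
 * the second time bit-inverted, which is why a packet is accepted only
 * when q0 == ~q1.  Bits 29:24 of the quadlet carry the sending node's
 * physical ID, hence the (q0 & 0x3f000000) >> 24 comparison against
 * our own phyid.  E.g. a (hypothetical) quadlet 0x807f8c5e would be
 * followed by its inverse 0x7f8073a1 and decodes to physical ID 0.
 */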

static void ohci_soft_reset(struct ti_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
	DBGMSG("Soft reset finished");
}


/* Generate the dma receive prgs and start the context */
static void initialize_dma_rcv_ctx(struct dma_rcv_ctx *d, int generate_irq)
{
	struct ti_ohci *ohci = (struct ti_ohci *)(d->ohci);
	int i;

	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	for (i = 0; i < d->num_desc; i++) {
		u32 c;

		c = DMA_CTL_INPUT_MORE | DMA_CTL_UPDATE | DMA_CTL_BRANCH;
		if (generate_irq)
			c |= DMA_CTL_IRQ;

		d->prg_cpu[i]->control = cpu_to_le32(c | d->buf_size);

		/* End of descriptor list? */
		if (i + 1 < d->num_desc) {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[i+1] & 0xfffffff0) | 0x1);
		} else {
			d->prg_cpu[i]->branchAddress =
				cpu_to_le32((d->prg_bus[0] & 0xfffffff0));
		}

		d->prg_cpu[i]->address = cpu_to_le32(d->buf_bus[i]);
		d->prg_cpu[i]->status = cpu_to_le32(d->buf_size);
	}

	d->buf_ind = 0;
	d->buf_offset = 0;

	if (d->type == DMA_CTX_ISO) {
		/* Clear contextControl */
		reg_write(ohci, d->ctrlClear, 0xffffffff);

		/* Set bufferFill, isochHeader, multichannel for IR context */
		reg_write(ohci, d->ctrlSet, 0xd0000000);

		/* Set the context match register to match on all tags */
		reg_write(ohci, d->ctxtMatch, 0xf0000000);

		/* Clear the multi channel mask high and low registers */
		reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, 0xffffffff);
		reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, 0xffffffff);

		/* Set up isoRecvIntMask to generate interrupts */
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << d->ctx);
	}

	/* Tell the controller where the first AR program is */
	reg_write(ohci, d->cmdPtr, d->prg_bus[0] | 0x1);

	/* Run context */
	reg_write(ohci, d->ctrlSet, 0x00008000);

	DBGMSG("Receive DMA ctx=%d initialized", d->ctx);
}

/* Initialize the dma transmit context */
static void initialize_dma_trm_ctx(struct dma_trm_ctx *d)
{
	struct ti_ohci *ohci = (struct ti_ohci *)(d->ohci);

	/* Stop the context */
	ohci1394_stop_context(ohci, d->ctrlClear, NULL);

	d->prg_ind = 0;
	d->sent_ind = 0;
	d->free_prgs = d->num_desc;
	d->branchAddrPtr = NULL;
	INIT_LIST_HEAD(&d->fifo_list);
	INIT_LIST_HEAD(&d->pending_list);

	if (d->type == DMA_CTX_ISO) {
		/* enable interrupts */
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << d->ctx);
	}

	DBGMSG("Transmit DMA ctx=%d initialized", d->ctx);
}

/* Count the number of available iso contexts */
static int get_nb_iso_ctx(struct ti_ohci *ohci, int reg)
{
	int i, ctx = 0;
	u32 tmp;

	reg_write(ohci, reg, 0xffffffff);
	tmp = reg_read(ohci, reg);

	DBGMSG("Iso contexts reg: %08x implemented: %08x", reg, tmp);

	/* Count the number of contexts */
	for (i = 0; i < 32; i++) {
		if (tmp & 1)
			ctx++;
		tmp >>= 1;
	}
	return ctx;
}
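
/*
 * Aside: writing all ones to the mask register and reading it back
 * leaves exactly one bit set per implemented context, so the loop
 * above is just a population count.  An equivalent sketch using the
 * kernel's bitops (assuming <linux/bitops.h> is available):
 *
 *	ctx = hweight32(tmp);
 */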

/* Global initialization */
static void ohci_initialize(struct ti_ohci *ohci)
{
	char irq_buf[16];
	quadlet_t buf;
	int num_ports, i;

	spin_lock_init(&ohci->phy_reg_lock);
	spin_lock_init(&ohci->event_lock);
	/* Set some defaults for these undefined bus options */
	buf = reg_read(ohci, OHCI1394_BusOptions);
	buf |=  0x60000000; /* Enable CMC and ISC */
	if (!hpsb_disable_irm)
		buf |=  0x80000000; /* Enable IRMC */
	buf &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */
	buf &= ~0x18000000; /* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, buf);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* Enable cycle timer and cycle master and set the IRM
	 * contender bit in our self ID packets if appropriate. */
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_CycleTimerEnable |
		  OHCI1394_LinkControl_CycleMaster);
	set_phy_reg_mask(ohci, 4, PHY_04_LCTRL |
			 (hpsb_disable_irm ? 0 : PHY_04_CONTENDER));

	/* Set up self-id dma buffer */
	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->selfid_buf_bus);

	/* enable self-id and phys */
	reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_RcvSelfID |
		  OHCI1394_LinkControl_RcvPhyPkt);

	/* Set the Config ROM mapping register */
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->csr_config_rom_bus);

	/* Now get our max packet size */
	ohci->max_packet_size =
		1 << (((reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf) + 1);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);

	/* Clear the interrupt mask */
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Initialize AR dma */
	initialize_dma_rcv_ctx(&ohci->ar_req_context, 0);
	initialize_dma_rcv_ctx(&ohci->ar_resp_context, 0);

	/* Initialize AT dma */
	initialize_dma_trm_ctx(&ohci->at_req_context);
	initialize_dma_trm_ctx(&ohci->at_resp_context);

	/* Initialize IR Legacy DMA */
	ohci->ir_legacy_channels = 0;
	initialize_dma_rcv_ctx(&ohci->ir_legacy_context, 1);
	DBGMSG("ISO receive legacy context activated");

	/*
	 * Accept AT requests from all nodes.  This probably
	 * will have to be controlled from the subsystem
	 * on a per node basis.
	 */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify AT retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwap);

	/* Enable interrupts */
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_unrecoverableError |
		  OHCI1394_masterIntEnable |
		  OHCI1394_busReset |
		  OHCI1394_selfIDComplete |
		  OHCI1394_RSPkt |
		  OHCI1394_RQPkt |
		  OHCI1394_respTxComplete |
		  OHCI1394_reqTxComplete |
		  OHCI1394_isochRx |
		  OHCI1394_isochTx |
		  OHCI1394_cycleInconsistent);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	buf = reg_read(ohci, OHCI1394_Version);
#ifndef __sparc__
	sprintf(irq_buf, "%d", ohci->dev->irq);
#else
	sprintf(irq_buf, "%s", __irq_itoa(ohci->dev->irq));
#endif
	PRINT(KERN_INFO, "OHCI-1394 %d.%d (PCI): IRQ=[%s] "
	      "MMIO=[%lx-%lx]  Max Packet=[%d]",
	      ((((buf) >> 16) & 0xf) + (((buf) >> 20) & 0xf) * 10),
	      ((((buf) >> 4) & 0xf) + ((buf) & 0xf) * 10), irq_buf,
	      pci_resource_start(ohci->dev, 0),
	      pci_resource_start(ohci->dev, 0) + OHCI1394_REGISTER_SIZE - 1,
	      ohci->max_packet_size);

	/* Check all of our ports to make sure that if anything is
	 * connected, we enable that port. */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}

	/* Serial EEPROM Sanity check. */
	if ((ohci->max_packet_size < 512) ||
	    (ohci->max_packet_size > 4096)) {
		/* Serial EEPROM contents are suspect, set a sane max packet
		 * size and print the raw contents for bug reports if verbose
		 * debug is enabled. */
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		int i;
#endif

		PRINT(KERN_DEBUG, "Serial EEPROM has suspicious values, "
618 "attempting to setting max_packet_size to 512 bytes");
		reg_write(ohci, OHCI1394_BusOptions,
			  (reg_read(ohci, OHCI1394_BusOptions) & 0xf007) | 0x8002);
		ohci->max_packet_size = 512;
#ifdef CONFIG_IEEE1394_VERBOSEDEBUG
		PRINT(KERN_DEBUG, "    EEPROM Present: %d",
		      (reg_read(ohci, OHCI1394_Version) >> 24) & 0x1);
		reg_write(ohci, OHCI1394_GUID_ROM, 0x80000000);

		for (i = 0;
		     ((i < 1000) &&
		      (reg_read(ohci, OHCI1394_GUID_ROM) & 0x80000000)); i++)
			udelay(10);

		for (i = 0; i < 0x20; i++) {
			reg_write(ohci, OHCI1394_GUID_ROM, 0x02000000);
			PRINT(KERN_DEBUG, "    EEPROM %02x: %02x", i,
			      (reg_read(ohci, OHCI1394_GUID_ROM) >> 16) & 0xff);
		}
#endif
	}
}

/*
 * Insert a packet into the DMA FIFO and generate the DMA program.
 * FIXME: rewrite the program so that it accepts packets crossing
 *        page boundaries, and also check that a single DMA
 *        descriptor doesn't cross a page boundary.
 */
static void insert_packet(struct ti_ohci *ohci,
			  struct dma_trm_ctx *d, struct hpsb_packet *packet)
{
	u32 cycleTimer;
	int idx = d->prg_ind;

	DBGMSG("Inserting packet for node " NODE_BUS_FMT
	       ", tlabel=%d, tcode=0x%x, speed=%d",
	       NODE_BUS_ARGS(ohci->host, packet->node_id), packet->tlabel,
	       packet->tcode, packet->speed_code);

	d->prg_cpu[idx]->begin.address = 0;
	d->prg_cpu[idx]->begin.branchAddress = 0;

	if (d->type == DMA_CTX_ASYNC_RESP) {
		/*
		 * For response packets, we need to put a timeout value in
		 * the 16 lower bits of the status... let's try 1 sec timeout
		 */
		cycleTimer = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		d->prg_cpu[idx]->begin.status = cpu_to_le32(
			(((((cycleTimer >> 25) & 0x7) + 1) & 0x7) << 13) |
			((cycleTimer & 0x01fff000) >> 12));
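		/*
		 * Worked example of the timestamp above: bits 31:25 of
		 * the cycle timer count seconds and bits 24:12 count
		 * cycles (0..7999).  We store (seconds + 1) mod 8 in
		 * bits 15:13 and the current cycle in bits 12:0, so
		 * the response expires roughly 8000 cycles (one
		 * second) from now.
		 */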

		DBGMSG("cycleTimer: %08x timeStamp: %08x",
		       cycleTimer, d->prg_cpu[idx]->begin.status);
	} else
		d->prg_cpu[idx]->begin.status = 0;

	if ((packet->type == hpsb_async) || (packet->type == hpsb_raw)) {

		if (packet->type == hpsb_raw) {
			d->prg_cpu[idx]->data[0] = cpu_to_le32(OHCI1394_TCODE_PHY << 4);
			d->prg_cpu[idx]->data[1] = cpu_to_le32(packet->header[0]);
			d->prg_cpu[idx]->data[2] = cpu_to_le32(packet->header[1]);
		} else {
			d->prg_cpu[idx]->data[0] = packet->speed_code << 16 |
				(packet->header[0] & 0xFFFF);

			if (packet->tcode == TCODE_ISO_DATA) {
				/* Sending an async stream packet */
				d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
			} else {
				/* Sending a normal async request or response */
				d->prg_cpu[idx]->data[1] =
					(packet->header[1] & 0xFFFF) |
					(packet->header[0] & 0xFFFF0000);
				d->prg_cpu[idx]->data[2] = packet->header[2];
				d->prg_cpu[idx]->data[3] = packet->header[3];
			}
			packet_swab(d->prg_cpu[idx]->data, packet->tcode);
		}

		if (packet->data_size) { /* block transmit */
			if (packet->tcode == TCODE_STREAM_DATA) {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x8);
			} else {
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_MORE |
						    DMA_CTL_IMMEDIATE | 0x10);
			}
			d->prg_cpu[idx]->end.control =
				cpu_to_le32(DMA_CTL_OUTPUT_LAST |
					    DMA_CTL_IRQ |
					    DMA_CTL_BRANCH |
					    packet->data_size);
			/*
			 * Check that the packet data buffer
			 * does not cross a page boundary.
			 *
			 * XXX Fix this some day. eth1394 seems to trigger
			 * it, but ignoring it doesn't seem to cause a
			 * problem.
			 */
#if 0
			if (cross_bound((unsigned long)packet->data,
					packet->data_size) > 0) {
				/* FIXME: do something about it */
				PRINT(KERN_ERR,
				      "%s: packet data addr: %p size %Zd bytes "
				      "cross page boundary", __FUNCTION__,
				      packet->data, packet->data_size);
			}
#endif
			d->prg_cpu[idx]->end.address = cpu_to_le32(
				pci_map_single(ohci->dev, packet->data,
					       packet->data_size,
					       PCI_DMA_TODEVICE));
			OHCI_DMA_ALLOC("single, block transmit packet");

			d->prg_cpu[idx]->end.branchAddress = 0;
			d->prg_cpu[idx]->end.status = 0;
			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x3);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->end.branchAddress);
		} else { /* quadlet transmit */
			if (packet->type == hpsb_raw)
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    (packet->header_size + 4));
			else
				d->prg_cpu[idx]->begin.control =
					cpu_to_le32(DMA_CTL_OUTPUT_LAST |
						    DMA_CTL_IMMEDIATE |
						    DMA_CTL_IRQ |
						    DMA_CTL_BRANCH |
						    packet->header_size);

			if (d->branchAddrPtr)
				*(d->branchAddrPtr) =
					cpu_to_le32(d->prg_bus[idx] | 0x2);
			d->branchAddrPtr =
				&(d->prg_cpu[idx]->begin.branchAddress);
		}

	} else { /* iso packet */
		d->prg_cpu[idx]->data[0] = packet->speed_code << 16 |
			(packet->header[0] & 0xFFFF);
		d->prg_cpu[idx]->data[1] = packet->header[0] & 0xFFFF0000;
		packet_swab(d->prg_cpu[idx]->data, packet->tcode);

		d->prg_cpu[idx]->begin.control =
			cpu_to_le32(DMA_CTL_OUTPUT_MORE |
				    DMA_CTL_IMMEDIATE | 0x8);
		d->prg_cpu[idx]->end.control =
			cpu_to_le32(DMA_CTL_OUTPUT_LAST |
				    DMA_CTL_UPDATE |
				    DMA_CTL_IRQ |
				    DMA_CTL_BRANCH |
				    packet->data_size);
		d->prg_cpu[idx]->end.address = cpu_to_le32(
			pci_map_single(ohci->dev, packet->data,
				       packet->data_size, PCI_DMA_TODEVICE));
		OHCI_DMA_ALLOC("single, iso transmit packet");

		d->prg_cpu[idx]->end.branchAddress = 0;
		d->prg_cpu[idx]->end.status = 0;
		DBGMSG("Iso xmit context info: header[%08x %08x]\n"
		       "                       begin=%08x %08x %08x %08x\n"
		       "                             %08x %08x %08x %08x\n"
		       "                       end  =%08x %08x %08x %08x",
		       d->prg_cpu[idx]->data[0], d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->begin.control,
		       d->prg_cpu[idx]->begin.address,
		       d->prg_cpu[idx]->begin.branchAddress,
		       d->prg_cpu[idx]->begin.status,
		       d->prg_cpu[idx]->data[0],
		       d->prg_cpu[idx]->data[1],
		       d->prg_cpu[idx]->data[2],
		       d->prg_cpu[idx]->data[3],
		       d->prg_cpu[idx]->end.control,
		       d->prg_cpu[idx]->end.address,
		       d->prg_cpu[idx]->end.branchAddress,
		       d->prg_cpu[idx]->end.status);
		if (d->branchAddrPtr)
			*(d->branchAddrPtr) = cpu_to_le32(d->prg_bus[idx] | 0x3);
		d->branchAddrPtr = &(d->prg_cpu[idx]->end.branchAddress);
	}
	d->free_prgs--;

	/* queue the packet in the appropriate context queue */
	list_add_tail(&packet->driver_list, &d->fifo_list);
	d->prg_ind = (d->prg_ind + 1) % d->num_desc;
}

/*
 * This function fills the FIFO with any pending packets and runs or
 * wakes up the DMA program if necessary.
 *
 * The function MUST be called with the d->lock held.
 */
static void dma_trm_flush(struct ti_ohci *ohci, struct dma_trm_ctx *d)
{
	struct hpsb_packet *packet, *ptmp;
	int idx = d->prg_ind;
	int z = 0;

	/* insert the packets into the dma fifo */
	list_for_each_entry_safe(packet, ptmp, &d->pending_list, driver_list) {
		if (!d->free_prgs)
			break;

		/* For the first packet only */
		if (!z)
			z = (packet->data_size) ? 3 : 2;

		/* Insert the packet */
		list_del_init(&packet->driver_list);
		insert_packet(ohci, d, packet);
	}

	/* Nothing was queued: either no free programs or no pending packets */
	if (z == 0)
		return;

	/* Is the context running? (it should be, unless this is the
	   first packet to be sent in this context) */
	if (!(reg_read(ohci, d->ctrlSet) & 0x8000)) {
		u32 nodeId = reg_read(ohci, OHCI1394_NodeID);

		DBGMSG("Starting transmit DMA ctx=%d", d->ctx);
		reg_write(ohci, d->cmdPtr, d->prg_bus[idx] | z);

		/* Check that the node id is valid, and not 63 */
		if (!(nodeId & 0x80000000) || (nodeId & 0x3f) == 63)
			PRINT(KERN_ERR, "Running dma failed because Node ID is not valid");
		else
			reg_write(ohci, d->ctrlSet, 0x8000);
	} else {
		/* Wake up the dma context if necessary */
		if (!(reg_read(ohci, d->ctrlSet) & 0x400))
			DBGMSG("Waking transmit DMA ctx=%d", d->ctx);

		/* do this always, to avoid race condition */
		reg_write(ohci, d->ctrlSet, 0x1000);
	}

	return;
}
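
/*
 * Note on 'z' above (a sketch of the interpretation, not normative):
 * it is the Z value ORed into the CommandPtr register, telling the
 * controller how many 16-byte descriptor blocks the first program
 * occupies - 3 when the packet carries a data payload (the immediate
 * header descriptor with its inline data plus the OUTPUT_LAST
 * descriptor), 2 for header-only packets (the immediate descriptor
 * alone).  insert_packet() ORs the same values (0x3/0x2) into
 * branchAddress when chaining programs together.
 */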

/* Transmission of an async or iso packet */
static int ohci_transmit(struct hpsb_host *host, struct hpsb_packet *packet)
{
	struct ti_ohci *ohci = host->hostdata;
	struct dma_trm_ctx *d;
	unsigned long flags;

	if (packet->data_size > ohci->max_packet_size) {
		PRINT(KERN_ERR,
		      "Transmit packet size %Zd is too big",
		      packet->data_size);
		return -EOVERFLOW;
	}

	/* Decide whether we have an iso, a request, or a response packet */
	if (packet->type == hpsb_raw)
		d = &ohci->at_req_context;
	else if ((packet->tcode == TCODE_ISO_DATA) && (packet->type == hpsb_iso)) {
		/* The legacy IT DMA context is initialized on first
		 * use.  However, the alloc cannot be run from
		 * interrupt context, so we bail out if that is the
		 * case.  I don't see anyone sending ISO packets from
		 * interrupt context anyway... */

		if (ohci->it_legacy_context.ohci == NULL) {
			if (in_interrupt()) {
				PRINT(KERN_ERR,
				      "legacy IT context cannot be initialized during interrupt");
				return -EINVAL;
			}

			if (alloc_dma_trm_ctx(ohci, &ohci->it_legacy_context,
					      DMA_CTX_ISO, 0, IT_NUM_DESC,
					      OHCI1394_IsoXmitContextBase) < 0) {
				PRINT(KERN_ERR,
				      "error initializing legacy IT context");
				return -ENOMEM;
			}

			initialize_dma_trm_ctx(&ohci->it_legacy_context);
		}

		d = &ohci->it_legacy_context;
	} else if ((packet->tcode & 0x02) && (packet->tcode != TCODE_ISO_DATA))
		d = &ohci->at_resp_context;
	else
		d = &ohci->at_req_context;

	spin_lock_irqsave(&d->lock, flags);

	list_add_tail(&packet->driver_list, &d->pending_list);

	dma_trm_flush(ohci, d);

	spin_unlock_irqrestore(&d->lock, flags);

	return 0;
}

static int ohci_devctl(struct hpsb_host *host, enum devctl_cmd cmd, int arg)
{
	struct ti_ohci *ohci = host->hostdata;
	int retval = 0;
	unsigned long flags;
	int phy_reg;

	switch (cmd) {
	case RESET_BUS:
		switch (arg) {
		case SHORT_RESET:
			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* set IBR */
			break;
		case SHORT_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (phy_reg & 0x80) {
				phy_reg &= ~0x80;
				set_phy_reg(ohci, 1, phy_reg); /* clear RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_NO_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg &= ~0x80;
			phy_reg |= 0x40;
			set_phy_reg(ohci, 1, phy_reg); /* clear RHB, set IBR */
			break;
		case SHORT_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			if (!(phy_reg & 0x80)) {
				phy_reg |= 0x80;
				set_phy_reg(ohci, 1, phy_reg); /* set RHB */
			}

			phy_reg = get_phy_reg(ohci, 5);
			phy_reg |= 0x40;
			set_phy_reg(ohci, 5, phy_reg); /* set ISBR */
			break;
		case LONG_RESET_FORCE_ROOT:
			phy_reg = get_phy_reg(ohci, 1);
			phy_reg |= 0xc0;
			set_phy_reg(ohci, 1, phy_reg); /* set RHB and IBR */
			break;
		default:
			retval = -1;
		}
		break;

	case GET_CYCLE_COUNTER:
		retval = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		break;

	case SET_CYCLE_COUNTER:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, arg);
		break;

	case SET_BUS_ID:
		PRINT(KERN_ERR, "devctl command SET_BUS_ID err");
		break;

	case ACT_CYCLE_MASTER:
		if (arg) {
			/* check if we are root and other nodes are present */
			u32 nodeId = reg_read(ohci, OHCI1394_NodeID);
			if ((nodeId & (1 << 30)) && (nodeId & 0x3f)) {
				/*
				 * enable cycleTimer, cycleMaster
				 */
				DBGMSG("Cycle master enabled");
				reg_write(ohci, OHCI1394_LinkControlSet,
					  OHCI1394_LinkControl_CycleTimerEnable |
					  OHCI1394_LinkControl_CycleMaster);
			}
		} else {
			/* disable cycleTimer, cycleMaster, cycleSource */
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_CycleTimerEnable |
				  OHCI1394_LinkControl_CycleMaster |
				  OHCI1394_LinkControl_CycleSource);
		}
		break;

	case CANCEL_REQUESTS:
		DBGMSG("Cancel request received");
		dma_trm_reset(&ohci->at_req_context);
		dma_trm_reset(&ohci->at_resp_context);
		break;

	case ISO_LISTEN_CHANNEL:
	{
		u64 mask;

		if (arg < 0 || arg > 63) {
			PRINT(KERN_ERR,
1038 "%s: IS0 listen channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1 << arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (ohci->ISO_channel_usage & mask) {
			PRINT(KERN_ERR,
1049 "%s: IS0 listen channel %d is already used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage |= mask;
		ohci->ir_legacy_channels |= mask;

		if (arg > 31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet,
				  1 << (arg - 32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet,
				  1 << arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening enabled on channel %d", arg);
		break;
	}
	case ISO_UNLISTEN_CHANNEL:
	{
		u64 mask;

		if (arg < 0 || arg > 63) {
			PRINT(KERN_ERR,
1075 "%s: IS0 unlisten channel %d is out of range",
			      __FUNCTION__, arg);
			return -EFAULT;
		}

		mask = (u64)0x1 << arg;

		spin_lock_irqsave(&ohci->IR_channel_lock, flags);

		if (!(ohci->ISO_channel_usage & mask)) {
			PRINT(KERN_ERR,
1086 "%s: IS0 unlisten channel %d is not used",
			      __FUNCTION__, arg);
			spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
			return -EFAULT;
		}

		ohci->ISO_channel_usage &= ~mask;
		ohci->ir_legacy_channels &= ~mask;

		if (arg > 31)
			reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear,
				  1 << (arg - 32));
		else
			reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear,
				  1 << arg);

		spin_unlock_irqrestore(&ohci->IR_channel_lock, flags);
		DBGMSG("Listening disabled on channel %d", arg);
		break;
	}
	default:
		PRINT_G(KERN_ERR, "ohci_devctl cmd %d not implemented yet",
			cmd);
		break;
	}
	return retval;
}

/***********************************
 * rawiso ISO reception            *
 ***********************************/

/*
  We use either buffer-fill or packet-per-buffer DMA mode.  The DMA
  buffer is split into "blocks" (regions described by one DMA
  descriptor).  Each block must be one page or less in size, and
  must not cross a page boundary.

  There is one little wrinkle with buffer-fill mode: a packet that
  starts in the final block may wrap around into the first block.  But
  the user API expects all packets to be contiguous.  Our solution is
  to keep the very last page of the DMA buffer in reserve - if a
  packet spans the gap, we copy its tail into this page.
*/
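
/*
 * Worked example of that layout (hypothetical sizes): with PAGE_SIZE
 * 4096 and a 64 KiB DMA buffer, ohci_iso_recv_init() below computes
 * nblocks = 64/4 - 1 = 15 descriptor-covered blocks; the sixteenth
 * page is the reserved guard page.  A packet whose payload starts
 * near the end of block 14 and wraps into block 0 has its tail copied
 * into the guard page, so the reader still sees one contiguous range.
 */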

struct ohci_iso_recv {
	struct ti_ohci *ohci;

	struct ohci1394_iso_tasklet task;
	int task_active;

	enum { BUFFER_FILL_MODE = 0,
	       PACKET_PER_BUFFER_MODE = 1 } dma_mode;

	/* memory and PCI mapping for the DMA descriptors */
	struct dma_prog_region prog;
	struct dma_cmd *block; /* = (struct dma_cmd*) prog.virt */

	/* how many DMA blocks fit in the buffer */
	unsigned int nblocks;

	/* stride of DMA blocks */
	unsigned int buf_stride;

	/* number of blocks to batch between interrupts */
	int block_irq_interval;

	/* block that DMA will finish next */
	int block_dma;

	/* (buffer-fill only) block that the reader will release next */
	int block_reader;

	/* (buffer-fill only) bytes of buffer the reader has released,
	   less than one block */
	int released_bytes;

	/* (buffer-fill only) buffer offset at which the next packet will appear */
	int dma_offset;

	/* OHCI DMA context control registers */
	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
	u32 ContextMatch;
};

static void ohci_iso_recv_task(unsigned long data);
static void ohci_iso_recv_stop(struct hpsb_iso *iso);
static void ohci_iso_recv_shutdown(struct hpsb_iso *iso);
static int  ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync);
static void ohci_iso_recv_program(struct hpsb_iso *iso);

static int ohci_iso_recv_init(struct hpsb_iso *iso)
{
	struct ti_ohci *ohci = iso->host->hostdata;
	struct ohci_iso_recv *recv;
	int ctx;
	int ret = -ENOMEM;

	recv = kmalloc(sizeof(*recv), SLAB_KERNEL);
	if (!recv)
		return -ENOMEM;

	iso->hostdata = recv;
	recv->ohci = ohci;
	recv->task_active = 0;
	dma_prog_region_init(&recv->prog);
	recv->block = NULL;

	/* use buffer-fill mode, unless irq_interval is 1
	   (note: multichannel requires buffer-fill) */

	if (((iso->irq_interval == 1 && iso->dma_mode == HPSB_ISO_DMA_OLD_ABI) ||
	     iso->dma_mode == HPSB_ISO_DMA_PACKET_PER_BUFFER) && iso->channel != -1) {
		recv->dma_mode = PACKET_PER_BUFFER_MODE;
	} else {
		recv->dma_mode = BUFFER_FILL_MODE;
	}

	/* set nblocks, buf_stride, block_irq_interval */

	if (recv->dma_mode == BUFFER_FILL_MODE) {
		recv->buf_stride = PAGE_SIZE;

		/* one block per page of data in the DMA buffer, minus the final guard page */
		recv->nblocks = iso->buf_size / PAGE_SIZE - 1;
		if (recv->nblocks < 3) {
			DBGMSG("ohci_iso_recv_init: DMA buffer too small");
			goto err;
		}

		/* iso->irq_interval is in packets - translate that to blocks */
		if (iso->irq_interval == 1)
			recv->block_irq_interval = 1;
		else
			recv->block_irq_interval = iso->irq_interval *
						   ((recv->nblocks + 1) / iso->buf_packets);
		if (recv->block_irq_interval * 4 > recv->nblocks)
			recv->block_irq_interval = recv->nblocks / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

	} else {
		int max_packet_size;

		recv->nblocks = iso->buf_packets;
		recv->block_irq_interval = iso->irq_interval;
		if (recv->block_irq_interval * 4 > iso->buf_packets)
			recv->block_irq_interval = iso->buf_packets / 4;
		if (recv->block_irq_interval < 1)
			recv->block_irq_interval = 1;

		/* choose a buffer stride */
		/* must be a power of 2, and <= PAGE_SIZE */

		max_packet_size = iso->buf_size / iso->buf_packets;

		for (recv->buf_stride = 8; recv->buf_stride < max_packet_size;
		     recv->buf_stride *= 2);
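		/* The loop picks the smallest power of two >= max_packet_size
		 * (minimum 8).  E.g. (hypothetically) a 64 KiB buffer holding
		 * 64 packets gives max_packet_size 1024, so buf_stride = 1024. */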

		if (recv->buf_stride * iso->buf_packets > iso->buf_size ||
		    recv->buf_stride > PAGE_SIZE) {
			/* this shouldn't happen, but anyway... */
			DBGMSG("ohci_iso_recv_init: problem choosing a buffer stride");
			goto err;
		}
	}

	recv->block_reader = 0;
	recv->released_bytes = 0;
	recv->block_dma = 0;
	recv->dma_offset = 0;

	/* size of DMA program = one descriptor per block */
	if (dma_prog_region_alloc(&recv->prog,
				  sizeof(struct dma_cmd) * recv->nblocks,
				  recv->ohci->dev))
		goto err;

	recv->block = (struct dma_cmd *) recv->prog.kvirt;

	ohci1394_init_iso_tasklet(&recv->task,
				  iso->channel == -1 ? OHCI_ISO_MULTICHANNEL_RECEIVE :
						       OHCI_ISO_RECEIVE,
				  ohci_iso_recv_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(recv->ohci, &recv->task) < 0)
		goto err;

	recv->task_active = 1;

	/* recv context registers are spaced 32 bytes apart */
	ctx = recv->task.context;
	recv->ContextControlSet = OHCI1394_IsoRcvContextControlSet + 32 * ctx;
	recv->ContextControlClear = OHCI1394_IsoRcvContextControlClear + 32 * ctx;
	recv->CommandPtr = OHCI1394_IsoRcvCommandPtr + 32 * ctx;
	recv->ContextMatch = OHCI1394_IsoRcvContextMatch + 32 * ctx;

	if (iso->channel == -1) {
		/* clear multi-channel selection mask */
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, 0xFFFFFFFF);
		reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, 0xFFFFFFFF);
	}

	/* write the DMA program */
	ohci_iso_recv_program(iso);

	DBGMSG("ohci_iso_recv_init: %s mode, DMA buffer is %lu pages"
	       " (%u bytes), using %u blocks, buf_stride %u, block_irq_interval %d",
	       recv->dma_mode == BUFFER_FILL_MODE ?
	       "buffer-fill" : "packet-per-buffer",
	       iso->buf_size / PAGE_SIZE, iso->buf_size,
	       recv->nblocks, recv->buf_stride, recv->block_irq_interval);

	return 0;

err:
	ohci_iso_recv_shutdown(iso);
	return ret;
}

static void ohci_iso_recv_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	/* disable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << recv->task.context);

	/* halt DMA */
	ohci1394_stop_context(recv->ohci, recv->ContextControlClear, NULL);
}

static void ohci_iso_recv_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->task_active) {
		ohci_iso_recv_stop(iso);
		ohci1394_unregister_iso_tasklet(recv->ohci, &recv->task);
		recv->task_active = 0;
	}

	dma_prog_region_free(&recv->prog);
	kfree(recv);
	iso->hostdata = NULL;
}

/* set up a "gapped" ring buffer DMA program */
static void ohci_iso_recv_program(struct hpsb_iso *iso)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int blk;

	/* address of 'branch' field in previous DMA descriptor */
	u32 *prev_branch = NULL;

	for (blk = 0; blk < recv->nblocks; blk++) {
		u32 control;

		/* the DMA descriptor */
		struct dma_cmd *cmd = &recv->block[blk];

		/* offset of the DMA descriptor relative to the DMA prog buffer */
		unsigned long prog_offset = blk * sizeof(struct dma_cmd);

		/* offset of this packet's data within the DMA buffer */
		unsigned long buf_offset = blk * recv->buf_stride;

		if (recv->dma_mode == BUFFER_FILL_MODE) {
			control = 2 << 28; /* INPUT_MORE */
		} else {
			control = 3 << 28; /* INPUT_LAST */
		}

		control |= 8 << 24; /* s = 1, update xferStatus and resCount */

		/* interrupt on last block, and at intervals */
		if (blk == recv->nblocks - 1 || (blk % recv->block_irq_interval) == 0) {
			control |= 3 << 20; /* want interrupt */
		}

		control |= 3 << 18; /* enable branch to address */
		control |= recv->buf_stride;

		cmd->control = cpu_to_le32(control);
		cmd->address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, buf_offset));
		cmd->branchAddress = 0; /* filled in on next loop */
		cmd->status = cpu_to_le32(recv->buf_stride);

		/* link the previous descriptor to this one */
		if (prev_branch) {
			*prev_branch = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog, prog_offset) | 1);
		}

		prev_branch = &cmd->branchAddress;
	}

	/* the final descriptor's branch address and Z should be left at 0 */
}

/* listen or unlisten to a specific channel (multi-channel mode only) */
static void ohci_iso_recv_change_channel(struct hpsb_iso *iso, unsigned char channel, int listen)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int reg, i;

	if (channel < 32) {
		reg = listen ? OHCI1394_IRMultiChanMaskLoSet : OHCI1394_IRMultiChanMaskLoClear;
		i = channel;
	} else {
		reg = listen ? OHCI1394_IRMultiChanMaskHiSet : OHCI1394_IRMultiChanMaskHiClear;
		i = channel - 32;
	}

	reg_write(recv->ohci, reg, (1 << i));

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static void ohci_iso_recv_set_channel_mask(struct hpsb_iso *iso, u64 mask)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i)) {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoSet, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiSet, (1 << (i - 32)));
		} else {
			if (i < 32)
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskLoClear, (1 << i));
			else
				reg_write(recv->ohci, OHCI1394_IRMultiChanMaskHiClear, (1 << (i - 32)));
		}
	}

	/* issue a dummy read to force all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);
}

static int ohci_iso_recv_start(struct hpsb_iso *iso, int cycle, int tag_mask, int sync)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	struct ti_ohci *ohci = recv->ohci;
	u32 command, contextMatch;

	reg_write(recv->ohci, recv->ContextControlClear, 0xFFFFFFFF);
	wmb();

	/* always keep ISO headers */
	command = (1 << 30);

	if (recv->dma_mode == BUFFER_FILL_MODE)
		command |= (1 << 31);

	reg_write(recv->ohci, recv->ContextControlSet, command);

	/* match on specified tags */
	contextMatch = tag_mask << 28;

	if (iso->channel == -1) {
		/* enable multichannel reception */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 28));
	} else {
		/* listen on channel */
		contextMatch |= iso->channel;
	}

	if (cycle != -1) {
		u32 seconds;

		/* enable cycleMatch */
		reg_write(recv->ohci, recv->ContextControlSet, (1 << 29));

		/* set starting cycle */
		cycle &= 0x1FFF;

		/* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
		   just snarf them from the current time */
		seconds = reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer) >> 25;

		/* advance one second to give some extra time for DMA to start */
		seconds += 1;

		cycle |= (seconds & 3) << 13;

		contextMatch |= cycle << 12;
	}

	if (sync != -1) {
		/* set sync flag on first DMA descriptor */
		struct dma_cmd *cmd = &recv->block[recv->block_dma];
		cmd->control |= cpu_to_le32(DMA_CTL_WAIT);

		/* match sync field */
		contextMatch |= (sync & 0xf) << 8;
	}

	reg_write(recv->ohci, recv->ContextMatch, contextMatch);

	/* address of first descriptor block */
	command = dma_prog_region_offset_to_bus(&recv->prog,
						recv->block_dma * sizeof(struct dma_cmd));
	command |= 1; /* Z=1 */

	reg_write(recv->ohci, recv->CommandPtr, command);

	/* enable interrupts */
	reg_write(recv->ohci, OHCI1394_IsoRecvIntMaskSet, 1 << recv->task.context);

	wmb();

	/* run */
	reg_write(recv->ohci, recv->ContextControlSet, 0x8000);

	/* issue a dummy read of the cycle timer register to force
	   all PCI writes to be posted immediately */
	mb();
	reg_read(recv->ohci, OHCI1394_IsochronousCycleTimer);

	/* check RUN */
	if (!(reg_read(recv->ohci, recv->ContextControlSet) & 0x8000)) {
		PRINT(KERN_ERR,
1515 "Error starting IR DMA (ContextControl 0x%08x)\n",
		      reg_read(recv->ohci, recv->ContextControlSet));
		return -1;
	}

	return 0;
}

static void ohci_iso_recv_release_block(struct ohci_iso_recv *recv, int block)
{
	/* re-use the DMA descriptor for the block
	   by linking the previous descriptor to it */

	int next_i = block;
	int prev_i = (next_i == 0) ? (recv->nblocks - 1) : (next_i - 1);

	struct dma_cmd *next = &recv->block[next_i];
	struct dma_cmd *prev = &recv->block[prev_i];

	/* 'next' becomes the new end of the DMA chain,
	   so disable branch and enable interrupt */
	next->branchAddress = 0;
	next->control |= cpu_to_le32(3 << 20);
	next->status = cpu_to_le32(recv->buf_stride);

	/* link prev to next */
	prev->branchAddress = cpu_to_le32(dma_prog_region_offset_to_bus(&recv->prog,
									sizeof(struct dma_cmd) * next_i)
					  | 1); /* Z=1 */

	/* disable interrupt on previous DMA descriptor, except at intervals */
	if ((prev_i % recv->block_irq_interval) == 0) {
		prev->control |= cpu_to_le32(3 << 20); /* enable interrupt */
	} else {
		prev->control &= cpu_to_le32(~(3 << 20)); /* disable interrupt */
	}
	wmb();

	/* wake up DMA in case it fell asleep */
	reg_write(recv->ohci, recv->ContextControlSet, (1 << 12));
}

static void ohci_iso_recv_bufferfill_release(struct ohci_iso_recv *recv,
					     struct hpsb_iso_packet_info *info)
{
	int len;

	/* release the memory where the packet was */
	len = info->len;

	/* account for the padding up to a 4-byte boundary */
	if (len % 4)
		len += 4 - (len % 4);

	/* add 8 bytes for the OHCI DMA data format overhead */
	len += 8;

	recv->released_bytes += len;

	/* have we released enough memory for one block? */
	while (recv->released_bytes > recv->buf_stride) {
		ohci_iso_recv_release_block(recv, recv->block_reader);
		recv->block_reader = (recv->block_reader + 1) % recv->nblocks;
		recv->released_bytes -= recv->buf_stride;
	}
}
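
/*
 * Example of the accounting above: releasing a (hypothetical)
 * 1001-byte packet frees 1001 bytes of payload + 3 bytes of padding
 * + 8 bytes of OHCI header/trailer = 1012 bytes; whole blocks are
 * recycled once released_bytes crosses buf_stride.
 */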

static inline void ohci_iso_recv_release(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
{
	struct ohci_iso_recv *recv = iso->hostdata;
	if (recv->dma_mode == BUFFER_FILL_MODE) {
		ohci_iso_recv_bufferfill_release(recv, info);
	} else {
		ohci_iso_recv_release_block(recv, info - iso->infos);
	}
}

/* parse all packets from blocks that have been fully received */
static void ohci_iso_recv_bufferfill_parse(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int wake = 0;
	int runaway = 0;
	struct ti_ohci *ohci = recv->ohci;

	while (1) {
		/* we expect the next parsable packet to begin at recv->dma_offset */
		/* note: packet layout is as shown in section 10.6.1.1 of the OHCI spec */

		unsigned int offset;
		unsigned short len, cycle;
		unsigned char channel, tag, sy;

		unsigned char *p = iso->data_buf.kvirt;

		unsigned int this_block = recv->dma_offset / recv->buf_stride;

		/* don't loop indefinitely */
		if (runaway++ > 100000) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
1615 "IR DMA error - Runaway during buffer parsing!\n");
			break;
		}

		/* stop parsing once we arrive at block_dma (i.e. don't get ahead of DMA) */
		if (this_block == recv->block_dma)
			break;

		wake = 1;

		/* parse data length, tag, channel, and sy */

		/* note: we keep our own local copies of 'len' and 'offset'
		   so the user can't mess with them by poking in the mmap area */

		len = p[recv->dma_offset+2] | (p[recv->dma_offset+3] << 8);

		if (len > 4096) {
			PRINT(KERN_ERR,
1634 "IR DMA error - bogus 'len' value %u\n", len);
		}

		channel = p[recv->dma_offset+1] & 0x3F;
		tag = p[recv->dma_offset+1] >> 6;
		sy = p[recv->dma_offset+0] & 0xF;

		/* advance to data payload */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride * recv->nblocks) {
			recv->dma_offset -= recv->buf_stride * recv->nblocks;
		}

		/* dma_offset now points to the first byte of the data payload */
		offset = recv->dma_offset;

		/* advance to xferStatus/timeStamp */
		recv->dma_offset += len;

		/* payload is padded to 4 bytes */
		if (len % 4) {
			recv->dma_offset += 4 - (len % 4);
		}

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride * recv->nblocks) {
			/* uh oh, the packet data wraps from the last
			   to the first DMA block - make the packet
			   contiguous by copying its "tail" into the
			   guard page */

			int guard_off = recv->buf_stride * recv->nblocks;
			int tail_len = len - (guard_off - offset);

			if (tail_len > 0 && tail_len < recv->buf_stride) {
				memcpy(iso->data_buf.kvirt + guard_off,
				       iso->data_buf.kvirt,
				       tail_len);
			}

			recv->dma_offset -= recv->buf_stride * recv->nblocks;
		}

		/* parse timestamp */
		cycle = p[recv->dma_offset+0] | (p[recv->dma_offset+1] << 8);
		cycle &= 0x1FFF;

		/* advance to next packet */
		recv->dma_offset += 4;

		/* check for wrap-around */
		if (recv->dma_offset >= recv->buf_stride * recv->nblocks) {
			recv->dma_offset -= recv->buf_stride * recv->nblocks;
		}

		hpsb_iso_packet_received(iso, offset, len, cycle, channel, tag, sy);
	}

	if (wake)
		hpsb_iso_wake(iso);
}

static void ohci_iso_recv_bufferfill_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int loop;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over all blocks */
	for (loop = 0; loop < recv->nblocks; loop++) {

		/* check block_dma to see if it's done */
		struct dma_cmd *im = &recv->block[recv->block_dma];

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(im->status) >> 16;

		/* rescount is the number of bytes *remaining to be written* in the block */
		u16 rescount = le32_to_cpu(im->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* nothing has happened to this block yet */
			break;
		}

		if (event != 0x11) {
			atomic_inc(&iso->overflows);
			PRINT(KERN_ERR,
1725 "IR DMA error - OHCI error code 0x%02x\n", event);
		}

		if (rescount != 0) {
			/* the card is still writing to this block;
			   we can't touch it until it's done */
			break;
		}

		/* OK, the block is finished... */

		/* sync our view of the block */
		dma_region_sync_for_cpu(&iso->data_buf, recv->block_dma * recv->buf_stride, recv->buf_stride);

		/* reset the DMA descriptor */
		im->status = recv->buf_stride;

		/* advance block_dma */
		recv->block_dma = (recv->block_dma + 1) % recv->nblocks;

		if ((recv->block_dma + 1) % recv->nblocks == recv->block_reader) {
			atomic_inc(&iso->overflows);
			DBGMSG("ISO reception overflow - "
			       "ran out of DMA blocks");
		}
	}

	/* parse any packets that have arrived */
	ohci_iso_recv_bufferfill_parse(iso, recv);
}

static void ohci_iso_recv_packetperbuf_task(struct hpsb_iso *iso, struct ohci_iso_recv *recv)
{
	int count;
	int wake = 0;
	struct ti_ohci *ohci = recv->ohci;

	/* loop over the entire buffer */
	for (count = 0; count < recv->nblocks; count++) {
		u32 packet_len = 0;

		/* pointer to the DMA descriptor */
		struct dma_cmd *il = ((struct dma_cmd *) recv->prog.kvirt) + iso->pkt_dma;

		/* check the DMA descriptor for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(il->status) >> 16;
		u16 rescount = le32_to_cpu(il->status) & 0xFFFF;

		unsigned char event = xferstatus & 0x1F;

		if (!event) {
			/* this packet hasn't come in yet; we are done for now */
			goto out;
		}

		if (event == 0x11) {
			/* packet received successfully! */

			/* rescount is the number of bytes *remaining* in the packet buffer,
			   after the packet was written */
			packet_len = recv->buf_stride - rescount;

		} else if (event == 0x02) {
			PRINT(KERN_ERR, "IR DMA error - packet too long for buffer");
		} else if (event) {
			PRINT(KERN_ERR, "IR DMA error - OHCI error code 0x%02x", event);
		}

		/* sync our view of the buffer */
		dma_region_sync_for_cpu(&iso->data_buf, iso->pkt_dma * recv->buf_stride, recv->buf_stride);

		/* record the per-packet info */
		{
			/* iso header is 8 bytes ahead of the data payload */
			unsigned char *hdr;

			unsigned int offset;
			unsigned short cycle;
			unsigned char channel, tag, sy;

			offset = iso->pkt_dma * recv->buf_stride;
			hdr = iso->data_buf.kvirt + offset;

			/* skip iso header */
			offset += 8;
			packet_len -= 8;

			cycle = (hdr[0] | (hdr[1] << 8)) & 0x1FFF;
			channel = hdr[5] & 0x3F;
			tag = hdr[5] >> 6;
			sy = hdr[4] & 0xF;

			hpsb_iso_packet_received(iso, offset, packet_len, cycle, channel, tag, sy);
		}

		/* reset the DMA descriptor */
		il->status = recv->buf_stride;

		wake = 1;
		recv->block_dma = iso->pkt_dma;
	}

out:
	if (wake)
		hpsb_iso_wake(iso);
}

static void ohci_iso_recv_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso *) data;
	struct ohci_iso_recv *recv = iso->hostdata;

	if (recv->dma_mode == BUFFER_FILL_MODE)
		ohci_iso_recv_bufferfill_task(iso, recv);
	else
		ohci_iso_recv_packetperbuf_task(iso, recv);
}

/***********************************
 * rawiso ISO transmission         *
 ***********************************/

struct ohci_iso_xmit {
	struct ti_ohci *ohci;
	struct dma_prog_region prog;
	struct ohci1394_iso_tasklet task;
	int task_active;

	u32 ContextControlSet;
	u32 ContextControlClear;
	u32 CommandPtr;
};

/* transmission DMA program:
   one OUTPUT_MORE_IMMEDIATE for the IT header
   one OUTPUT_LAST for the buffer data */

struct iso_xmit_cmd {
	struct dma_cmd output_more_immediate;
	u8 iso_hdr[8];
	u32 unused[2];
	struct dma_cmd output_last;
};
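
/*
 * Layout note (a sketch, inferred from the sizes above): the
 * OUTPUT_MORE_IMMEDIATE descriptor is followed by 16 bytes of
 * immediate data - the 8-byte iso header plus 8 unused bytes of
 * padding - and then by the OUTPUT_LAST descriptor, so each
 * iso_xmit_cmd occupies three 16-byte descriptor blocks in the
 * DMA program region.
 */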

static int ohci_iso_xmit_init(struct hpsb_iso *iso);
static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle);
static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso);
static void ohci_iso_xmit_task(unsigned long data);

static int ohci_iso_xmit_init(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit;
	unsigned int prog_size;
	int ctx;
	int ret = -ENOMEM;

	xmit = kmalloc(sizeof(*xmit), SLAB_KERNEL);
	if (!xmit)
		return -ENOMEM;

	iso->hostdata = xmit;
	xmit->ohci = iso->host->hostdata;
	xmit->task_active = 0;

	dma_prog_region_init(&xmit->prog);

	prog_size = sizeof(struct iso_xmit_cmd) * iso->buf_packets;

	if (dma_prog_region_alloc(&xmit->prog, prog_size, xmit->ohci->dev))
		goto err;

	ohci1394_init_iso_tasklet(&xmit->task, OHCI_ISO_TRANSMIT,
				  ohci_iso_xmit_task, (unsigned long) iso);

	if (ohci1394_register_iso_tasklet(xmit->ohci, &xmit->task) < 0)
		goto err;

	xmit->task_active = 1;

	/* xmit context registers are spaced 16 bytes apart */
	ctx = xmit->task.context;
	xmit->ContextControlSet = OHCI1394_IsoXmitContextControlSet + 16 * ctx;
	xmit->ContextControlClear = OHCI1394_IsoXmitContextControlClear + 16 * ctx;
	xmit->CommandPtr = OHCI1394_IsoXmitCommandPtr + 16 * ctx;

	return 0;

err:
	ohci_iso_xmit_shutdown(iso);
	return ret;
}

static void ohci_iso_xmit_stop(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;

	/* disable interrupts */
	reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskClear, 1 << xmit->task.context);

	/* halt DMA */
	if (ohci1394_stop_context(xmit->ohci, xmit->ContextControlClear, NULL)) {
		/* XXX the DMA context will lock up if you try to send too much data! */
		PRINT(KERN_ERR,
		      "you probably exceeded the OHCI card's bandwidth limit - "
		      "reload the module and reduce xmit bandwidth");
	}
}

static void ohci_iso_xmit_shutdown(struct hpsb_iso *iso)
{
	struct ohci_iso_xmit *xmit = iso->hostdata;

	if (xmit->task_active) {
		ohci_iso_xmit_stop(iso);
		ohci1394_unregister_iso_tasklet(xmit->ohci, &xmit->task);
		xmit->task_active = 0;
	}

	dma_prog_region_free(&xmit->prog);
	kfree(xmit);
	iso->hostdata = NULL;
}

static void ohci_iso_xmit_task(unsigned long data)
{
	struct hpsb_iso *iso = (struct hpsb_iso *) data;
	struct ohci_iso_xmit *xmit = iso->hostdata;
	struct ti_ohci *ohci = xmit->ohci;
	int wake = 0;
	int count;

	/* check the whole buffer if necessary, starting at pkt_dma */
	for (count = 0; count < iso->buf_packets; count++) {
		int cycle;

		/* DMA descriptor */
		struct iso_xmit_cmd *cmd = dma_region_i(&xmit->prog, struct iso_xmit_cmd, iso->pkt_dma);

		/* check for new writes to xferStatus */
		u16 xferstatus = le32_to_cpu(cmd->output_last.status) >> 16;
		u8  event = xferstatus & 0x1F;

		if (!event) {
			/* packet hasn't been sent yet; we are done for now */
			break;
		}

		if (event != 0x11)
			PRINT(KERN_ERR,
1975 "IT DMA error - OHCI error code 0x%02x\n", event);
1976
1977 /* at least one packet went out, so wake up the writer */
1978 wake = 1;
1979
1980 /* parse cycle */
1981 cycle = le32_to_cpu(cmd->output_last.status) & 0x1FFF;
1982
1983 /* tell the subsystem the packet has gone out */
1984 hpsb_iso_packet_sent(iso, cycle, event != 0x11);
1985
1986 /* reset the DMA descriptor for next time */
1987 cmd->output_last.status = 0;
1988 }
1989
1990 if (wake)
1991 hpsb_iso_wake(iso);
1992 }
1993
1994 static int ohci_iso_xmit_queue(struct hpsb_iso *iso, struct hpsb_iso_packet_info *info)
1995 {
1996 struct ohci_iso_xmit *xmit = iso->hostdata;
1997 struct ti_ohci *ohci = xmit->ohci;
1998
1999 int next_i, prev_i;
2000 struct iso_xmit_cmd *next, *prev;
2001
2002 unsigned int offset;
2003 unsigned short len;
2004 unsigned char tag, sy;
2005
2006 /* check that the packet doesn't cross a page boundary
2007 (we could allow this if we added OUTPUT_MORE descriptor support) */
2008 if (cross_bound(info->offset, info->len)) {
2009 PRINT(KERN_ERR,
2010 "rawiso xmit: packet %u crosses a page boundary",
2011 iso->first_packet);
2012 return -EINVAL;
2013 }
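/* A minimal sketch of the check cross_bound() is assumed to perform
 * (the real helper is defined earlier in this file):
 *
 *	static inline int crosses_page(unsigned long offset,
 *				       unsigned int len)
 *	{
 *		return len != 0 &&
 *		       (offset / PAGE_SIZE) !=
 *		       ((offset + len - 1) / PAGE_SIZE);
 *	}
 */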
2014
2015 offset = info->offset;
2016 len = info->len;
2017 tag = info->tag;
2018 sy = info->sy;
2019
2020 /* sync up the card's view of the buffer */
2021 dma_region_sync_for_device(&iso->data_buf, offset, len);
2022
2023 /* Append first_packet to the DMA chain by linking the
2024 * previous descriptor to it; 'next' will become the new
2025 * end of the DMA chain. */
2026
2027 next_i = iso->first_packet;
2028 prev_i = (next_i == 0) ? (iso->buf_packets - 1) : (next_i - 1);
2029
2030 next = dma_region_i(&xmit->prog, struct iso_xmit_cmd, next_i);
2031 prev = dma_region_i(&xmit->prog, struct iso_xmit_cmd, prev_i);
2032
2033 /* set up the OUTPUT_MORE_IMMEDIATE descriptor */
2034 memset(next, 0, sizeof(struct iso_xmit_cmd));
2035 next->output_more_immediate.control = cpu_to_le32(0x02000008);
2036
2037 /* ISO packet header is embedded in the OUTPUT_MORE_IMMEDIATE */
2038
2039 /* tcode = 0xA, and sy */
2040 next->iso_hdr[0] = 0xA0 | (sy & 0xF);
2041
2042 /* tag and channel number */
2043 next->iso_hdr[1] = (tag << 6) | (iso->channel & 0x3F);
2044
2045 /* transmission speed */
2046 next->iso_hdr[2] = iso->speed & 0x7;
2047
2048 /* payload size */
2049 next->iso_hdr[6] = len & 0xFF;
2050 next->iso_hdr[7] = len >> 8;
2051
2052 /* set up the OUTPUT_LAST */
2053 next->output_last.control = cpu_to_le32(1 << 28);
2054 next->output_last.control |= cpu_to_le32(1 << 27); /* update timeStamp */
2055 next->output_last.control |= cpu_to_le32(3 << 20); /* want interrupt */
2056 next->output_last.control |= cpu_to_le32(3 << 18); /* enable branch */
2057 next->output_last.control |= cpu_to_le32(len);
2058
2059 /* payload bus address */
2060 next->output_last.address = cpu_to_le32(dma_region_offset_to_bus(&iso->data_buf, offset));
2061
2062 /* leave branchAddress at zero for now */
2063
2064 /* re-write the previous DMA descriptor to chain to this one */
2065
2066 /* set prev branch address to point to next (Z=3) */
2067 prev->output_last.branchAddress = cpu_to_le32(
2068 dma_prog_region_offset_to_bus(&xmit->prog, sizeof(struct iso_xmit_cmd) * next_i) | 3);
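/* Z = 3 because each iso_xmit_cmd occupies three 16-byte descriptor
 * slots: the OUTPUT_MORE_IMMEDIATE, its 16 bytes of immediate data
 * (iso_hdr plus padding), and the OUTPUT_LAST. */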
2069
2070 /* disable interrupt, unless required by the IRQ interval */
2071 if (prev_i % iso->irq_interval) {
2072 prev->output_last.control &= cpu_to_le32(~(3 << 20)); /* no interrupt */
2073 } else {
2074 prev->output_last.control |= cpu_to_le32(3 << 20); /* enable interrupt */
2075 }
2076
2077 wmb();
2078
2079 /* wake DMA in case it is sleeping */
2080 reg_write(xmit->ohci, xmit->ContextControlSet, 1 << 12);
2081
2082 /* issue a dummy read of the cycle timer to flush all posted
2083 PCI writes */
2084 mb();
2085 reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer);
2086
2087 return 0;
2088 }
2089
2090 static int ohci_iso_xmit_start(struct hpsb_iso *iso, int cycle)
2091 {
2092 struct ohci_iso_xmit *xmit = iso->hostdata;
2093 struct ti_ohci *ohci = xmit->ohci;
2094
2095 /* clear out the control register */
2096 reg_write(xmit->ohci, xmit->ContextControlClear, 0xFFFFFFFF);
2097 wmb();
2098
2099 /* address and length of first descriptor block (Z=3) */
2100 reg_write(xmit->ohci, xmit->CommandPtr,
2101 dma_prog_region_offset_to_bus(&xmit->prog, iso->pkt_dma * sizeof(struct iso_xmit_cmd)) | 3);
2102
2103 /* cycle match */
2104 if (cycle != -1) {
2105 u32 start = cycle & 0x1FFF;
2106
2107 /* 'cycle' is only mod 8000, but we also need two 'seconds' bits -
2108 just snarf them from the current time */
2109 u32 seconds = reg_read(xmit->ohci, OHCI1394_IsochronousCycleTimer) >> 25;
2110
2111 /* advance one second to give some extra time for DMA to start */
2112 seconds += 1;
2113
2114 start |= (seconds & 3) << 13;
2115
2116 reg_write(xmit->ohci, xmit->ContextControlSet, 0x80000000 | (start << 16));
2117 }
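/* Worked example with made-up numbers: cycle = 1000 and a cycleTimer
 * reading of seconds = 5 give seconds = 6 after the increment, so
 * start = ((6 & 3) << 13) | 1000 = 0x43E8; the context starts when
 * the bus cycle matches that value in ContextControl[30:16]. */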
2118
2119 /* enable interrupts */
2120 reg_write(xmit->ohci, OHCI1394_IsoXmitIntMaskSet, 1 << xmit->task.context);
2121
2122 /* run */
2123 reg_write(xmit->ohci, xmit->ContextControlSet, 0x8000);
2124 mb();
2125
2126 /* wait 100 usec to give the card time to go active */
2127 udelay(100);
2128
2129 /* check the RUN bit */
2130 if (!(reg_read(xmit->ohci, xmit->ContextControlSet) & 0x8000)) {
2131 PRINT(KERN_ERR, "Error starting IT DMA (ContextControl 0x%08x)\n",
2132 reg_read(xmit->ohci, xmit->ContextControlSet));
2133 return -1;
2134 }
2135
2136 return 0;
2137 }
2138
2139 static int ohci_isoctl(struct hpsb_iso *iso, enum isoctl_cmd cmd, unsigned long arg)
2140 {
2142 switch (cmd) {
2143 case XMIT_INIT:
2144 return ohci_iso_xmit_init(iso);
2145 case XMIT_START:
2146 return ohci_iso_xmit_start(iso, arg);
2147 case XMIT_STOP:
2148 ohci_iso_xmit_stop(iso);
2149 return 0;
2150 case XMIT_QUEUE:
2151 return ohci_iso_xmit_queue(iso, (struct hpsb_iso_packet_info*) arg);
2152 case XMIT_SHUTDOWN:
2153 ohci_iso_xmit_shutdown(iso);
2154 return 0;
2155
2156 case RECV_INIT:
2157 return ohci_iso_recv_init(iso);
2158 case RECV_START: {
2159 int *args = (int*) arg;
2160 return ohci_iso_recv_start(iso, args[0], args[1], args[2]);
2161 }
2162 case RECV_STOP:
2163 ohci_iso_recv_stop(iso);
2164 return 0;
2165 case RECV_RELEASE:
2166 ohci_iso_recv_release(iso, (struct hpsb_iso_packet_info*) arg);
2167 return 0;
2168 case RECV_FLUSH:
2169 ohci_iso_recv_task((unsigned long) iso);
2170 return 0;
2171 case RECV_SHUTDOWN:
2172 ohci_iso_recv_shutdown(iso);
2173 return 0;
2174 case RECV_LISTEN_CHANNEL:
2175 ohci_iso_recv_change_channel(iso, arg, 1);
2176 return 0;
2177 case RECV_UNLISTEN_CHANNEL:
2178 ohci_iso_recv_change_channel(iso, arg, 0);
2179 return 0;
2180 case RECV_SET_CHANNEL_MASK:
2181 ohci_iso_recv_set_channel_mask(iso, *((u64*) arg));
2182 return 0;
2183
2184 default:
2185 PRINT_G(KERN_ERR, "ohci_isoctl cmd %d not implemented yet",
2186 cmd);
2187 break;
2188 }
2189 return -EINVAL;
2190 }
2191
2192 /***************************************
2193 * IEEE-1394 functionality section END *
2194 ***************************************/
2195
2196
2197 /********************************************************
2198 * Global stuff (interrupt handler, init/shutdown code) *
2199 ********************************************************/
2200
2201 static void dma_trm_reset(struct dma_trm_ctx *d)
2202 {
2203 unsigned long flags;
2204 LIST_HEAD(packet_list);
2205 struct ti_ohci *ohci = d->ohci;
2206 struct hpsb_packet *packet, *ptmp;
2207
2208 ohci1394_stop_context(ohci, d->ctrlClear, NULL);
2209
2210 /* Lock the context, reset it and release it. Move the packets
2211 * that were pending in the context to packet_list and free
2212 * them after releasing the lock. */
2213
2214 spin_lock_irqsave(&d->lock, flags);
2215
2216 list_splice(&d->fifo_list, &packet_list);
2217 list_splice(&d->pending_list, &packet_list);
2218 INIT_LIST_HEAD(&d->fifo_list);
2219 INIT_LIST_HEAD(&d->pending_list);
2220
2221 d->branchAddrPtr = NULL;
2222 d->sent_ind = d->prg_ind;
2223 d->free_prgs = d->num_desc;
2224
2225 spin_unlock_irqrestore(&d->lock, flags);
2226
2227 if (list_empty(&packet_list))
2228 return;
2229
2230 PRINT(KERN_INFO, "AT dma reset ctx=%d, aborting transmission", d->ctx);
2231
2232 /* Now process subsystem callbacks for the packets from this
2233 * context. */
2234 list_for_each_entry_safe(packet, ptmp, &packet_list, driver_list) {
2235 list_del_init(&packet->driver_list);
2236 hpsb_packet_sent(ohci->host, packet, ACKX_ABORTED);
2237 }
2238 }
2239
2240 static void ohci_schedule_iso_tasklets(struct ti_ohci *ohci,
2241 quadlet_t rx_event,
2242 quadlet_t tx_event)
2243 {
2244 struct ohci1394_iso_tasklet *t;
2245 unsigned long mask;
2246
2247 spin_lock(&ohci->iso_tasklet_list_lock);
2248
2249 list_for_each_entry(t, &ohci->iso_tasklet_list, link) {
2250 mask = 1 << t->context;
2251
2252 if (t->type == OHCI_ISO_TRANSMIT && tx_event & mask)
2253 tasklet_schedule(&t->tasklet);
2254 else if (rx_event & mask)
2255 tasklet_schedule(&t->tasklet);
2256 }
2257
2258 spin_unlock(&ohci->iso_tasklet_list_lock);
2259
2260 }
2261
2262 static irqreturn_t ohci_irq_handler(int irq, void *dev_id,
2263 struct pt_regs *regs_are_unused)
2264 {
2265 quadlet_t event, node_id;
2266 struct ti_ohci *ohci = (struct ti_ohci *)dev_id;
2267 struct hpsb_host *host = ohci->host;
2268 int phyid = -1, isroot = 0;
2269 unsigned long flags;
2270
2271 /* Read and clear the interrupt event register. Don't clear
2272 * the busReset event, though. This is done when we get the
2273 * selfIDComplete interrupt. */
2274 spin_lock_irqsave(&ohci->event_lock, flags);
2275 event = reg_read(ohci, OHCI1394_IntEventClear);
2276 reg_write(ohci, OHCI1394_IntEventClear, event & ~OHCI1394_busReset);
2277 spin_unlock_irqrestore(&ohci->event_lock, flags);
2278
2279 if (!event)
2280 return IRQ_NONE;
2281
2282 /* If event reads as ~(u32)0, the CardBus card has been ejected.
2283 * In that case we just return and clean up later, in the
2284 * ohci1394_pci_remove function. */
2285 if (event == ~(u32) 0) {
2286 DBGMSG("Device removed.");
2287 return IRQ_NONE;
2288 }
2289
2290 DBGMSG("IntEvent: %08x", event);
2291
2292 if (event & OHCI1394_unrecoverableError) {
2293 int ctx;
2294 PRINT(KERN_ERR, "Unrecoverable error!");
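/* ContextControl bit 11 (0x800) is the 'dead' flag; check every
 * context and report the ones the controller has shut down. */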
2295
2296 if (reg_read(ohci, OHCI1394_AsReqTrContextControlSet) & 0x800)
2297 PRINT(KERN_ERR, "Async Req Tx Context died: "
2298 "ctrl[%08x] cmdptr[%08x]",
2299 reg_read(ohci, OHCI1394_AsReqTrContextControlSet),
2300 reg_read(ohci, OHCI1394_AsReqTrCommandPtr));
2301
2302 if (reg_read(ohci, OHCI1394_AsRspTrContextControlSet) & 0x800)
2303 PRINT(KERN_ERR, "Async Rsp Tx Context died: "
2304 "ctrl[%08x] cmdptr[%08x]",
2305 reg_read(ohci, OHCI1394_AsRspTrContextControlSet),
2306 reg_read(ohci, OHCI1394_AsRspTrCommandPtr));
2307
2308 if (reg_read(ohci, OHCI1394_AsReqRcvContextControlSet) & 0x800)
2309 PRINT(KERN_ERR, "Async Req Rcv Context died: "
2310 "ctrl[%08x] cmdptr[%08x]",
2311 reg_read(ohci, OHCI1394_AsReqRcvContextControlSet),
2312 reg_read(ohci, OHCI1394_AsReqRcvCommandPtr));
2313
2314 if (reg_read(ohci, OHCI1394_AsRspRcvContextControlSet) & 0x800)
2315 PRINT(KERN_ERR, "Async Rsp Rcv Context died: "
2316 "ctrl[%08x] cmdptr[%08x]",
2317 reg_read(ohci, OHCI1394_AsRspRcvContextControlSet),
2318 reg_read(ohci, OHCI1394_AsRspRcvCommandPtr));
2319
2320 for (ctx = 0; ctx < ohci->nb_iso_xmit_ctx; ctx++) {
2321 if (reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)) & 0x800)
2322 PRINT(KERN_ERR, "Iso Xmit %d Context died: "
2323 "ctrl[%08x] cmdptr[%08x]", ctx,
2324 reg_read(ohci, OHCI1394_IsoXmitContextControlSet + (16 * ctx)),
2325 reg_read(ohci, OHCI1394_IsoXmitCommandPtr + (16 * ctx)));
2326 }
2327
2328 for (ctx = 0; ctx < ohci->nb_iso_rcv_ctx; ctx++) {
2329 if (reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)) & 0x800)
2330 PRINT(KERN_ERR, "Iso Recv %d Context died: "
2331 "ctrl[%08x] cmdptr[%08x] match[%08x]", ctx,
2332 reg_read(ohci, OHCI1394_IsoRcvContextControlSet + (32 * ctx)),
2333 reg_read(ohci, OHCI1394_IsoRcvCommandPtr + (32 * ctx)),
2334 reg_read(ohci, OHCI1394_IsoRcvContextMatch + (32 * ctx)));
2335 }
2336
2337 event &= ~OHCI1394_unrecoverableError;
2338 }
2339
2340 if (event & OHCI1394_cycleInconsistent) {
2341 /* We subscribe to the cycleInconsistent event only to
2342 * clear the corresponding event bit... otherwise,
2343 * isochronous cycleMatch DMA won't work. */
2344 DBGMSG("OHCI1394_cycleInconsistent");
2345 event &= ~OHCI1394_cycleInconsistent;
2346 }
2347
2348 if (event & OHCI1394_busReset) {
2349 /* The busReset event bit can't be cleared during the
2350 * selfID phase, so we disable busReset interrupts to
2351 * avoid burying the CPU in interrupt requests. */
2352 spin_lock_irqsave(&ohci->event_lock, flags);
2353 reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_busReset);
2354
2355 if (ohci->check_busreset) {
2356 int loop_count = 0;
2357
2358 udelay(10);
2359
2360 while (reg_read(ohci, OHCI1394_IntEventSet) & OHCI1394_busReset) {
2361 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2362
2363 spin_unlock_irqrestore(&ohci->event_lock, flags);
2364 udelay(10);
2365 spin_lock_irqsave(&ohci->event_lock, flags);
2366
2367 /* The loop counter check prevents the driver from
2368 * remaining in this state forever. On the initial
2369 * bus reset the loop would otherwise run forever and
2370 * hang the system until some device is manually plugged
2371 * into or unplugged from a port! The forced reset seems
2372 * to solve this problem. This mainly affects nForce2. */
2373 if (loop_count > 10000) {
2374 ohci_devctl(host, RESET_BUS, LONG_RESET);
2375 DBGMSG("Detected bus-reset loop. Forced a bus reset!");
2376 loop_count = 0;
2377 }
2378
2379 loop_count++;
2380 }
2381 }
2382 spin_unlock_irqrestore(&ohci->event_lock, flags);
2383 if (!host->in_bus_reset) {
2384 DBGMSG("irq_handler: Bus reset requested");
2385
2386 /* Subsystem call */
2387 hpsb_bus_reset(ohci->host);
2388 }
2389 event &= ~OHCI1394_busReset;
2390 }
2391
2392 if (event & OHCI1394_reqTxComplete) {
2393 struct dma_trm_ctx *d = &ohci->at_req_context;
2394 DBGMSG("Got reqTxComplete interrupt "
2395 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2396 if (reg_read(ohci, d->ctrlSet) & 0x800)
2397 ohci1394_stop_context(ohci, d->ctrlClear,
2398 "reqTxComplete");
2399 else
2400 dma_trm_tasklet((unsigned long)d);
2402 event &= ~OHCI1394_reqTxComplete;
2403 }
2404 if (event & OHCI1394_respTxComplete) {
2405 struct dma_trm_ctx *d = &ohci->at_resp_context;
2406 DBGMSG("Got respTxComplete interrupt "
2407 "status=0x%08X", reg_read(ohci, d->ctrlSet));
2408 if (reg_read(ohci, d->ctrlSet) & 0x800)
2409 ohci1394_stop_context(ohci, d->ctrlClear,
2410 "respTxComplete");
2411 else
2412 tasklet_schedule(&d->task);
2413 event &= ~OHCI1394_respTxComplete;
2414 }
2415 if (event & OHCI1394_RQPkt) {
2416 struct dma_rcv_ctx *d = &ohci->ar_req_context;
2417 DBGMSG("Got RQPkt interrupt status=0x%08X",
2418 reg_read(ohci, d->ctrlSet));
2419 if (reg_read(ohci, d->ctrlSet) & 0x800)
2420 ohci1394_stop_context(ohci, d->ctrlClear, "RQPkt");
2421 else
2422 tasklet_schedule(&d->task);
2423 event &= ~OHCI1394_RQPkt;
2424 }
2425 if (event & OHCI1394_RSPkt) {
2426 struct dma_rcv_ctx *d = &ohci->ar_resp_context;
2427 DBGMSG("Got RSPkt interrupt status=0x%08X",
2428 reg_read(ohci, d->ctrlSet));
2429 if (reg_read(ohci, d->ctrlSet) & 0x800)
2430 ohci1394_stop_context(ohci, d->ctrlClear, "RSPkt");
2431 else
2432 tasklet_schedule(&d->task);
2433 event &= ~OHCI1394_RSPkt;
2434 }
2435 if (event & OHCI1394_isochRx) {
2436 quadlet_t rx_event;
2437
2438 rx_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
2439 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, rx_event);
2440 ohci_schedule_iso_tasklets(ohci, rx_event, 0);
2441 event &= ~OHCI1394_isochRx;
2442 }
2443 if (event & OHCI1394_isochTx) {
2444 quadlet_t tx_event;
2445
2446 tx_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
2447 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, tx_event);
2448 ohci_schedule_iso_tasklets(ohci, 0, tx_event);
2449 event &= ~OHCI1394_isochTx;
2450 }
2451 if (event & OHCI1394_selfIDComplete) {
2452 if (host->in_bus_reset) {
2453 node_id = reg_read(ohci, OHCI1394_NodeID);
2454
2455 if (!(node_id & 0x80000000)) {
2456 PRINT(KERN_ERR,
2457 "SelfID received, but NodeID invalid "
2458 "(probably new bus reset occurred): %08X",
2459 node_id);
2460 goto selfid_not_valid;
2461 }
2462
2463 phyid = node_id & 0x0000003f;
2464 isroot = (node_id & 0x40000000) != 0;
2465
2466 DBGMSG("SelfID interrupt received "
2467 "(phyid %d, %s)", phyid,
2468 (isroot ? "root" : "not root"));
2469
2470 handle_selfid(ohci, host, phyid, isroot);
2471
2472 /* Clear the bus reset event and re-enable the
2473 * busReset interrupt. */
2474 spin_lock_irqsave(&ohci->event_lock, flags);
2475 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
2476 reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
2477 spin_unlock_irqrestore(&ohci->event_lock, flags);
2478
2479 /* Accept Physical requests from all nodes. */
2480 reg_write(ohci,OHCI1394_AsReqFilterHiSet, 0xffffffff);
2481 reg_write(ohci,OHCI1394_AsReqFilterLoSet, 0xffffffff);
2482
2483 /* Turn on phys dma reception.
2484 *
2485 * TODO: Enable some sort of filtering management.
2486 */
2487 if (phys_dma) {
2488 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0xffffffff);
2489 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0xffffffff);
2490 reg_write(ohci,OHCI1394_PhyUpperBound, 0xffff0000);
2491 } else {
2492 reg_write(ohci,OHCI1394_PhyReqFilterHiSet, 0x00000000);
2493 reg_write(ohci,OHCI1394_PhyReqFilterLoSet, 0x00000000);
2494 }
2495
2496 DBGMSG("PhyReqFilter=%08x%08x",
2497 reg_read(ohci,OHCI1394_PhyReqFilterHiSet),
2498 reg_read(ohci,OHCI1394_PhyReqFilterLoSet));
2499
2500 hpsb_selfid_complete(host, phyid, isroot);
2501 } else
2502 PRINT(KERN_ERR,
2503 "SelfID received outside of bus reset sequence");
2504
2505 selfid_not_valid:
2506 event &= ~OHCI1394_selfIDComplete;
2507 }
2508
2509 /* Make sure we handle everything, just in case we accidentally
2510 * enabled an interrupt that we didn't write a handler for. */
2511 if (event)
2512 PRINT(KERN_ERR, "Unhandled interrupt(s) 0x%08x",
2513 event);
2514
2515 return IRQ_HANDLED;
2516 }
2517
2518 /* Put the buffer back into the dma context */
2519 static void insert_dma_buffer(struct dma_rcv_ctx *d, int idx)
2520 {
2521 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2522 DBGMSG("Inserting dma buf ctx=%d idx=%d", d->ctx, idx);
2523
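/* Re-arm this descriptor: restore resCount to the full buffer size,
 * clear the Z nibble in its own branchAddress (end of chain), then
 * set Z = 1 in the previous descriptor's branchAddress so the
 * controller will branch into this one-descriptor block. */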
2524 d->prg_cpu[idx]->status = cpu_to_le32(d->buf_size);
2525 d->prg_cpu[idx]->branchAddress &= cpu_to_le32(0xfffffff0);
2526 idx = (idx + d->num_desc - 1) % d->num_desc;
2527 d->prg_cpu[idx]->branchAddress |= cpu_to_le32(0x00000001);
2528
2529 /* To avoid a race, ensure 1394 interface hardware sees the inserted
2530 * context program descriptors before it sees the wakeup bit set. */
2531 wmb();
2532
2533 /* wake up the dma context if necessary */
2534 if (!(reg_read(ohci, d->ctrlSet) & 0x400)) {
2535 PRINT(KERN_INFO,
2536 "Waking dma ctx=%d ... processing is probably too slow",
2537 d->ctx);
2538 }
2539
2540 /* do this always, to avoid race condition */
2541 reg_write(ohci, d->ctrlSet, 0x1000);
2542 }
2543
2544 #define cond_le32_to_cpu(data, noswap) \
2545 ((noswap) ? (data) : le32_to_cpu(data))
2546
2547 static const int TCODE_SIZE[16] = {20, 0, 16, -1, 16, 20, 20, 0,
2548 -1, 0, -1, 0, -1, -1, 16, -1};
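/* For fixed-size tcodes the entry is the total number of bytes the
 * controller deposits, including the 4-byte status/trailer quadlet.
 * 0 marks block transactions, whose size is data_length (header
 * quadlet 3, bits 31:16) plus 20; -1 marks tcodes that cannot appear
 * in an AR context. */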
2549
2550 /*
2551 * Determine the length of a packet in the buffer
2552 * Optimization suggested by Pascal Drolet <pascal.drolet@informission.ca>
2553 */
2554 static __inline__ int packet_length(struct dma_rcv_ctx *d, int idx, quadlet_t *buf_ptr,
2555 int offset, unsigned char tcode, int noswap)
2556 {
2557 int length = -1;
2558
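	/* For block transactions the data_length field lives in header
	 * quadlet 3. If the header straddles the end of the current
	 * buffer, quadlet 3 has already landed at the start of the
	 * next one; the (idx + 1) % num_desc indexing below fetches it
	 * from there. */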
2559 if (d->type == DMA_CTX_ASYNC_REQ || d->type == DMA_CTX_ASYNC_RESP) {
2560 length = TCODE_SIZE[tcode];
2561 if (length == 0) {
2562 if (offset + 12 >= d->buf_size) {
2563 length = (cond_le32_to_cpu(d->buf_cpu[(idx + 1) % d->num_desc]
2564 [3 - ((d->buf_size - offset) >> 2)], noswap) >> 16);
2565 } else {
2566 length = (cond_le32_to_cpu(buf_ptr[3], noswap) >> 16);
2567 }
2568 length += 20;
2569 }
2570 } else if (d->type == DMA_CTX_ISO) {
2571 /* Assumption: buffer fill mode with header/trailer */
2572 length = (cond_le32_to_cpu(buf_ptr[0], noswap) >> 16) + 8;
2573 }
2574
2575 if (length > 0 && length % 4)
2576 length += 4 - (length % 4);
2577
2578 return length;
2579 }
2580
2581 /* Tasklet that processes dma receive buffers */
2582 static void dma_rcv_tasklet (unsigned long data)
2583 {
2584 struct dma_rcv_ctx *d = (struct dma_rcv_ctx*)data;
2585 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2586 unsigned int split_left, idx, offset, rescount;
2587 unsigned char tcode;
2588 int length, bytes_left, ack;
2589 unsigned long flags;
2590 quadlet_t *buf_ptr;
2591 char *split_ptr;
2592 char msg[256];
2593
2594 spin_lock_irqsave(&d->lock, flags);
2595
2596 idx = d->buf_ind;
2597 offset = d->buf_offset;
2598 buf_ptr = d->buf_cpu[idx] + offset/4;
2599
2600 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2601 bytes_left = d->buf_size - rescount - offset;
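/* resCount (low 16 bits of status) counts the bytes the controller
 * has NOT yet filled; subtracting it and the bytes we have already
 * consumed leaves the amount of new, unread data. */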
2602
2603 while (bytes_left > 0) {
2604 tcode = (cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming) >> 4) & 0xf;
2605
2606 /* packet_length() will return < 4 for an error */
2607 length = packet_length(d, idx, buf_ptr, offset, tcode, ohci->no_swap_incoming);
2608
2609 if (length < 4) { /* something is wrong */
2610 sprintf(msg,"Unexpected tcode 0x%x(0x%08x) in AR ctx=%d, length=%d",
2611 tcode, cond_le32_to_cpu(buf_ptr[0], ohci->no_swap_incoming),
2612 d->ctx, length);
2613 ohci1394_stop_context(ohci, d->ctrlClear, msg);
2614 spin_unlock_irqrestore(&d->lock, flags);
2615 return;
2616 }
2617
2618 /* The first case is where we have a packet that crosses
2619 * over more than one descriptor. The next case is where
2620 * it's all in the first descriptor. */
2621 if ((offset + length) > d->buf_size) {
2622 DBGMSG("Split packet rcv'd");
2623 if (length > d->split_buf_size) {
2624 ohci1394_stop_context(ohci, d->ctrlClear,
2625 "Split packet size exceeded");
2626 d->buf_ind = idx;
2627 d->buf_offset = offset;
2628 spin_unlock_irqrestore(&d->lock, flags);
2629 return;
2630 }
2631
2632 if (le32_to_cpu(d->prg_cpu[(idx+1)%d->num_desc]->status)
2633 == d->buf_size) {
2634 /* The other part of the packet has not been
2635 * written yet. This should never happen; if it
2636 * does, we will pick the packet up on the next call. */
2637 PRINT(KERN_INFO,
2638 "Got only half a packet!");
2639 d->buf_ind = idx;
2640 d->buf_offset = offset;
2641 spin_unlock_irqrestore(&d->lock, flags);
2642 return;
2643 }
2644
2645 split_left = length;
2646 split_ptr = (char *)d->spb;
2647 memcpy(split_ptr,buf_ptr,d->buf_size-offset);
2648 split_left -= d->buf_size-offset;
2649 split_ptr += d->buf_size-offset;
2650 insert_dma_buffer(d, idx);
2651 idx = (idx+1) % d->num_desc;
2652 buf_ptr = d->buf_cpu[idx];
2653 offset=0;
2654
2655 while (split_left >= d->buf_size) {
2656 memcpy(split_ptr,buf_ptr,d->buf_size);
2657 split_ptr += d->buf_size;
2658 split_left -= d->buf_size;
2659 insert_dma_buffer(d, idx);
2660 idx = (idx+1) % d->num_desc;
2661 buf_ptr = d->buf_cpu[idx];
2662 }
2663
2664 if (split_left > 0) {
2665 memcpy(split_ptr, buf_ptr, split_left);
2666 offset = split_left;
2667 buf_ptr += offset/4;
2668 }
2669 } else {
2670 DBGMSG("Single packet rcv'd");
2671 memcpy(d->spb, buf_ptr, length);
2672 offset += length;
2673 buf_ptr += length/4;
2674 if (offset==d->buf_size) {
2675 insert_dma_buffer(d, idx);
2676 idx = (idx+1) % d->num_desc;
2677 buf_ptr = d->buf_cpu[idx];
2678 offset=0;
2679 }
2680 }
2681
2682 /* One phy packet is delivered to the async request context
2683 * after each bus reset. We always ignore it. */
2684 if (tcode != OHCI1394_TCODE_PHY) {
2685 if (!ohci->no_swap_incoming)
2686 packet_swab(d->spb, tcode);
2687 DBGMSG("Packet received from node"
2688 " %d ack=0x%02X spd=%d tcode=0x%X"
2689 " length=%d ctx=%d tlabel=%d",
2690 (d->spb[1]>>16)&0x3f,
2691 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f,
2692 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>21)&0x3,
2693 tcode, length, d->ctx,
2694 (cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>10)&0x3f);
2695
2696 ack = (((cond_le32_to_cpu(d->spb[length/4-1], ohci->no_swap_incoming)>>16)&0x1f)
2697 == 0x11) ? 1 : 0;
2698
2699 hpsb_packet_received(ohci->host, d->spb,
2700 length-4, ack);
2701 }
2702 #ifdef OHCI1394_DEBUG
2703 else
2704 PRINT (KERN_DEBUG, "Got phy packet ctx=%d ... discarded",
2705 d->ctx);
2706 #endif
2707
2708 rescount = le32_to_cpu(d->prg_cpu[idx]->status) & 0xffff;
2709
2710 bytes_left = d->buf_size - rescount - offset;
2711
2712 }
2713
2714 d->buf_ind = idx;
2715 d->buf_offset = offset;
2716
2717 spin_unlock_irqrestore(&d->lock, flags);
2718 }
2719
2720 /* Bottom half that processes sent packets */
2721 static void dma_trm_tasklet (unsigned long data)
2722 {
2723 struct dma_trm_ctx *d = (struct dma_trm_ctx*)data;
2724 struct ti_ohci *ohci = (struct ti_ohci*)(d->ohci);
2725 struct hpsb_packet *packet, *ptmp;
2726 unsigned long flags;
2727 u32 status, ack;
2728 size_t datasize;
2729
2730 spin_lock_irqsave(&d->lock, flags);
2731
2732 list_for_each_entry_safe(packet, ptmp, &d->fifo_list, driver_list) {
2733 datasize = packet->data_size;
2734 if (datasize && packet->type != hpsb_raw)
2735 status = le32_to_cpu(
2736 d->prg_cpu[d->sent_ind]->end.status) >> 16;
2737 else
2738 status = le32_to_cpu(
2739 d->prg_cpu[d->sent_ind]->begin.status) >> 16;
2740
2741 if (status == 0)
2742 /* this packet hasn't been sent yet */
2743 break;
2744
2745 #ifdef OHCI1394_DEBUG
2746 if (datasize)
2747 if (((le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf) == 0xa)
2748 DBGMSG("Stream packet sent to channel %d tcode=0x%X "
2749 "ack=0x%X spd=%d dataLength=%d ctx=%d",
2750 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>8)&0x3f,
2751 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2752 status&0x1f, (status>>5)&0x3,
2753 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16,
2754 d->ctx);
2755 else
2756 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2757 "0x%02X ack=0x%X spd=%d dataLength=%d ctx=%d",
2758 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])>>16)&0x3f,
2759 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>4)&0xf,
2760 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])>>10)&0x3f,
2761 status&0x1f, (status>>5)&0x3,
2762 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3])>>16,
2763 d->ctx);
2764 else
2765 DBGMSG("Packet sent to node %d tcode=0x%X tLabel="
2766 "0x%02X ack=0x%X spd=%d data=0x%08X ctx=%d",
2767 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[1])
2768 >>16)&0x3f,
2769 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2770 >>4)&0xf,
2771 (le32_to_cpu(d->prg_cpu[d->sent_ind]->data[0])
2772 >>10)&0x3f,
2773 status&0x1f, (status>>5)&0x3,
2774 le32_to_cpu(d->prg_cpu[d->sent_ind]->data[3]),
2775 d->ctx);
2776 #endif
2777
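/* Status codes 0x10-0x1f are IEEE 1394 ack codes returned by the
 * target (low nibble = the ack); everything else is an OHCI evt_*
 * error that gets mapped to an ackx code below. */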
2778 if (status & 0x10) {
2779 ack = status & 0xf;
2780 } else {
2781 switch (status & 0x1f) {
2782 case EVT_NO_STATUS: /* that should never happen */
2783 case EVT_RESERVED_A: /* that should never happen */
2784 case EVT_LONG_PACKET: /* that should never happen */
2785 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2786 ack = ACKX_SEND_ERROR;
2787 break;
2788 case EVT_MISSING_ACK:
2789 ack = ACKX_TIMEOUT;
2790 break;
2791 case EVT_UNDERRUN:
2792 ack = ACKX_SEND_ERROR;
2793 break;
2794 case EVT_OVERRUN: /* that should never happen */
2795 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2796 ack = ACKX_SEND_ERROR;
2797 break;
2798 case EVT_DESCRIPTOR_READ:
2799 case EVT_DATA_READ:
2800 case EVT_DATA_WRITE:
2801 ack = ACKX_SEND_ERROR;
2802 break;
2803 case EVT_BUS_RESET: /* that should never happen */
2804 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2805 ack = ACKX_SEND_ERROR;
2806 break;
2807 case EVT_TIMEOUT:
2808 ack = ACKX_TIMEOUT;
2809 break;
2810 case EVT_TCODE_ERR:
2811 ack = ACKX_SEND_ERROR;
2812 break;
2813 case EVT_RESERVED_B: /* that should never happen */
2814 case EVT_RESERVED_C: /* that should never happen */
2815 PRINT(KERN_WARNING, "Received OHCI evt_* error 0x%x", status & 0x1f);
2816 ack = ACKX_SEND_ERROR;
2817 break;
2818 case EVT_UNKNOWN:
2819 case EVT_FLUSHED:
2820 ack = ACKX_SEND_ERROR;
2821 break;
2822 default:
2823 PRINT(KERN_ERR, "Unhandled OHCI evt_* error 0x%x", status & 0x1f);
2824 ack = ACKX_SEND_ERROR;
2825 BUG();
2826 }
2827 }
2828
2829 list_del_init(&packet->driver_list);
2830 hpsb_packet_sent(ohci->host, packet, ack);
2831
2832 if (datasize) {
2833 pci_unmap_single(ohci->dev,
2834 le32_to_cpu(d->prg_cpu[d->sent_ind]->end.address),
2835 datasize, PCI_DMA_TODEVICE);
2836 OHCI_DMA_FREE("single Xmit data packet");
2837 }
2838
2839 d->sent_ind = (d->sent_ind+1)%d->num_desc;
2840 d->free_prgs++;
2841 }
2842
2843 dma_trm_flush(ohci, d);
2844
2845 spin_unlock_irqrestore(&d->lock, flags);
2846 }
2847
2848 static void stop_dma_rcv_ctx(struct dma_rcv_ctx *d)
2849 {
2850 if (d->ctrlClear) {
2851 ohci1394_stop_context(d->ohci, d->ctrlClear, NULL);
2852
2853 if (d->type == DMA_CTX_ISO) {
2854 /* disable interrupts */
2855 reg_write(d->ohci, OHCI1394_IsoRecvIntMaskClear, 1 << d->ctx);
2856 ohci1394_unregister_iso_tasklet(d->ohci, &d->ohci->ir_legacy_tasklet);
2857 } else {
2858 tasklet_kill(&d->task);
2859 }
2860 }
2861 }
2862
2863
2864 static void free_dma_rcv_ctx(struct dma_rcv_ctx *d)
2865 {
2866 int i;
2867 struct ti_ohci *ohci = d->ohci;
2868
2869 if (ohci == NULL)
2870 return;
2871
2872 DBGMSG("Freeing dma_rcv_ctx %d", d->ctx);
2873
2874 if (d->buf_cpu) {
2875 for (i=0; i<d->num_desc; i++)
2876 if (d->buf_cpu[i] && d->buf_bus[i]) {
2877 pci_free_consistent(
2878 ohci->dev, d->buf_size,
2879 d->buf_cpu[i], d->buf_bus[i]);
2880 OHCI_DMA_FREE("consistent dma_rcv buf[%d]", i);
2881 }
2882 kfree(d->buf_cpu);
2883 kfree(d->buf_bus);
2884 }
2885 if (d->prg_cpu) {
2886 for (i=0; i<d->num_desc; i++)
2887 if (d->prg_cpu[i] && d->prg_bus[i]) {
2888 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
2889 OHCI_DMA_FREE("consistent dma_rcv prg[%d]", i);
2890 }
2891 pci_pool_destroy(d->prg_pool);
2892 OHCI_DMA_FREE("dma_rcv prg pool");
2893 kfree(d->prg_cpu);
2894 kfree(d->prg_bus);
2895 }
2896 kfree(d->spb); /* kfree(NULL) is a no-op */
2897
2898 /* Mark this context as freed. */
2899 d->ohci = NULL;
2900 }
2901
2902 static int
2903 alloc_dma_rcv_ctx(struct ti_ohci *ohci, struct dma_rcv_ctx *d,
2904 enum context_type type, int ctx, int num_desc,
2905 int buf_size, int split_buf_size, int context_base)
2906 {
2907 int i;
2908 static int num_allocs;
2909 static char pool_name[20];
2910
2911 d->ohci = ohci;
2912 d->type = type;
2913 d->ctx = ctx;
2914
2915 d->num_desc = num_desc;
2916 d->buf_size = buf_size;
2917 d->split_buf_size = split_buf_size;
2918
2919 d->ctrlSet = 0;
2920 d->ctrlClear = 0;
2921 d->cmdPtr = 0;
2922
2923 d->buf_cpu = kmalloc(d->num_desc * sizeof(quadlet_t*), GFP_ATOMIC);
2924 d->buf_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2925
2926 if (d->buf_cpu == NULL || d->buf_bus == NULL) {
2927 PRINT(KERN_ERR, "Failed to allocate dma buffer");
2928 free_dma_rcv_ctx(d);
2929 return -ENOMEM;
2930 }
2931 memset(d->buf_cpu, 0, d->num_desc * sizeof(quadlet_t*));
2932 memset(d->buf_bus, 0, d->num_desc * sizeof(dma_addr_t));
2933
2934 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct dma_cmd*),
2935 GFP_ATOMIC);
2936 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_ATOMIC);
2937
2938 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
2939 PRINT(KERN_ERR, "Failed to allocate dma prg");
2940 free_dma_rcv_ctx(d);
2941 return -ENOMEM;
2942 }
2943 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct dma_cmd*));
2944 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
2945
2946 d->spb = kmalloc(d->split_buf_size, GFP_ATOMIC);
2947
2948 if (d->spb == NULL) {
2949 PRINT(KERN_ERR, "Failed to allocate split buffer");
2950 free_dma_rcv_ctx(d);
2951 return -ENOMEM;
2952 }
2953
2954 snprintf(pool_name, sizeof(pool_name), "ohci1394_rcv_prg%d",
2955 num_allocs);
2956 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
2957 sizeof(struct dma_cmd), 4, 0);
2958 if (d->prg_pool == NULL) {
2960 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
2961 free_dma_rcv_ctx(d);
2962 return -ENOMEM;
2963 }
2964 num_allocs++;
2965
2966 OHCI_DMA_ALLOC("dma_rcv prg pool");
2967
2968 for (i=0; i<d->num_desc; i++) {
2969 d->buf_cpu[i] = pci_alloc_consistent(ohci->dev,
2970 d->buf_size,
2971 d->buf_bus+i);
2972 OHCI_DMA_ALLOC("consistent dma_rcv buf[%d]", i);
2973
2974 if (d->buf_cpu[i] != NULL) {
2975 memset(d->buf_cpu[i], 0, d->buf_size);
2976 } else {
2977 PRINT(KERN_ERR,
2978 "Failed to allocate dma buffer");
2979 free_dma_rcv_ctx(d);
2980 return -ENOMEM;
2981 }
2982
2983 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
2984 OHCI_DMA_ALLOC("pool dma_rcv prg[%d]", i);
2985
2986 if (d->prg_cpu[i] != NULL) {
2987 memset(d->prg_cpu[i], 0, sizeof(struct dma_cmd));
2988 } else {
2989 PRINT(KERN_ERR,
2990 "Failed to allocate dma prg");
2991 free_dma_rcv_ctx(d);
2992 return -ENOMEM;
2993 }
2994 }
2995
2996 spin_lock_init(&d->lock);
2997
2998 if (type == DMA_CTX_ISO) {
2999 ohci1394_init_iso_tasklet(&ohci->ir_legacy_tasklet,
3000 OHCI_ISO_MULTICHANNEL_RECEIVE,
3001 dma_rcv_tasklet, (unsigned long) d);
3002 if (ohci1394_register_iso_tasklet(ohci,
3003 &ohci->ir_legacy_tasklet) < 0) {
3004 PRINT(KERN_ERR, "No IR DMA context available");
3005 free_dma_rcv_ctx(d);
3006 return -EBUSY;
3007 }
3008
3009 /* the IR context can be assigned to any DMA context
3010 * by ohci1394_register_iso_tasklet */
3011 d->ctx = ohci->ir_legacy_tasklet.context;
3012 d->ctrlSet = OHCI1394_IsoRcvContextControlSet + 32*d->ctx;
3013 d->ctrlClear = OHCI1394_IsoRcvContextControlClear + 32*d->ctx;
3014 d->cmdPtr = OHCI1394_IsoRcvCommandPtr + 32*d->ctx;
3015 d->ctxtMatch = OHCI1394_IsoRcvContextMatch + 32*d->ctx;
3016 } else {
3017 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3018 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3019 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3020
3021 tasklet_init (&d->task, dma_rcv_tasklet, (unsigned long) d);
3022 }
3023
3024 return 0;
3025 }
3026
3027 static void free_dma_trm_ctx(struct dma_trm_ctx *d)
3028 {
3029 int i;
3030 struct ti_ohci *ohci = d->ohci;
3031
3032 if (ohci == NULL)
3033 return;
3034
3035 DBGMSG("Freeing dma_trm_ctx %d", d->ctx);
3036
3037 if (d->prg_cpu) {
3038 for (i=0; i<d->num_desc; i++)
3039 if (d->prg_cpu[i] && d->prg_bus[i]) {
3040 pci_pool_free(d->prg_pool, d->prg_cpu[i], d->prg_bus[i]);
3041 OHCI_DMA_FREE("pool dma_trm prg[%d]", i);
3042 }
3043 pci_pool_destroy(d->prg_pool);
3044 OHCI_DMA_FREE("dma_trm prg pool");
3045 kfree(d->prg_cpu);
3046 kfree(d->prg_bus);
3047 }
3048
3049 /* Mark this context as freed. */
3050 d->ohci = NULL;
3051 }
3052
3053 static int
3054 alloc_dma_trm_ctx(struct ti_ohci *ohci, struct dma_trm_ctx *d,
3055 enum context_type type, int ctx, int num_desc,
3056 int context_base)
3057 {
3058 int i;
3059 static char pool_name[20];
3060 static int num_allocs = 0;
3061
3062 d->ohci = ohci;
3063 d->type = type;
3064 d->ctx = ctx;
3065 d->num_desc = num_desc;
3066 d->ctrlSet = 0;
3067 d->ctrlClear = 0;
3068 d->cmdPtr = 0;
3069
3070 d->prg_cpu = kmalloc(d->num_desc * sizeof(struct at_dma_prg*),
3071 GFP_KERNEL);
3072 d->prg_bus = kmalloc(d->num_desc * sizeof(dma_addr_t), GFP_KERNEL);
3073
3074 if (d->prg_cpu == NULL || d->prg_bus == NULL) {
3075 PRINT(KERN_ERR, "Failed to allocate at dma prg");
3076 free_dma_trm_ctx(d);
3077 return -ENOMEM;
3078 }
3079 memset(d->prg_cpu, 0, d->num_desc * sizeof(struct at_dma_prg*));
3080 memset(d->prg_bus, 0, d->num_desc * sizeof(dma_addr_t));
3081
3082 snprintf(pool_name, sizeof(pool_name), "ohci1394_trm_prg%d",
3083 num_allocs);
3084 d->prg_pool = pci_pool_create(pool_name, ohci->dev,
3085 sizeof(struct at_dma_prg), 4, 0);
3086 if (d->prg_pool == NULL) {
3087 PRINT(KERN_ERR, "pci_pool_create failed for %s", pool_name);
3088 free_dma_trm_ctx(d);
3089 return -ENOMEM;
3090 }
3091 num_allocs++;
3092
3093 OHCI_DMA_ALLOC("dma_rcv prg pool");
3094
3095 for (i = 0; i < d->num_desc; i++) {
3096 d->prg_cpu[i] = pci_pool_alloc(d->prg_pool, SLAB_KERNEL, d->prg_bus+i);
3097 OHCI_DMA_ALLOC("pool dma_trm prg[%d]", i);
3098
3099 if (d->prg_cpu[i] != NULL) {
3100 memset(d->prg_cpu[i], 0, sizeof(struct at_dma_prg));
3101 } else {
3102 PRINT(KERN_ERR,
3103 "Failed to allocate at dma prg");
3104 free_dma_trm_ctx(d);
3105 return -ENOMEM;
3106 }
3107 }
3108
3109 spin_lock_init(&d->lock);
3110
3111 /* initialize tasklet */
3112 if (type == DMA_CTX_ISO) {
3113 ohci1394_init_iso_tasklet(&ohci->it_legacy_tasklet, OHCI_ISO_TRANSMIT,
3114 dma_trm_tasklet, (unsigned long) d);
3115 if (ohci1394_register_iso_tasklet(ohci,
3116 &ohci->it_legacy_tasklet) < 0) {
3117 PRINT(KERN_ERR, "No IT DMA context available");
3118 free_dma_trm_ctx(d);
3119 return -EBUSY;
3120 }
3121
3122 /* IT can be assigned to any context by register_iso_tasklet */
3123 d->ctx = ohci->it_legacy_tasklet.context;
3124 d->ctrlSet = OHCI1394_IsoXmitContextControlSet + 16 * d->ctx;
3125 d->ctrlClear = OHCI1394_IsoXmitContextControlClear + 16 * d->ctx;
3126 d->cmdPtr = OHCI1394_IsoXmitCommandPtr + 16 * d->ctx;
3127 } else {
3128 d->ctrlSet = context_base + OHCI1394_ContextControlSet;
3129 d->ctrlClear = context_base + OHCI1394_ContextControlClear;
3130 d->cmdPtr = context_base + OHCI1394_ContextCommandPtr;
3131 tasklet_init (&d->task, dma_trm_tasklet, (unsigned long)d);
3132 }
3133
3134 return 0;
3135 }
3136
3137 static void ohci_set_hw_config_rom(struct hpsb_host *host, quadlet_t *config_rom)
3138 {
3139 struct ti_ohci *ohci = host->hostdata;
3140
3141 reg_write(ohci, OHCI1394_ConfigROMhdr, be32_to_cpu(config_rom[0]));
3142 reg_write(ohci, OHCI1394_BusOptions, be32_to_cpu(config_rom[2]));
3143
3144 memcpy(ohci->csr_config_rom_cpu, config_rom, OHCI_CONFIG_ROM_LEN);
3145 }
3146
3147
3148 static quadlet_t ohci_hw_csr_reg(struct hpsb_host *host, int reg,
3149 quadlet_t data, quadlet_t compare)
3150 {
3151 struct ti_ohci *ohci = host->hostdata;
3152 int i;
3153
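/* The low two bits of 'reg' select the serial-bus CSR (per OHCI 1.1:
 * 0 = BUS_MANAGER_ID, 1 = BANDWIDTH_AVAILABLE, 2/3 =
 * CHANNELS_AVAILABLE hi/lo). Writing CSRControl triggers a hardware
 * compare-and-swap; bit 31 signals completion, after which CSRData
 * holds the old value. */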
3154 reg_write(ohci, OHCI1394_CSRData, data);
3155 reg_write(ohci, OHCI1394_CSRCompareData, compare);
3156 reg_write(ohci, OHCI1394_CSRControl, reg & 0x3);
3157
3158 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
3159 if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
3160 break;
3161
3162 mdelay(1);
3163 }
3164
3165 return reg_read(ohci, OHCI1394_CSRData);
3166 }
3167
3168 static struct hpsb_host_driver ohci1394_driver = {
3169 .owner = THIS_MODULE,
3170 .name = OHCI1394_DRIVER_NAME,
3171 .set_hw_config_rom = ohci_set_hw_config_rom,
3172 .transmit_packet = ohci_transmit,
3173 .devctl = ohci_devctl,
3174 .isoctl = ohci_isoctl,
3175 .hw_csr_reg = ohci_hw_csr_reg,
3176 };
3177
3178 \f
3179
3180 /***********************************
3181 * PCI Driver Interface functions *
3182 ***********************************/
3183
3184 #define FAIL(err, fmt, args...) \
3185 do { \
3186 PRINT_G(KERN_ERR, fmt , ## args); \
3187 ohci1394_pci_remove(dev); \
3188 return err; \
3189 } while (0)
3190
3191 static int __devinit ohci1394_pci_probe(struct pci_dev *dev,
3192 const struct pci_device_id *ent)
3193 {
3194 static int version_printed = 0;
3195
3196 struct hpsb_host *host;
3197 struct ti_ohci *ohci; /* shortcut to currently handled device */
3198 unsigned long ohci_base;
3199
3200 if (version_printed++ == 0)
3201 PRINT_G(KERN_INFO, "%s", version);
3202
3203 if (pci_enable_device(dev))
3204 FAIL(-ENXIO, "Failed to enable OHCI hardware");
3205 pci_set_master(dev);
3206
3207 host = hpsb_alloc_host(&ohci1394_driver, sizeof(struct ti_ohci), &dev->dev);
3208 if (!host) FAIL(-ENOMEM, "Failed to allocate host structure");
3209
3210 ohci = host->hostdata;
3211 ohci->dev = dev;
3212 ohci->host = host;
3213 ohci->init_state = OHCI_INIT_ALLOC_HOST;
3214 host->pdev = dev;
3215 pci_set_drvdata(dev, ohci);
3216
3217 /* We don't want hardware swapping */
3218 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3219
3220 /* Some oddball Apple controllers do not order the selfid
3221 * properly, so we make up for it here. */
3222 #ifndef __LITTLE_ENDIAN
3223 /* XXX: Need a better way to check this. I'm wondering if we can
3224 * read the values of the OHCI1394_PCI_HCI_Control and the
3225 * noByteSwapData registers to see if they were not cleared to
3226 * zero. Should this work? Obviously it's not defined what these
3227 * registers will read when they aren't supported. Bleh! */
3228 if (dev->vendor == PCI_VENDOR_ID_APPLE &&
3229 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) {
3230 ohci->no_swap_incoming = 1;
3231 ohci->selfid_swap = 0;
3232 } else
3233 ohci->selfid_swap = 1;
3234 #endif
3235
3236
3237 #ifndef PCI_DEVICE_ID_NVIDIA_NFORCE2_FW
3238 #define PCI_DEVICE_ID_NVIDIA_NFORCE2_FW 0x006e
3239 #endif
3240
3241 /* These chipsets require a bit of extra care when checking after
3242 * a busreset. */
3243 if ((dev->vendor == PCI_VENDOR_ID_APPLE &&
3244 dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW) ||
3245 (dev->vendor == PCI_VENDOR_ID_NVIDIA &&
3246 dev->device == PCI_DEVICE_ID_NVIDIA_NFORCE2_FW))
3247 ohci->check_busreset = 1;
3248
3249 /* We hardwire the MMIO length, since some CardBus adaptors
3250 * fail to report the right length. Anyway, the ohci spec
3251 * clearly says it's 2kb, so this shouldn't be a problem. */
3252 ohci_base = pci_resource_start(dev, 0);
3253 if (pci_resource_len(dev, 0) != OHCI1394_REGISTER_SIZE)
3254 PRINT(KERN_WARNING, "Unexpected PCI resource length of %lx!",
3255 pci_resource_len(dev, 0));
3256
3257 /* Seems PCMCIA handles this internally. Not sure why. Seems
3258 * pretty bogus to force a driver to special case this. */
3259 #ifndef PCMCIA
3260 if (!request_mem_region (ohci_base, OHCI1394_REGISTER_SIZE, OHCI1394_DRIVER_NAME))
3261 FAIL(-ENOMEM, "MMIO resource (0x%lx - 0x%lx) unavailable",
3262 ohci_base, ohci_base + OHCI1394_REGISTER_SIZE);
3263 #endif
3264 ohci->init_state = OHCI_INIT_HAVE_MEM_REGION;
3265
3266 ohci->registers = ioremap(ohci_base, OHCI1394_REGISTER_SIZE);
3267 if (ohci->registers == NULL)
3268 FAIL(-ENXIO, "Failed to remap registers - card not accessible");
3269 ohci->init_state = OHCI_INIT_HAVE_IOMAPPING;
3270 DBGMSG("Remapped memory spaces reg 0x%p", ohci->registers);
3271
3272 /* csr_config rom allocation */
3273 ohci->csr_config_rom_cpu =
3274 pci_alloc_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3275 &ohci->csr_config_rom_bus);
3276 OHCI_DMA_ALLOC("consistent csr_config_rom");
3277 if (ohci->csr_config_rom_cpu == NULL)
3278 FAIL(-ENOMEM, "Failed to allocate buffer config rom");
3279 ohci->init_state = OHCI_INIT_HAVE_CONFIG_ROM_BUFFER;
3280
3281 /* self-id dma buffer allocation */
3282 ohci->selfid_buf_cpu =
3283 pci_alloc_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3284 &ohci->selfid_buf_bus);
3285 OHCI_DMA_ALLOC("consistent selfid_buf");
3286
3287 if (ohci->selfid_buf_cpu == NULL)
3288 FAIL(-ENOMEM, "Failed to allocate DMA buffer for self-id packets");
3289 ohci->init_state = OHCI_INIT_HAVE_SELFID_BUFFER;
3290
3291 if ((unsigned long)ohci->selfid_buf_cpu & 0x1fff)
3292 PRINT(KERN_INFO, "SelfID buffer %p is not aligned on "
3293 "an 8KB boundary... may cause problems on some CXD3222 chips",
3294 ohci->selfid_buf_cpu);
3295
3296 /* No self-id errors at startup */
3297 ohci->self_id_errors = 0;
3298
3299 ohci->init_state = OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE;
3300 /* AR DMA request context allocation */
3301 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_req_context,
3302 DMA_CTX_ASYNC_REQ, 0, AR_REQ_NUM_DESC,
3303 AR_REQ_BUF_SIZE, AR_REQ_SPLIT_BUF_SIZE,
3304 OHCI1394_AsReqRcvContextBase) < 0)
3305 FAIL(-ENOMEM, "Failed to allocate AR Req context");
3306
3307 /* AR DMA response context allocation */
3308 if (alloc_dma_rcv_ctx(ohci, &ohci->ar_resp_context,
3309 DMA_CTX_ASYNC_RESP, 0, AR_RESP_NUM_DESC,
3310 AR_RESP_BUF_SIZE, AR_RESP_SPLIT_BUF_SIZE,
3311 OHCI1394_AsRspRcvContextBase) < 0)
3312 FAIL(-ENOMEM, "Failed to allocate AR Resp context");
3313
3314 /* AT DMA request context */
3315 if (alloc_dma_trm_ctx(ohci, &ohci->at_req_context,
3316 DMA_CTX_ASYNC_REQ, 0, AT_REQ_NUM_DESC,
3317 OHCI1394_AsReqTrContextBase) < 0)
3318 FAIL(-ENOMEM, "Failed to allocate AT Req context");
3319
3320 /* AT DMA response context */
3321 if (alloc_dma_trm_ctx(ohci, &ohci->at_resp_context,
3322 DMA_CTX_ASYNC_RESP, 1, AT_RESP_NUM_DESC,
3323 OHCI1394_AsRspTrContextBase) < 0)
3324 FAIL(-ENOMEM, "Failed to allocate AT Resp context");
3325
3326 /* Start off with a soft reset, to clear everything to a sane
3327 * state. */
3328 ohci_soft_reset(ohci);
3329
3330 /* Now enable LPS, which we need in order to start accessing
3331 * most of the registers. In fact, on some cards (ALI M5251),
3332 * accessing registers in the SClk domain without LPS enabled
3333 * will lock up the machine. Wait 50msec to make sure we have
3334 * full link enabled. */
3335 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);
3336
3337 /* Disable and clear interrupts */
3338 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3339 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3340
3341 mdelay(50);
3342
3343 /* Determine the number of available IR and IT contexts. */
3344 ohci->nb_iso_rcv_ctx =
3345 get_nb_iso_ctx(ohci, OHCI1394_IsoRecvIntMaskSet);
3346 DBGMSG("%d iso receive contexts available",
3347 ohci->nb_iso_rcv_ctx);
3348
3349 ohci->nb_iso_xmit_ctx =
3350 get_nb_iso_ctx(ohci, OHCI1394_IsoXmitIntMaskSet);
3351 DBGMSG("%d iso transmit contexts available",
3352 ohci->nb_iso_xmit_ctx);
3353
3354 /* Set the usage bits for non-existent contexts so they can't
3355 * be allocated */
3356 ohci->ir_ctx_usage = ~0 << ohci->nb_iso_rcv_ctx;
3357 ohci->it_ctx_usage = ~0 << ohci->nb_iso_xmit_ctx;
3358
3359 INIT_LIST_HEAD(&ohci->iso_tasklet_list);
3360 spin_lock_init(&ohci->iso_tasklet_list_lock);
3361 ohci->ISO_channel_usage = 0;
3362 spin_lock_init(&ohci->IR_channel_lock);
3363
3364 /* Allocate the IR DMA context right here so we don't have
3365 * to do it in the interrupt path - note that this doesn't
3366 * waste much memory and avoids the juggling required to
3367 * allocate it in IRQ context. */
3368 if (alloc_dma_rcv_ctx(ohci, &ohci->ir_legacy_context,
3369 DMA_CTX_ISO, 0, IR_NUM_DESC,
3370 IR_BUF_SIZE, IR_SPLIT_BUF_SIZE,
3371 OHCI1394_IsoRcvContextBase) < 0) {
3372 FAIL(-ENOMEM, "Cannot allocate IR Legacy DMA context");
3373 }
3374
3375 /* We hopefully don't have to pre-allocate IT DMA like we did
3376 * for IR DMA above. Allocate it on-demand and mark inactive. */
3377 ohci->it_legacy_context.ohci = NULL;
3378
3379 if (request_irq(dev->irq, ohci_irq_handler, SA_SHIRQ,
3380 OHCI1394_DRIVER_NAME, ohci))
3381 FAIL(-ENOMEM, "Failed to allocate shared interrupt %d", dev->irq);
3382
3383 ohci->init_state = OHCI_INIT_HAVE_IRQ;
3384 ohci_initialize(ohci);
3385
3386 /* Set certain csr values */
3387 host->csr.guid_hi = reg_read(ohci, OHCI1394_GUIDHi);
3388 host->csr.guid_lo = reg_read(ohci, OHCI1394_GUIDLo);
3389 host->csr.cyc_clk_acc = 100; /* how do we determine clk accuracy? */
3390 host->csr.max_rec = (reg_read(ohci, OHCI1394_BusOptions) >> 12) & 0xf;
3391 host->csr.lnk_spd = reg_read(ohci, OHCI1394_BusOptions) & 0x7;
3392
3393 /* Tell the highlevel this host is ready */
3394 if (hpsb_add_host(host))
3395 FAIL(-ENOMEM, "Failed to register host with highlevel");
3396
3397 ohci->init_state = OHCI_INIT_DONE;
3398
3399 return 0;
3400 #undef FAIL
3401 }
3402
3403 static void ohci1394_pci_remove(struct pci_dev *pdev)
3404 {
3405 struct ti_ohci *ohci;
3406 struct device *dev;
3407
3408 ohci = pci_get_drvdata(pdev);
3409 if (!ohci)
3410 return;
3411
3412 dev = get_device(&ohci->host->device);
3413
3414 switch (ohci->init_state) {
3415 case OHCI_INIT_DONE:
3416 stop_dma_rcv_ctx(&ohci->ir_legacy_context);
3417 hpsb_remove_host(ohci->host);
3418
3419 /* Clear out BUS Options */
3420 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
3421 reg_write(ohci, OHCI1394_BusOptions,
3422 (reg_read(ohci, OHCI1394_BusOptions) & 0x0000f007) |
3423 0x00ff0000);
3424 memset(ohci->csr_config_rom_cpu, 0, OHCI_CONFIG_ROM_LEN);
3425
3426 case OHCI_INIT_HAVE_IRQ:
3427 /* Clear interrupt registers */
3428 reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);
3429 reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
3430 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
3431 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);
3432 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
3433 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
3434
3435 /* Disable IRM Contender */
3436 set_phy_reg(ohci, 4, ~0xc0 & get_phy_reg(ohci, 4));
3437
3438 /* Clear link control register */
3439 reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);
3440
3441 /* Let all other nodes know to ignore us */
3442 ohci_devctl(ohci->host, RESET_BUS, LONG_RESET_NO_FORCE_ROOT);
3443
3444 /* Soft reset before we start - this disables
3445 * interrupts and clears linkEnable and LPS. */
3446 ohci_soft_reset(ohci);
3447 free_irq(ohci->dev->irq, ohci);
3448
3449 case OHCI_INIT_HAVE_TXRX_BUFFERS__MAYBE:
3450 /* ohci_soft_reset() has already stopped all DMA contexts,
3451 * so we don't need to stop them individually here. */
3452 /* Free AR dma */
3453 free_dma_rcv_ctx(&ohci->ar_req_context);
3454 free_dma_rcv_ctx(&ohci->ar_resp_context);
3455
3456 /* Free AT dma */
3457 free_dma_trm_ctx(&ohci->at_req_context);
3458 free_dma_trm_ctx(&ohci->at_resp_context);
3459
3460 /* Free IR dma */
3461 free_dma_rcv_ctx(&ohci->ir_legacy_context);
3462
3463 /* Free IT dma */
3464 free_dma_trm_ctx(&ohci->it_legacy_context);
3465
3470 case OHCI_INIT_HAVE_SELFID_BUFFER:
3471 pci_free_consistent(ohci->dev, OHCI1394_SI_DMA_BUF_SIZE,
3472 ohci->selfid_buf_cpu,
3473 ohci->selfid_buf_bus);
3474 OHCI_DMA_FREE("consistent selfid_buf");
3475
3476 case OHCI_INIT_HAVE_CONFIG_ROM_BUFFER:
3477 pci_free_consistent(ohci->dev, OHCI_CONFIG_ROM_LEN,
3478 ohci->csr_config_rom_cpu,
3479 ohci->csr_config_rom_bus);
3480 OHCI_DMA_FREE("consistent csr_config_rom");
3481
3482 case OHCI_INIT_HAVE_IOMAPPING:
3483 iounmap(ohci->registers);
3484
3485 case OHCI_INIT_HAVE_MEM_REGION:
3486 #ifndef PCMCIA
3487 release_mem_region(pci_resource_start(ohci->dev, 0),
3488 OHCI1394_REGISTER_SIZE);
3489 #endif
3490
3491 #ifdef CONFIG_PPC_PMAC
3492 /* On UniNorth, power down the cable and turn off the chip
3493 * clock when the module is removed to save power on
3494 * laptops. Turning it back ON is done by the arch code when
3495 * pci_enable_device() is called */
3496 {
3497 struct device_node* of_node;
3498
3499 of_node = pci_device_to_OF_node(ohci->dev);
3500 if (of_node) {
3501 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3502 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, of_node, 0, 0);
3503 }
3504 }
3505 #endif /* CONFIG_PPC_PMAC */
3506
3507 case OHCI_INIT_ALLOC_HOST:
3508 pci_set_drvdata(ohci->dev, NULL);
3509 }
3510
3511 if (dev)
3512 put_device(dev);
3513 }
3514
3515
3516 static int ohci1394_pci_resume (struct pci_dev *pdev)
3517 {
3518 #ifdef CONFIG_PMAC_PBOOK
3519 {
3520 struct device_node *of_node;
3521
3522 /* Re-enable 1394 */
3523 of_node = pci_device_to_OF_node (pdev);
3524 if (of_node)
3525 pmac_call_feature (PMAC_FTR_1394_ENABLE, of_node, 0, 1);
3526 }
3527 #endif
3528
3529 pci_enable_device(pdev);
3530
3531 return 0;
3532 }
3533
3534
3535 static int ohci1394_pci_suspend (struct pci_dev *pdev, pm_message_t state)
3536 {
3537 #ifdef CONFIG_PMAC_PBOOK
3538 {
3539 struct device_node *of_node;
3540
3541 /* Disable 1394 */
3542 of_node = pci_device_to_OF_node (pdev);
3543 if (of_node)
3544 pmac_call_feature(PMAC_FTR_1394_ENABLE, of_node, 0, 0);
3545 }
3546 #endif
3547
3548 return 0;
3549 }
3550
3551
3552 #define PCI_CLASS_FIREWIRE_OHCI ((PCI_CLASS_SERIAL_FIREWIRE << 8) | 0x10)
3553
3554 static struct pci_device_id ohci1394_pci_tbl[] = {
3555 {
3556 .class = PCI_CLASS_FIREWIRE_OHCI,
3557 .class_mask = PCI_ANY_ID,
3558 .vendor = PCI_ANY_ID,
3559 .device = PCI_ANY_ID,
3560 .subvendor = PCI_ANY_ID,
3561 .subdevice = PCI_ANY_ID,
3562 },
3563 { 0, },
3564 };
3565
3566 MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl);
3567
3568 static struct pci_driver ohci1394_pci_driver = {
3569 .name = OHCI1394_DRIVER_NAME,
3570 .id_table = ohci1394_pci_tbl,
3571 .probe = ohci1394_pci_probe,
3572 .remove = ohci1394_pci_remove,
3573 .resume = ohci1394_pci_resume,
3574 .suspend = ohci1394_pci_suspend,
3575 };
3576
3577 \f
3578
3579 /***********************************
3580 * OHCI1394 Video Interface *
3581 ***********************************/
3582
3583 /* essentially the only purpose of this code is to allow another
3584 module to hook into ohci's interrupt handler */
3585
3586 int ohci1394_stop_context(struct ti_ohci *ohci, int reg, char *msg)
3587 {
3588 int i=0;
3589
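	/* 'reg' is a ContextControlClear register: writing 0x8000
	 * clears the run bit, and bit 0x400 ('active') drops once the
	 * controller has actually let go of the context. */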
3590 /* stop the channel program if it's still running */
3591 reg_write(ohci, reg, 0x8000);
3592
3593 /* Wait until it effectively stops */
3594 while (reg_read(ohci, reg) & 0x400) {
3595 i++;
3596 if (i>5000) {
3597 PRINT(KERN_ERR,
3598 "Runaway loop while stopping context: %s...", msg ? msg : "");
3599 return 1;
3600 }
3601
3602 mb();
3603 udelay(10);
3604 }
3605 if (msg) PRINT(KERN_ERR, "%s: dma prg stopped", msg);
3606 return 0;
3607 }
3608
3609 void ohci1394_init_iso_tasklet(struct ohci1394_iso_tasklet *tasklet, int type,
3610 void (*func)(unsigned long), unsigned long data)
3611 {
3612 tasklet_init(&tasklet->tasklet, func, data);
3613 tasklet->type = type;
3614 /* We init the tasklet->link field, so we can list_del() it
3615 * without worrying whether it was added to the list or not. */
3616 INIT_LIST_HEAD(&tasklet->link);
3617 }
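/*
 * Typical client usage, modeled on ohci_iso_xmit_init() above (a
 * sketch only -- 'my_tasklet_func' and 'my_data' are placeholders):
 *
 *	ohci1394_init_iso_tasklet(&t, OHCI_ISO_TRANSMIT,
 *				  my_tasklet_func, my_data);
 *	if (ohci1394_register_iso_tasklet(ohci, &t) < 0)
 *		return -EBUSY;
 *	ctx = t.context;	(hardware context number assigned)
 *	...
 *	ohci1394_unregister_iso_tasklet(ohci, &t);
 */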
3618
3619 int ohci1394_register_iso_tasklet(struct ti_ohci *ohci,
3620 struct ohci1394_iso_tasklet *tasklet)
3621 {
3622 unsigned long flags, *usage;
3623 int n, i, r = -EBUSY;
3624
3625 if (tasklet->type == OHCI_ISO_TRANSMIT) {
3626 n = ohci->nb_iso_xmit_ctx;
3627 usage = &ohci->it_ctx_usage;
3628 }
3629 else {
3630 n = ohci->nb_iso_rcv_ctx;
3631 usage = &ohci->ir_ctx_usage;
3632
3633 /* only one receive context can be multichannel (OHCI sec 10.4.1) */
3634 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3635 if (test_and_set_bit(0, &ohci->ir_multichannel_used)) {
3636 return r;
3637 }
3638 }
3639 }
3640
3641 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3642
3643 for (i = 0; i < n; i++)
3644 if (!test_and_set_bit(i, usage)) {
3645 tasklet->context = i;
3646 list_add_tail(&tasklet->link, &ohci->iso_tasklet_list);
3647 r = 0;
3648 break;
3649 }
3650
3651 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3652
3653 return r;
3654 }
3655
3656 void ohci1394_unregister_iso_tasklet(struct ti_ohci *ohci,
3657 struct ohci1394_iso_tasklet *tasklet)
3658 {
3659 unsigned long flags;
3660
3661 tasklet_kill(&tasklet->tasklet);
3662
3663 spin_lock_irqsave(&ohci->iso_tasklet_list_lock, flags);
3664
3665 if (tasklet->type == OHCI_ISO_TRANSMIT)
3666 clear_bit(tasklet->context, &ohci->it_ctx_usage);
3667 else {
3668 clear_bit(tasklet->context, &ohci->ir_ctx_usage);
3669
3670 if (tasklet->type == OHCI_ISO_MULTICHANNEL_RECEIVE) {
3671 clear_bit(0, &ohci->ir_multichannel_used);
3672 }
3673 }
3674
3675 list_del(&tasklet->link);
3676
3677 spin_unlock_irqrestore(&ohci->iso_tasklet_list_lock, flags);
3678 }
3679
3680 EXPORT_SYMBOL(ohci1394_stop_context);
3681 EXPORT_SYMBOL(ohci1394_init_iso_tasklet);
3682 EXPORT_SYMBOL(ohci1394_register_iso_tasklet);
3683 EXPORT_SYMBOL(ohci1394_unregister_iso_tasklet);
3684
3685
3686 /***********************************
3687 * General module initialization *
3688 ***********************************/
3689
3690 MODULE_AUTHOR("Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>");
3691 MODULE_DESCRIPTION("Driver for PCI OHCI IEEE-1394 controllers");
3692 MODULE_LICENSE("GPL");
3693
3694 static void __exit ohci1394_cleanup (void)
3695 {
3696 pci_unregister_driver(&ohci1394_pci_driver);
3697 }
3698
3699 static int __init ohci1394_init(void)
3700 {
3701 return pci_register_driver(&ohci1394_pci_driver);
3702 }
3703
3704 module_init(ohci1394_init);
3705 module_exit(ohci1394_cleanup);