/* -*- c-basic-offset: 8 -*-
 *
 * fw-ohci.c - Driver for OHCI 1394 boards
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>
#include <asm/uaccess.h>
#include <asm/semaphore.h>
#include "fw-transaction.h"
#include "fw-ohci.h"
#define descriptor_output_more		0
#define descriptor_output_last		(1 << 12)
#define descriptor_input_more		(2 << 12)
#define descriptor_input_last		(3 << 12)
#define descriptor_status		(1 << 11)
#define descriptor_key_immediate	(2 << 8)
#define descriptor_ping			(1 << 7)
#define descriptor_yy			(1 << 6)
#define descriptor_no_irq		(0 << 4)
#define descriptor_irq_error		(1 << 4)
#define descriptor_irq_always		(3 << 4)
#define descriptor_branch_always	(3 << 2)
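
/* A hardware DMA descriptor, as laid out in the OHCI 1394 spec:
 * 16 bytes, 16-byte aligned.  The low four bits of branch_address
 * hold the Z value, i.e. the number of descriptors in the block
 * branched to. */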
struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));
struct ar_context {
	struct fw_ohci *ohci;
	struct descriptor descriptor;
	__le32 buffer[512];
	dma_addr_t descriptor_bus;
	dma_addr_t buffer_bus;

	u32 command_ptr;
	u32 control_set;
	u32 control_clear;

	struct tasklet_struct tasklet;
};
struct at_context {
	struct fw_ohci *ohci;
	dma_addr_t descriptor_bus;
	dma_addr_t buffer_bus;

	struct list_head list;

	struct {
		struct descriptor more;
		__le32 header[4];
		struct descriptor last;
	} d;

	u32 command_ptr;
	u32 control_set;
	u32 control_clear;

	struct tasklet_struct tasklet;
};
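
/* The it_header macros build the two immediate header quadlets of an
 * isochronous transmit descriptor block: sy, tcode, channel, tag and
 * speed go into the first quadlet, the data length into the second.
 * See the header setup in ohci_queue_iso() below. */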
#define it_header_sy(v)          ((v) <<  0)
#define it_header_tcode(v)       ((v) <<  4)
#define it_header_channel(v)     ((v) <<  8)
#define it_header_tag(v)         ((v) << 14)
#define it_header_speed(v)       ((v) << 16)
#define it_header_data_length(v) ((v) << 16)
struct iso_context {
	struct fw_iso_context base;
	struct tasklet_struct tasklet;

	struct descriptor *buffer;
	dma_addr_t buffer_bus;
	struct descriptor *head_descriptor;
	struct descriptor *tail_descriptor;
	struct descriptor *tail_descriptor_last;
	struct descriptor *prev_descriptor;
};
#define CONFIG_ROM_SIZE 1024
struct fw_ohci {
	struct fw_card card;

	char __iomem *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;

	/* Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held. */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	u32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct at_context at_request_ctx;
	struct at_context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}
#define CONTEXT_CYCLE_MATCH_ENABLE 0x80000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0x2
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define FW_OHCI_MAJOR			240
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
static char ohci_driver_name[] = KBUILD_MODNAME;
static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}
static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}
static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}
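
/* Read-modify-write of a PHY register, mediated by the PhyControl
 * register: issue a read request, wait for ReadDone, merge the new
 * bits into the old value and write it back. */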
static int
ohci_update_phy_reg(struct fw_card *card, int addr,
		    int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}
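
/* Point the context at its single descriptor (the low four bits of
 * CommandPtr hold the Z value, here 1) and set the run bit. */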
static void ar_context_run(struct ar_context *ctx)
{
	reg_write(ctx->ohci, ctx->command_ptr, ctx->descriptor_bus | 1);
	reg_write(ctx->ohci, ctx->control_set, CONTEXT_RUN);
	flush_writes(ctx->ohci);
}
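
/* An AR buffer has been filled: req_count - res_count gives the
 * number of bytes received, and the controller appends a trailer
 * quadlet holding the ack, speed and timestamp of the packet. */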
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;

	/* FIXME: What to do about evt_* errors? */
	length = le16_to_cpu(ctx->descriptor.req_count) -
		le16_to_cpu(ctx->descriptor.res_count) - 4;
	status = le32_to_cpu(ctx->buffer[length / 4]);

	p.ack        = ((status >> 16) & 0x1f) - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	p.header[0] = le32_to_cpu(ctx->buffer[0]);
	p.header[1] = le32_to_cpu(ctx->buffer[1]);
	p.header[2] = le32_to_cpu(ctx->buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = ctx->buffer[3];
		p.header_length = 16;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = le32_to_cpu(ctx->buffer[3]);
		p.header_length = 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
		p.header_length = 12;
		break;
	}

	p.payload = (void *) ctx->buffer + p.header_length;
	p.payload_length = length - p.header_length;

	/* The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request. */

	if (p.ack + 16 == 0x09)
		ohci->request_generation = (ctx->buffer[2] >> 16) & 0xff;
	else if (ctx == &ohci->ar_request_ctx)
		fw_core_handle_request(&ohci->card, &p);
	else
		fw_core_handle_response(&ohci->card, &p);

	ctx->descriptor.data_address = cpu_to_le32(ctx->buffer_bus);
	ctx->descriptor.req_count    = cpu_to_le16(sizeof ctx->buffer);
	ctx->descriptor.res_count    = cpu_to_le16(sizeof ctx->buffer);

	dma_sync_single_for_device(ohci->card.device, ctx->descriptor_bus,
				   sizeof ctx->descriptor, DMA_TO_DEVICE);

	/* FIXME: We stop and restart the ar context here, what if we
	 * stop while a receive is in progress?  Maybe we could just
	 * loop the context back to itself and use it in buffer fill
	 * mode as intended... */

	reg_write(ctx->ohci, ctx->control_clear, CONTEXT_RUN);
	ar_context_run(ctx);
}
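
/* Map the context's descriptor and receive buffer for DMA, set the
 * descriptor up in input_more mode to fill the whole buffer, and
 * start the context. */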
static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 control_set)
{
	ctx->descriptor_bus =
		dma_map_single(ohci->card.device, &ctx->descriptor,
			       sizeof ctx->descriptor, DMA_TO_DEVICE);
	if (ctx->descriptor_bus == 0)
		return -ENOMEM;

	if (ctx->descriptor_bus & 0xf)
		fw_notify("descriptor not 16-byte aligned: 0x%08lx\n",
			  (unsigned long)ctx->descriptor_bus);

	ctx->buffer_bus =
		dma_map_single(ohci->card.device, ctx->buffer,
			       sizeof ctx->buffer, DMA_FROM_DEVICE);

	if (ctx->buffer_bus == 0) {
		dma_unmap_single(ohci->card.device, ctx->descriptor_bus,
				 sizeof ctx->descriptor, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	memset(&ctx->descriptor, 0, sizeof ctx->descriptor);
	ctx->descriptor.control      = cpu_to_le16(descriptor_input_more |
						   descriptor_status |
						   descriptor_branch_always);
	ctx->descriptor.req_count    = cpu_to_le16(sizeof ctx->buffer);
	ctx->descriptor.data_address = cpu_to_le32(ctx->buffer_bus);
	ctx->descriptor.res_count    = cpu_to_le16(sizeof ctx->buffer);

	ctx->control_set   = control_set;
	ctx->control_clear = control_set + 4;
	ctx->command_ptr   = control_set + 12;
	ctx->ohci          = ohci;

	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_run(ctx);

	return 0;
}
static void
do_packet_callbacks(struct fw_ohci *ohci, struct list_head *list)
{
	struct fw_packet *p, *next;

	list_for_each_entry_safe(p, next, list, link)
		p->callback(p, &ohci->card, p->ack);
}
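
/* Move a finished packet to the given completion list and record its
 * ack/errno code; the callbacks are invoked later, after the lock has
 * been dropped, via do_packet_callbacks(). */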
static void
complete_transmission(struct fw_packet *packet,
		      int ack, struct list_head *list)
{
	list_move_tail(&packet->link, list);
	packet->ack = ack;
}
/* This function prepares the first packet in the context queue for
 * transmission.  Must always be called with the ohci->lock held to
 * ensure proper generation handling and locking around packet queue
 * manipulation. */
static void
at_context_setup_packet(struct at_context *ctx, struct list_head *list)
{
	struct fw_packet *packet;
	struct fw_ohci *ohci = ctx->ohci;
	int z, tcode;

	packet = fw_packet(ctx->list.next);

	memset(&ctx->d, 0, sizeof ctx->d);
	if (packet->payload_length > 0) {
		packet->payload_bus = dma_map_single(ohci->card.device,
						     packet->payload,
						     packet->payload_length,
						     DMA_TO_DEVICE);
		if (packet->payload_bus == 0) {
			complete_transmission(packet, -ENOMEM, list);
			return;
		}

		ctx->d.more.control      =
			cpu_to_le16(descriptor_output_more |
				    descriptor_key_immediate);
		ctx->d.more.req_count    = cpu_to_le16(packet->header_length);
		ctx->d.more.res_count    = cpu_to_le16(packet->timestamp);
		ctx->d.last.control      =
			cpu_to_le16(descriptor_output_last |
				    descriptor_irq_always |
				    descriptor_branch_always);
		ctx->d.last.req_count    = cpu_to_le16(packet->payload_length);
		ctx->d.last.data_address = cpu_to_le32(packet->payload_bus);
		z = 3;
	} else {
		ctx->d.more.control      =
			cpu_to_le16(descriptor_output_last |
				    descriptor_key_immediate |
				    descriptor_irq_always |
				    descriptor_branch_always);
		ctx->d.more.req_count    = cpu_to_le16(packet->header_length);
		ctx->d.more.res_count    = cpu_to_le16(packet->timestamp);
		z = 2;
	}

	/* The DMA format for asynchronous link packets is different
	 * from the IEEE 1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet. */
	if (packet->header_length > 8) {
		ctx->d.header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					       (packet->speed << 16));
		ctx->d.header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					       (packet->header[0] & 0xffff0000));
		ctx->d.header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			ctx->d.header[3] = cpu_to_le32(packet->header[3]);
		else
			ctx->d.header[3] = packet->header[3];
	} else {
		ctx->d.header[0] =
			cpu_to_le32((OHCI1394_phy_tcode << 4) |
				    (packet->speed << 16));
		ctx->d.header[1] = cpu_to_le32(packet->header[0]);
		ctx->d.header[2] = cpu_to_le32(packet->header[1]);
		ctx->d.more.req_count = cpu_to_le16(12);
	}

	/* FIXME: Document how the locking works. */
	if (ohci->generation == packet->generation) {
		reg_write(ctx->ohci, ctx->command_ptr,
			  ctx->descriptor_bus | z);
		reg_write(ctx->ohci, ctx->control_set,
			  CONTEXT_RUN | CONTEXT_WAKE);
	} else {
		/* We don't return error codes from this function; all
		 * transmission errors are reported through the
		 * callback. */
		complete_transmission(packet, -ESTALE, list);
	}
}
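
/* Clear the run bit of an AT context and warn if the controller
 * still reports the context as active. */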
static void at_context_stop(struct at_context *ctx)
{
	u32 reg;

	reg_write(ctx->ohci, ctx->control_clear, CONTEXT_RUN);

	reg = reg_read(ctx->ohci, ctx->control_set);
	if (reg & CONTEXT_ACTIVE)
		fw_notify("Tried to stop context, but it is still active "
			  "(0x%08x).\n", reg);
}
static void at_context_tasklet(unsigned long data)
{
	struct at_context *ctx = (struct at_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet *packet;
	LIST_HEAD(list);
	unsigned long flags;
	int evt;

	spin_lock_irqsave(&ohci->lock, flags);

	packet = fw_packet(ctx->list.next);

	at_context_stop(ctx);

	if (packet->payload_length > 0) {
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);
		evt = le16_to_cpu(ctx->d.last.transfer_status) & 0x1f;
		packet->timestamp = le16_to_cpu(ctx->d.last.res_count);
	} else {
		evt = le16_to_cpu(ctx->d.more.transfer_status) & 0x1f;
		packet->timestamp = le16_to_cpu(ctx->d.more.res_count);
	}

	if (evt < 16) {
		switch (evt) {
		case OHCI1394_evt_timeout:
			/* Async response transmit timed out. */
			complete_transmission(packet, -ETIMEDOUT, &list);
			break;

		case OHCI1394_evt_flushed:
			/* The packet was flushed; this should give the
			 * same error as when we try to use a stale
			 * generation count. */
			complete_transmission(packet, -ESTALE, &list);
			break;

		case OHCI1394_evt_missing_ack:
			/* This would be a higher level software
			 * error, it is using a valid (current)
			 * generation count, but the node is not on
			 * the bus. */
			complete_transmission(packet, -ENODEV, &list);
			break;

		default:
			complete_transmission(packet, -EIO, &list);
			break;
		}
	} else
		complete_transmission(packet, evt - 16, &list);

	/* If more packets are queued, set up the next one. */
	if (!list_empty(&ctx->list))
		at_context_setup_packet(ctx, &list);

	spin_unlock_irqrestore(&ohci->lock, flags);

	do_packet_callbacks(ohci, &list);
}
static int
at_context_init(struct at_context *ctx, struct fw_ohci *ohci, u32 control_set)
{
	INIT_LIST_HEAD(&ctx->list);

	ctx->descriptor_bus =
		dma_map_single(ohci->card.device, &ctx->d,
			       sizeof ctx->d, DMA_TO_DEVICE);
	if (ctx->descriptor_bus == 0)
		return -ENOMEM;

	ctx->control_set   = control_set;
	ctx->control_clear = control_set + 4;
	ctx->command_ptr   = control_set + 12;
	ctx->ohci          = ohci;

	tasklet_init(&ctx->tasklet, at_context_tasklet, (unsigned long)ctx);

	return 0;
}
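
/* Accessors for the fields of an IEEE 1394 packet header: quadlet 0
 * holds the destination ID and tcode, quadlet 1 the upper 16 bits of
 * the 48-bit address offset, quadlet 3 the data length and extended
 * tcode. */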
#define header_get_destination(q)	(((q) >> 16) & 0xffff)
#define header_get_tcode(q)		(((q) >> 4) & 0x0f)
#define header_get_offset_high(q)	(((q) >> 0) & 0xffff)
#define header_get_data_length(q)	(((q) >> 16) & 0xffff)
#define header_get_extended_tcode(q)	(((q) >> 0) & 0xffff)
static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = header_get_tcode(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = header_get_data_length(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}
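
/* Lock requests to the serialized bus management CSRs are handed to
 * the controller: the compare-swap operands go into the CSRData and
 * CSRCompareData registers and CSRControl selects the register to
 * operate on. */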
static void
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = header_get_tcode(packet->header[0]);
	length = header_get_data_length(packet->header[3]);
	payload = packet->payload;
	ext_tcode = header_get_extended_tcode(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
	else
		fw_notify("swap not done yet\n");

	fw_fill_response(&response, packet->header,
			 RCODE_COMPLETE, &lock_old, sizeof lock_old);
 out:
	fw_core_handle_response(&ohci->card, &response);
}
static void
handle_local_request(struct at_context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	packet->ack = ACK_PENDING;
	packet->callback(packet, &ctx->ohci->card, packet->ack);

	offset =
		((unsigned long long)
		 header_get_offset_high(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}
}
static void
at_context_transmit(struct at_context *ctx, struct fw_packet *packet)
{
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (header_get_destination(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	list_add_tail(&packet->link, &ctx->list);
	if (ctx->list.next == &packet->link)
		at_context_setup_packet(ctx, &list);

	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	do_packet_callbacks(ctx->ohci, &list);
}
static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_error("node ID not valid, new bus reset in progress\n");
		return;
	}
	ohci->node_id = reg & 0xffff;

	/* The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs. */

	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
	generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
			fw_error("inconsistent self IDs\n");
		ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
	}

	/* Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs. */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	at_context_stop(&ohci->at_request_ctx);
	at_context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	/* This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place. */

	if (ohci->next_config_rom != NULL) {
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		ohci->config_rom      = ohci->next_config_rom;
		ohci->config_rom_bus  = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/* Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last. */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}
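
/* Top half: acknowledge the interrupt events we saw and kick the
 * matching tasklets.  Isochronous events are demultiplexed bit by
 * bit onto the per-context tasklets. */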
static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event)
		return IRQ_NONE;

	reg_write(ohci, OHCI1394_IntEventClear, event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventSet);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventSet);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].tasklet);
		iso_event &= ~(1 << i);
	}

	return IRQ_HANDLED;
}
*card
, u32
*config_rom
, size_t length
)
811 struct fw_ohci
*ohci
= fw_ohci(card
);
812 struct pci_dev
*dev
= to_pci_dev(card
->device
);
814 /* When the link is not yet enabled, the atomic config rom
815 * update mechanism described below in ohci_set_config_rom()
816 * is not active. We have to update ConfigRomHeader and
817 * BusOptions manually, and the write to ConfigROMmap takes
818 * effect immediately. We tie this to the enabling of the
819 * link, so we have a valid config rom before enabling - the
820 * OHCI requires that ConfigROMhdr and BusOptions have valid
821 * values before enabling.
823 * However, when the ConfigROMmap is written, some controllers
824 * always read back quadlets 0 and 2 from the config rom to
825 * the ConfigRomHeader and BusOptions registers on bus reset.
826 * They shouldn't do that in this initial case where the link
827 * isn't enabled. This means we have to use the same
828 * workaround here, setting the bus header to 0 and then write
829 * the right values in the bus reset tasklet.
832 ohci
->next_config_rom
=
833 dma_alloc_coherent(ohci
->card
.device
, CONFIG_ROM_SIZE
,
834 &ohci
->next_config_rom_bus
, GFP_KERNEL
);
835 if (ohci
->next_config_rom
== NULL
)
838 memset(ohci
->next_config_rom
, 0, CONFIG_ROM_SIZE
);
839 fw_memcpy_to_be32(ohci
->next_config_rom
, config_rom
, length
* 4);
841 ohci
->next_header
= config_rom
[0];
842 ohci
->next_config_rom
[0] = 0;
843 reg_write(ohci
, OHCI1394_ConfigROMhdr
, 0);
844 reg_write(ohci
, OHCI1394_BusOptions
, config_rom
[2]);
845 reg_write(ohci
, OHCI1394_ConfigROMmap
, ohci
->next_config_rom_bus
);
847 reg_write(ohci
, OHCI1394_AsReqFilterHiSet
, 0x80000000);
849 if (request_irq(dev
->irq
, irq_handler
,
850 SA_SHIRQ
, ohci_driver_name
, ohci
)) {
851 fw_error("Failed to allocate shared interrupt %d.\n",
853 dma_free_coherent(ohci
->card
.device
, CONFIG_ROM_SIZE
,
854 ohci
->config_rom
, ohci
->config_rom_bus
);
858 reg_write(ohci
, OHCI1394_HCControlSet
,
859 OHCI1394_HCControl_linkEnable
|
860 OHCI1394_HCControl_BIBimageValid
);
863 /* We are ready to go, initiate bus reset to finish the
866 fw_core_initiate_bus_reset(&ohci
->card
, 1);
static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int retval = 0;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;

	ohci = fw_ohci(card);

	/* When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register.  All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values in to these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet). */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
				  length * 4);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
	} else {
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);
		retval = -EBUSY;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/* Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect. */

	fw_core_initiate_bus_reset(&ohci->card, 1);

	return retval;
}
static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}
static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}
static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int n, retval = 0;
	unsigned long flags;

	/* FIXME: Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset. */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		retval = -ESTALE;
		goto out;
	}

	/* NOTE, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses. */

	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);
	return retval;
}
static void ir_context_tasklet(unsigned long data)
{
	struct iso_context *ctx = (struct iso_context *)data;

	(void)ctx;
}
#define ISO_BUFFER_SIZE (64 * 1024)
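
/* Walk the descriptor ring from the tail, following branch_address
 * links (whose low four bits hold the Z count of the next block), and
 * run the completion callback for each finished block that asked for
 * an interrupt. */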
static void flush_iso_context(struct iso_context *ctx)
{
	struct fw_ohci *ohci = fw_ohci(ctx->base.card);
	struct descriptor *d, *last;
	u32 address;
	int z;

	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
				ISO_BUFFER_SIZE, DMA_TO_DEVICE);

	d    = ctx->tail_descriptor;
	last = ctx->tail_descriptor_last;

	while (last->branch_address != 0 && last->transfer_status != 0) {
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d;
		last = (z == 2) ? d : d + z - 1;

		if (le16_to_cpu(last->control) & descriptor_irq_always)
			ctx->base.callback(&ctx->base,
					   0, le16_to_cpu(last->res_count),
					   ctx->base.callback_data);
	}

	ctx->tail_descriptor = d;
	ctx->tail_descriptor_last = last;
}
static void it_context_tasklet(unsigned long data)
{
	struct iso_context *ctx = (struct iso_context *)data;

	flush_iso_context(ctx);
}
static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
							int type)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx, *list;
	void (*tasklet) (unsigned long data);
	u32 *mask;
	unsigned long flags;
	int index;

	if (type == FW_ISO_CONTEXT_TRANSMIT) {
		mask = &ohci->it_context_mask;
		list = ohci->it_context_list;
		tasklet = it_context_tasklet;
	} else {
		mask = &ohci->ir_context_mask;
		list = ohci->ir_context_list;
		tasklet = ir_context_tasklet;
	}

	spin_lock_irqsave(&ohci->lock, flags);
	index = ffs(*mask) - 1;
	if (index >= 0)
		*mask &= ~(1 << index);
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(-EBUSY);

	ctx = &list[index];
	memset(ctx, 0, sizeof *ctx);
	tasklet_init(&ctx->tasklet, tasklet, (unsigned long)ctx);

	ctx->buffer = kmalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
	if (ctx->buffer == NULL) {
		spin_lock_irqsave(&ohci->lock, flags);
		*mask |= 1 << index;
		spin_unlock_irqrestore(&ohci->lock, flags);
		return ERR_PTR(-ENOMEM);
	}

	ctx->buffer_bus =
		dma_map_single(card->device, ctx->buffer,
			       ISO_BUFFER_SIZE, DMA_TO_DEVICE);

	ctx->head_descriptor      = ctx->buffer;
	ctx->prev_descriptor      = ctx->buffer;
	ctx->tail_descriptor      = ctx->buffer;
	ctx->tail_descriptor_last = ctx->buffer;

	/* We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.  Also, the
	 * ring buffer invariant is that it always has at least one
	 * element so that head == tail means buffer full. */

	memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
	ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
	ctx->head_descriptor++;

	return &ctx->base;
}
static int ohci_send_iso(struct fw_iso_context *base, s32 cycle)
{
	struct iso_context *ctx = (struct iso_context *)base;
	struct fw_ohci *ohci = fw_ohci(ctx->base.card);
	u32 cycle_match = 0;
	int index;

	index = ctx - ohci->it_context_list;
	if (cycle > 0)
		cycle_match = CONTEXT_CYCLE_MATCH_ENABLE |
			(cycle & 0x7fff) << 16;

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
	reg_write(ohci, OHCI1394_IsoXmitCommandPtr(index),
		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
	reg_write(ohci, OHCI1394_IsoXmitContextControlClear(index), ~0);
	reg_write(ohci, OHCI1394_IsoXmitContextControlSet(index),
		  CONTEXT_RUN | cycle_match);
	flush_writes(ohci);

	return 0;
}
static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = (struct iso_context *)base;
	unsigned long flags;
	int index;

	flush_iso_context(ctx);

	spin_lock_irqsave(&ohci->lock, flags);

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitContextControlClear(index), ~0);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
		ohci->it_context_mask |= 1 << index;
	} else {
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRcvContextControlClear(index), ~0);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
		ohci->ir_context_mask |= 1 << index;
	}
	flush_writes(ohci);

	dma_unmap_single(ohci->card.device, ctx->buffer_bus,
			 ISO_BUFFER_SIZE, DMA_TO_DEVICE);
	kfree(ctx->buffer);

	spin_unlock_irqrestore(&ohci->lock, flags);
}
static int
ohci_queue_iso(struct fw_iso_context *base,
	       struct fw_iso_packet *packet, void *payload)
{
	struct iso_context *ctx = (struct iso_context *)base;
	struct fw_ohci *ohci = fw_ohci(ctx->base.card);
	struct descriptor *d, *end, *last, *tail, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int index, page, end_page, i, length, offset;

	/* FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate.. */

	p = packet;
	payload_index = payload - ctx->base.buffer;
	d = ctx->head_descriptor;
	tail = ctx->tail_descriptor;
	end = ctx->buffer + ISO_BUFFER_SIZE / sizeof(struct descriptor);

	if (p->header_length > 0)
		z = 3;
	else
		z = 2;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof *d);

	if (d + z + header_z <= tail) {
		goto has_space;
	} else if (d > tail && d + z + header_z <= end) {
		goto has_space;
	} else if (d > tail && ctx->buffer + z + header_z <= tail) {
		d = ctx->buffer;
		goto has_space;
	}

	/* No space in buffer */
	return -1;

 has_space:
	memset(d, 0, (z + header_z) * sizeof *d);
	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;

	d[0].control   = cpu_to_le16(descriptor_key_immediate);
	d[0].req_count = cpu_to_le16(8);

	header = (__le32 *) &d[1];
	header[0] = cpu_to_le32(it_header_sy(p->sy) |
				it_header_tag(p->tag) |
				it_header_tcode(TCODE_STREAM_DATA) |
				it_header_channel(ctx->base.channel) |
				it_header_speed(ctx->base.speed));
	header[1] =
		cpu_to_le32(it_header_data_length(p->header_length +
						  p->payload_length));

	if (p->header_length > 0) {
		d[2].req_count    = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof *d);
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page               = payload_index >> PAGE_SHIFT;
		offset             = payload_index & ~PAGE_MASK;
		next_page_index    = (page + 1) << PAGE_SHIFT;
		length             =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count    = cpu_to_le16(length);
		pd[i].data_address = cpu_to_le32(ctx->base.pages[page] + offset);

		payload_index += length;
	}

	if (z == 2)
		last = d;
	else
		last = d + z - 1;

	if (p->interrupt)
		irq = descriptor_irq_always;
	else
		irq = descriptor_no_irq;

	last->control = cpu_to_le16(descriptor_output_last |
				    descriptor_status |
				    descriptor_branch_always |
				    irq);

	dma_sync_single_for_device(ohci->card.device, ctx->buffer_bus,
				   ISO_BUFFER_SIZE, DMA_TO_DEVICE);

	ctx->head_descriptor = d + z + header_z;
	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev_descriptor = last;

	index = ctx - ohci->it_context_list;
	reg_write(ohci, OHCI1394_IsoXmitContextControlSet(index), CONTEXT_WAKE);
	flush_writes(ohci);

	return 0;
}
static const struct fw_card_driver ohci_driver = {
	.name			= ohci_driver_name,
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.enable_phys_dma	= ohci_enable_phys_dma,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.send_iso		= ohci_send_iso,
};
static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}
/* ---------- pci subsystem interface ---------- */
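
/* Staged cleanup for pci_probe(): each CLEANUP_* case falls through
 * to the ones below it, so error paths only name the last
 * initialization step that succeeded. */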
enum {
	CLEANUP_SELF_ID,
	CLEANUP_REGISTERS,
	CLEANUP_IOMEM,
	CLEANUP_DISABLE,
	CLEANUP_PUT_CARD,
};

static int cleanup(struct fw_ohci *ohci, int stage, int code)
{
	struct pci_dev *dev = to_pci_dev(ohci->card.device);

	switch (stage) {
	case CLEANUP_SELF_ID:
		dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
				  ohci->self_id_cpu, ohci->self_id_bus);
	case CLEANUP_REGISTERS:
		kfree(ohci->it_context_list);
		kfree(ohci->ir_context_list);
		pci_iounmap(dev, ohci->registers);
	case CLEANUP_IOMEM:
		pci_release_region(dev, 0);
	case CLEANUP_DISABLE:
		pci_disable_device(dev);
	case CLEANUP_PUT_CARD:
		fw_card_put(&ohci->card);
	}

	return code;
}
1347 pci_probe(struct pci_dev
*dev
, const struct pci_device_id
*ent
)
1349 struct fw_ohci
*ohci
;
1350 u32 bus_options
, max_receive
, link_speed
;
1355 ohci
= kzalloc(sizeof *ohci
, GFP_KERNEL
);
1357 fw_error("Could not malloc fw_ohci data.\n");
1361 fw_card_initialize(&ohci
->card
, &ohci_driver
, &dev
->dev
);
1363 if (pci_enable_device(dev
)) {
1364 fw_error("Failed to enable OHCI hardware.\n");
1365 return cleanup(ohci
, CLEANUP_PUT_CARD
, -ENODEV
);
1368 pci_set_master(dev
);
1369 pci_write_config_dword(dev
, OHCI1394_PCI_HCI_Control
, 0);
1370 pci_set_drvdata(dev
, ohci
);
1372 spin_lock_init(&ohci
->lock
);
1374 tasklet_init(&ohci
->bus_reset_tasklet
,
1375 bus_reset_tasklet
, (unsigned long)ohci
);
1377 if (pci_request_region(dev
, 0, ohci_driver_name
)) {
1378 fw_error("MMIO resource unavailable\n");
1379 return cleanup(ohci
, CLEANUP_DISABLE
, -EBUSY
);
1382 ohci
->registers
= pci_iomap(dev
, 0, OHCI1394_REGISTER_SIZE
);
1383 if (ohci
->registers
== NULL
) {
1384 fw_error("Failed to remap registers\n");
1385 return cleanup(ohci
, CLEANUP_IOMEM
, -ENXIO
);
1388 if (software_reset(ohci
)) {
1389 fw_error("Failed to reset ohci card.\n");
1390 return cleanup(ohci
, CLEANUP_REGISTERS
, -EBUSY
);
1393 /* Now enable LPS, which we need in order to start accessing
1394 * most of the registers. In fact, on some cards (ALI M5251),
1395 * accessing registers in the SClk domain without LPS enabled
1396 * will lock up the machine. Wait 50msec to make sure we have
1397 * full link enabled. */
1398 reg_write(ohci
, OHCI1394_HCControlSet
,
1399 OHCI1394_HCControl_LPS
|
1400 OHCI1394_HCControl_postedWriteEnable
);
1404 reg_write(ohci
, OHCI1394_HCControlClear
,
1405 OHCI1394_HCControl_noByteSwapData
);
1407 reg_write(ohci
, OHCI1394_LinkControlSet
,
1408 OHCI1394_LinkControl_rcvSelfID
|
1409 OHCI1394_LinkControl_cycleTimerEnable
|
1410 OHCI1394_LinkControl_cycleMaster
);
1412 ar_context_init(&ohci
->ar_request_ctx
, ohci
,
1413 OHCI1394_AsReqRcvContextControlSet
);
1415 ar_context_init(&ohci
->ar_response_ctx
, ohci
,
1416 OHCI1394_AsRspRcvContextControlSet
);
1418 at_context_init(&ohci
->at_request_ctx
, ohci
,
1419 OHCI1394_AsReqTrContextControlSet
);
1421 at_context_init(&ohci
->at_response_ctx
, ohci
,
1422 OHCI1394_AsRspTrContextControlSet
);
1424 reg_write(ohci
, OHCI1394_ATRetries
,
1425 OHCI1394_MAX_AT_REQ_RETRIES
|
1426 (OHCI1394_MAX_AT_RESP_RETRIES
<< 4) |
1427 (OHCI1394_MAX_PHYS_RESP_RETRIES
<< 8));
1429 reg_write(ohci
, OHCI1394_IsoRecvIntMaskSet
, ~0);
1430 ohci
->it_context_mask
= reg_read(ohci
, OHCI1394_IsoRecvIntMaskSet
);
1431 reg_write(ohci
, OHCI1394_IsoRecvIntMaskClear
, ~0);
1432 size
= sizeof(struct iso_context
) * hweight32(ohci
->it_context_mask
);
1433 ohci
->it_context_list
= kzalloc(size
, GFP_KERNEL
);
1435 reg_write(ohci
, OHCI1394_IsoXmitIntMaskSet
, ~0);
1436 ohci
->ir_context_mask
= reg_read(ohci
, OHCI1394_IsoXmitIntMaskSet
);
1437 reg_write(ohci
, OHCI1394_IsoXmitIntMaskClear
, ~0);
1438 size
= sizeof(struct iso_context
) * hweight32(ohci
->ir_context_mask
);
1439 ohci
->ir_context_list
= kzalloc(size
, GFP_KERNEL
);
1441 if (ohci
->it_context_list
== NULL
|| ohci
->ir_context_list
== NULL
) {
1442 fw_error("Out of memory for it/ir contexts.\n");
1443 return cleanup(ohci
, CLEANUP_REGISTERS
, -ENOMEM
);
1446 /* self-id dma buffer allocation */
1447 ohci
->self_id_cpu
= dma_alloc_coherent(ohci
->card
.device
,
1451 if (ohci
->self_id_cpu
== NULL
) {
1452 fw_error("Out of memory for self ID buffer.\n");
1453 return cleanup(ohci
, CLEANUP_REGISTERS
, -ENOMEM
);
1456 reg_write(ohci
, OHCI1394_SelfIDBuffer
, ohci
->self_id_bus
);
1457 reg_write(ohci
, OHCI1394_PhyUpperBound
, 0x00010000);
1458 reg_write(ohci
, OHCI1394_IntEventClear
, ~0);
1459 reg_write(ohci
, OHCI1394_IntMaskClear
, ~0);
1460 reg_write(ohci
, OHCI1394_IntMaskSet
,
1461 OHCI1394_selfIDComplete
|
1462 OHCI1394_RQPkt
| OHCI1394_RSPkt
|
1463 OHCI1394_reqTxComplete
| OHCI1394_respTxComplete
|
1464 OHCI1394_isochRx
| OHCI1394_isochTx
|
1465 OHCI1394_masterIntEnable
);
1467 bus_options
= reg_read(ohci
, OHCI1394_BusOptions
);
1468 max_receive
= (bus_options
>> 12) & 0xf;
1469 link_speed
= bus_options
& 0x7;
1470 guid
= ((u64
) reg_read(ohci
, OHCI1394_GUIDHi
) << 32) |
1471 reg_read(ohci
, OHCI1394_GUIDLo
);
1473 error_code
= fw_card_add(&ohci
->card
, max_receive
, link_speed
, guid
);
1475 return cleanup(ohci
, CLEANUP_SELF_ID
, error_code
);
1477 fw_notify("Added fw-ohci device %s.\n", dev
->dev
.bus_id
);
static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_masterIntEnable);
	fw_core_remove_card(&ohci->card);

	/* FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more. */

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	cleanup(ohci, CLEANUP_SELF_ID, 0);

	fw_notify("Removed fw-ohci device.\n");
}
static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);
static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
};
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");
static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);