/* -*- c-basic-offset: 8 -*-
 *
 * fw-ohci.c - Driver for OHCI 1394 boards
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/poll.h>
#include <linux/dma-mapping.h>

#include <asm/uaccess.h>
#include <asm/semaphore.h>

#include "fw-transaction.h"
#include "fw-ohci.h"

#define descriptor_output_more		0
#define descriptor_output_last		(1 << 12)
#define descriptor_input_more		(2 << 12)
#define descriptor_input_last		(3 << 12)
#define descriptor_status		(1 << 11)
#define descriptor_key_immediate	(2 << 8)
#define descriptor_ping			(1 << 7)
#define descriptor_yy			(1 << 6)
#define descriptor_no_irq		(0 << 4)
#define descriptor_irq_error		(1 << 4)
#define descriptor_irq_always		(3 << 4)
#define descriptor_branch_always	(3 << 2)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

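/*
 * Note: this mirrors the 16-byte DMA descriptor defined by the OHCI
 * 1394 specification.  Descriptors are chained through branch_address,
 * whose low four bits carry the Z value, i.e. the number of
 * descriptors in the block being branched to.
 */
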
#define control_set(regs)	(regs)
#define control_clear(regs)	((regs) + 4)
#define command_ptr(regs)	((regs) + 12)
#define context_match(regs)	((regs) + 16)

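/*
 * Illustrative only: every DMA context exposes the same small register
 * block at some offset "regs", so kicking off a context looks like
 *
 *	reg_write(ohci, command_ptr(regs), descriptor_bus | z);
 *	reg_write(ohci, control_set(regs), CONTEXT_RUN);
 *
 * as done for the AT contexts in at_context_setup_packet() below.
 */
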
struct ar_buffer {
	struct descriptor descriptor;
	struct ar_buffer *next;
	__le32 data[0];
};

struct ar_context {
	struct fw_ohci *ohci;
	struct ar_buffer *current_buffer;
	struct ar_buffer *last_buffer;
	void *pointer;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct at_context {
	struct fw_ohci *ohci;
	dma_addr_t descriptor_bus;
	dma_addr_t buffer_bus;
	struct fw_packet *current_packet;

	struct list_head list;

	struct {
		struct descriptor more;
		__le32 header[4];
		struct descriptor last;
	} d;

	u32 regs;

	struct tasklet_struct tasklet;
};

#define it_header_sy(v)          ((v) <<  0)
#define it_header_tcode(v)       ((v) <<  4)
#define it_header_channel(v)     ((v) <<  8)
#define it_header_tag(v)         ((v) << 14)
#define it_header_speed(v)       ((v) << 16)
#define it_header_data_length(v) ((v) << 16)

struct iso_context {
	struct fw_iso_context base;
	struct tasklet_struct tasklet;

	struct descriptor *buffer;
	dma_addr_t buffer_bus;
	struct descriptor *head_descriptor;
	struct descriptor *tail_descriptor;
	struct descriptor *tail_descriptor_last;
	struct descriptor *prev_descriptor;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	dma_addr_t self_id_bus;
	__le32 *self_id_cpu;
	struct tasklet_struct bus_reset_tasklet;
	int node_id;
	int generation;
	int request_generation;

	/* Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held. */
	spinlock_t lock;
	u32 self_id_buffer[512];

	/* Config rom buffers */
	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	u32 next_header;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct at_context at_request_ctx;
	struct at_context at_response_ctx;

	u32 it_context_mask;
	struct iso_context *it_context_list;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define CONTEXT_CYCLE_MATCH_ENABLE 0x80000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0x2
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define FW_OHCI_MAJOR			240
#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e

static char ohci_driver_name[] = KBUILD_MODNAME;

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

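/*
 * Note: MMIO writes may be posted; reading back OHCI1394_Version (a
 * side-effect free register) forces queued writes out to the
 * controller.
 */
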
static int
ohci_update_phy_reg(struct fw_card *card, int addr,
		    int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 val, old;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	msleep(2);
	val = reg_read(ohci, OHCI1394_PhyControl);
	if ((val & OHCI1394_PhyControl_ReadDone) == 0) {
		fw_error("failed to set phy reg bits.\n");
		return -EBUSY;
	}

	old = OHCI1394_PhyControl_ReadData(val);
	old = (old & ~clear_bits) | set_bits;
	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, old));

	return 0;
}

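/*
 * The PHY registers are not memory mapped; the helper above goes
 * through the PhyControl register to do an indirect read-modify-write
 * of a single PHY register.
 */
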
static int ar_context_add_page(struct ar_context *ctx)
{
	struct device *dev = ctx->ohci->card.device;
	struct ar_buffer *ab;
	dma_addr_t ab_bus;
	size_t offset;

	ab = (struct ar_buffer *) __get_free_page(GFP_ATOMIC);
	if (ab == NULL)
		return -ENOMEM;

	ab_bus = dma_map_single(dev, ab, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ab_bus)) {
		free_page((unsigned long) ab);
		return -ENOMEM;
	}

	memset(&ab->descriptor, 0, sizeof ab->descriptor);
	ab->descriptor.control        = cpu_to_le16(descriptor_input_more |
						    descriptor_status |
						    descriptor_branch_always);
	offset = offsetof(struct ar_buffer, data);
	ab->descriptor.req_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.data_address   = cpu_to_le32(ab_bus + offset);
	ab->descriptor.res_count      = cpu_to_le16(PAGE_SIZE - offset);
	ab->descriptor.branch_address = 0;

	dma_sync_single_for_device(dev, ab_bus, PAGE_SIZE, DMA_BIDIRECTIONAL);

	ctx->last_buffer->descriptor.branch_address = ab_bus | 1;
	ctx->last_buffer->next = ab;
	ctx->last_buffer = ab;

	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);

	return 0;
}

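/*
 * Each AR buffer is one page: a struct ar_buffer header followed by
 * packet data.  New pages are linked in with "ab_bus | 1", i.e. a Z
 * value of 1, and CONTEXT_WAKE restarts the context in case it had
 * already run off the end of the old list.
 */
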
static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;

	p.header[0] = le32_to_cpu(buffer[0]);
	p.header[1] = le32_to_cpu(buffer[1]);
	p.header[2] = le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = le32_to_cpu(buffer[length]);

	p.ack        = ((status >> 16) & 0x1f) - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	/* The OHCI bus reset handler synthesizes a phy packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3).  This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation.  We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request. */

	if (p.ack + 16 == 0x09)
		ohci->request_generation = (buffer[2] >> 16) & 0xff;
	else if (ctx == &ohci->ar_request_ctx)
		fw_core_handle_request(&ohci->card, &p);
	else
		fw_core_handle_response(&ohci->card, &p);

	return buffer + length + 1;
}

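/*
 * The tasklet below drains completed receive buffers, handing each
 * packet to handle_ar_packet() above; the returned pointer (packet
 * plus its trailing status quadlet) is where the next packet starts.
 */
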
static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct ar_buffer *ab;
	struct descriptor *d;
	void *buffer, *end;

	ab = ctx->current_buffer;
	d = &ab->descriptor;

	if (d->res_count == 0) {
		size_t size, rest, offset;

		/* This descriptor is finished and we may have a
		 * packet split across this and the next buffer. We
		 * reuse the page for reassembling the split packet. */

		offset = offsetof(struct ar_buffer, data);
		dma_unmap_single(ohci->card.device,
				 ab->descriptor.data_address - offset,
				 PAGE_SIZE, DMA_BIDIRECTIONAL);

		buffer = ab;
		ab = ab->next;
		d = &ab->descriptor;
		size = buffer + PAGE_SIZE - ctx->pointer;
		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
		memmove(buffer, ctx->pointer, size);
		memcpy(buffer + size, ab->data, rest);
		ctx->current_buffer = ab;
		ctx->pointer = (void *) ab->data + rest;
		end = buffer + size + rest;

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);

		free_page((unsigned long)buffer);
		ar_context_add_page(ctx);
	} else {
		buffer = ctx->pointer;
		ctx->pointer = end =
			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);

		while (buffer < end)
			buffer = handle_ar_packet(ctx, buffer);
	}
}

static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	struct ar_buffer ab;

	ctx->regs = regs;
	ctx->ohci = ohci;
	ctx->last_buffer = &ab;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	ar_context_add_page(ctx);
	ar_context_add_page(ctx);
	ctx->current_buffer = ab.next;
	ctx->pointer = ctx->current_buffer->data;

	reg_write(ctx->ohci, command_ptr(ctx->regs), ab.descriptor.branch_address);
	reg_write(ctx->ohci, control_set(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);

	return 0;
}

static void
do_packet_callbacks(struct fw_ohci *ohci, struct list_head *list)
{
	struct fw_packet *p, *next;

	list_for_each_entry_safe(p, next, list, link)
		p->callback(p, &ohci->card, p->ack);
}

static void
complete_transmission(struct fw_packet *packet,
		      int ack, struct list_head *list)
{
	list_move_tail(&packet->link, list);
	packet->ack = ack;
}

/* This function prepares the first packet in the context queue for
 * transmission.  Must always be called with the ohci->lock held to
 * ensure proper generation handling and locking around packet queue
 * manipulation. */
static void
at_context_setup_packet(struct at_context *ctx, struct list_head *list)
{
	struct fw_packet *packet;
	struct fw_ohci *ohci = ctx->ohci;
	int z, tcode;

	packet = fw_packet(ctx->list.next);

	memset(&ctx->d, 0, sizeof ctx->d);
	if (packet->payload_length > 0) {
		packet->payload_bus = dma_map_single(ohci->card.device,
						     packet->payload,
						     packet->payload_length,
						     DMA_TO_DEVICE);
		if (dma_mapping_error(packet->payload_bus)) {
			complete_transmission(packet, RCODE_SEND_ERROR, list);
			return;
		}

		ctx->d.more.control      =
			cpu_to_le16(descriptor_output_more |
				    descriptor_key_immediate);
		ctx->d.more.req_count    = cpu_to_le16(packet->header_length);
		ctx->d.more.res_count    = cpu_to_le16(packet->timestamp);
		ctx->d.last.control      =
			cpu_to_le16(descriptor_output_last |
				    descriptor_irq_always |
				    descriptor_branch_always);
		ctx->d.last.req_count    = cpu_to_le16(packet->payload_length);
		ctx->d.last.data_address = cpu_to_le32(packet->payload_bus);
		z = 3;
	} else {
		ctx->d.more.control   =
			cpu_to_le16(descriptor_output_last |
				    descriptor_key_immediate |
				    descriptor_irq_always |
				    descriptor_branch_always);
		ctx->d.more.req_count = cpu_to_le16(packet->header_length);
		ctx->d.more.res_count = cpu_to_le16(packet->timestamp);
		z = 2;
	}

	/* The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.  If header_length is 8, it's a PHY packet, to
	 * which we need to prepend an extra quadlet. */
	if (packet->header_length > 8) {
		ctx->d.header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					       (packet->speed << 16));
		ctx->d.header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					       (packet->header[0] & 0xffff0000));
		ctx->d.header[2] = cpu_to_le32(packet->header[2]);

		tcode = (packet->header[0] >> 4) & 0x0f;
		if (TCODE_IS_BLOCK_PACKET(tcode))
			ctx->d.header[3] = cpu_to_le32(packet->header[3]);
		else
			ctx->d.header[3] = packet->header[3];
	} else {
		ctx->d.header[0] =
			cpu_to_le32((OHCI1394_phy_tcode << 4) |
				    (packet->speed << 16));
		ctx->d.header[1] = cpu_to_le32(packet->header[0]);
		ctx->d.header[2] = cpu_to_le32(packet->header[1]);
		ctx->d.more.req_count = cpu_to_le16(12);
	}

	/* FIXME: Document how the locking works. */
	if (ohci->generation == packet->generation) {
		reg_write(ctx->ohci, command_ptr(ctx->regs),
			  ctx->descriptor_bus | z);
		reg_write(ctx->ohci, control_set(ctx->regs),
			  CONTEXT_RUN | CONTEXT_WAKE);
		ctx->current_packet = packet;
	} else {
		/* We don't return error codes from this function; all
		 * transmission errors are reported through the
		 * callback. */
		complete_transmission(packet, RCODE_GENERATION, list);
	}
}

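/*
 * The AT descriptor program built above is tiny: "more" carries the
 * packet header as immediate data (the header quadlets occupy the
 * 16-byte slot after it) and "last" points at the payload, if any.
 * The Z value written alongside the command pointer (3 with payload,
 * 2 without) tells the controller how many descriptors to fetch.
 */
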
static void at_context_stop(struct at_context *ctx)
{
	u32 reg;

	reg_write(ctx->ohci, control_clear(ctx->regs), CONTEXT_RUN);

	reg = reg_read(ctx->ohci, control_set(ctx->regs));
	if (reg & CONTEXT_ACTIVE)
		fw_notify("Tried to stop context, but it is still active "
			  "(0x%08x).\n", reg);
}

static void at_context_tasklet(unsigned long data)
{
	struct at_context *ctx = (struct at_context *)data;
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet *packet;
	LIST_HEAD(list);
	unsigned long flags;
	int evt;

	spin_lock_irqsave(&ohci->lock, flags);

	packet = fw_packet(ctx->list.next);

	at_context_stop(ctx);

	/* If the head of the list isn't the packet that just got
	 * transmitted, the packet got cancelled before we finished
	 * transmitting it. */
	if (ctx->current_packet != packet)
		goto skip_to_next;

	if (packet->payload_length > 0) {
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);
		evt = le16_to_cpu(ctx->d.last.transfer_status) & 0x1f;
		packet->timestamp = le16_to_cpu(ctx->d.last.res_count);
	} else {
		evt = le16_to_cpu(ctx->d.more.transfer_status) & 0x1f;
		packet->timestamp = le16_to_cpu(ctx->d.more.res_count);
	}

	if (evt < 16) {
		switch (evt) {
		case OHCI1394_evt_timeout:
			/* Async response transmit timed out. */
			complete_transmission(packet, RCODE_CANCELLED, &list);
			break;

		case OHCI1394_evt_flushed:
			/* A flushed packet should give the same error
			 * as when we try to use a stale generation
			 * count. */
			complete_transmission(packet,
					      RCODE_GENERATION, &list);
			break;

		case OHCI1394_evt_missing_ack:
			/* Using a valid (current) generation count,
			 * but the node is not on the bus or not
			 * sending acks. */
			complete_transmission(packet, RCODE_NO_ACK, &list);
			break;

		default:
			complete_transmission(packet, RCODE_SEND_ERROR, &list);
			break;
		}
	} else
		complete_transmission(packet, evt - 16, &list);

 skip_to_next:
	/* If more packets are queued, set up the next one. */
	if (!list_empty(&ctx->list))
		at_context_setup_packet(ctx, &list);

	spin_unlock_irqrestore(&ohci->lock, flags);

	do_packet_callbacks(ohci, &list);
}

static int
at_context_init(struct at_context *ctx, struct fw_ohci *ohci, u32 regs)
{
	INIT_LIST_HEAD(&ctx->list);

	ctx->descriptor_bus =
		dma_map_single(ohci->card.device, &ctx->d,
			       sizeof ctx->d, DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->descriptor_bus))
		return -ENOMEM;

	ctx->regs = regs;
	ctx->ohci = ohci;

	tasklet_init(&ctx->tasklet, at_context_tasklet, (unsigned long)ctx);

	return 0;
}

#define header_get_destination(q)	(((q) >> 16) & 0xffff)
#define header_get_tcode(q)		(((q) >> 4) & 0x0f)
#define header_get_offset_high(q)	(((q) >> 0) & 0xffff)
#define header_get_data_length(q)	(((q) >> 16) & 0xffff)
#define header_get_extended_tcode(q)	(((q) >> 0) & 0xffff)

static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = header_get_tcode(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = header_get_data_length(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = header_get_tcode(packet->header[0]);
	length = header_get_data_length(packet->header[3]);
	payload = packet->payload;
	ext_tcode = header_get_extended_tcode(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000)
		lock_old = cpu_to_be32(reg_read(ohci, OHCI1394_CSRData));
	else
		fw_notify("swap not done yet\n");

	fw_fill_response(&response, packet->header,
			 RCODE_COMPLETE, &lock_old, sizeof lock_old);
 out:
	fw_core_handle_response(&ohci->card, &response);
}

static void
handle_local_request(struct at_context *ctx, struct fw_packet *packet)
{
	u64 offset;
	u32 csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 header_get_offset_high(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}
}

static void
at_context_transmit(struct at_context *ctx, struct fw_packet *packet)
{
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (header_get_destination(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	list_add_tail(&packet->link, &ctx->list);
	if (ctx->list.next == &packet->link)
		at_context_setup_packet(ctx, &list);

	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	do_packet_callbacks(ctx->ohci, &list);
}

static void bus_reset_tasklet(unsigned long data)
{
	struct fw_ohci *ohci = (struct fw_ohci *)data;
	int self_id_count, i, j, reg;
	int generation, new_generation;
	unsigned long flags;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		fw_error("node ID not valid, new bus reset in progress\n");
		return;
	}
	ohci->node_id = reg & 0xffff;

	/* The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs. */

	self_id_count = (reg_read(ohci, OHCI1394_SelfIDCount) >> 3) & 0x3ff;
	generation = (le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1])
			fw_error("inconsistent self IDs\n");
		ohci->self_id_buffer[j] = le32_to_cpu(ohci->self_id_cpu[i]);
	}

	/* Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match we know we have a consistent set
	 * of self IDs. */

	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		fw_notify("recursive bus reset detected, "
			  "discarding self ids\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irqsave(&ohci->lock, flags);

	ohci->generation = generation;
	at_context_stop(&ohci->at_request_ctx);
	at_context_stop(&ohci->at_response_ctx);
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	/* This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place. */

	if (ohci->next_config_rom != NULL) {
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		ohci->config_rom      = ohci->next_config_rom;
		ohci->config_rom_bus  = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/* Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last. */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = cpu_to_be32(ohci->next_header);
		reg_write(ohci, OHCI1394_ConfigROMhdr, ohci->next_header);
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer);
}

static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event)
		return IRQ_NONE;

	reg_write(ohci, OHCI1394_IntEventClear, event);

	if (event & OHCI1394_selfIDComplete)
		tasklet_schedule(&ohci->bus_reset_tasklet);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->ir_context_list[i].tasklet);
		iso_event &= ~(1 << i);
	}

	iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

	while (iso_event) {
		i = ffs(iso_event) - 1;
		tasklet_schedule(&ohci->it_context_list[i].tasklet);
		iso_event &= ~(1 << i);
	}

	return IRQ_HANDLED;
}

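/*
 * ffs() returns the 1-based index of the least significant set bit,
 * so each pass of the loops above dispatches the tasklet for one
 * pending isochronous context and clears its bit.
 */
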
static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct pci_dev *dev = to_pci_dev(card->device);

	/* When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active.  We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately.  We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 from the config rom to
	 * the ConfigRomHeader and BusOptions registers on bus reset.
	 * They shouldn't do that in this initial case where the link
	 * isn't enabled.  This means we have to use the same
	 * workaround here, setting the bus header to 0 and then
	 * writing the right values in the bus reset tasklet. */

	ohci->next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &ohci->next_config_rom_bus, GFP_KERNEL);
	if (ohci->next_config_rom == NULL)
		return -ENOMEM;

	memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
	fw_memcpy_to_be32(ohci->next_config_rom, config_rom, length * 4);

	ohci->next_header = config_rom[0];
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions, config_rom[2]);
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	if (request_irq(dev->irq, irq_handler,
			SA_SHIRQ, ohci_driver_name, ohci)) {
		fw_error("Failed to allocate shared interrupt %d.\n",
			 dev->irq);
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
		return -EIO;
	}

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);
	flush_writes(ohci);

	/* We are ready to go, initiate bus reset to finish the
	 * initialization. */

	fw_core_initiate_bus_reset(&ohci->card, 1);

	return 0;
}

static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	unsigned long flags;
	int retval = 0;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;

	ohci = fw_ohci(card);

	/* When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * shadow register. All automatically and atomically.
	 *
	 * Now, there's a twist to this story.  The automatic load of
	 * ConfigRomHeader and BusOptions doesn't honor the
	 * noByteSwapData bit, so with a be32 config rom, the
	 * controller will load be32 values in to these registers
	 * during the atomic update, even on little endian
	 * architectures.  The workaround we use is to put a 0 in the
	 * header quadlet; 0 is endian agnostic and means that the
	 * config rom isn't ready yet.  In the bus reset tasklet we
	 * then set up the real values for the two registers.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_tasklet). */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;

		memset(ohci->next_config_rom, 0, CONFIG_ROM_SIZE);
		fw_memcpy_to_be32(ohci->next_config_rom, config_rom,
				  length * 4);

		ohci->next_header = config_rom[0];
		ohci->next_config_rom[0] = 0;

		reg_write(ohci, OHCI1394_ConfigROMmap,
			  ohci->next_config_rom_bus);
	} else {
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);
		retval = -EBUSY;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	/* Now initiate a bus reset to have the changes take
	 * effect. We clean up the old config rom memory and DMA
	 * mappings in the bus reset tasklet, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect. */
	if (retval == 0)
		fw_core_initiate_bus_reset(&ohci->card, 1);

	return retval;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}

static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&ohci->lock, flags);

	if (packet->ack == 0) {
		fw_notify("cancelling packet %p (header[0]=%08x)\n",
			  packet, packet->header[0]);

		complete_transmission(packet, RCODE_CANCELLED, &list);
	}

	spin_unlock_irqrestore(&ohci->lock, flags);

	do_packet_callbacks(ohci, &list);

	/* Return success if we actually cancelled something. */
	return list_empty(&list) ? -ENOENT : 0;
}

static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, retval = 0;

	/* FIXME: Make sure this bitmask is cleared when we clear the busReset
	 * interrupt bit.  Clear physReqResourceAllBuses on bus reset. */

	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		retval = -ESTALE;
		goto out;
	}

	/* NOTE, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses. */

	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);

	return retval;
}

static void ir_context_tasklet(unsigned long data)
{
	struct iso_context *ctx = (struct iso_context *)data;

	(void)ctx;
}

#define ISO_BUFFER_SIZE (64 * 1024)

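/*
 * 64 kB of 16-byte descriptors gives each isochronous context room for
 * 4096 descriptor entries, minus the dummy element described in
 * ohci_allocate_iso_context() below.
 */
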
static void flush_iso_context(struct iso_context *ctx)
{
	struct fw_ohci *ohci = fw_ohci(ctx->base.card);
	struct descriptor *d, *last;
	u32 address;
	int z;

	dma_sync_single_for_cpu(ohci->card.device, ctx->buffer_bus,
				ISO_BUFFER_SIZE, DMA_TO_DEVICE);

	d    = ctx->tail_descriptor;
	last = ctx->tail_descriptor_last;

	while (last->branch_address != 0 && last->transfer_status != 0) {
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		d = ctx->buffer + (address - ctx->buffer_bus) / sizeof *d;

		if (z == 2)
			last = d;
		else
			last = d + z - 1;

		if (le16_to_cpu(last->control) & descriptor_irq_always)
			ctx->base.callback(&ctx->base,
					   0, le16_to_cpu(last->res_count),
					   ctx->base.callback_data);
	}

	ctx->tail_descriptor      = d;
	ctx->tail_descriptor_last = last;
}

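/*
 * flush_iso_context() above retires completed descriptor blocks:
 * tail_descriptor/tail_descriptor_last track the last block known to
 * have completed, while head_descriptor/prev_descriptor (advanced in
 * ohci_queue_iso() below) mark where new blocks are appended.
 */
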
static void it_context_tasklet(unsigned long data)
{
	struct iso_context *ctx = (struct iso_context *)data;

	flush_iso_context(ctx);
}

static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
							int type)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *ctx, *list;
	void (*tasklet) (unsigned long data);
	u32 *mask;
	unsigned long flags;
	int index;

	if (type == FW_ISO_CONTEXT_TRANSMIT) {
		mask = &ohci->it_context_mask;
		list = ohci->it_context_list;
		tasklet = it_context_tasklet;
	} else {
		mask = &ohci->ir_context_mask;
		list = ohci->ir_context_list;
		tasklet = ir_context_tasklet;
	}

	spin_lock_irqsave(&ohci->lock, flags);
	index = ffs(*mask) - 1;
	if (index >= 0)
		*mask &= ~(1 << index);
	spin_unlock_irqrestore(&ohci->lock, flags);

	if (index < 0)
		return ERR_PTR(-EBUSY);

	ctx = &list[index];
	memset(ctx, 0, sizeof *ctx);
	tasklet_init(&ctx->tasklet, tasklet, (unsigned long)ctx);

	ctx->buffer = kmalloc(ISO_BUFFER_SIZE, GFP_KERNEL);
	if (ctx->buffer == NULL)
		goto buffer_alloc_failed;

	ctx->buffer_bus =
		dma_map_single(card->device, ctx->buffer,
			       ISO_BUFFER_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(ctx->buffer_bus))
		goto buffer_map_failed;

	ctx->head_descriptor      = ctx->buffer;
	ctx->prev_descriptor      = ctx->buffer;
	ctx->tail_descriptor      = ctx->buffer;
	ctx->tail_descriptor_last = ctx->buffer;

	/* We put a dummy descriptor in the buffer that has a NULL
	 * branch address and looks like it's been sent.  That way we
	 * have a descriptor to append DMA programs to.  Also, the
	 * ring buffer invariant is that it always has at least one
	 * element so that head == tail means buffer full. */

	memset(ctx->head_descriptor, 0, sizeof *ctx->head_descriptor);
	ctx->head_descriptor->control = cpu_to_le16(descriptor_output_last);
	ctx->head_descriptor->transfer_status = cpu_to_le16(0x8011);
	ctx->head_descriptor++;

	return &ctx->base;

 buffer_map_failed:
	kfree(ctx->buffer);
 buffer_alloc_failed:
	spin_lock_irqsave(&ohci->lock, flags);
	*mask |= 1 << index;
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ERR_PTR(-ENOMEM);
}

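/*
 * The it/ir context masks are seeded in pci_probe() from the
 * controller's isochronous interrupt mask registers, so the ffs()
 * above hands out the lowest-numbered free hardware context.
 */
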
static int ohci_send_iso(struct fw_iso_context *base, s32 cycle)
{
	struct iso_context *ctx = (struct iso_context *)base;
	struct fw_ohci *ohci = fw_ohci(ctx->base.card);
	u32 cycle_match = 0;
	int index;

	index = ctx - ohci->it_context_list;
	if (cycle > 0)
		cycle_match = CONTEXT_CYCLE_MATCH_ENABLE |
			(cycle & 0x7fff) << 16;

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
	reg_write(ohci, OHCI1394_IsoXmitCommandPtr(index),
		  le32_to_cpu(ctx->tail_descriptor_last->branch_address));
	reg_write(ohci, OHCI1394_IsoXmitContextControlClear(index), ~0);
	reg_write(ohci, OHCI1394_IsoXmitContextControlSet(index),
		  CONTEXT_RUN | cycle_match);
	flush_writes(ohci);

	return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = (struct iso_context *)base;
	unsigned long flags;
	int index;

	flush_iso_context(ctx);

	spin_lock_irqsave(&ohci->lock, flags);

	if (ctx->base.type == FW_ISO_CONTEXT_TRANSMIT) {
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitContextControlClear(index), ~0);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
		ohci->it_context_mask |= 1 << index;
	} else {
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRcvContextControlClear(index), ~0);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
		ohci->ir_context_mask |= 1 << index;
	}
	flush_writes(ohci);

	dma_unmap_single(ohci->card.device, ctx->buffer_bus,
			 ISO_BUFFER_SIZE, DMA_TO_DEVICE);

	spin_unlock_irqrestore(&ohci->lock, flags);
}

static int
ohci_queue_iso(struct fw_iso_context *base,
	       struct fw_iso_packet *packet, void *payload)
{
	struct iso_context *ctx = (struct iso_context *)base;
	struct fw_ohci *ohci = fw_ohci(ctx->base.card);
	struct descriptor *d, *end, *last, *tail, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int index, page, end_page, i, length, offset;

	/* FIXME: Cycle lost behavior should be configurable: lose
	 * packet, retransmit or terminate. */

	p = packet;
	payload_index = payload - ctx->base.buffer;
	d = ctx->head_descriptor;
	tail = ctx->tail_descriptor;
	end = ctx->buffer + ISO_BUFFER_SIZE / sizeof(struct descriptor);

	if (p->header_length > 0)
		z = 3;
	else
		z = 2;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof *d);

	if (d + z + header_z <= tail) {
		goto has_space;
	} else if (d > tail && d + z + header_z <= end) {
		goto has_space;
	} else if (d > tail && ctx->buffer + z + header_z <= tail) {
		d = ctx->buffer;
		goto has_space;
	}

	/* No space in buffer */
	return -1;

 has_space:
	memset(d, 0, (z + header_z) * sizeof *d);
	d_bus = ctx->buffer_bus + (d - ctx->buffer) * sizeof *d;

	d[0].control   = cpu_to_le16(descriptor_key_immediate);
	d[0].req_count = cpu_to_le16(8);

	header = (__le32 *) &d[1];
	header[0] = cpu_to_le32(it_header_sy(p->sy) |
				it_header_tag(p->tag) |
				it_header_tcode(TCODE_STREAM_DATA) |
				it_header_channel(ctx->base.channel) |
				it_header_speed(ctx->base.speed));
	header[1] =
		cpu_to_le32(it_header_data_length(p->header_length +
						  p->payload_length));

	if (p->header_length > 0) {
		d[2].req_count    = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof *d);
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page               = payload_index >> PAGE_SHIFT;
		offset             = payload_index & ~PAGE_MASK;
		next_page_index    = (page + 1) << PAGE_SHIFT;
		length             =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count    = cpu_to_le16(length);
		pd[i].data_address = cpu_to_le32(ctx->base.pages[page] + offset);

		payload_index += length;
	}

	if (z == 2)
		last = d;
	else
		last = d + z - 1;

	if (p->interrupt)
		irq = descriptor_irq_always;
	else
		irq = descriptor_no_irq;

	last->control |= cpu_to_le16(descriptor_output_last |
				     descriptor_status |
				     descriptor_branch_always |
				     irq);

	dma_sync_single_for_device(ohci->card.device, ctx->buffer_bus,
				   ISO_BUFFER_SIZE, DMA_TO_DEVICE);

	ctx->head_descriptor = d + z + header_z;
	ctx->prev_descriptor->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev_descriptor = last;

	index = ctx - ohci->it_context_list;
	reg_write(ohci, OHCI1394_IsoXmitContextControlSet(index), CONTEXT_WAKE);
	flush_writes(ohci);

	return 0;
}

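/*
 * Linking the new block into prev_descriptor->branch_address (with Z
 * in the low bits) and then setting CONTEXT_WAKE covers the case where
 * the context had already run off the end of the old program.
 */
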
static const struct fw_card_driver ohci_driver = {
	.name			= ohci_driver_name,
	.enable			= ohci_enable,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.queue_iso		= ohci_queue_iso,
	.send_iso		= ohci_send_iso,
};

static int software_reset(struct fw_ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		if ((reg_read(ohci, OHCI1394_HCControlSet) &
		     OHCI1394_HCControl_softReset) == 0)
			return 0;
		msleep(1);
	}

	return -EBUSY;
}

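/*
 * The softReset bit self-clears once the controller has finished
 * resetting, so software_reset() polls it up to OHCI_LOOP_COUNT times
 * before giving up.
 */
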
/* ---------- pci subsystem interface ---------- */

enum {
	CLEANUP_SELF_ID,
	CLEANUP_REGISTERS,
	CLEANUP_IOMEM,
	CLEANUP_DISABLE,
	CLEANUP_PUT_CARD,
};

static int cleanup(struct fw_ohci *ohci, int stage, int code)
{
	struct pci_dev *dev = to_pci_dev(ohci->card.device);

	/* The switch intentionally falls through: entering at a given
	 * stage unwinds that stage and everything set up before it. */
	switch (stage) {
	case CLEANUP_SELF_ID:
		dma_free_coherent(ohci->card.device, SELF_ID_BUF_SIZE,
				  ohci->self_id_cpu, ohci->self_id_bus);
	case CLEANUP_REGISTERS:
		kfree(ohci->it_context_list);
		kfree(ohci->ir_context_list);
		pci_iounmap(dev, ohci->registers);
	case CLEANUP_IOMEM:
		pci_release_region(dev, 0);
	case CLEANUP_DISABLE:
		pci_disable_device(dev);
	case CLEANUP_PUT_CARD:
		fw_card_put(&ohci->card);
	}

	return code;
}

static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed;
	u64 guid;
	int error_code;
	size_t size;

	ohci = kzalloc(sizeof *ohci, GFP_KERNEL);
	if (ohci == NULL) {
		fw_error("Could not malloc fw_ohci data.\n");
		return -ENOMEM;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	if (pci_enable_device(dev)) {
		fw_error("Failed to enable OHCI hardware.\n");
		return cleanup(ohci, CLEANUP_PUT_CARD, -ENODEV);
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);

	tasklet_init(&ohci->bus_reset_tasklet,
		     bus_reset_tasklet, (unsigned long)ohci);

	if (pci_request_region(dev, 0, ohci_driver_name)) {
		fw_error("MMIO resource unavailable\n");
		return cleanup(ohci, CLEANUP_DISABLE, -EBUSY);
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		fw_error("Failed to remap registers\n");
		return cleanup(ohci, CLEANUP_IOMEM, -ENXIO);
	}

	if (software_reset(ohci)) {
		fw_error("Failed to reset ohci card.\n");
		return cleanup(ohci, CLEANUP_REGISTERS, -EBUSY);
	}

	/* Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled. */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);
	msleep(50);

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	ar_context_init(&ohci->ar_request_ctx, ohci,
			OHCI1394_AsReqRcvContextControlSet);

	ar_context_init(&ohci->ar_response_ctx, ohci,
			OHCI1394_AsRspRcvContextControlSet);

	at_context_init(&ohci->at_request_ctx, ohci,
			OHCI1394_AsReqTrContextControlSet);

	at_context_init(&ohci->at_response_ctx, ohci,
			OHCI1394_AsRspTrContextControlSet);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8));

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		fw_error("Out of memory for it/ir contexts.\n");
		return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM);
	}

	/* self-id dma buffer allocation */
	ohci->self_id_cpu = dma_alloc_coherent(ohci->card.device,
					       SELF_ID_BUF_SIZE,
					       &ohci->self_id_bus,
					       GFP_KERNEL);
	if (ohci->self_id_cpu == NULL) {
		fw_error("Out of memory for self ID buffer.\n");
		return cleanup(ohci, CLEANUP_REGISTERS, -ENOMEM);
	}

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskSet,
		  OHCI1394_selfIDComplete |
		  OHCI1394_RQPkt | OHCI1394_RSPkt |
		  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		  OHCI1394_isochRx | OHCI1394_isochTx |
		  OHCI1394_masterIntEnable);

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	error_code = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (error_code < 0)
		return cleanup(ohci, CLEANUP_SELF_ID, error_code);

	fw_notify("Added fw-ohci device %s.\n", dev->dev.bus_id);

	return 0;
}

static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci;

	ohci = pci_get_drvdata(dev);
	reg_write(ohci, OHCI1394_IntMaskClear, OHCI1394_masterIntEnable);
	fw_core_remove_card(&ohci->card);

	/* FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more. */

	software_reset(ohci);
	free_irq(dev->irq, ohci);
	cleanup(ohci, CLEANUP_SELF_ID, 0);

	fw_notify("Removed fw-ohci device.\n");
}

static struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

static int __init fw_ohci_init(void)
{
	return pci_register_driver(&fw_ohci_pci_driver);
}

static void __exit fw_ohci_cleanup(void)
{
	pci_unregister_driver(&fw_ohci_pci_driver);
}

module_init(fw_ohci_init);
module_exit(fw_ohci_cleanup);