/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>

#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"

#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));

#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

#define AR_BUFFER_SIZE	(32*1024)
#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD	4096
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)

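/*
 * An AR context receives packets into a ring of AR_BUFFERS pages.
 * ar_context_init() maps the first AR_WRAPAROUND_PAGES of those pages a
 * second time directly behind the ring (via vm_map_ram()), so a packet
 * whose DMA data wraps around the end of the ring can still be parsed
 * as one contiguous buffer.
 */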
struct ar_context {
	struct fw_ohci *ohci;
	struct page *pages[AR_BUFFERS];
	void *buffer;
	struct descriptor *descriptors;
	dma_addr_t descriptors_bus;
	void *pointer;
	unsigned int last_buffer_index;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;
	bool running;
	bool flushing;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Head of list contains buffers in use and tail of list contains
	 * free buffers.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the device.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor in the DMA program. It contains the branch
	 * address that must be updated upon appending a new descriptor.
	 */
	struct descriptor *prev;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	int excess_bytes;
	void *header;
	size_t header_length;

	u8 sync;
	u8 tags;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests */
	unsigned quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool is_root;
	bool csr_state_setclear_abdicate;
	int n_ir;
	int n_it;
	/*
	 * Spinlock for accessing fw_ohci data. Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	void *misc_buffer;
	dma_addr_t misc_buffer_bus;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_support;
	u32 it_context_mask;     /* unoccupied IT contexts */
	struct iso_context *it_context_list;
	u64 ir_context_channels; /* unoccupied channels */
	u32 ir_context_support;
	u32 ir_context_mask;     /* unoccupied IR contexts */
	struct iso_context *ir_context_list;
	u64 mc_channels; /* channels in use by the multichannel IR context */
	bool mc_allocated;

	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	__le32 *self_id_cpu;
	dma_addr_t self_id_bus;
	struct tasklet_struct bus_reset_tasklet;

	u32 self_id_buffer[512];
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI_LOOP_COUNT			500
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009

#define QUIRK_CYCLE_TIMER	1
#define QUIRK_RESET_PACKET	2
#define QUIRK_BE_HEADERS	4
#define QUIRK_NO_1394A		8
#define QUIRK_NO_MSI		16

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};

/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianess = "	__stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
	", disable MSI = "		__stringify(QUIRK_NO_MSI)
	")");

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8 /* only effective before chip init */

#ifdef CONFIG_FIREWIRE_OHCI_DEBUG

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");

static void log_irqs(u32 evt)
{
	if (likely(!(param_debug &
			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
		return;

	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
	    !(evt & OHCI1394_busReset))
		return;

	fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
	    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
	    evt & OHCI1394_RQPkt		? " AR_req"		: "",
	    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
	    evt & OHCI1394_reqTxComplete	? " AT_req"		: "",
	    evt & OHCI1394_respTxComplete	? " AT_resp"		: "",
	    evt & OHCI1394_isochRx		? " IR"			: "",
	    evt & OHCI1394_isochTx		? " IT"			: "",
	    evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
	    evt & OHCI1394_cycleTooLong		? " cycleTooLong"	: "",
	    evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
	    evt & OHCI1394_cycleInconsistent	? " cycleInconsistent"	: "",
	    evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
	    evt & OHCI1394_unrecoverableError	? " unrecoverableError"	: "",
	    evt & OHCI1394_busReset		? " busReset"		: "",
	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
		    OHCI1394_respTxComplete | OHCI1394_isochRx |
		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
		    OHCI1394_cycleInconsistent |
		    OHCI1394_regAccessFail | OHCI1394_busReset)
						? " ?"			: "");
}

static const char *speed[] = {
	[0] = "S100", [1] = "S200", [2] = "S400", [3] = "beta",
};
static const char *power[] = {
	[0] = "+0W", [1] = "+15W", [2] = "+30W", [3] = "+45W",
	[4] = "-3W", [5] = " ?W", [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };

static char _p(u32 *s, int shift)
{
	return port[*s >> shift & 3];
}

static void log_selfids(int node_id, int generation, int self_id_count, u32 *s)
{
	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	fw_notify("%d selfIDs, generation %d, local node ID %04x\n",
		  self_id_count, generation, node_id);

	for (; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			fw_notify("selfID 0: %08x, phy %d [%c%c%c] "
			    "%s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			fw_notify("selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    *s, *s >> 24 & 63,
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s, 8), _p(s, 6), _p(s, 4), _p(s, 2));
}

static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};

static void log_ar_at_event(char dir, int speed, u32 *header, int evt)
{
	int tcode = header[0] >> 4 & 0xf;
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		fw_notify("A%c evt_bus_reset, generation %d\n",
		    dir, (header[2] >> 16) & 0xff);
		return;
	}

	switch (tcode) {
	case 0x0: case 0x6: case 0x8:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
		snprintf(specific, sizeof(specific), " %x,%x",
			 header[3] >> 16, header[3] & 0xffff);
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case 0xa:
		fw_notify("A%c %s, %s\n", dir, evts[evt], tcodes[tcode]);
		break;
	case 0xe:
		fw_notify("A%c %s, PHY %08x %08x\n",
			  dir, evts[evt], header[1], header[2]);
		break;
	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s, %04x%08x%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], header[1] & 0xffff, header[2], specific);
		break;
	default:
		fw_notify("A%c spd %x tl %02x, "
		    "%04x -> %04x, %s, "
		    "%s%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], specific);
	}
}

#else

#define param_debug 0
static inline void log_irqs(u32 evt) {}
static inline void log_selfids(int node_id, int generation, int self_id_count, u32 *s) {}
static inline void log_ar_at_event(char dir, int speed, u32 *header, int evt) {}

#endif /* CONFIG_FIREWIRE_OHCI_DEBUG */

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting. Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	fw_error("failed to read phy reg\n");

	return -EBUSY;
}

static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	fw_error("failed to write phy reg\n");

	return -EBUSY;
}

static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);
	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}

static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = read_phy_reg(ohci, addr);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
	return page_private(ctx->pages[i]);
}

static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	d->branch_address &= cpu_to_le32(~0xf);
	d->res_count = cpu_to_le16(PAGE_SIZE);
	d->transfer_status = 0;

	wmb(); /* finish init of new descriptors before branch_address update */
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}

static void ar_context_release(struct ar_context *ctx)
{
	unsigned int i;

	if (ctx->buffer)
		vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);

	for (i = 0; i < AR_BUFFERS; i++)
		if (ctx->pages[i]) {
			dma_unmap_page(ctx->ohci->card.device,
				       ar_buffer_bus(ctx, i),
				       PAGE_SIZE, DMA_FROM_DEVICE);
			__free_page(ctx->pages[i]);
		}
}

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
	if (reg_read(ctx->ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
		reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
		flush_writes(ctx->ohci);

		fw_error("AR error: %s; DMA stopped\n", error_msg);
	}
	/* FIXME: restart? */
}

static inline unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}

static inline unsigned int ar_prev_buffer_index(unsigned int index)
{
	return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
}

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
	return ar_next_buffer_index(ctx->last_buffer_index);
}

/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i, next_i, last = ctx->last_buffer_index;
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
		next_res_count = ACCESS_ONCE(
				ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at this
		 * descriptor.
		 */
		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
			/*
			 * The exception is when the DMA data for one packet is
			 * split over three buffers; in this case, the middle
			 * buffer's descriptor might be never updated by the
			 * controller and look still empty, and we have to peek
			 * at the third one.
			 */
			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
				next_i = ar_next_buffer_index(next_i);
				rmb();
				next_res_count = ACCESS_ONCE(
					ctx->descriptors[next_i].res_count);
				if (next_res_count != cpu_to_le16(PAGE_SIZE))
					goto next_buffer_is_active;
			}

			break;
		}

next_buffer_is_active:
		i = next_i;
		res_count = next_res_count;
	}

	rmb(); /* read res_count before the DMA data */

	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	if (*buffer_offset > PAGE_SIZE) {
		*buffer_offset = 0;
		ar_context_abort(ctx, "corrupted descriptor");
	}

	return i;
}

static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
				    unsigned int end_buffer_index,
				    unsigned int end_buffer_offset)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer_index) {
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					PAGE_SIZE, DMA_FROM_DEVICE);
		i = ar_next_buffer_index(i);
	}
	if (end_buffer_offset > 0)
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					end_buffer_offset, DMA_FROM_DEVICE);
}

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST :
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
			ar_context_abort(ctx, "invalid packet length");
			return NULL;
		}
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		ar_context_abort(ctx, "invalid tcode");
		return NULL;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt = (status >> 16) & 0x1f;

	p.ack = evt - 16;
	p.speed = (status >> 21) & 0x7;
	p.timestamp = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event('R', p.speed, p.header, evt);

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status &&
	    (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * section 8.4.2.3). This helps us determine when a request
	 * was received and make sure we send the response in the same
	 * generation. We only need this for requests; for responses
	 * we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation. We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_tasklet).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}

static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
	void *next;

	while (p < end) {
		next = handle_ar_packet(ctx, p);
		if (!next)
			return p;
		p = next;
	}

	return p;
}

static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer) {
		dma_sync_single_for_device(ctx->ohci->card.device,
					   ar_buffer_bus(ctx, i),
					   PAGE_SIZE, DMA_FROM_DEVICE);
		ar_context_link_page(ctx, i);
		i = ar_next_buffer_index(i);
	}
}

static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	unsigned int end_buffer_index, end_buffer_offset;
	void *p, *end;

	p = ctx->pointer;
	if (!p)
		return;

	end_buffer_index = ar_search_last_active_buffer(ctx,
							&end_buffer_offset);
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		/*
		 * The filled part of the overall buffer wraps around; handle
		 * all packets up to the buffer end here. If the last packet
		 * wraps around, its tail will be visible after the buffer end
		 * because the buffer start pages are mapped there again.
		 */
		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
		p = handle_ar_packets(ctx, p, buffer_end);
		if (p < buffer_end)
			goto error;
		/* adjust p to point back into the actual buffer */
		p -= AR_BUFFERS * PAGE_SIZE;
	}

	p = handle_ar_packets(ctx, p, end);
	if (p != end) {
		if (p > end)
			ar_context_abort(ctx, "inconsistent descriptor");
		goto error;
	}

	ctx->pointer = p;
	ar_recycle_buffers(ctx, end_buffer_index);

	return;

error:
	ctx->pointer = NULL;
}

static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
			   unsigned int descriptors_offset, u32 regs)
{
	unsigned int i;
	dma_addr_t dma_addr;
	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
	struct descriptor *d;

	ctx->regs = regs;
	ctx->ohci = ohci;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	for (i = 0; i < AR_BUFFERS; i++) {
		ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
		if (!ctx->pages[i])
			goto out_of_memory;
		dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(ohci->card.device, dma_addr)) {
			__free_page(ctx->pages[i]);
			ctx->pages[i] = NULL;
			goto out_of_memory;
		}
		set_page_private(ctx->pages[i], dma_addr);
	}

	for (i = 0; i < AR_BUFFERS; i++)
		pages[i] = ctx->pages[i];
	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
		pages[AR_BUFFERS + i] = ctx->pages[i];
	ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
				 -1, PAGE_KERNEL);
	if (!ctx->buffer)
		goto out_of_memory;

	ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;

	for (i = 0; i < AR_BUFFERS; i++) {
		d = &ctx->descriptors[i];
		d->req_count = cpu_to_le16(PAGE_SIZE);
		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					 DESCRIPTOR_STATUS |
					 DESCRIPTOR_BRANCH_ALWAYS);
		d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i));
		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
			ar_next_buffer_index(i) * sizeof(struct descriptor));
	}

	return 0;

out_of_memory:
	ar_context_release(ctx);

	return -ENOMEM;
}

static void ar_context_run(struct ar_context *ctx)
{
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++)
		ar_context_link_page(ctx, i);

	ctx->pointer = ctx->buffer;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
	flush_writes(ctx->ohci);
}

static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	int b, key;

	b   = (le16_to_cpu(d->control) & DESCRIPTOR_BRANCH_ALWAYS) >> 2;
	key = (le16_to_cpu(d->control) & DESCRIPTOR_KEY_IMMEDIATE) >> 8;

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && (b == 3 || key == 2))
		return d;
	else
		return d + z - 1;
}

static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;

		/* If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer. */
		if (address < desc->buffer_bus ||
				address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}

1062
fe5ca634
DM
1063/*
1064 * Allocate a new buffer and add it to the list of free buffers for this
1065 * context. Must be called with ohci->lock held.
1066 */
53dca511 1067static int context_add_buffer(struct context *ctx)
fe5ca634
DM
1068{
1069 struct descriptor_buffer *desc;
f5101d58 1070 dma_addr_t uninitialized_var(bus_addr);
fe5ca634
DM
1071 int offset;
1072
1073 /*
1074 * 16MB of descriptors should be far more than enough for any DMA
1075 * program. This will catch run-away userspace or DoS attacks.
1076 */
1077 if (ctx->total_allocation >= 16*1024*1024)
1078 return -ENOMEM;
1079
1080 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
1081 &bus_addr, GFP_ATOMIC);
1082 if (!desc)
1083 return -ENOMEM;
1084
1085 offset = (void *)&desc->buffer - (void *)desc;
1086 desc->buffer_size = PAGE_SIZE - offset;
1087 desc->buffer_bus = bus_addr + offset;
1088 desc->used = 0;
1089
1090 list_add_tail(&desc->list, &ctx->buffer_list);
1091 ctx->total_allocation += PAGE_SIZE;
1092
1093 return 0;
1094}
1095
53dca511
SR
1096static int context_init(struct context *ctx, struct fw_ohci *ohci,
1097 u32 regs, descriptor_callback_t callback)
30200739
KH
1098{
1099 ctx->ohci = ohci;
1100 ctx->regs = regs;
fe5ca634
DM
1101 ctx->total_allocation = 0;
1102
1103 INIT_LIST_HEAD(&ctx->buffer_list);
1104 if (context_add_buffer(ctx) < 0)
30200739
KH
1105 return -ENOMEM;
1106
fe5ca634
DM
1107 ctx->buffer_tail = list_entry(ctx->buffer_list.next,
1108 struct descriptor_buffer, list);
1109
30200739
KH
1110 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
1111 ctx->callback = callback;
1112
c781c06d
KH
1113 /*
1114 * We put a dummy descriptor in the buffer that has a NULL
30200739 1115 * branch address and looks like it's been sent. That way we
fe5ca634 1116 * have a descriptor to append DMA programs to.
c781c06d 1117 */
fe5ca634
DM
1118 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
1119 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
1120 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
1121 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
1122 ctx->last = ctx->buffer_tail->buffer;
1123 ctx->prev = ctx->buffer_tail->buffer;
30200739
KH
1124
1125 return 0;
1126}
1127
53dca511 1128static void context_release(struct context *ctx)
30200739
KH
1129{
1130 struct fw_card *card = &ctx->ohci->card;
fe5ca634 1131 struct descriptor_buffer *desc, *tmp;
30200739 1132
fe5ca634
DM
1133 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
1134 dma_free_coherent(card->device, PAGE_SIZE, desc,
1135 desc->buffer_bus -
1136 ((void *)&desc->buffer - (void *)desc));
30200739
KH
1137}
1138
fe5ca634 1139/* Must be called with ohci->lock held */
53dca511
SR
1140static struct descriptor *context_get_descriptors(struct context *ctx,
1141 int z, dma_addr_t *d_bus)
30200739 1142{
fe5ca634
DM
1143 struct descriptor *d = NULL;
1144 struct descriptor_buffer *desc = ctx->buffer_tail;
1145
1146 if (z * sizeof(*d) > desc->buffer_size)
1147 return NULL;
1148
1149 if (z * sizeof(*d) > desc->buffer_size - desc->used) {
1150 /* No room for the descriptor in this buffer, so advance to the
1151 * next one. */
30200739 1152
fe5ca634
DM
1153 if (desc->list.next == &ctx->buffer_list) {
1154 /* If there is no free buffer next in the list,
1155 * allocate one. */
1156 if (context_add_buffer(ctx) < 0)
1157 return NULL;
1158 }
1159 desc = list_entry(desc->list.next,
1160 struct descriptor_buffer, list);
1161 ctx->buffer_tail = desc;
1162 }
30200739 1163
fe5ca634 1164 d = desc->buffer + desc->used / sizeof(*d);
2d826cc5 1165 memset(d, 0, z * sizeof(*d));
fe5ca634 1166 *d_bus = desc->buffer_bus + desc->used;
30200739
KH
1167
1168 return d;
1169}
1170
static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	ctx->running = true;
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb(); /* finish init of new descriptors before branch_address update */
	ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	ctx->prev = find_branch_descriptor(d, z);

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	flush_writes(ctx->ohci);
}

static void context_stop(struct context *ctx)
{
	u32 reg;
	int i;

	reg_write(ctx->ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	ctx->running = false;
	flush_writes(ctx->ohci);

	for (i = 0; i < 10; i++) {
		reg = reg_read(ctx->ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		mdelay(1);
	}
	fw_error("Error: DMA context still active (0x%08x)\n", reg);
}

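/*
 * Per-packet state for an AT DMA program, stored in the otherwise unused
 * fourth descriptor of the program. Payloads that fit into inline_data
 * are copied into that descriptor by at_context_queue_packet() instead of
 * being DMA-mapped separately.
 */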
struct driver_data {
	u8 inline_data[8];
	struct fw_packet *packet;
};

/*
 * This function appends a packet to the DMA queue for transmission.
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct context *ctx,
				   struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, uninitialized_var(payload_bus);
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE1394 layout, so shift the fields around
	 * accordingly.
	 */

	tcode = (packet->header[0] >> 4) & 0x0f;
	header = (__le32 *) &d[1];
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;

	case TCODE_LINK_INTERNAL:
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[1]);
		header[2] = cpu_to_le32(packet->header[2]);
		d[0].req_count = cpu_to_le16(12);

		if (is_ping_packet(&packet->header[1]))
			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
		break;

	case TCODE_STREAM_DATA:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		/* BUG(); */
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		if (packet->payload_length > sizeof(driver_data->inline_data)) {
			payload_bus = dma_map_single(ohci->card.device,
						     packet->payload,
						     packet->payload_length,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(ohci->card.device, payload_bus)) {
				packet->ack = RCODE_SEND_ERROR;
				return -1;
			}
			packet->payload_bus = payload_bus;
			packet->payload_mapped = true;
		} else {
			memcpy(driver_data->inline_data, packet->payload,
			       packet->payload_length);
			payload_bus = d_bus + 3 * sizeof(*d);
		}

		d[2].req_count = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/* FIXME: Document how the locking works. */
	if (ohci->generation != packet->generation) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	if (!ctx->running)
		context_run(ctx, 0);

	return 0;
}

static void at_context_flush(struct context *ctx)
{
	tasklet_disable(&ctx->tasklet);

	ctx->flushing = true;
	context_tasklet((unsigned long)ctx);
	ctx->flushing = false;

	tasklet_enable(&ctx->tasklet);
}

static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0 && !context->flushing)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event('T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * A packet that was flushed should give the same error as
		 * when we try to use a stale generation count.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		if (context->flushing)
			packet->ack = RCODE_GENERATION;
		else {
			/*
			 * Using a valid (current) generation count, but the
			 * node is not on the bus or not sending acks.
			 */
			packet->ack = RCODE_NO_ACK;
		}
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	case OHCI1394_evt_no_status:
		if (context->flushing) {
			packet->ack = RCODE_GENERATION;
			break;
		}
		/* fall through */

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel, try;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	for (try = 0; try < 20; try++)
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
			lock_old = cpu_to_be32(reg_read(ohci,
							OHCI1394_CSRData));
			fw_fill_response(&response, packet->header,
					 RCODE_COMPLETE,
					 &lock_old, sizeof(lock_old));
			goto out;
		}

	fw_error("swap not done (CSR lock timeout)\n");
	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

 out:
	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset, csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (ret < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);
}

static void detect_dead_context(struct fw_ohci *ohci,
				const char *name, unsigned int regs)
{
	u32 ctl;

	ctl = reg_read(ohci, CONTROL_SET(regs));
	if (ctl & CONTEXT_DEAD) {
#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
		fw_error("DMA context %s has stopped, error code: %s\n",
			 name, evts[ctl & 0x1f]);
#else
		fw_error("DMA context %s has stopped, error code: %#x\n",
			 name, ctl & 0x1f);
#endif
	}
}

static void handle_dead_contexts(struct fw_ohci *ohci)
{
	unsigned int i;
	char name[8];

	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
	for (i = 0; i < 32; ++i) {
		if (!(ohci->it_context_support & (1 << i)))
			continue;
		sprintf(name, "IT%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
	}
	for (i = 0; i < 32; ++i) {
		if (!(ohci->ir_context_support & (1 << i)))
			continue;
		sprintf(name, "IR%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
	}
	/* TODO: maybe try to flush and restart the dead contexts */
}

a48777e0
CL
1633static u32 cycle_timer_ticks(u32 cycle_timer)
1634{
1635 u32 ticks;
1636
1637 ticks = cycle_timer & 0xfff;
1638 ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
1639 ticks += (3072 * 8000) * (cycle_timer >> 25);
1640
1641 return ticks;
1642}
1643
1644/*
1645 * Some controllers exhibit one or more of the following bugs when updating the
1646 * iso cycle timer register:
1647 * - When the lowest six bits are wrapping around to zero, a read that happens
1648 * at the same time will return garbage in the lowest ten bits.
1649 * - When the cycleOffset field wraps around to zero, the cycleCount field is
1650 * not incremented for about 60 ns.
1651 * - Occasionally, the entire register reads zero.
1652 *
1653 * To catch these, we read the register three times and ensure that the
1654 * difference between each two consecutive reads is approximately the same, i.e.
1655 * less than twice the other. Furthermore, any negative difference indicates an
1656 * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
1657 * execute, so we have enough precision to compute the ratio of the differences.)
1658 */
1659static u32 get_cycle_time(struct fw_ohci *ohci)
1660{
1661 u32 c0, c1, c2;
1662 u32 t0, t1, t2;
1663 s32 diff01, diff12;
1664 int i;
1665
1666 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1667
1668 if (ohci->quirks & QUIRK_CYCLE_TIMER) {
1669 i = 0;
1670 c1 = c2;
1671 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1672 do {
1673 c0 = c1;
1674 c1 = c2;
1675 c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
1676 t0 = cycle_timer_ticks(c0);
1677 t1 = cycle_timer_ticks(c1);
1678 t2 = cycle_timer_ticks(c2);
1679 diff01 = t1 - t0;
1680 diff12 = t2 - t1;
1681 } while ((diff01 <= 0 || diff12 <= 0 ||
1682 diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
1683 && i++ < 20);
1684 }
1685
1686 return c2;
1687}
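/*
 * Editor's note -- an illustrative worked example (not part of the driver)
 * of the conversion done by cycle_timer_ticks() above, assuming the field
 * layout implied by its shifts: cycleOffset in bits 0..11, cycleCount in
 * bits 12..24, cycleSeconds in bits 25..31.  A raw value of 0x04005064
 * decodes to seconds = 2, count = 5, offset = 100, i.e.
 *
 *   ticks = 2 * (3072 * 8000) + 5 * 3072 + 100 = 49167460
 *
 * ticks of the 24.576 MHz clock.  get_cycle_time() requires two such
 * consecutive differences to be positive and within a factor of two of
 * each other, retrying up to 20 times to filter out the buggy readings
 * listed above.
 */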
1688
1689/*
1690 * This function has to be called at least every 64 seconds. The bus_time
1691 * field stores not only the upper 25 bits of the BUS_TIME register but also
1692 * the most significant bit of the cycle timer in bit 6 so that we can detect
1693 * changes in this bit.
1694 */
1695static u32 update_bus_time(struct fw_ohci *ohci)
1696{
1697 u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;
1698
1699 if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
1700 ohci->bus_time += 0x40;
1701
1702 return ohci->bus_time | cycle_time_seconds;
1703}
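/*
 * Editor's note, illustrating the scheme above: ohci->bus_time is kept as
 * a multiple of 0x40 whose bit 6 mirrors the most significant bit of the
 * 7-bit cycleSeconds value observed on the previous call.  When
 * cycleSeconds crosses a 64-second boundary (e.g. 0x3f -> 0x40, or the
 * wrap 0x7f -> 0x00), the stored bit no longer matches and 0x40 is added,
 * so bus_time advances by 64 for every 64 seconds of cycle timer time;
 * the OR with the current cycleSeconds then yields the full seconds
 * count.  If the function ran less often, a crossing could be missed and
 * bus_time would fall behind, hence the 64-second requirement.
 */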
1704
ed568912
KH
1705static void bus_reset_tasklet(unsigned long data)
1706{
1707 struct fw_ohci *ohci = (struct fw_ohci *)data;
e636fe25 1708 int self_id_count, i, j, reg;
ed568912
KH
1709 int generation, new_generation;
1710 unsigned long flags;
4eaff7d6
SR
1711 void *free_rom = NULL;
1712 dma_addr_t free_rom_bus = 0;
4ffb7a6a 1713 bool is_new_root;
ed568912
KH
1714
1715 reg = reg_read(ohci, OHCI1394_NodeID);
1716 if (!(reg & OHCI1394_NodeID_idValid)) {
02ff8f8e 1717 fw_notify("node ID not valid, new bus reset in progress\n");
ed568912
KH
1718 return;
1719 }
02ff8f8e
SR
1720 if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
1721 fw_notify("misconfigured bus\n");
1722 return;
1723 }
1724 ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
1725 OHCI1394_NodeID_nodeNumber);
ed568912 1726
4ffb7a6a
CL
1727 is_new_root = (reg & OHCI1394_NodeID_root) != 0;
1728 if (!(ohci->is_root && is_new_root))
1729 reg_write(ohci, OHCI1394_LinkControlSet,
1730 OHCI1394_LinkControl_cycleMaster);
1731 ohci->is_root = is_new_root;
1732
c8a9a498
SR
1733 reg = reg_read(ohci, OHCI1394_SelfIDCount);
1734 if (reg & OHCI1394_SelfIDCount_selfIDError) {
1735 fw_notify("inconsistent self IDs\n");
1736 return;
1737 }
c781c06d
KH
1738 /*
1739 * The count in the SelfIDCount register is the number of
ed568912
KH
1740 * bytes in the self ID receive buffer. Since we also receive
1741 * the inverted quadlets and a header quadlet, we shift one
c781c06d
KH
1742 * bit extra to get the actual number of self IDs.
1743 */
928ec5f1
SR
1744 self_id_count = (reg >> 3) & 0xff;
1745 if (self_id_count == 0 || self_id_count > 252) {
016bf3df
SR
1746 fw_notify("inconsistent self IDs\n");
1747 return;
1748 }
11bf20ad 1749 generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
ee71c2f9 1750 rmb();
ed568912
KH
1751
1752 for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
c8a9a498
SR
1753 if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) {
1754 fw_notify("inconsistent self IDs\n");
1755 return;
1756 }
11bf20ad
SR
1757 ohci->self_id_buffer[j] =
1758 cond_le32_to_cpu(ohci->self_id_cpu[i]);
ed568912 1759 }
ee71c2f9 1760 rmb();
ed568912 1761
c781c06d
KH
1762 /*
1763 * Check the consistency of the self IDs we just read. The
ed568912
KH
1764 * problem we face is that a new bus reset can start while we
1765 * read out the self IDs from the DMA buffer. If this happens,
1766 * the DMA buffer will be overwritten with new self IDs and we
1767 * will read out inconsistent data. The OHCI specification
1768 * (section 11.2) recommends a technique similar to
1769 * linux/seqlock.h, where we remember the generation of the
1770 * self IDs in the buffer before reading them out and compare
1771 * it to the current generation after reading them out. If
1772 * the two generations match we know we have a consistent set
c781c06d
KH
1773 * of self IDs.
1774 */
ed568912
KH
1775
1776 new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
1777 if (new_generation != generation) {
1778 fw_notify("recursive bus reset detected, "
1779 "discarding self ids\n");
1780 return;
1781 }
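 /*
 * Editor's note: condensed, the check above is the read side of a
 * seqlock-style sequence (cf. the OHCI section 11.2 technique described
 * in the comment above):
 *
 *   generation = generation field of self_id_cpu[0];   rmb();
 *   copy and validate the self ID quadlets;            rmb();
 *   new_generation = generation field of SelfIDCount;
 *   if (new_generation != generation)
 *           give up -- a new bus reset overwrote the buffer mid-copy
 */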
1782
1783 /* FIXME: Document how the locking works. */
1784 spin_lock_irqsave(&ohci->lock, flags);
1785
82b662dc 1786 ohci->generation = -1; /* prevent AT packet queueing */
f319b6a0
KH
1787 context_stop(&ohci->at_request_ctx);
1788 context_stop(&ohci->at_response_ctx);
82b662dc
CL
1789
1790 spin_unlock_irqrestore(&ohci->lock, flags);
1791
78dec56d
SR
1792 /*
1793 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave unsent
1794 * packets in the AT queues and software needs to drain them.
1795 * Some OHCI 1.1 controllers (JMicron) apparently require this too.
1796 */
82b662dc
CL
1797 at_context_flush(&ohci->at_request_ctx);
1798 at_context_flush(&ohci->at_response_ctx);
1799
1800 spin_lock_irqsave(&ohci->lock, flags);
1801
1802 ohci->generation = generation;
ed568912
KH
1803 reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
1804
4a635593 1805 if (ohci->quirks & QUIRK_RESET_PACKET)
d34316a4
SR
1806 ohci->request_generation = generation;
1807
c781c06d
KH
1808 /*
1809 * This next bit is unrelated to the AT context stuff but we
ed568912
KH
1810 * have to do it under the spinlock also. If a new config rom
1811 * was set up before this reset, the old one is now no longer
1812 * in use and we can free it. Update the config rom pointers
1813 * to point to the current config rom and clear the
88393161 1814 * next_config_rom pointer so a new update can take place.
c781c06d 1815 */
ed568912
KH
1816
1817 if (ohci->next_config_rom != NULL) {
0bd243c4
KH
1818 if (ohci->next_config_rom != ohci->config_rom) {
1819 free_rom = ohci->config_rom;
1820 free_rom_bus = ohci->config_rom_bus;
1821 }
ed568912
KH
1822 ohci->config_rom = ohci->next_config_rom;
1823 ohci->config_rom_bus = ohci->next_config_rom_bus;
1824 ohci->next_config_rom = NULL;
1825
c781c06d
KH
1826 /*
1827 * Restore config_rom image and manually update
ed568912
KH
1828 * config_rom registers. Writing the header quadlet
1829 * will indicate that the config rom is ready, so we
c781c06d
KH
1830 * do that last.
1831 */
ed568912
KH
1832 reg_write(ohci, OHCI1394_BusOptions,
1833 be32_to_cpu(ohci->config_rom[2]));
8e85973e
SR
1834 ohci->config_rom[0] = ohci->next_header;
1835 reg_write(ohci, OHCI1394_ConfigROMhdr,
1836 be32_to_cpu(ohci->next_header));
ed568912
KH
1837 }
1838
080de8c2
SR
1839#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
1840 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
1841 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
1842#endif
1843
ed568912
KH
1844 spin_unlock_irqrestore(&ohci->lock, flags);
1845
4eaff7d6
SR
1846 if (free_rom)
1847 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
1848 free_rom, free_rom_bus);
1849
08ddb2f4
SR
1850 log_selfids(ohci->node_id, generation,
1851 self_id_count, ohci->self_id_buffer);
ad3c0fe8 1852
e636fe25 1853 fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
c8a94ded
SR
1854 self_id_count, ohci->self_id_buffer,
1855 ohci->csr_state_setclear_abdicate);
1856 ohci->csr_state_setclear_abdicate = false;
ed568912
KH
1857}
1858
1859static irqreturn_t irq_handler(int irq, void *data)
1860{
1861 struct fw_ohci *ohci = data;
168cf9af 1862 u32 event, iso_event;
ed568912
KH
1863 int i;
1864
1865 event = reg_read(ohci, OHCI1394_IntEventClear);
1866
a515958d 1867 if (!event || !~event)
ed568912
KH
1868 return IRQ_NONE;
1869
8327b37b
CL
1870 /*
1871 * busReset and postedWriteErr must not be cleared yet
1872 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
1873 */
1874 reg_write(ohci, OHCI1394_IntEventClear,
1875 event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
ad3c0fe8 1876 log_irqs(event);
ed568912
KH
1877
1878 if (event & OHCI1394_selfIDComplete)
1879 tasklet_schedule(&ohci->bus_reset_tasklet);
1880
1881 if (event & OHCI1394_RQPkt)
1882 tasklet_schedule(&ohci->ar_request_ctx.tasklet);
1883
1884 if (event & OHCI1394_RSPkt)
1885 tasklet_schedule(&ohci->ar_response_ctx.tasklet);
1886
1887 if (event & OHCI1394_reqTxComplete)
1888 tasklet_schedule(&ohci->at_request_ctx.tasklet);
1889
1890 if (event & OHCI1394_respTxComplete)
1891 tasklet_schedule(&ohci->at_response_ctx.tasklet);
1892
2dd5bed5
CL
1893 if (event & OHCI1394_isochRx) {
1894 iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
1895 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);
1896
1897 while (iso_event) {
1898 i = ffs(iso_event) - 1;
1899 tasklet_schedule(
1900 &ohci->ir_context_list[i].context.tasklet);
1901 iso_event &= ~(1 << i);
1902 }
ed568912
KH
1903 }
1904
2dd5bed5
CL
1905 if (event & OHCI1394_isochTx) {
1906 iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
1907 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);
ed568912 1908
2dd5bed5
CL
1909 while (iso_event) {
1910 i = ffs(iso_event) - 1;
1911 tasklet_schedule(
1912 &ohci->it_context_list[i].context.tasklet);
1913 iso_event &= ~(1 << i);
1914 }
ed568912
KH
1915 }
1916
75f7832e
JW
1917 if (unlikely(event & OHCI1394_regAccessFail))
1918 fw_error("Register access failure - "
1919 "please notify linux1394-devel@lists.sf.net\n");
1920
8327b37b
CL
1921 if (unlikely(event & OHCI1394_postedWriteErr)) {
1922 reg_read(ohci, OHCI1394_PostedWriteAddressHi);
1923 reg_read(ohci, OHCI1394_PostedWriteAddressLo);
1924 reg_write(ohci, OHCI1394_IntEventClear,
1925 OHCI1394_postedWriteErr);
e524f616 1926 fw_error("PCI posted write error\n");
8327b37b 1927 }
e524f616 1928
bb9f2206
SR
1929 if (unlikely(event & OHCI1394_cycleTooLong)) {
1930 if (printk_ratelimit())
1931 fw_notify("isochronous cycle too long\n");
1932 reg_write(ohci, OHCI1394_LinkControlSet,
1933 OHCI1394_LinkControl_cycleMaster);
1934 }
1935
5ed1f321
JF
1936 if (unlikely(event & OHCI1394_cycleInconsistent)) {
1937 /*
1938 * We need to clear this event bit in order to make
1939 * cycleMatch isochronous I/O work. In theory we should
1940 * stop active cycleMatch iso contexts now and restart
1941 * them at least two cycles later. (FIXME?)
1942 */
1943 if (printk_ratelimit())
1944 fw_notify("isochronous cycle inconsistent\n");
1945 }
1946
f117a3e3
CL
1947 if (unlikely(event & OHCI1394_unrecoverableError))
1948 handle_dead_contexts(ohci);
1949
a48777e0
CL
1950 if (event & OHCI1394_cycle64Seconds) {
1951 spin_lock(&ohci->lock);
1952 update_bus_time(ohci);
1953 spin_unlock(&ohci->lock);
e597e989
CL
1954 } else
1955 flush_writes(ohci);
a48777e0 1956
ed568912
KH
1957 return IRQ_HANDLED;
1958}
1959
2aef469a
KH
1960static int software_reset(struct fw_ohci *ohci)
1961{
1962 int i;
1963
1964 reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
1965
1966 for (i = 0; i < OHCI_LOOP_COUNT; i++) {
1967 if ((reg_read(ohci, OHCI1394_HCControlSet) &
1968 OHCI1394_HCControl_softReset) == 0)
1969 return 0;
1970 msleep(1);
1971 }
1972
1973 return -EBUSY;
1974}
1975
8e85973e
SR
1976static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
1977{
1978 size_t size = length * 4;
1979
1980 memcpy(dest, src, size);
1981 if (size < CONFIG_ROM_SIZE)
1982 memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
1983}
1984
925e7a65
CL
1985static int configure_1394a_enhancements(struct fw_ohci *ohci)
1986{
1987 bool enable_1394a;
35d999b1 1988 int ret, clear, set, offset;
925e7a65
CL
1989
1990 /* Check if the driver should configure link and PHY. */
1991 if (!(reg_read(ohci, OHCI1394_HCControlSet) &
1992 OHCI1394_HCControl_programPhyEnable))
1993 return 0;
1994
1995 /* Paranoia: check whether the PHY supports 1394a, too. */
1996 enable_1394a = false;
35d999b1
SR
1997 ret = read_phy_reg(ohci, 2);
1998 if (ret < 0)
1999 return ret;
2000 if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
2001 ret = read_paged_phy_reg(ohci, 1, 8);
2002 if (ret < 0)
2003 return ret;
2004 if (ret >= 1)
925e7a65
CL
2005 enable_1394a = true;
2006 }
2007
2008 if (ohci->quirks & QUIRK_NO_1394A)
2009 enable_1394a = false;
2010
2011 /* Configure PHY and link consistently. */
2012 if (enable_1394a) {
2013 clear = 0;
2014 set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2015 } else {
2016 clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
2017 set = 0;
2018 }
02d37bed 2019 ret = update_phy_reg(ohci, 5, clear, set);
35d999b1
SR
2020 if (ret < 0)
2021 return ret;
925e7a65
CL
2022
2023 if (enable_1394a)
2024 offset = OHCI1394_HCControlSet;
2025 else
2026 offset = OHCI1394_HCControlClear;
2027 reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);
2028
2029 /* Clean up: configuration has been taken care of. */
2030 reg_write(ohci, OHCI1394_HCControlClear,
2031 OHCI1394_HCControl_programPhyEnable);
2032
2033 return 0;
2034}
2035
8e85973e
SR
2036static int ohci_enable(struct fw_card *card,
2037 const __be32 *config_rom, size_t length)
ed568912
KH
2038{
2039 struct fw_ohci *ohci = fw_ohci(card);
2040 struct pci_dev *dev = to_pci_dev(card->device);
e91b2787 2041 u32 lps, seconds, version, irqs;
35d999b1 2042 int i, ret;
ed568912 2043
2aef469a
KH
2044 if (software_reset(ohci)) {
2045 fw_error("Failed to reset ohci card.\n");
2046 return -EBUSY;
2047 }
2048
2049 /*
2050 * Now enable LPS, which we need in order to start accessing
2051 * most of the registers. In fact, on some cards (ALI M5251),
2052 * accessing registers in the SClk domain without LPS enabled
2053 * will lock up the machine. Wait 50msec to make sure we have
02214724
JW
2054 * full link enabled. However, with some cards (well, at least
2055 * a JMicron PCIe card), we have to try again sometimes.
2aef469a
KH
2056 */
2057 reg_write(ohci, OHCI1394_HCControlSet,
2058 OHCI1394_HCControl_LPS |
2059 OHCI1394_HCControl_postedWriteEnable);
2060 flush_writes(ohci);
02214724
JW
2061
2062 for (lps = 0, i = 0; !lps && i < 3; i++) {
2063 msleep(50);
2064 lps = reg_read(ohci, OHCI1394_HCControlSet) &
2065 OHCI1394_HCControl_LPS;
2066 }
2067
2068 if (!lps) {
2069 fw_error("Failed to set Link Power Status\n");
2070 return -EIO;
2071 }
2aef469a
KH
2072
2073 reg_write(ohci, OHCI1394_HCControlClear,
2074 OHCI1394_HCControl_noByteSwapData);
2075
affc9c24 2076 reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
2aef469a 2077 reg_write(ohci, OHCI1394_LinkControlSet,
2aef469a
KH
2078 OHCI1394_LinkControl_cycleTimerEnable |
2079 OHCI1394_LinkControl_cycleMaster);
2080
2081 reg_write(ohci, OHCI1394_ATRetries,
2082 OHCI1394_MAX_AT_REQ_RETRIES |
2083 (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
27a2329f
CL
2084 (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
2085 (200 << 16));
2aef469a 2086
a48777e0
CL
2087 seconds = lower_32_bits(get_seconds());
2088 reg_write(ohci, OHCI1394_IsochronousCycleTimer, seconds << 25);
2089 ohci->bus_time = seconds & ~0x3f;
2090
e91b2787
CL
2091 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
2092 if (version >= OHCI_VERSION_1_1) {
2093 reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
2094 0xfffffffe);
db3c9cc1 2095 card->broadcast_channel_auto_allocated = true;
e91b2787
CL
2096 }
2097
a1a1132b
CL
2098 /* Get implemented bits of the priority arbitration request counter. */
2099 reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
2100 ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
2101 reg_write(ohci, OHCI1394_FairnessControl, 0);
db3c9cc1 2102 card->priority_budget_implemented = ohci->pri_req_max != 0;
2aef469a 2103
2aef469a
KH
2104 reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
2105 reg_write(ohci, OHCI1394_IntEventClear, ~0);
2106 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
2aef469a 2107
35d999b1
SR
2108 ret = configure_1394a_enhancements(ohci);
2109 if (ret < 0)
2110 return ret;
925e7a65 2111
2aef469a 2112 /* Activate link_on bit and contender bit in our self ID packets. */
35d999b1
SR
2113 ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
2114 if (ret < 0)
2115 return ret;
2aef469a 2116
c781c06d
KH
2117 /*
2118 * When the link is not yet enabled, the atomic config rom
ed568912
KH
2119 * update mechanism described below in ohci_set_config_rom()
2120 * is not active. We have to update ConfigRomHeader and
2121 * BusOptions manually, and the write to ConfigROMmap takes
2122 * effect immediately. We tie this to the enabling of the
2123 * link, so we have a valid config rom before enabling - the
2124 * OHCI requires that ConfigROMhdr and BusOptions have valid
2125 * values before enabling.
2126 *
2127 * However, when the ConfigROMmap is written, some controllers
2128 * always read back quadlets 0 and 2 from the config rom to
2129 * the ConfigRomHeader and BusOptions registers on bus reset.
2130 * They shouldn't do that in this initial case where the link
2131 * isn't enabled. This means we have to use the same
2132 * workaround here, setting the bus header to 0 and then write
2133 * the right values in the bus reset tasklet.
2134 */
2135
0bd243c4
KH
2136 if (config_rom) {
2137 ohci->next_config_rom =
2138 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2139 &ohci->next_config_rom_bus,
2140 GFP_KERNEL);
2141 if (ohci->next_config_rom == NULL)
2142 return -ENOMEM;
ed568912 2143
8e85973e 2144 copy_config_rom(ohci->next_config_rom, config_rom, length);
0bd243c4
KH
2145 } else {
2146 /*
2147 * In the suspend case, config_rom is NULL, which
2148 * means that we just reuse the old config rom.
2149 */
2150 ohci->next_config_rom = ohci->config_rom;
2151 ohci->next_config_rom_bus = ohci->config_rom_bus;
2152 }
ed568912 2153
8e85973e 2154 ohci->next_header = ohci->next_config_rom[0];
ed568912
KH
2155 ohci->next_config_rom[0] = 0;
2156 reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
0bd243c4
KH
2157 reg_write(ohci, OHCI1394_BusOptions,
2158 be32_to_cpu(ohci->next_config_rom[2]));
ed568912
KH
2159 reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
2160
2161 reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);
2162
262444ee
CL
2163 if (!(ohci->quirks & QUIRK_NO_MSI))
2164 pci_enable_msi(dev);
ed568912 2165 if (request_irq(dev->irq, irq_handler,
262444ee
CL
2166 pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
2167 ohci_driver_name, ohci)) {
2168 fw_error("Failed to allocate interrupt %d.\n", dev->irq);
2169 pci_disable_msi(dev);
ed568912
KH
2170 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2171 ohci->config_rom, ohci->config_rom_bus);
2172 return -EIO;
2173 }
2174
148c7866
SR
2175 irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
2176 OHCI1394_RQPkt | OHCI1394_RSPkt |
2177 OHCI1394_isochTx | OHCI1394_isochRx |
2178 OHCI1394_postedWriteErr |
2179 OHCI1394_selfIDComplete |
2180 OHCI1394_regAccessFail |
a48777e0 2181 OHCI1394_cycle64Seconds |
f117a3e3
CL
2182 OHCI1394_cycleInconsistent |
2183 OHCI1394_unrecoverableError |
2184 OHCI1394_cycleTooLong |
148c7866
SR
2185 OHCI1394_masterIntEnable;
2186 if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
2187 irqs |= OHCI1394_busReset;
2188 reg_write(ohci, OHCI1394_IntMaskSet, irqs);
2189
ed568912
KH
2190 reg_write(ohci, OHCI1394_HCControlSet,
2191 OHCI1394_HCControl_linkEnable |
2192 OHCI1394_HCControl_BIBimageValid);
ecf8328e
CL
2193
2194 reg_write(ohci, OHCI1394_LinkControlSet,
2195 OHCI1394_LinkControl_rcvSelfID |
2196 OHCI1394_LinkControl_rcvPhyPkt);
2197
2198 ar_context_run(&ohci->ar_request_ctx);
2199 ar_context_run(&ohci->ar_response_ctx); /* also flushes writes */
ed568912 2200
02d37bed
SR
2201 /* We are ready to go, reset bus to finish initialization. */
2202 fw_schedule_bus_reset(&ohci->card, false, true);
ed568912
KH
2203
2204 return 0;
2205}
2206
53dca511 2207static int ohci_set_config_rom(struct fw_card *card,
8e85973e 2208 const __be32 *config_rom, size_t length)
ed568912
KH
2209{
2210 struct fw_ohci *ohci;
2211 unsigned long flags;
2dbd7d7e 2212 int ret = -EBUSY;
ed568912 2213 __be32 *next_config_rom;
f5101d58 2214 dma_addr_t uninitialized_var(next_config_rom_bus);
ed568912
KH
2215
2216 ohci = fw_ohci(card);
2217
c781c06d
KH
2218 /*
2219 * When the OHCI controller is enabled, the config rom update
ed568912
KH
2220 * mechanism is a bit tricky, but easy enough to use. See
2221 * section 5.5.6 in the OHCI specification.
2222 *
2223 * The OHCI controller caches the new config rom address in a
2224 * shadow register (ConfigROMmapNext) and needs a bus reset
2225 * for the changes to take place. When the bus reset is
2226 * detected, the controller loads the new values for the
2227 * ConfigRomHeader and BusOptions registers from the specified
2228 * config rom and loads ConfigROMmap from the ConfigROMmapNext
2229 * shadow register. All automatically and atomically.
2230 *
2231 * Now, there's a twist to this story. The automatic load of
2232 * ConfigRomHeader and BusOptions doesn't honor the
2233 * noByteSwapData bit, so with a be32 config rom, the
2234 * controller will load be32 values into these registers
2235 * during the atomic update, even on little endian
2236 * architectures. The workaround we use is to put a 0 in the
2237 * header quadlet; 0 is endian agnostic and means that the
2238 * config rom isn't ready yet. In the bus reset tasklet we
2239 * then set up the real values for the two registers.
2240 *
2241 * We use ohci->lock to avoid racing with the code that sets
2242 * ohci->next_config_rom to NULL (see bus_reset_tasklet).
2243 */
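 /*
 * Editor's sketch of the resulting update sequence (assumed flow,
 * condensed from the comment above and from bus_reset_tasklet()):
 *
 *   1. driver:   allocate the new ROM image, zero its header quadlet,
 *                write its bus address to ConfigROMmap (latched by the
 *                controller in the ConfigROMmapNext shadow register)
 *   2. driver:   schedule a bus reset
 *   3. hardware: on the bus reset, atomically load ConfigRomHeader,
 *                BusOptions and ConfigROMmap from the new image
 *   4. driver:   in bus_reset_tasklet(), write the real header quadlet
 *                and ConfigROMhdr, then free the old image
 */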
2244
2245 next_config_rom =
2246 dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2247 &next_config_rom_bus, GFP_KERNEL);
2248 if (next_config_rom == NULL)
2249 return -ENOMEM;
2250
2251 spin_lock_irqsave(&ohci->lock, flags);
2252
2253 if (ohci->next_config_rom == NULL) {
2254 ohci->next_config_rom = next_config_rom;
2255 ohci->next_config_rom_bus = next_config_rom_bus;
2256
8e85973e 2257 copy_config_rom(ohci->next_config_rom, config_rom, length);
ed568912
KH
2258
2259 ohci->next_header = config_rom[0];
2260 ohci->next_config_rom[0] = 0;
2261
2262 reg_write(ohci, OHCI1394_ConfigROMmap,
2263 ohci->next_config_rom_bus);
2dbd7d7e 2264 ret = 0;
ed568912
KH
2265 }
2266
2267 spin_unlock_irqrestore(&ohci->lock, flags);
2268
c781c06d
KH
2269 /*
2270 * Now initiate a bus reset to have the changes take
ed568912
KH
2271 * effect. We clean up the old config rom memory and DMA
2272 * mappings in the bus reset tasklet, since the OHCI
2273 * controller could need to access it before the bus reset
c781c06d
KH
2274 * takes effect.
2275 */
2dbd7d7e 2276 if (ret == 0)
02d37bed 2277 fw_schedule_bus_reset(&ohci->card, true, true);
4eaff7d6
SR
2278 else
2279 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
2280 next_config_rom, next_config_rom_bus);
ed568912 2281
2dbd7d7e 2282 return ret;
ed568912
KH
2283}
2284
2285static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
2286{
2287 struct fw_ohci *ohci = fw_ohci(card);
2288
2289 at_context_transmit(&ohci->at_request_ctx, packet);
2290}
2291
2292static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
2293{
2294 struct fw_ohci *ohci = fw_ohci(card);
2295
2296 at_context_transmit(&ohci->at_response_ctx, packet);
2297}
2298
730c32f5
KH
2299static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
2300{
2301 struct fw_ohci *ohci = fw_ohci(card);
f319b6a0
KH
2302 struct context *ctx = &ohci->at_request_ctx;
2303 struct driver_data *driver_data = packet->driver_data;
2dbd7d7e 2304 int ret = -ENOENT;
730c32f5 2305
f319b6a0 2306 tasklet_disable(&ctx->tasklet);
730c32f5 2307
f319b6a0
KH
2308 if (packet->ack != 0)
2309 goto out;
730c32f5 2310
19593ffd 2311 if (packet->payload_mapped)
1d1dc5e8
SR
2312 dma_unmap_single(ohci->card.device, packet->payload_bus,
2313 packet->payload_length, DMA_TO_DEVICE);
2314
ad3c0fe8 2315 log_ar_at_event('T', packet->speed, packet->header, 0x20);
f319b6a0
KH
2316 driver_data->packet = NULL;
2317 packet->ack = RCODE_CANCELLED;
2318 packet->callback(packet, &ohci->card, packet->ack);
2dbd7d7e 2319 ret = 0;
f319b6a0
KH
2320 out:
2321 tasklet_enable(&ctx->tasklet);
730c32f5 2322
2dbd7d7e 2323 return ret;
730c32f5
KH
2324}
2325
53dca511
SR
2326static int ohci_enable_phys_dma(struct fw_card *card,
2327 int node_id, int generation)
ed568912 2328{
080de8c2
SR
2329#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
2330 return 0;
2331#else
ed568912
KH
2332 struct fw_ohci *ohci = fw_ohci(card);
2333 unsigned long flags;
2dbd7d7e 2334 int n, ret = 0;
ed568912 2335
c781c06d
KH
2336 /*
2337 * FIXME: Make sure this bitmask is cleared when we clear the busReset
2338 * interrupt bit. Clear physReqResourceAllBuses on bus reset.
2339 */
ed568912
KH
2340
2341 spin_lock_irqsave(&ohci->lock, flags);
2342
2343 if (ohci->generation != generation) {
2dbd7d7e 2344 ret = -ESTALE;
ed568912
KH
2345 goto out;
2346 }
2347
c781c06d
KH
2348 /*
2349 * Note, if the node ID contains a non-local bus ID, physical DMA is
2350 * enabled for _all_ nodes on remote buses.
2351 */
907293d7
SR
2352
2353 n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
2354 if (n < 32)
2355 reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
2356 else
2357 reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));
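 /*
 * Editor's note, a worked example of the filter update above with
 * hypothetical values, assuming the conventional LOCAL_BUS prefix 0xffc0:
 * node_id 0xffc2 is on the local bus, so n = 2 and bit 2 of
 * PhyReqFilterLo is set, enabling physical requests from node 2 only.
 * Any node_id with a different bus number maps to n = 63, i.e. bit 31 of
 * PhyReqFilterHi which, as noted above, enables physical DMA for all
 * nodes on remote buses.
 */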
2358
ed568912 2359 flush_writes(ohci);
ed568912 2360 out:
6cad95fe 2361 spin_unlock_irqrestore(&ohci->lock, flags);
2dbd7d7e
SR
2362
2363 return ret;
080de8c2 2364#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
ed568912 2365}
373b2edd 2366
0fcff4e3 2367static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
b677532b 2368{
60d32970 2369 struct fw_ohci *ohci = fw_ohci(card);
a48777e0
CL
2370 unsigned long flags;
2371 u32 value;
60d32970
CL
2372
2373 switch (csr_offset) {
4ffb7a6a
CL
2374 case CSR_STATE_CLEAR:
2375 case CSR_STATE_SET:
4ffb7a6a
CL
2376 if (ohci->is_root &&
2377 (reg_read(ohci, OHCI1394_LinkControlSet) &
2378 OHCI1394_LinkControl_cycleMaster))
c8a94ded 2379 value = CSR_STATE_BIT_CMSTR;
4ffb7a6a 2380 else
c8a94ded
SR
2381 value = 0;
2382 if (ohci->csr_state_setclear_abdicate)
2383 value |= CSR_STATE_BIT_ABDICATE;
b677532b 2384
c8a94ded 2385 return value;
4a9bde9b 2386
506f1a31
CL
2387 case CSR_NODE_IDS:
2388 return reg_read(ohci, OHCI1394_NodeID) << 16;
2389
60d32970
CL
2390 case CSR_CYCLE_TIME:
2391 return get_cycle_time(ohci);
2392
a48777e0
CL
2393 case CSR_BUS_TIME:
2394 /*
2395 * We might be called just after the cycle timer has wrapped
2396 * around but just before the cycle64Seconds handler, so we
2397 * better check here, too, if the bus time needs to be updated.
2398 */
2399 spin_lock_irqsave(&ohci->lock, flags);
2400 value = update_bus_time(ohci);
2401 spin_unlock_irqrestore(&ohci->lock, flags);
2402 return value;
2403
27a2329f
CL
2404 case CSR_BUSY_TIMEOUT:
2405 value = reg_read(ohci, OHCI1394_ATRetries);
2406 return (value >> 4) & 0x0ffff00f;
2407
a1a1132b
CL
2408 case CSR_PRIORITY_BUDGET:
2409 return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
2410 (ohci->pri_req_max << 8);
2411
60d32970
CL
2412 default:
2413 WARN_ON(1);
2414 return 0;
2415 }
b677532b
CL
2416}
2417
0fcff4e3 2418static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
d60d7f1d
KH
2419{
2420 struct fw_ohci *ohci = fw_ohci(card);
a48777e0 2421 unsigned long flags;
d60d7f1d 2422
506f1a31 2423 switch (csr_offset) {
4ffb7a6a 2424 case CSR_STATE_CLEAR:
4ffb7a6a
CL
2425 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2426 reg_write(ohci, OHCI1394_LinkControlClear,
2427 OHCI1394_LinkControl_cycleMaster);
2428 flush_writes(ohci);
2429 }
c8a94ded
SR
2430 if (value & CSR_STATE_BIT_ABDICATE)
2431 ohci->csr_state_setclear_abdicate = false;
4ffb7a6a 2432 break;
4a9bde9b 2433
4ffb7a6a
CL
2434 case CSR_STATE_SET:
2435 if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
2436 reg_write(ohci, OHCI1394_LinkControlSet,
2437 OHCI1394_LinkControl_cycleMaster);
2438 flush_writes(ohci);
2439 }
c8a94ded
SR
2440 if (value & CSR_STATE_BIT_ABDICATE)
2441 ohci->csr_state_setclear_abdicate = true;
4ffb7a6a 2442 break;
d60d7f1d 2443
506f1a31
CL
2444 case CSR_NODE_IDS:
2445 reg_write(ohci, OHCI1394_NodeID, value >> 16);
2446 flush_writes(ohci);
2447 break;
2448
9ab5071c
CL
2449 case CSR_CYCLE_TIME:
2450 reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
2451 reg_write(ohci, OHCI1394_IntEventSet,
2452 OHCI1394_cycleInconsistent);
2453 flush_writes(ohci);
2454 break;
2455
a48777e0
CL
2456 case CSR_BUS_TIME:
2457 spin_lock_irqsave(&ohci->lock, flags);
2458 ohci->bus_time = (ohci->bus_time & 0x7f) | (value & ~0x7f);
2459 spin_unlock_irqrestore(&ohci->lock, flags);
2460 break;
2461
27a2329f
CL
2462 case CSR_BUSY_TIMEOUT:
2463 value = (value & 0xf) | ((value & 0xf) << 4) |
2464 ((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
2465 reg_write(ohci, OHCI1394_ATRetries, value);
2466 flush_writes(ohci);
2467 break;
2468
a1a1132b
CL
2469 case CSR_PRIORITY_BUDGET:
2470 reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
2471 flush_writes(ohci);
2472 break;
2473
506f1a31
CL
2474 default:
2475 WARN_ON(1);
2476 break;
2477 }
d60d7f1d
KH
2478}
2479
1aa292bb
DM
2480static void copy_iso_headers(struct iso_context *ctx, void *p)
2481{
2482 int i = ctx->header_length;
2483
2484 if (i + ctx->base.header_size > PAGE_SIZE)
2485 return;
2486
2487 /*
2488 * The iso header is byteswapped to little endian by
2489 * the controller, but the remaining header quadlets
2490 * are big endian. We want to present all the headers
2491 * as big endian, so we have to swap the first quadlet.
2492 */
2493 if (ctx->base.header_size > 0)
2494 *(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
2495 if (ctx->base.header_size > 4)
2496 *(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
2497 if (ctx->base.header_size > 8)
2498 memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
2499 ctx->header_length += ctx->base.header_size;
2500}
2501
a186b4a6
JW
2502static int handle_ir_packet_per_buffer(struct context *context,
2503 struct descriptor *d,
2504 struct descriptor *last)
2505{
2506 struct iso_context *ctx =
2507 container_of(context, struct iso_context, context);
bcee893c 2508 struct descriptor *pd;
a186b4a6 2509 __le32 *ir_header;
bcee893c 2510 void *p;
a186b4a6 2511
872e330e 2512 for (pd = d; pd <= last; pd++)
bcee893c
DM
2513 if (pd->transfer_status)
2514 break;
bcee893c 2515 if (pd > last)
a186b4a6
JW
2516 /* Descriptor(s) not done yet, stop iteration */
2517 return 0;
2518
1aa292bb
DM
2519 p = last + 1;
2520 copy_iso_headers(ctx, p);
a186b4a6 2521
bcee893c
DM
2522 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
2523 ir_header = (__le32 *) p;
872e330e
SR
2524 ctx->base.callback.sc(&ctx->base,
2525 le32_to_cpu(ir_header[0]) & 0xffff,
2526 ctx->header_length, ctx->header,
2527 ctx->base.callback_data);
a186b4a6
JW
2528 ctx->header_length = 0;
2529 }
2530
a186b4a6
JW
2531 return 1;
2532}
2533
872e330e
SR
2534/* d == last because each descriptor block is only a single descriptor. */
2535static int handle_ir_buffer_fill(struct context *context,
2536 struct descriptor *d,
2537 struct descriptor *last)
2538{
2539 struct iso_context *ctx =
2540 container_of(context, struct iso_context, context);
2541
2542 if (!last->transfer_status)
2543 /* Descriptor(s) not done yet, stop iteration */
2544 return 0;
2545
2546 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
2547 ctx->base.callback.mc(&ctx->base,
2548 le32_to_cpu(last->data_address) +
2549 le16_to_cpu(last->req_count) -
2550 le16_to_cpu(last->res_count),
2551 ctx->base.callback_data);
2552
2553 return 1;
2554}
2555
30200739
KH
2556static int handle_it_packet(struct context *context,
2557 struct descriptor *d,
2558 struct descriptor *last)
ed568912 2559{
30200739
KH
2560 struct iso_context *ctx =
2561 container_of(context, struct iso_context, context);
31769cef
JF
2562 int i;
2563 struct descriptor *pd;
373b2edd 2564
31769cef
JF
2565 for (pd = d; pd <= last; pd++)
2566 if (pd->transfer_status)
2567 break;
2568 if (pd > last)
2569 /* Descriptor(s) not done yet, stop iteration */
30200739
KH
2570 return 0;
2571
31769cef
JF
2572 i = ctx->header_length;
2573 if (i + 4 < PAGE_SIZE) {
2574 /* Present this value as big-endian to match the receive code */
2575 *(__be32 *)(ctx->header + i) = cpu_to_be32(
2576 ((u32)le16_to_cpu(pd->transfer_status) << 16) |
2577 le16_to_cpu(pd->res_count));
2578 ctx->header_length += 4;
2579 }
2580 if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
872e330e
SR
2581 ctx->base.callback.sc(&ctx->base, le16_to_cpu(last->res_count),
2582 ctx->header_length, ctx->header,
2583 ctx->base.callback_data);
31769cef
JF
2584 ctx->header_length = 0;
2585 }
30200739 2586 return 1;
ed568912
KH
2587}
2588
872e330e
SR
2589static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
2590{
2591 u32 hi = channels >> 32, lo = channels;
2592
2593 reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
2594 reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
2595 reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
2596 reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
2597 mmiowb();
2598 ohci->mc_channels = channels;
2599}
2600
53dca511 2601static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
4817ed24 2602 int type, int channel, size_t header_size)
ed568912
KH
2603{
2604 struct fw_ohci *ohci = fw_ohci(card);
872e330e
SR
2605 struct iso_context *uninitialized_var(ctx);
2606 descriptor_callback_t uninitialized_var(callback);
2607 u64 *uninitialized_var(channels);
2608 u32 *uninitialized_var(mask), uninitialized_var(regs);
ed568912 2609 unsigned long flags;
872e330e 2610 int index, ret = -EBUSY;
ed568912 2611
872e330e 2612 spin_lock_irqsave(&ohci->lock, flags);
ed568912 2613
872e330e
SR
2614 switch (type) {
2615 case FW_ISO_CONTEXT_TRANSMIT:
2616 mask = &ohci->it_context_mask;
30200739 2617 callback = handle_it_packet;
872e330e
SR
2618 index = ffs(*mask) - 1;
2619 if (index >= 0) {
2620 *mask &= ~(1 << index);
2621 regs = OHCI1394_IsoXmitContextBase(index);
2622 ctx = &ohci->it_context_list[index];
2623 }
2624 break;
2625
2626 case FW_ISO_CONTEXT_RECEIVE:
4817ed24 2627 channels = &ohci->ir_context_channels;
872e330e 2628 mask = &ohci->ir_context_mask;
6498ba04 2629 callback = handle_ir_packet_per_buffer;
872e330e
SR
2630 index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
2631 if (index >= 0) {
2632 *channels &= ~(1ULL << channel);
2633 *mask &= ~(1 << index);
2634 regs = OHCI1394_IsoRcvContextBase(index);
2635 ctx = &ohci->ir_context_list[index];
2636 }
2637 break;
ed568912 2638
872e330e
SR
2639 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
2640 mask = &ohci->ir_context_mask;
2641 callback = handle_ir_buffer_fill;
2642 index = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
2643 if (index >= 0) {
2644 ohci->mc_allocated = true;
2645 *mask &= ~(1 << index);
2646 regs = OHCI1394_IsoRcvContextBase(index);
2647 ctx = &ohci->ir_context_list[index];
2648 }
2649 break;
2650
2651 default:
2652 index = -1;
2653 ret = -ENOSYS;
4817ed24 2654 }
872e330e 2655
ed568912
KH
2656 spin_unlock_irqrestore(&ohci->lock, flags);
2657
2658 if (index < 0)
872e330e 2659 return ERR_PTR(ret);
373b2edd 2660
2d826cc5 2661 memset(ctx, 0, sizeof(*ctx));
9b32d5f3
KH
2662 ctx->header_length = 0;
2663 ctx->header = (void *) __get_free_page(GFP_KERNEL);
872e330e
SR
2664 if (ctx->header == NULL) {
2665 ret = -ENOMEM;
9b32d5f3 2666 goto out;
872e330e 2667 }
2dbd7d7e
SR
2668 ret = context_init(&ctx->context, ohci, regs, callback);
2669 if (ret < 0)
9b32d5f3 2670 goto out_with_header;
ed568912 2671
872e330e
SR
2672 if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL)
2673 set_multichannel_mask(ohci, 0);
2674
ed568912 2675 return &ctx->base;
9b32d5f3
KH
2676
2677 out_with_header:
2678 free_page((unsigned long)ctx->header);
2679 out:
2680 spin_lock_irqsave(&ohci->lock, flags);
872e330e
SR
2681
2682 switch (type) {
2683 case FW_ISO_CONTEXT_RECEIVE:
2684 *channels |= 1ULL << channel;
2685 break;
2686
2687 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
2688 ohci->mc_allocated = false;
2689 break;
2690 }
9b32d5f3 2691 *mask |= 1 << index;
872e330e 2692
9b32d5f3
KH
2693 spin_unlock_irqrestore(&ohci->lock, flags);
2694
2dbd7d7e 2695 return ERR_PTR(ret);
ed568912
KH
2696}
2697
eb0306ea
KH
2698static int ohci_start_iso(struct fw_iso_context *base,
2699 s32 cycle, u32 sync, u32 tags)
ed568912 2700{
373b2edd 2701 struct iso_context *ctx = container_of(base, struct iso_context, base);
30200739 2702 struct fw_ohci *ohci = ctx->context.ohci;
872e330e 2703 u32 control = IR_CONTEXT_ISOCH_HEADER, match;
ed568912
KH
2704 int index;
2705
44b74d90
CL
2706 /* the controller cannot start without any queued packets */
2707 if (ctx->context.last->branch_address == 0)
2708 return -ENODATA;
2709
872e330e
SR
2710 switch (ctx->base.type) {
2711 case FW_ISO_CONTEXT_TRANSMIT:
295e3feb 2712 index = ctx - ohci->it_context_list;
8a2f7d93
KH
2713 match = 0;
2714 if (cycle >= 0)
2715 match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
295e3feb 2716 (cycle & 0x7fff) << 16;
21efb3cf 2717
295e3feb
KH
2718 reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
2719 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
8a2f7d93 2720 context_run(&ctx->context, match);
872e330e
SR
2721 break;
2722
2723 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
2724 control |= IR_CONTEXT_BUFFER_FILL|IR_CONTEXT_MULTI_CHANNEL_MODE;
2725 /* fall through */
2726 case FW_ISO_CONTEXT_RECEIVE:
295e3feb 2727 index = ctx - ohci->ir_context_list;
8a2f7d93
KH
2728 match = (tags << 28) | (sync << 8) | ctx->base.channel;
2729 if (cycle >= 0) {
2730 match |= (cycle & 0x07fff) << 12;
2731 control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
2732 }
ed568912 2733
295e3feb
KH
2734 reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
2735 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
a77754a7 2736 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
8a2f7d93 2737 context_run(&ctx->context, control);
dd23736e
ML
2738
2739 ctx->sync = sync;
2740 ctx->tags = tags;
2741
872e330e 2742 break;
295e3feb 2743 }
ed568912
KH
2744
2745 return 0;
2746}
2747
b8295668
KH
2748static int ohci_stop_iso(struct fw_iso_context *base)
2749{
2750 struct fw_ohci *ohci = fw_ohci(base->card);
373b2edd 2751 struct iso_context *ctx = container_of(base, struct iso_context, base);
b8295668
KH
2752 int index;
2753
872e330e
SR
2754 switch (ctx->base.type) {
2755 case FW_ISO_CONTEXT_TRANSMIT:
b8295668
KH
2756 index = ctx - ohci->it_context_list;
2757 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
872e330e
SR
2758 break;
2759
2760 case FW_ISO_CONTEXT_RECEIVE:
2761 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
b8295668
KH
2762 index = ctx - ohci->ir_context_list;
2763 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
872e330e 2764 break;
b8295668
KH
2765 }
2766 flush_writes(ohci);
2767 context_stop(&ctx->context);
e81cbebd 2768 tasklet_kill(&ctx->context.tasklet);
b8295668
KH
2769
2770 return 0;
2771}
2772
ed568912
KH
2773static void ohci_free_iso_context(struct fw_iso_context *base)
2774{
2775 struct fw_ohci *ohci = fw_ohci(base->card);
373b2edd 2776 struct iso_context *ctx = container_of(base, struct iso_context, base);
ed568912
KH
2777 unsigned long flags;
2778 int index;
2779
b8295668
KH
2780 ohci_stop_iso(base);
2781 context_release(&ctx->context);
9b32d5f3 2782 free_page((unsigned long)ctx->header);
b8295668 2783
ed568912
KH
2784 spin_lock_irqsave(&ohci->lock, flags);
2785
872e330e
SR
2786 switch (base->type) {
2787 case FW_ISO_CONTEXT_TRANSMIT:
ed568912 2788 index = ctx - ohci->it_context_list;
ed568912 2789 ohci->it_context_mask |= 1 << index;
872e330e
SR
2790 break;
2791
2792 case FW_ISO_CONTEXT_RECEIVE:
ed568912 2793 index = ctx - ohci->ir_context_list;
ed568912 2794 ohci->ir_context_mask |= 1 << index;
4817ed24 2795 ohci->ir_context_channels |= 1ULL << base->channel;
872e330e
SR
2796 break;
2797
2798 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
2799 index = ctx - ohci->ir_context_list;
2800 ohci->ir_context_mask |= 1 << index;
2801 ohci->ir_context_channels |= ohci->mc_channels;
2802 ohci->mc_channels = 0;
2803 ohci->mc_allocated = false;
2804 break;
ed568912 2805 }
ed568912
KH
2806
2807 spin_unlock_irqrestore(&ohci->lock, flags);
2808}
2809
872e330e
SR
2810static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
2811{
2812 struct fw_ohci *ohci = fw_ohci(base->card);
2813 unsigned long flags;
2814 int ret;
2815
2816 switch (base->type) {
2817 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
2818
2819 spin_lock_irqsave(&ohci->lock, flags);
2820
2821 /* Don't allow multichannel to grab other contexts' channels. */
2822 if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
2823 *channels = ohci->ir_context_channels;
2824 ret = -EBUSY;
2825 } else {
2826 set_multichannel_mask(ohci, *channels);
2827 ret = 0;
2828 }
2829
2830 spin_unlock_irqrestore(&ohci->lock, flags);
2831
2832 break;
2833 default:
2834 ret = -EINVAL;
2835 }
2836
2837 return ret;
2838}
2839
dd23736e
ML
2840#ifdef CONFIG_PM
2841static void ohci_resume_iso_dma(struct fw_ohci *ohci)
2842{
2843 int i;
2844 struct iso_context *ctx;
2845
2846 for (i = 0 ; i < ohci->n_ir ; i++) {
2847 ctx = &ohci->ir_context_list[i];
693a50b5 2848 if (ctx->context.running)
dd23736e
ML
2849 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
2850 }
2851
2852 for (i = 0 ; i < ohci->n_it ; i++) {
2853 ctx = &ohci->it_context_list[i];
693a50b5 2854 if (ctx->context.running)
dd23736e
ML
2855 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
2856 }
2857}
2858#endif
2859
872e330e
SR
2860static int queue_iso_transmit(struct iso_context *ctx,
2861 struct fw_iso_packet *packet,
2862 struct fw_iso_buffer *buffer,
2863 unsigned long payload)
ed568912 2864{
30200739 2865 struct descriptor *d, *last, *pd;
ed568912
KH
2866 struct fw_iso_packet *p;
2867 __le32 *header;
9aad8125 2868 dma_addr_t d_bus, page_bus;
ed568912
KH
2869 u32 z, header_z, payload_z, irq;
2870 u32 payload_index, payload_end_index, next_page_index;
30200739 2871 int page, end_page, i, length, offset;
ed568912 2872
ed568912 2873 p = packet;
9aad8125 2874 payload_index = payload;
ed568912
KH
2875
2876 if (p->skip)
2877 z = 1;
2878 else
2879 z = 2;
2880 if (p->header_length > 0)
2881 z++;
2882
2883 /* Determine the first page the payload isn't contained in. */
2884 end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
2885 if (p->payload_length > 0)
2886 payload_z = end_page - (payload_index >> PAGE_SHIFT);
2887 else
2888 payload_z = 0;
2889
2890 z += payload_z;
2891
2892 /* Get header size in number of descriptors. */
2d826cc5 2893 header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));
ed568912 2894
30200739
KH
2895 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
2896 if (d == NULL)
2897 return -ENOMEM;
ed568912
KH
2898
2899 if (!p->skip) {
a77754a7 2900 d[0].control = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
ed568912 2901 d[0].req_count = cpu_to_le16(8);
7f51a100
CL
2902 /*
2903 * Link the skip address to this descriptor itself. This causes
2904 * a context to skip a cycle whenever lost cycles or FIFO
2905 * overruns occur, without dropping the data. The application
2906 * should then decide whether this is an error condition or not.
2907 * FIXME: Make the context's cycle-lost behaviour configurable?
2908 */
2909 d[0].branch_address = cpu_to_le32(d_bus | z);
ed568912
KH
2910
2911 header = (__le32 *) &d[1];
a77754a7
KH
2912 header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
2913 IT_HEADER_TAG(p->tag) |
2914 IT_HEADER_TCODE(TCODE_STREAM_DATA) |
2915 IT_HEADER_CHANNEL(ctx->base.channel) |
2916 IT_HEADER_SPEED(ctx->base.speed));
ed568912 2917 header[1] =
a77754a7 2918 cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
ed568912
KH
2919 p->payload_length));
2920 }
2921
2922 if (p->header_length > 0) {
2923 d[2].req_count = cpu_to_le16(p->header_length);
2d826cc5 2924 d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
ed568912
KH
2925 memcpy(&d[z], p->header, p->header_length);
2926 }
2927
2928 pd = d + z - payload_z;
2929 payload_end_index = payload_index + p->payload_length;
2930 for (i = 0; i < payload_z; i++) {
2931 page = payload_index >> PAGE_SHIFT;
2932 offset = payload_index & ~PAGE_MASK;
2933 next_page_index = (page + 1) << PAGE_SHIFT;
2934 length =
2935 min(next_page_index, payload_end_index) - payload_index;
2936 pd[i].req_count = cpu_to_le16(length);
9aad8125
KH
2937
2938 page_bus = page_private(buffer->pages[page]);
2939 pd[i].data_address = cpu_to_le32(page_bus + offset);
ed568912
KH
2940
2941 payload_index += length;
2942 }
2943
ed568912 2944 if (p->interrupt)
a77754a7 2945 irq = DESCRIPTOR_IRQ_ALWAYS;
ed568912 2946 else
a77754a7 2947 irq = DESCRIPTOR_NO_IRQ;
ed568912 2948
30200739 2949 last = z == 2 ? d : d + z - 1;
a77754a7
KH
2950 last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
2951 DESCRIPTOR_STATUS |
2952 DESCRIPTOR_BRANCH_ALWAYS |
cbb59da7 2953 irq);
ed568912 2954
30200739 2955 context_append(&ctx->context, d, z, header_z);
ed568912
KH
2956
2957 return 0;
2958}
373b2edd 2959
872e330e
SR
2960static int queue_iso_packet_per_buffer(struct iso_context *ctx,
2961 struct fw_iso_packet *packet,
2962 struct fw_iso_buffer *buffer,
2963 unsigned long payload)
a186b4a6 2964{
8c0c0cc2 2965 struct descriptor *d, *pd;
a186b4a6
JW
2966 dma_addr_t d_bus, page_bus;
2967 u32 z, header_z, rest;
bcee893c
DM
2968 int i, j, length;
2969 int page, offset, packet_count, header_size, payload_per_buffer;
a186b4a6
JW
2970
2971 /*
1aa292bb
DM
2972 * The OHCI controller puts the isochronous header and trailer in the
2973 * buffer, so we need at least 8 bytes.
a186b4a6 2974 */
872e330e 2975 packet_count = packet->header_length / ctx->base.header_size;
1aa292bb 2976 header_size = max(ctx->base.header_size, (size_t)8);
a186b4a6
JW
2977
2978 /* Get header size in number of descriptors. */
2979 header_z = DIV_ROUND_UP(header_size, sizeof(*d));
2980 page = payload >> PAGE_SHIFT;
2981 offset = payload & ~PAGE_MASK;
872e330e 2982 payload_per_buffer = packet->payload_length / packet_count;
a186b4a6
JW
2983
2984 for (i = 0; i < packet_count; i++) {
2985 /* d points to the header descriptor */
bcee893c 2986 z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
a186b4a6 2987 d = context_get_descriptors(&ctx->context,
bcee893c 2988 z + header_z, &d_bus);
a186b4a6
JW
2989 if (d == NULL)
2990 return -ENOMEM;
2991
bcee893c
DM
2992 d->control = cpu_to_le16(DESCRIPTOR_STATUS |
2993 DESCRIPTOR_INPUT_MORE);
872e330e 2994 if (packet->skip && i == 0)
bcee893c 2995 d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
a186b4a6
JW
2996 d->req_count = cpu_to_le16(header_size);
2997 d->res_count = d->req_count;
bcee893c 2998 d->transfer_status = 0;
a186b4a6
JW
2999 d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));
3000
bcee893c 3001 rest = payload_per_buffer;
8c0c0cc2 3002 pd = d;
bcee893c 3003 for (j = 1; j < z; j++) {
8c0c0cc2 3004 pd++;
bcee893c
DM
3005 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3006 DESCRIPTOR_INPUT_MORE);
3007
3008 if (offset + rest < PAGE_SIZE)
3009 length = rest;
3010 else
3011 length = PAGE_SIZE - offset;
3012 pd->req_count = cpu_to_le16(length);
3013 pd->res_count = pd->req_count;
3014 pd->transfer_status = 0;
3015
3016 page_bus = page_private(buffer->pages[page]);
3017 pd->data_address = cpu_to_le32(page_bus + offset);
3018
3019 offset = (offset + length) & ~PAGE_MASK;
3020 rest -= length;
3021 if (offset == 0)
3022 page++;
3023 }
a186b4a6
JW
3024 pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
3025 DESCRIPTOR_INPUT_LAST |
3026 DESCRIPTOR_BRANCH_ALWAYS);
872e330e 3027 if (packet->interrupt && i == packet_count - 1)
a186b4a6
JW
3028 pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
3029
a186b4a6
JW
3030 context_append(&ctx->context, d, z, header_z);
3031 }
3032
3033 return 0;
3034}
3035
872e330e
SR
3036static int queue_iso_buffer_fill(struct iso_context *ctx,
3037 struct fw_iso_packet *packet,
3038 struct fw_iso_buffer *buffer,
3039 unsigned long payload)
3040{
3041 struct descriptor *d;
3042 dma_addr_t d_bus, page_bus;
3043 int page, offset, rest, z, i, length;
3044
3045 page = payload >> PAGE_SHIFT;
3046 offset = payload & ~PAGE_MASK;
3047 rest = packet->payload_length;
3048
3049 /* We need one descriptor for each page in the buffer. */
3050 z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);
3051
3052 if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
3053 return -EFAULT;
3054
3055 for (i = 0; i < z; i++) {
3056 d = context_get_descriptors(&ctx->context, 1, &d_bus);
3057 if (d == NULL)
3058 return -ENOMEM;
3059
3060 d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
3061 DESCRIPTOR_BRANCH_ALWAYS);
3062 if (packet->skip && i == 0)
3063 d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
3064 if (packet->interrupt && i == z - 1)
3065 d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
3066
3067 if (offset + rest < PAGE_SIZE)
3068 length = rest;
3069 else
3070 length = PAGE_SIZE - offset;
3071 d->req_count = cpu_to_le16(length);
3072 d->res_count = d->req_count;
3073 d->transfer_status = 0;
3074
3075 page_bus = page_private(buffer->pages[page]);
3076 d->data_address = cpu_to_le32(page_bus + offset);
3077
3078 rest -= length;
3079 offset = 0;
3080 page++;
3081
3082 context_append(&ctx->context, d, 1, 0);
3083 }
3084
3085 return 0;
3086}
3087
53dca511
SR
3088static int ohci_queue_iso(struct fw_iso_context *base,
3089 struct fw_iso_packet *packet,
3090 struct fw_iso_buffer *buffer,
3091 unsigned long payload)
295e3feb 3092{
e364cf4e 3093 struct iso_context *ctx = container_of(base, struct iso_context, base);
fe5ca634 3094 unsigned long flags;
872e330e 3095 int ret = -ENOSYS;
e364cf4e 3096
fe5ca634 3097 spin_lock_irqsave(&ctx->context.ohci->lock, flags);
872e330e
SR
3098 switch (base->type) {
3099 case FW_ISO_CONTEXT_TRANSMIT:
3100 ret = queue_iso_transmit(ctx, packet, buffer, payload);
3101 break;
3102 case FW_ISO_CONTEXT_RECEIVE:
3103 ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
3104 break;
3105 case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
3106 ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
3107 break;
3108 }
fe5ca634
DM
3109 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
3110
2dbd7d7e 3111 return ret;
295e3feb
KH
3112}
3113
21ebcd12 3114static const struct fw_card_driver ohci_driver = {
ed568912 3115 .enable = ohci_enable,
02d37bed 3116 .read_phy_reg = ohci_read_phy_reg,
ed568912
KH
3117 .update_phy_reg = ohci_update_phy_reg,
3118 .set_config_rom = ohci_set_config_rom,
3119 .send_request = ohci_send_request,
3120 .send_response = ohci_send_response,
730c32f5 3121 .cancel_packet = ohci_cancel_packet,
ed568912 3122 .enable_phys_dma = ohci_enable_phys_dma,
0fcff4e3
SR
3123 .read_csr = ohci_read_csr,
3124 .write_csr = ohci_write_csr,
ed568912
KH
3125
3126 .allocate_iso_context = ohci_allocate_iso_context,
3127 .free_iso_context = ohci_free_iso_context,
872e330e 3128 .set_iso_channels = ohci_set_iso_channels,
ed568912 3129 .queue_iso = ohci_queue_iso,
69cdb726 3130 .start_iso = ohci_start_iso,
b8295668 3131 .stop_iso = ohci_stop_iso,
ed568912
KH
3132};
3133
ea8d006b 3134#ifdef CONFIG_PPC_PMAC
5da3dac8 3135static void pmac_ohci_on(struct pci_dev *dev)
2ed0f181 3136{
ea8d006b
SR
3137 if (machine_is(powermac)) {
3138 struct device_node *ofn = pci_device_to_OF_node(dev);
3139
3140 if (ofn) {
3141 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
3142 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
3143 }
3144 }
2ed0f181
SR
3145}
3146
5da3dac8 3147static void pmac_ohci_off(struct pci_dev *dev)
2ed0f181
SR
3148{
3149 if (machine_is(powermac)) {
3150 struct device_node *ofn = pci_device_to_OF_node(dev);
3151
3152 if (ofn) {
3153 pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
3154 pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
3155 }
3156 }
3157}
3158#else
5da3dac8
SR
3159static inline void pmac_ohci_on(struct pci_dev *dev) {}
3160static inline void pmac_ohci_off(struct pci_dev *dev) {}
ea8d006b
SR
3161#endif /* CONFIG_PPC_PMAC */
3162
53dca511
SR
3163static int __devinit pci_probe(struct pci_dev *dev,
3164 const struct pci_device_id *ent)
2ed0f181
SR
3165{
3166 struct fw_ohci *ohci;
aa0170ff 3167 u32 bus_options, max_receive, link_speed, version;
2ed0f181 3168 u64 guid;
dd23736e 3169 int i, err;
2ed0f181
SR
3170 size_t size;
3171
2d826cc5 3172 ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
ed568912 3173 if (ohci == NULL) {
7007a076
SR
3174 err = -ENOMEM;
3175 goto fail;
ed568912
KH
3176 }
3177
3178 fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);
3179
5da3dac8 3180 pmac_ohci_on(dev);
130d5496 3181
d79406dd
KH
3182 err = pci_enable_device(dev);
3183 if (err) {
7007a076 3184 fw_error("Failed to enable OHCI hardware\n");
bd7dee63 3185 goto fail_free;
ed568912
KH
3186 }
3187
3188 pci_set_master(dev);
3189 pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
3190 pci_set_drvdata(dev, ohci);
3191
3192 spin_lock_init(&ohci->lock);
02d37bed 3193 mutex_init(&ohci->phy_reg_mutex);
ed568912
KH
3194
3195 tasklet_init(&ohci->bus_reset_tasklet,
3196 bus_reset_tasklet, (unsigned long)ohci);
3197
d79406dd
KH
3198 err = pci_request_region(dev, 0, ohci_driver_name);
3199 if (err) {
ed568912 3200 fw_error("MMIO resource unavailable\n");
d79406dd 3201 goto fail_disable;
ed568912
KH
3202 }
3203
3204 ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
3205 if (ohci->registers == NULL) {
3206 fw_error("Failed to remap registers\n");
d79406dd
KH
3207 err = -ENXIO;
3208 goto fail_iomem;
ed568912
KH
3209 }
3210
4a635593 3211 for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
9993e0fe
SR
3212 if ((ohci_quirks[i].vendor == dev->vendor) &&
3213 (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
3214 ohci_quirks[i].device == dev->device) &&
3215 (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
3216 ohci_quirks[i].revision >= dev->revision)) {
4a635593
SR
3217 ohci->quirks = ohci_quirks[i].flags;
3218 break;
3219 }
3e9cc2f3
SR
3220 if (param_quirks)
3221 ohci->quirks = param_quirks;
b677532b 3222
ec766a79
CL
3223 /*
3224 * Because dma_alloc_coherent() allocates at least one page,
3225 * we save space by using a common buffer for the AR request/
3226 * response descriptors and the self IDs buffer.
3227 */
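 /*
 * Editor's sketch of the resulting single-page layout (assumed from the
 * BUILD_BUG_ON()s below and the offsets passed to ar_context_init() and
 * used for self_id_cpu/self_id_bus further down):
 *
 *   [0           .. PAGE_SIZE/4 - 1]  AR request context descriptors
 *   [PAGE_SIZE/4 .. PAGE_SIZE/2 - 1]  AR response context descriptors
 *   [PAGE_SIZE/2 .. PAGE_SIZE   - 1]  self ID receive buffer
 */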
3228 BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
3229 BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
3230 ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
3231 PAGE_SIZE,
3232 &ohci->misc_buffer_bus,
3233 GFP_KERNEL);
3234 if (!ohci->misc_buffer) {
3235 err = -ENOMEM;
3236 goto fail_iounmap;
3237 }
3238
3239 err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
7a39d8b8
CL
3240 OHCI1394_AsReqRcvContextControlSet);
3241 if (err < 0)
ec766a79 3242 goto fail_misc_buf;
ed568912 3243
ec766a79 3244 err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
7a39d8b8
CL
3245 OHCI1394_AsRspRcvContextControlSet);
3246 if (err < 0)
3247 goto fail_arreq_ctx;
ed568912 3248
c088ab30
CL
3249 err = context_init(&ohci->at_request_ctx, ohci,
3250 OHCI1394_AsReqTrContextControlSet, handle_at_packet);
3251 if (err < 0)
3252 goto fail_arrsp_ctx;
ed568912 3253
c088ab30
CL
3254 err = context_init(&ohci->at_response_ctx, ohci,
3255 OHCI1394_AsRspTrContextControlSet, handle_at_packet);
3256 if (err < 0)
3257 goto fail_atreq_ctx;
ed568912 3258
ed568912 3259 reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
4802f16d 3260 ohci->ir_context_channels = ~0ULL;
f117a3e3 3261 ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
ed568912 3262 reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
f117a3e3 3263 ohci->ir_context_mask = ohci->ir_context_support;
dd23736e
ML
3264 ohci->n_ir = hweight32(ohci->ir_context_mask);
3265 size = sizeof(struct iso_context) * ohci->n_ir;
4802f16d 3266 ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
ed568912
KH
3267
3268 reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
f117a3e3 3269 ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
ed568912 3270 reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
f117a3e3 3271 ohci->it_context_mask = ohci->it_context_support;
dd23736e
ML
3272 ohci->n_it = hweight32(ohci->it_context_mask);
3273 size = sizeof(struct iso_context) * ohci->n_it;
4802f16d 3274 ohci->it_context_list = kzalloc(size, GFP_KERNEL);
ed568912
KH
3275
3276 if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
d79406dd 3277 err = -ENOMEM;
7007a076 3278 goto fail_contexts;
ed568912
KH
3279 }
3280
ec766a79
CL
3281 ohci->self_id_cpu = ohci->misc_buffer + PAGE_SIZE/2;
3282 ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;
ed568912 3283
ed568912
KH
3284 bus_options = reg_read(ohci, OHCI1394_BusOptions);
3285 max_receive = (bus_options >> 12) & 0xf;
3286 link_speed = bus_options & 0x7;
3287 guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
3288 reg_read(ohci, OHCI1394_GUIDLo);
3289
d79406dd 3290 err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
e1eff7a3 3291 if (err)
ec766a79 3292 goto fail_contexts;
ed568912 3293
6fdb2ee2
SR
3294 version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
3295 fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
3296 "%d IR + %d IT contexts, quirks 0x%x\n",
3297 dev_name(&dev->dev), version >> 16, version & 0xff,
dd23736e 3298 ohci->n_ir, ohci->n_it, ohci->quirks);
e1eff7a3 3299
ed568912 3300 return 0;
d79406dd 3301
7007a076 3302 fail_contexts:
d79406dd 3303 kfree(ohci->ir_context_list);
7007a076
SR
3304 kfree(ohci->it_context_list);
3305 context_release(&ohci->at_response_ctx);
c088ab30 3306 fail_atreq_ctx:
7007a076 3307 context_release(&ohci->at_request_ctx);
c088ab30 3308 fail_arrsp_ctx:
7007a076 3309 ar_context_release(&ohci->ar_response_ctx);
7a39d8b8 3310 fail_arreq_ctx:
7007a076 3311 ar_context_release(&ohci->ar_request_ctx);
ec766a79
CL
3312 fail_misc_buf:
3313 dma_free_coherent(ohci->card.device, PAGE_SIZE,
3314 ohci->misc_buffer, ohci->misc_buffer_bus);
7a39d8b8 3315 fail_iounmap:
d79406dd
KH
3316 pci_iounmap(dev, ohci->registers);
3317 fail_iomem:
3318 pci_release_region(dev, 0);
3319 fail_disable:
3320 pci_disable_device(dev);
bd7dee63 3321 fail_free:
d838d2c0 3322 kfree(ohci);
5da3dac8 3323 pmac_ohci_off(dev);
7007a076
SR
3324 fail:
3325 if (err == -ENOMEM)
3326 fw_error("Out of memory\n");
d79406dd
KH
3327
3328 return err;
ed568912
KH
3329}
3330
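/*
 * Teardown mirrors pci_probe() in reverse: interrupts are masked and the
 * card is removed from the core first, so that the upper layers can no
 * longer queue work while the DMA contexts and buffers are released below.
 */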
3331static void pci_remove(struct pci_dev *dev)
3332{
3333 struct fw_ohci *ohci;
3334
3335 ohci = pci_get_drvdata(dev);
e254a4b4
KH
3336 reg_write(ohci, OHCI1394_IntMaskClear, ~0);
3337 flush_writes(ohci);
ed568912
KH
3338 fw_core_remove_card(&ohci->card);
3339
c781c06d
KH
3340 /*
3341 * FIXME: Fail all pending packets here, now that the upper
3342 * layers can't queue any more.
3343 */
ed568912
KH
3344
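	/* Quiesce the controller and release its IRQ before any of the
	 * DMA-mapped buffers below are freed. */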
3345 software_reset(ohci);
3346 free_irq(dev->irq, ohci);
a55709ba
JF
3347
3348 if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
3349 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
3350 ohci->next_config_rom, ohci->next_config_rom_bus);
3351 if (ohci->config_rom)
3352 dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
3353 ohci->config_rom, ohci->config_rom_bus);
a55709ba
JF
3354 ar_context_release(&ohci->ar_request_ctx);
3355 ar_context_release(&ohci->ar_response_ctx);
ec766a79
CL
3356 dma_free_coherent(ohci->card.device, PAGE_SIZE,
3357 ohci->misc_buffer, ohci->misc_buffer_bus);
a55709ba
JF
3358 context_release(&ohci->at_request_ctx);
3359 context_release(&ohci->at_response_ctx);
d79406dd
KH
3360 kfree(ohci->it_context_list);
3361 kfree(ohci->ir_context_list);
262444ee 3362 pci_disable_msi(dev);
d79406dd
KH
3363 pci_iounmap(dev, ohci->registers);
3364 pci_release_region(dev, 0);
3365 pci_disable_device(dev);
d838d2c0 3366 kfree(ohci);
5da3dac8 3367 pmac_ohci_off(dev);
ea8d006b 3368
ed568912
KH
3369 fw_notify("Removed fw-ohci device.\n");
3370}
3371
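/*
 * Legacy pci_driver suspend/resume hooks (this driver predates dev_pm_ops):
 * suspend resets the controller, releases the IRQ and saves PCI state;
 * resume restores PCI state and re-initializes the controller through
 * ohci_enable(), then restarts isochronous DMA.
 */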
2aef469a 3372#ifdef CONFIG_PM
2ed0f181 3373static int pci_suspend(struct pci_dev *dev, pm_message_t state)
2aef469a 3374{
2ed0f181 3375 struct fw_ohci *ohci = pci_get_drvdata(dev);
2aef469a
KH
3376 int err;
3377
3378 software_reset(ohci);
2ed0f181 3379 free_irq(dev->irq, ohci);
262444ee 3380 pci_disable_msi(dev);
2ed0f181 3381 err = pci_save_state(dev);
2aef469a 3382 if (err) {
8a8cea27 3383 fw_error("pci_save_state failed\n");
2aef469a
KH
3384 return err;
3385 }
2ed0f181 3386 err = pci_set_power_state(dev, pci_choose_state(dev, state));
55111428
SR
3387 if (err)
3388 fw_error("pci_set_power_state failed with %d\n", err);
5da3dac8 3389 pmac_ohci_off(dev);
ea8d006b 3390
2aef469a
KH
3391 return 0;
3392}
3393
2ed0f181 3394static int pci_resume(struct pci_dev *dev)
2aef469a 3395{
2ed0f181 3396 struct fw_ohci *ohci = pci_get_drvdata(dev);
2aef469a
KH
3397 int err;
3398
5da3dac8 3399 pmac_ohci_on(dev);
2ed0f181
SR
3400 pci_set_power_state(dev, PCI_D0);
3401 pci_restore_state(dev);
3402 err = pci_enable_device(dev);
2aef469a 3403 if (err) {
8a8cea27 3404 fw_error("pci_enable_device failed\n");
2aef469a
KH
3405 return err;
3406 }
3407
8662b6b0
ML
3408	/* Some systems don't set up the GUID registers on resume from RAM */
3409 if (!reg_read(ohci, OHCI1394_GUIDLo) &&
3410 !reg_read(ohci, OHCI1394_GUIDHi)) {
3411 reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
3412 reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
3413 }
3414
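	/* Bring the controller back up from scratch; ohci_resume_iso_dma()
	 * then restarts the isochronous contexts that were running before
	 * suspend. */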
dd23736e 3415 err = ohci_enable(&ohci->card, NULL, 0);
dd23736e
ML
3416 if (err)
3417 return err;
3418
3419 ohci_resume_iso_dma(ohci);
693a50b5 3420
dd23736e 3421 return 0;
2aef469a
KH
3422}
3423#endif
3424
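/*
 * A single class-based match entry: any PCI device advertising the FireWire
 * OHCI programming interface is claimed, so no per-vendor or per-device ID
 * list is needed.  MODULE_DEVICE_TABLE() exports the table so that userspace
 * module loading can bind the driver automatically.
 */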
a67483d2 3425static const struct pci_device_id pci_table[] = {
ed568912
KH
3426 { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
3427 { }
3428};
3429
3430MODULE_DEVICE_TABLE(pci, pci_table);
3431
3432static struct pci_driver fw_ohci_pci_driver = {
3433 .name = ohci_driver_name,
3434 .id_table = pci_table,
3435 .probe = pci_probe,
3436 .remove = pci_remove,
2aef469a
KH
3437#ifdef CONFIG_PM
3438 .resume = pci_resume,
3439 .suspend = pci_suspend,
3440#endif
ed568912
KH
3441};
3442
3443MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
3444MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
3445MODULE_LICENSE("GPL");
3446
1e4c7b0d
OH
3447/* Provide a module alias so root-on-sbp2 initrds don't break. */
3448#ifndef CONFIG_IEEE1394_OHCI1394_MODULE
3449MODULE_ALIAS("ohci1394");
3450#endif
3451
ed568912
KH
3452static int __init fw_ohci_init(void)
3453{
3454 return pci_register_driver(&fw_ohci_pci_driver);
3455}
3456
3457static void __exit fw_ohci_cleanup(void)
3458{
3459 pci_unregister_driver(&fw_ohci_pci_driver);
3460}
3461
3462module_init(fw_ohci_init);
3463module_exit(fw_ohci_cleanup);
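
/*
 * Note: on kernels that provide the module_pci_driver() helper macro
 * (assumed to exist only on newer kernels; it is not used here), the
 * init/exit boilerplate above could be collapsed into a single line:
 *
 *	module_pci_driver(fw_ohci_pci_driver);
 */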