/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * The NHI (native host interface) is the PCI device that allows us to send
 * and receive frames from the thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dmi.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"

#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

/*
 * Minimal number of vectors when we use MSI-X. Two for the control channel
 * Rx/Tx and the remaining four for cross-domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

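/*
 * ring_interrupt_index() - interrupt bit used by a ring
 *
 * Each ring owns one bit in the interrupt mask/status registers: TX rings
 * occupy bits 0..hop_count-1, RX rings the following hop_count bits.
 */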
static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}

/**
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		/*
		 * Ask the hardware to clear interrupt status bits automatically
		 * since we already know which interrupt was triggered.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		}

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_info(&ring->nhi->pdev->dev,
		 "%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		 active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}

/**
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}

/* ring helper methods */

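/*
 * Each ring has a 16-byte descriptor-register block and a 32-byte options
 * block in the NHI MMIO space, indexed by hop id (hence the hop * 16 and
 * hop * 32 offsets below).
 */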
static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}

static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite16(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}

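/*
 * The ring is full when head is one slot behind tail: one descriptor always
 * stays unused so that a full ring can be told apart from an empty one.
 */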
static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}

/**
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8);
	}
}

/**
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	LIST_HEAD(done);
	mutex_lock(&ring->lock);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
			if (frame->sof != 0)
				dev_WARN(&ring->nhi->pdev->dev,
					 "%s %d got unexpected SOF: %#x\n",
					 RING_TYPE(ring), ring->hop,
					 frame->sof);
			/*
			 * known flags:
			 * raw not enabled, interrupt not set: 0x2=0010
			 * raw enabled: 0xa=1010
			 * raw not enabled: 0xb=1011
			 * partial frame (>MAX_FRAME_SIZE): 0xe=1110
			 */
			if (frame->flags != 0xa)
				dev_WARN(&ring->nhi->pdev->dev,
					 "%s %d got unexpected flags: %#x\n",
					 RING_TYPE(ring), ring->hop,
					 frame->flags);
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		frame->callback(ring, frame, canceled);
	}
}

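/*
 * __ring_enqueue() - queue a frame and post it to the hardware
 *
 * The caller must have filled in frame->buffer_phy (and, for TX rings,
 * size/eof/sof) as well as frame->callback. Returns -ESHUTDOWN if the ring
 * is not running; otherwise the callback is invoked from ring->work once
 * the frame completes or is canceled.
 */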
int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	int ret = 0;
	mutex_lock(&ring->lock);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	mutex_unlock(&ring->lock);
	return ret;
}

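/*
 * ring_msix() - interrupt handler when each ring has its own MSI-X vector
 *
 * Ring state is protected by a mutex, so all real work is deferred to
 * ring->work.
 */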
static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	schedule_work(&ring->work);
	return IRQ_HANDLED;
}

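/*
 * ring_request_msix() - reserve an MSI-X vector for the ring
 *
 * No-op when the controller only provides a single MSI; in that case all
 * rings are serviced through nhi_msi()/nhi_interrupt_work() instead.
 */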
static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ring->irq < 0)
		return ring->irq;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
}

static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}

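/*
 * ring_alloc() - allocate a TX or RX ring for the given hop id
 *
 * Allocates the descriptor buffer, reserves an MSI-X vector if available and
 * registers the ring in nhi->tx_rings/rx_rings. Returns NULL on failure (hop
 * out of range, hop already in use, or an allocation failed).
 */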
static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				  bool transmit, unsigned int flags)
{
	struct tb_ring *ring = NULL;
	dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		 transmit ? "TX" : "RX", hop, size);

	mutex_lock(&nhi->lock);
	if (hop >= nhi->hop_count) {
		dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
		goto err;
	}
	if (transmit && nhi->tx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
		goto err;
	} else if (!transmit && nhi->rx_rings[hop]) {
		dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
		goto err;
	}
	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto err;

	mutex_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err;

	if (transmit)
		nhi->tx_rings[hop] = ring;
	else
		nhi->rx_rings[hop] = ring;
	mutex_unlock(&nhi->lock);
	return ring;

err:
	if (ring)
		mutex_destroy(&ring->lock);
	kfree(ring);
	mutex_unlock(&nhi->lock);
	return NULL;
}

struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags)
{
	return ring_alloc(nhi, hop, size, true, flags);
}

struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
			      unsigned int flags)
{
	return ring_alloc(nhi, hop, size, false, flags);
}

/**
 * ring_start() - enable a ring
 *
 * Must not be invoked in parallel with ring_stop().
 */
void ring_start(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_info(&ring->nhi->pdev->dev, "starting %s %d\n",
		 RING_TYPE(ring), ring->hop);

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring,
				      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
	} else {
		ring_iowrite32desc(ring,
				   (TB_FRAME_SIZE << 16) | ring->size, 12);
		ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */
		ring_iowrite32options(ring,
				      RING_FLAG_ENABLE | RING_FLAG_RAW, 0);
	}
	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);
}

/**
 * ring_stop() - shutdown a ring
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to ring_tx/ring_rx will
 * return -ESHUTDOWN until ring_start() is called again.
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void ring_stop(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	mutex_lock(&ring->lock);
	dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n",
		 RING_TYPE(ring), ring->hop);
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	mutex_unlock(&ring->lock);
	mutex_unlock(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}

/*
 * ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void ring_free(struct tb_ring *ring)
{
	mutex_lock(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_info(&ring->nhi->pdev->dev, "freeing %s %d\n",
		 RING_TYPE(ring), ring->hop);

	mutex_unlock(&ring->nhi->lock);
	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	mutex_destroy(&ring->lock);
	kfree(ring);
}

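/*
 * nhi_interrupt_work() - bottom half for the single-MSI case
 *
 * Scans the TX, RX and RX-overflow status bitfields and schedules ring->work
 * for every ring with a pending bit. Not used when each ring has its own
 * MSI-X vector.
 */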
static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	mutex_lock(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n", hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX", hop);
			continue;
		}
		/* we do not check ring->running, this is done in ring->work */
		schedule_work(&ring->work);
	}
	mutex_unlock(&nhi->lock);
}

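/*
 * nhi_msi() - top half for the single-MSI case, defers to nhi_interrupt_work
 */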
static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}

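/*
 * System sleep: the noirq callbacks simply hand over to the connection
 * manager via tb_domain_suspend_noirq()/tb_domain_resume_noirq().
 */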
static int nhi_suspend_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend_noirq(tb);
}

static int nhi_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_resume_noirq(tb);
}

static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;
	dev_info(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	}
	mutex_destroy(&nhi->lock);
	ida_destroy(&nhi->msix_ida);
}

static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fall back to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res) {
			dev_err(&pdev->dev, "request_irq failed, aborting\n");
			return res;
		}
	}

	return 0;
}

static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	/* cannot fail - table is allocated by pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	if (nhi->hop_count != 12 && nhi->hop_count != 32)
		dev_warn(&pdev->dev, "unexpected hop count: %d\n",
			 nhi->hop_count);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	res = nhi_init_msi(nhi);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
		return res;
	}

	mutex_init(&nhi->lock);

	pci_set_master(pdev);

	/* magic value - clock related? */
	iowrite32(3906250 / 10000, nhi->iobase + 0x38c00);

	dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");
	tb = tb_probe(nhi);
	if (!tb)
		return -ENODEV;

	res = tb_domain_add(tb);
	if (res) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		tb_domain_put(tb);
		nhi_shutdown(nhi);
		return -EIO;
	}
	pci_set_drvdata(pdev, tb);

	return 0;
}

static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;

	tb_domain_remove(tb);
	nhi_shutdown(nhi);
}

/*
 * The tunneled PCI bridges are siblings of ours. Use resume_noirq to reenable
 * the tunnels as early as possible. A corresponding PCI quirk blocks the
 * downstream bridges' resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_suspend_noirq, /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
	.restore_noirq = nhi_resume_noirq,
};

static struct pci_device_id nhi_ids[] = {
	/*
	 * We have to specify class, the TB bridges use the same device and
	 * vendor (sub)id on gen 1 and gen 2 controllers.
	 */
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");

static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
	.driver.pm = &nhi_pm_ops,
};

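/*
 * This native connection manager is only used on Apple hardware, hence the
 * DMI board vendor check below before anything is registered.
 */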
static int __init nhi_init(void)
{
	int ret;

	if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc."))
		return -ENOSYS;
	ret = tb_domain_init();
	if (ret)
		return ret;
	ret = pci_register_driver(&nhi_driver);
	if (ret)
		tb_domain_exit();
	return ret;
}

static void __exit nhi_unload(void)
{
	pci_unregister_driver(&nhi_driver);
	tb_domain_exit();
}

module_init(nhi_init);
module_exit(nhi_unload);