// SPDX-License-Identifier: GPL-2.0-only
/*
 * Thunderbolt driver - NHI driver
 *
 * The NHI (native host interface) is the PCI device that allows us to send and
 * receive frames from the thunderbolt bus.
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 * Copyright (C) 2018, Intel Corporation
 */

#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/property.h>
#include <linux/platform_data/x86/apple.h>

#include "nhi.h"
#include "nhi_regs.h"
#include "tb.h"
#define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring")

#define RING_FIRST_USABLE_HOPID	1

/*
 * Minimal number of vectors when we use MSI-X. Two are for the control
 * channel Rx/Tx and the remaining four are for cross domain DMA paths.
 */
#define MSIX_MIN_VECS		6
#define MSIX_MAX_VECS		16

#define NHI_MAILBOX_TIMEOUT	500 /* ms */
static int ring_interrupt_index(struct tb_ring *ring)
{
	int bit = ring->hop;
	if (!ring->is_tx)
		bit += ring->nhi->hop_count;
	return bit;
}
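/*
 * Illustrative note (not in the original source): the interrupt status
 * bits are laid out TX rings first, then RX rings. With hop_count == 12,
 * TX ring 5 uses bit 5 while RX ring 5 uses bit 5 + 12 == 17, which
 * ring_interrupt_active() below turns into a register offset and bit
 * position as reg = base + 17 / 32 * 4 and bit = 17 & 31.
 */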
/*
 * ring_interrupt_active() - activate/deactivate interrupts for a single ring
 *
 * ring->nhi->lock must be held.
 */
static void ring_interrupt_active(struct tb_ring *ring, bool active)
{
	int reg = REG_RING_INTERRUPT_BASE +
		  ring_interrupt_index(ring) / 32 * 4;
	int bit = ring_interrupt_index(ring) & 31;
	int mask = 1 << bit;
	u32 old, new;

	if (ring->irq > 0) {
		u32 step, shift, ivr, misc;
		void __iomem *ivr_base;
		int index;

		if (ring->is_tx)
			index = ring->hop;
		else
			index = ring->hop + ring->nhi->hop_count;

		/*
		 * Ask the hardware to clear interrupt status bits automatically
		 * since we already know which interrupt was triggered.
		 */
		misc = ioread32(ring->nhi->iobase + REG_DMA_MISC);
		if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) {
			misc |= REG_DMA_MISC_INT_AUTO_CLEAR;
			iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC);
		}

		ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE;
		step = index / REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		shift = index % REG_INT_VEC_ALLOC_REGS * REG_INT_VEC_ALLOC_BITS;
		ivr = ioread32(ivr_base + step);
		ivr &= ~(REG_INT_VEC_ALLOC_MASK << shift);
		if (active)
			ivr |= ring->vector << shift;
		iowrite32(ivr, ivr_base + step);
	}

	old = ioread32(ring->nhi->iobase + reg);
	if (active)
		new = old | mask;
	else
		new = old & ~mask;

	dev_dbg(&ring->nhi->pdev->dev,
		"%s interrupt at register %#x bit %d (%#x -> %#x)\n",
		active ? "enabling" : "disabling", reg, bit, old, new);

	if (new == old)
		dev_WARN(&ring->nhi->pdev->dev,
			 "interrupt for %s %d is already %s\n",
			 RING_TYPE(ring), ring->hop,
			 active ? "enabled" : "disabled");
	iowrite32(new, ring->nhi->iobase + reg);
}
/*
 * nhi_disable_interrupts() - disable interrupts for all rings
 *
 * Use only during init and shutdown.
 */
static void nhi_disable_interrupts(struct tb_nhi *nhi)
{
	int i = 0;
	/* disable interrupts */
	for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++)
		iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i);

	/* clear interrupt status bits */
	for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++)
		ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i);
}
/* ring helper methods */

static void __iomem *ring_desc_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE;
	io += ring->hop * 16;
	return io;
}

static void __iomem *ring_options_base(struct tb_ring *ring)
{
	void __iomem *io = ring->nhi->iobase;
	io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE;
	io += ring->hop * 32;
	return io;
}
static void ring_iowrite_cons(struct tb_ring *ring, u16 cons)
{
	/*
	 * The other 16 bits in the register are read-only and writes to them
	 * are ignored by the hardware so we can save one ioread32() by
	 * filling the read-only bits with zeroes.
	 */
	iowrite32(cons, ring_desc_base(ring) + 8);
}

static void ring_iowrite_prod(struct tb_ring *ring, u16 prod)
{
	/* See ring_iowrite_cons() above for explanation */
	iowrite32(prod << 16, ring_desc_base(ring) + 8);
}

static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
}

static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset)
{
	iowrite32(value, ring_desc_base(ring) + offset);
	iowrite32(value >> 32, ring_desc_base(ring) + offset + 4);
}

static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset)
{
	iowrite32(value, ring_options_base(ring) + offset);
}
static bool ring_full(struct tb_ring *ring)
{
	return ((ring->head + 1) % ring->size) == ring->tail;
}

static bool ring_empty(struct tb_ring *ring)
{
	return ring->head == ring->tail;
}
/*
 * ring_write_descriptors() - post frames from ring->queue to the controller
 *
 * ring->lock is held.
 */
static void ring_write_descriptors(struct tb_ring *ring)
{
	struct ring_frame *frame, *n;
	struct ring_desc *descriptor;
	list_for_each_entry_safe(frame, n, &ring->queue, list) {
		if (ring_full(ring))
			break;
		list_move_tail(&frame->list, &ring->in_flight);
		descriptor = &ring->descriptors[ring->head];
		descriptor->phys = frame->buffer_phy;
		descriptor->time = 0;
		descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT;
		if (ring->is_tx) {
			descriptor->length = frame->size;
			descriptor->eof = frame->eof;
			descriptor->sof = frame->sof;
		}
		ring->head = (ring->head + 1) % ring->size;
		if (ring->is_tx)
			ring_iowrite_prod(ring, ring->head);
		else
			ring_iowrite_cons(ring, ring->head);
	}
}
/*
 * ring_work() - progress completed frames
 *
 * If the ring is shutting down then all frames are marked as canceled and
 * their callbacks are invoked.
 *
 * Otherwise we collect all completed frames from the ring buffer, write new
 * frames to the ring buffer and invoke the callbacks for the completed frames.
 */
static void ring_work(struct work_struct *work)
{
	struct tb_ring *ring = container_of(work, typeof(*ring), work);
	struct ring_frame *frame;
	bool canceled = false;
	unsigned long flags;
	LIST_HEAD(done);

	spin_lock_irqsave(&ring->lock, flags);

	if (!ring->running) {
		/* Move all frames to done and mark them as canceled. */
		list_splice_tail_init(&ring->in_flight, &done);
		list_splice_tail_init(&ring->queue, &done);
		canceled = true;
		goto invoke_callback;
	}

	while (!ring_empty(ring)) {
		if (!(ring->descriptors[ring->tail].flags
				& RING_DESC_COMPLETED))
			break;
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_move_tail(&frame->list, &done);
		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}
		ring->tail = (ring->tail + 1) % ring->size;
	}
	ring_write_descriptors(ring);

invoke_callback:
	/* allow callbacks to schedule new work */
	spin_unlock_irqrestore(&ring->lock, flags);
	while (!list_empty(&done)) {
		frame = list_first_entry(&done, typeof(*frame), list);
		/*
		 * The callback may reenqueue or delete frame.
		 * Do not hold on to it.
		 */
		list_del_init(&frame->list);
		if (frame->callback)
			frame->callback(ring, frame, canceled);
	}
}
int __tb_ring_enqueue(struct tb_ring *ring, struct ring_frame *frame)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&ring->lock, flags);
	if (ring->running) {
		list_add_tail(&frame->list, &ring->queue);
		ring_write_descriptors(ring);
	} else {
		ret = -ESHUTDOWN;
	}
	spin_unlock_irqrestore(&ring->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(__tb_ring_enqueue);
/**
 * tb_ring_poll() - Poll one completed frame from the ring
 * @ring: Ring to poll
 *
 * This function can be called when @start_poll callback of the @ring
 * has been called. It will read one completed frame from the ring and
 * return it to the caller. Returns %NULL if there are no more completed
 * frames.
 */
struct ring_frame *tb_ring_poll(struct tb_ring *ring)
{
	struct ring_frame *frame = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ring->lock, flags);
	if (!ring->running)
		goto unlock;
	if (ring_empty(ring))
		goto unlock;

	if (ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED) {
		frame = list_first_entry(&ring->in_flight, typeof(*frame),
					 list);
		list_del_init(&frame->list);

		if (!ring->is_tx) {
			frame->size = ring->descriptors[ring->tail].length;
			frame->eof = ring->descriptors[ring->tail].eof;
			frame->sof = ring->descriptors[ring->tail].sof;
			frame->flags = ring->descriptors[ring->tail].flags;
		}

		ring->tail = (ring->tail + 1) % ring->size;
	}

unlock:
	spin_unlock_irqrestore(&ring->lock, flags);
	return frame;
}
EXPORT_SYMBOL_GPL(tb_ring_poll);
static void __ring_interrupt_mask(struct tb_ring *ring, bool mask)
{
	int idx = ring_interrupt_index(ring);
	int reg = REG_RING_INTERRUPT_BASE + idx / 32 * 4;
	int bit = idx % 32;
	u32 val;

	val = ioread32(ring->nhi->iobase + reg);
	if (mask)
		val &= ~BIT(bit);
	else
		val |= BIT(bit);
	iowrite32(val, ring->nhi->iobase + reg);
}
/* Both @nhi->lock and @ring->lock should be held */
static void __ring_interrupt(struct tb_ring *ring)
{
	if (!ring->running)
		return;

	if (ring->start_poll) {
		__ring_interrupt_mask(ring, true);
		ring->start_poll(ring->poll_data);
	} else {
		schedule_work(&ring->work);
	}
}
/**
 * tb_ring_poll_complete() - Re-start interrupt for the ring
 * @ring: Ring to re-start the interrupt
 *
 * This will re-start (unmask) the ring interrupt once the user is done
 * with polling.
 */
void tb_ring_poll_complete(struct tb_ring *ring)
{
	unsigned long flags;

	spin_lock_irqsave(&ring->nhi->lock, flags);
	spin_lock(&ring->lock);
	if (ring->start_poll)
		__ring_interrupt_mask(ring, false);
	spin_unlock(&ring->lock);
	spin_unlock_irqrestore(&ring->nhi->lock, flags);
}
EXPORT_SYMBOL_GPL(tb_ring_poll_complete);
static irqreturn_t ring_msix(int irq, void *data)
{
	struct tb_ring *ring = data;

	spin_lock(&ring->nhi->lock);
	spin_lock(&ring->lock);
	__ring_interrupt(ring);
	spin_unlock(&ring->lock);
	spin_unlock(&ring->nhi->lock);

	return IRQ_HANDLED;
}
static int ring_request_msix(struct tb_ring *ring, bool no_suspend)
{
	struct tb_nhi *nhi = ring->nhi;
	unsigned long irqflags;
	int ret;

	if (!nhi->pdev->msix_enabled)
		return 0;

	ret = ida_simple_get(&nhi->msix_ida, 0, MSIX_MAX_VECS, GFP_KERNEL);
	if (ret < 0)
		return ret;

	ring->vector = ret;

	ret = pci_irq_vector(ring->nhi->pdev, ring->vector);
	if (ret < 0)
		goto err_ida_remove;

	ring->irq = ret;

	irqflags = no_suspend ? IRQF_NO_SUSPEND : 0;
	ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring);
	if (ret)
		goto err_ida_remove;

	return 0;

err_ida_remove:
	ida_simple_remove(&nhi->msix_ida, ring->vector);

	return ret;
}
static void ring_release_msix(struct tb_ring *ring)
{
	if (ring->irq <= 0)
		return;

	free_irq(ring->irq, ring);
	ida_simple_remove(&ring->nhi->msix_ida, ring->vector);
	ring->vector = 0;
	ring->irq = 0;
}
static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
{
	int ret = 0;

	spin_lock_irq(&nhi->lock);

	if (ring->hop < 0) {
		unsigned int i;

		/*
		 * Automatically allocate HopID from the non-reserved
		 * range 1 .. hop_count - 1.
		 */
		for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
			if (ring->is_tx) {
				if (!nhi->tx_rings[i]) {
					ring->hop = i;
					break;
				}
			} else {
				if (!nhi->rx_rings[i]) {
					ring->hop = i;
					break;
				}
			}
		}
	}

	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
		ret = -EINVAL;
		goto err_unlock;
	}
	if (ring->is_tx && nhi->tx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	} else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
		dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
			 ring->hop);
		ret = -EBUSY;
		goto err_unlock;
	}

	if (ring->is_tx)
		nhi->tx_rings[ring->hop] = ring;
	else
		nhi->rx_rings[ring->hop] = ring;

err_unlock:
	spin_unlock_irq(&nhi->lock);

	return ret;
}
static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
				     bool transmit, unsigned int flags,
				     int e2e_tx_hop, u16 sof_mask, u16 eof_mask,
				     void (*start_poll)(void *),
				     void *poll_data)
{
	struct tb_ring *ring = NULL;

	dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n",
		transmit ? "TX" : "RX", hop, size);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return NULL;

	spin_lock_init(&ring->lock);
	INIT_LIST_HEAD(&ring->queue);
	INIT_LIST_HEAD(&ring->in_flight);
	INIT_WORK(&ring->work, ring_work);

	ring->nhi = nhi;
	ring->hop = hop;
	ring->is_tx = transmit;
	ring->size = size;
	ring->flags = flags;
	ring->e2e_tx_hop = e2e_tx_hop;
	ring->sof_mask = sof_mask;
	ring->eof_mask = eof_mask;
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;
	ring->start_poll = start_poll;
	ring->poll_data = poll_data;

	ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev,
			size * sizeof(*ring->descriptors),
			&ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO);
	if (!ring->descriptors)
		goto err_free_ring;

	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
		goto err_free_descs;

	if (nhi_alloc_hop(nhi, ring))
		goto err_release_msix;

	return ring;

err_release_msix:
	ring_release_msix(ring);
err_free_descs:
	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);
err_free_ring:
	kfree(ring);

	return NULL;
}
/**
 * tb_ring_alloc_tx() - Allocate DMA ring for transmit
 * @nhi: Pointer to the NHI the ring is to be allocated
 * @hop: HopID (ring) to allocate
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 */
struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags)
{
	return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
/**
 * tb_ring_alloc_rx() - Allocate DMA ring for receive
 * @nhi: Pointer to the NHI the ring is to be allocated
 * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
 * @size: Number of entries in the ring
 * @flags: Flags for the ring
 * @e2e_tx_hop: Transmit HopID when E2E is enabled in @flags
 * @sof_mask: Mask of PDF values that start a frame
 * @eof_mask: Mask of PDF values that end a frame
 * @start_poll: If not %NULL the ring will call this function when an
 *		interrupt is triggered and masked, instead of callback
 *		in each Rx frame.
 * @poll_data: Optional data passed to @start_poll
 */
struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size,
				 unsigned int flags, int e2e_tx_hop,
				 u16 sof_mask, u16 eof_mask,
				 void (*start_poll)(void *), void *poll_data)
{
	return tb_ring_alloc(nhi, hop, size, false, flags, e2e_tx_hop, sof_mask, eof_mask,
			     start_poll, poll_data);
}
EXPORT_SYMBOL_GPL(tb_ring_alloc_rx);
/**
 * tb_ring_start() - enable a ring
 * @ring: Ring to start
 *
 * Must not be invoked in parallel with tb_ring_stop().
 */
void tb_ring_start(struct tb_ring *ring)
{
	u16 frame_size;
	u32 flags;

	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	if (ring->nhi->going_away)
		goto err;
	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "ring already started\n");
		goto err;
	}
	dev_dbg(&ring->nhi->pdev->dev, "starting %s %d\n",
		RING_TYPE(ring), ring->hop);

	if (ring->flags & RING_FLAG_FRAME) {
		/* Means 4096 */
		frame_size = 0;
		flags = RING_FLAG_ENABLE;
	} else {
		frame_size = TB_FRAME_SIZE;
		flags = RING_FLAG_ENABLE | RING_FLAG_RAW;
	}

	ring_iowrite64desc(ring, ring->descriptors_dma, 0);
	if (ring->is_tx) {
		ring_iowrite32desc(ring, ring->size, 12);
		ring_iowrite32options(ring, 0, 4); /* time related? */
		ring_iowrite32options(ring, flags, 0);
	} else {
		u32 sof_eof_mask = ring->sof_mask << 16 | ring->eof_mask;

		ring_iowrite32desc(ring, (frame_size << 16) | ring->size, 12);
		ring_iowrite32options(ring, sof_eof_mask, 4);
		ring_iowrite32options(ring, flags, 0);
	}

	/*
	 * Now that the ring valid bit is set we can configure E2E if
	 * enabled for the ring.
	 */
	if (ring->flags & RING_FLAG_E2E) {
		if (!ring->is_tx) {
			u32 hop;

			hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT;
			hop &= REG_RX_OPTIONS_E2E_HOP_MASK;
			flags |= hop;

			dev_dbg(&ring->nhi->pdev->dev,
				"enabling E2E for %s %d with TX HopID %d\n",
				RING_TYPE(ring), ring->hop, ring->e2e_tx_hop);
		} else {
			dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n",
				RING_TYPE(ring), ring->hop);
		}

		flags |= RING_FLAG_E2E_FLOW_CONTROL;
		ring_iowrite32options(ring, flags, 0);
	}

	ring_interrupt_active(ring, true);
	ring->running = true;
err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);
}
EXPORT_SYMBOL_GPL(tb_ring_start);
/**
 * tb_ring_stop() - shutdown a ring
 * @ring: Ring to stop
 *
 * Must not be invoked from a callback.
 *
 * This method will disable the ring. Further calls to
 * tb_ring_tx/tb_ring_rx will return -ESHUTDOWN until ring_stop has been
 * called.
 *
 * All enqueued frames will be canceled and their callbacks will be executed
 * with frame->canceled set to true (on the callback thread). This method
 * returns only after all callback invocations have finished.
 */
void tb_ring_stop(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	spin_lock(&ring->lock);
	dev_dbg(&ring->nhi->pdev->dev, "stopping %s %d\n",
		RING_TYPE(ring), ring->hop);
	if (ring->nhi->going_away)
		goto err;
	if (!ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n",
			 RING_TYPE(ring), ring->hop);
		goto err;
	}
	ring_interrupt_active(ring, false);

	ring_iowrite32options(ring, 0, 0);
	ring_iowrite64desc(ring, 0, 0);
	ring_iowrite32desc(ring, 0, 8);
	ring_iowrite32desc(ring, 0, 12);
	ring->head = 0;
	ring->tail = 0;
	ring->running = false;

err:
	spin_unlock(&ring->lock);
	spin_unlock_irq(&ring->nhi->lock);

	/*
	 * schedule ring->work to invoke callbacks on all remaining frames.
	 */
	schedule_work(&ring->work);
	flush_work(&ring->work);
}
EXPORT_SYMBOL_GPL(tb_ring_stop);
/*
 * tb_ring_free() - free ring
 *
 * When this method returns all invocations of ring->callback will have
 * finished.
 *
 * Ring must be stopped.
 *
 * Must NOT be called from ring_frame->callback!
 */
void tb_ring_free(struct tb_ring *ring)
{
	spin_lock_irq(&ring->nhi->lock);
	/*
	 * Dissociate the ring from the NHI. This also ensures that
	 * nhi_interrupt_work cannot reschedule ring->work.
	 */
	if (ring->is_tx)
		ring->nhi->tx_rings[ring->hop] = NULL;
	else
		ring->nhi->rx_rings[ring->hop] = NULL;

	if (ring->running) {
		dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n",
			 RING_TYPE(ring), ring->hop);
	}
	spin_unlock_irq(&ring->nhi->lock);

	ring_release_msix(ring);

	dma_free_coherent(&ring->nhi->pdev->dev,
			  ring->size * sizeof(*ring->descriptors),
			  ring->descriptors, ring->descriptors_dma);

	ring->descriptors = NULL;
	ring->descriptors_dma = 0;

	dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring),
		ring->hop);

	/*
	 * ring->work can no longer be scheduled (it is scheduled only
	 * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it
	 * to finish before freeing the ring.
	 */
	flush_work(&ring->work);
	kfree(ring);
}
EXPORT_SYMBOL_GPL(tb_ring_free);
/**
 * nhi_mailbox_cmd() - Send a command through NHI mailbox
 * @nhi: Pointer to the NHI structure
 * @cmd: Command to send
 * @data: Data to be sent with the command
 *
 * Sends mailbox command to the firmware running on NHI. Returns %0 in
 * case of success and negative errno in case of failure.
 */
int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data)
{
	ktime_t timeout;
	u32 val;

	iowrite32(data, nhi->iobase + REG_INMAIL_DATA);

	val = ioread32(nhi->iobase + REG_INMAIL_CMD);
	val &= ~(REG_INMAIL_CMD_MASK | REG_INMAIL_ERROR);
	val |= REG_INMAIL_OP_REQUEST | cmd;
	iowrite32(val, nhi->iobase + REG_INMAIL_CMD);

	timeout = ktime_add_ms(ktime_get(), NHI_MAILBOX_TIMEOUT);
	do {
		val = ioread32(nhi->iobase + REG_INMAIL_CMD);
		if (!(val & REG_INMAIL_OP_REQUEST))
			break;
		usleep_range(10, 20);
	} while (ktime_before(ktime_get(), timeout));

	if (val & REG_INMAIL_OP_REQUEST)
		return -ETIMEDOUT;
	if (val & REG_INMAIL_ERROR)
		return -EIO;

	return 0;
}
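/*
 * Illustrative sketch (not part of this file): a connection manager can
 * notify the firmware before unloading (NHI_MAILBOX_DRV_UNLOADS is one
 * of the commands the ICM code sends):
 *
 *	ret = nhi_mailbox_cmd(nhi, NHI_MAILBOX_DRV_UNLOADS, 0);
 *	if (ret)
 *		return ret;	// -ETIMEDOUT or -EIO from the mailbox
 */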
/**
 * nhi_mailbox_mode() - Return current firmware operation mode
 * @nhi: Pointer to the NHI structure
 *
 * The function reads current firmware operation mode using NHI mailbox
 * registers and returns it to the caller.
 */
enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi)
{
	u32 val;

	val = ioread32(nhi->iobase + REG_OUTMAIL_CMD);
	val &= REG_OUTMAIL_CMD_OPMODE_MASK;
	val >>= REG_OUTMAIL_CMD_OPMODE_SHIFT;

	return (enum nhi_fw_mode)val;
}
static void nhi_interrupt_work(struct work_struct *work)
{
	struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work);
	int value = 0; /* Suppress uninitialized usage warning. */
	int bit;
	int hop = -1;
	int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */
	struct tb_ring *ring;

	spin_lock_irq(&nhi->lock);

	/*
	 * Starting at REG_RING_NOTIFY_BASE there are three status bitfields
	 * (TX, RX, RX overflow). We iterate over the bits and read new
	 * dwords as required. The registers are cleared on read.
	 */
	for (bit = 0; bit < 3 * nhi->hop_count; bit++) {
		if (bit % 32 == 0)
			value = ioread32(nhi->iobase
					 + REG_RING_NOTIFY_BASE
					 + 4 * (bit / 32));
		if (++hop == nhi->hop_count) {
			hop = 0;
			type++;
		}
		if ((value & (1 << (bit % 32))) == 0)
			continue;
		if (type == 2) {
			dev_warn(&nhi->pdev->dev,
				 "RX overflow for ring %d\n",
				 hop);
			continue;
		}
		if (type == 0)
			ring = nhi->tx_rings[hop];
		else
			ring = nhi->rx_rings[hop];
		if (ring == NULL) {
			dev_warn(&nhi->pdev->dev,
				 "got interrupt for inactive %s ring %d\n",
				 type ? "RX" : "TX",
				 hop);
			continue;
		}

		spin_lock(&ring->lock);
		__ring_interrupt(ring);
		spin_unlock(&ring->lock);
	}
	spin_unlock_irq(&nhi->lock);
}
static irqreturn_t nhi_msi(int irq, void *data)
{
	struct tb_nhi *nhi = data;
	schedule_work(&nhi->interrupt_work);
	return IRQ_HANDLED;
}
static int __nhi_suspend_noirq(struct device *dev, bool wakeup)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = tb_domain_suspend_noirq(tb);
	if (ret)
		return ret;

	if (nhi->ops && nhi->ops->suspend_noirq) {
		ret = nhi->ops->suspend_noirq(tb->nhi, wakeup);
		if (ret)
			return ret;
	}

	return 0;
}
static int nhi_suspend_noirq(struct device *dev)
{
	return __nhi_suspend_noirq(dev, device_may_wakeup(dev));
}
static int nhi_freeze_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_freeze_noirq(tb);
}

static int nhi_thaw_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_thaw_noirq(tb);
}
static bool nhi_wake_supported(struct pci_dev *pdev)
{
	u8 val;

	/*
	 * If power rails are sustainable for wakeup from S4 this
	 * property is set by the BIOS.
	 */
	if (device_property_read_u8(&pdev->dev, "WAKE_SUPPORTED", &val))
		return !!val;

	return true;
}
static int nhi_poweroff_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	bool wakeup;

	wakeup = device_may_wakeup(dev) && nhi_wake_supported(pdev);
	return __nhi_suspend_noirq(dev, wakeup);
}
static void nhi_enable_int_throttling(struct tb_nhi *nhi)
{
	/* Throttling is specified in 256ns increments */
	u32 throttle = DIV_ROUND_UP(128 * NSEC_PER_USEC, 256);
	unsigned int i;

	/*
	 * Configure interrupt throttling for all vectors even if we
	 * only use few.
	 */
	for (i = 0; i < MSIX_MAX_VECS; i++) {
		u32 reg = REG_INT_THROTTLING_RATE + i * 4;
		iowrite32(throttle, nhi->iobase + reg);
	}
}
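/*
 * Illustrative note (not in the original source): with the constants
 * above, throttle = DIV_ROUND_UP(128 * 1000, 256) = 500 units of 256 ns,
 * i.e. the hardware signals at most one interrupt per 128 us per vector.
 */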
static int nhi_resume_noirq(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	/*
	 * Check that the device is still there. It may be that the user
	 * unplugged last device which causes the host controller to go
	 * away on PCs.
	 */
	if (!pci_device_is_present(pdev)) {
		nhi->going_away = true;
	} else {
		if (nhi->ops && nhi->ops->resume_noirq) {
			ret = nhi->ops->resume_noirq(nhi);
			if (ret)
				return ret;
		}
		nhi_enable_int_throttling(tb->nhi);
	}

	return tb_domain_resume_noirq(tb);
}
static int nhi_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	return tb_domain_suspend(tb);
}
static void nhi_complete(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);

	/*
	 * If we were runtime suspended when system suspend started,
	 * schedule runtime resume now. It should bring the domain back
	 * to functional state.
	 */
	if (pm_runtime_suspended(&pdev->dev))
		pm_runtime_resume(&pdev->dev);
	else
		tb_domain_complete(tb);
}
static int nhi_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	ret = tb_domain_runtime_suspend(tb);
	if (ret)
		return ret;

	if (nhi->ops && nhi->ops->runtime_suspend) {
		ret = nhi->ops->runtime_suspend(tb->nhi);
		if (ret)
			return ret;
	}
	return 0;
}
static int nhi_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;
	int ret;

	if (nhi->ops && nhi->ops->runtime_resume) {
		ret = nhi->ops->runtime_resume(nhi);
		if (ret)
			return ret;
	}

	nhi_enable_int_throttling(nhi);
	return tb_domain_runtime_resume(tb);
}
static void nhi_shutdown(struct tb_nhi *nhi)
{
	int i;

	dev_dbg(&nhi->pdev->dev, "shutdown\n");

	for (i = 0; i < nhi->hop_count; i++) {
		if (nhi->tx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "TX ring %d is still active\n", i);
		if (nhi->rx_rings[i])
			dev_WARN(&nhi->pdev->dev,
				 "RX ring %d is still active\n", i);
	}
	nhi_disable_interrupts(nhi);
	/*
	 * We have to release the irq before calling flush_work. Otherwise an
	 * already executing IRQ handler could call schedule_work again.
	 */
	if (!nhi->pdev->msix_enabled) {
		devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi);
		flush_work(&nhi->interrupt_work);
	}
	ida_destroy(&nhi->msix_ida);

	if (nhi->ops && nhi->ops->shutdown)
		nhi->ops->shutdown(nhi);
}
static int nhi_init_msi(struct tb_nhi *nhi)
{
	struct pci_dev *pdev = nhi->pdev;
	int res, irq, nvec;

	/* In case someone left them on. */
	nhi_disable_interrupts(nhi);

	nhi_enable_int_throttling(nhi);

	ida_init(&nhi->msix_ida);

	/*
	 * The NHI has 16 MSI-X vectors or a single MSI. We first try to
	 * get all MSI-X vectors and if we succeed, each ring will have
	 * one MSI-X. If for some reason that does not work out, we
	 * fall back to a single MSI.
	 */
	nvec = pci_alloc_irq_vectors(pdev, MSIX_MIN_VECS, MSIX_MAX_VECS,
				     PCI_IRQ_MSIX);
	if (nvec < 0) {
		nvec = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (nvec < 0)
			return nvec;

		INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work);

		irq = pci_irq_vector(nhi->pdev, 0);
		if (irq < 0)
			return irq;

		res = devm_request_irq(&pdev->dev, irq, nhi_msi,
				       IRQF_NO_SUSPEND, "thunderbolt", nhi);
		if (res) {
			dev_err(&pdev->dev, "request_irq failed, aborting\n");
			return res;
		}
	}

	return 0;
}
static bool nhi_imr_valid(struct pci_dev *pdev)
{
	u8 val;

	if (!device_property_read_u8(&pdev->dev, "IMR_VALID", &val))
		return !!val;
	return true;
}
/*
 * During suspend the Thunderbolt controller is reset and all PCIe
 * tunnels are lost. The NHI driver will try to reestablish all tunnels
 * during resume. This adds device links between the tunneled PCIe
 * downstream ports and the NHI so that the device core will make sure
 * NHI is resumed first before the rest.
 */
static void tb_apple_add_links(struct tb_nhi *nhi)
{
	struct pci_dev *upstream, *pdev;

	if (!x86_apple_machine)
		return;

	switch (nhi->pdev->device) {
	case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE:
	case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI:
	case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI:
		break;
	default:
		return;
	}

	upstream = pci_upstream_bridge(nhi->pdev);
	while (upstream) {
		if (!pci_is_pcie(upstream))
			return;
		if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM)
			break;
		upstream = pci_upstream_bridge(upstream);
	}

	if (!upstream)
		return;

	/*
	 * For each hotplug downstream port, create a device link back to
	 * the NHI so that PCIe tunnels can be re-established after sleep.
	 */
	for_each_pci_bridge(pdev, upstream->subordinate) {
		const struct device_link *link;

		if (!pci_is_pcie(pdev))
			continue;
		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM ||
		    !pdev->is_hotplug_bridge)
			continue;

		link = device_link_add(&pdev->dev, &nhi->pdev->dev,
				       DL_FLAG_AUTOREMOVE_SUPPLIER |
				       DL_FLAG_PM_RUNTIME);
		if (link) {
			dev_dbg(&nhi->pdev->dev, "created link from %s\n",
				dev_name(&pdev->dev));
		} else {
			dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n",
				 dev_name(&pdev->dev));
		}
	}
}
static struct tb *nhi_select_cm(struct tb_nhi *nhi)
{
	struct tb *tb;

	/*
	 * USB4 case is simple. If we got control of any of the
	 * capabilities, we use software CM.
	 */
	if (tb_acpi_is_native())
		return tb_probe(nhi);

	/*
	 * Either firmware based CM is running (we did not get control
	 * from the firmware) or this is a pre-USB4 PC, so try the
	 * firmware CM first and then fall back to software CM.
	 */
	tb = icm_probe(nhi);
	if (!tb)
		tb = tb_probe(nhi);

	return tb;
}
static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct tb_nhi *nhi;
	struct tb *tb;
	int res;

	if (!nhi_imr_valid(pdev)) {
		dev_warn(&pdev->dev, "firmware image not valid, aborting\n");
		return -ENODEV;
	}

	res = pcim_enable_device(pdev);
	if (res) {
		dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
		return res;
	}

	res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt");
	if (res) {
		dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n");
		return res;
	}

	nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL);
	if (!nhi)
		return -ENOMEM;

	nhi->pdev = pdev;
	nhi->ops = (const struct tb_nhi_ops *)id->driver_data;
	/* cannot fail - table is allocated in pcim_iomap_regions */
	nhi->iobase = pcim_iomap_table(pdev)[0];
	nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff;
	dev_dbg(&pdev->dev, "total paths: %d\n", nhi->hop_count);

	nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->tx_rings), GFP_KERNEL);
	nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count,
				     sizeof(*nhi->rx_rings), GFP_KERNEL);
	if (!nhi->tx_rings || !nhi->rx_rings)
		return -ENOMEM;

	res = nhi_init_msi(nhi);
	if (res) {
		dev_err(&pdev->dev, "cannot enable MSI, aborting\n");
		return res;
	}

	spin_lock_init(&nhi->lock);

	res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (res)
		res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (res) {
		dev_err(&pdev->dev, "failed to set DMA mask\n");
		return res;
	}

	pci_set_master(pdev);

	if (nhi->ops && nhi->ops->init) {
		res = nhi->ops->init(nhi);
		if (res)
			return res;
	}

	tb_apple_add_links(nhi);
	tb_acpi_add_links(nhi);

	tb = nhi_select_cm(nhi);
	if (!tb) {
		dev_err(&nhi->pdev->dev,
			"failed to determine connection manager, aborting\n");
		return -ENODEV;
	}

	dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n");

	res = tb_domain_add(tb);
	if (res) {
		/*
		 * At this point the RX/TX rings might already have been
		 * activated. Do a proper shutdown.
		 */
		tb_domain_put(tb);
		nhi_shutdown(nhi);
		return res;
	}
	pci_set_drvdata(pdev, tb);

	device_wakeup_enable(&pdev->dev);

	pm_runtime_allow(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;
}
static void nhi_remove(struct pci_dev *pdev)
{
	struct tb *tb = pci_get_drvdata(pdev);
	struct tb_nhi *nhi = tb->nhi;

	pm_runtime_get_sync(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_forbid(&pdev->dev);

	tb_domain_remove(tb);
	nhi_shutdown(nhi);
}
/*
 * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable
 * the tunnels asap. A corresponding pci quirk blocks the downstream bridges
 * resume_noirq until we are done.
 */
static const struct dev_pm_ops nhi_pm_ops = {
	.suspend_noirq = nhi_suspend_noirq,
	.resume_noirq = nhi_resume_noirq,
	.freeze_noirq = nhi_freeze_noirq,  /*
					    * we just disable hotplug, the
					    * pci-tunnels stay alive.
					    */
	.thaw_noirq = nhi_thaw_noirq,
	.restore_noirq = nhi_resume_noirq,
	.suspend = nhi_suspend,
	.poweroff_noirq = nhi_poweroff_noirq,
	.poweroff = nhi_suspend,
	.complete = nhi_complete,
	.runtime_suspend = nhi_runtime_suspend,
	.runtime_resume = nhi_runtime_resume,
};
static struct pci_device_id nhi_ids[] = {
	/*
	 * We have to specify class, the TB bridges use the same device and
	 * vendor (sub)id on gen 1 and gen 2 controllers.
	 */
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C,
		.subvendor = 0x2222, .subdevice = 0x1111,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},
	{
		.class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0,
		.vendor = PCI_VENDOR_ID_INTEL,
		.device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI,
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
	},

	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI) },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1),
	  .driver_data = (kernel_ulong_t)&icl_nhi_ops },

	/* Any USB4 compliant host */
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) },

	{ 0,}
};

MODULE_DEVICE_TABLE(pci, nhi_ids);
MODULE_LICENSE("GPL");
static struct pci_driver nhi_driver = {
	.name = "thunderbolt",
	.id_table = nhi_ids,
	.probe = nhi_probe,
	.remove = nhi_remove,
	.shutdown = nhi_remove,
	.driver.pm = &nhi_pm_ops,
};
static int __init nhi_init(void)
{
	int ret;

	ret = tb_domain_init();
	if (ret)
		return ret;
	ret = pci_register_driver(&nhi_driver);
	if (ret)
		tb_domain_exit();
	return ret;
}

static void __exit nhi_unload(void)
{
	pci_unregister_driver(&nhi_driver);
	tb_domain_exit();
}

rootfs_initcall(nhi_init);
module_exit(nhi_unload);