/*
 * Thunderbolt Cactus Ridge driver - NHI driver
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#ifndef DSL3510_H_
#define DSL3510_H_

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

14 | /** | |
15 | * struct tb_nhi - thunderbolt native host interface | |
046bee1f MW |
16 | * @lock: Must be held during ring creation/destruction. Is acquired by |
17 | * interrupt_work when dispatching interrupts to individual rings. | |
18 | * @pdev: Pointer to the PCI device | |
19 | * @iobase: MMIO space of the NHI | |
20 | * @tx_rings: All Tx rings available on this host controller | |
21 | * @rx_rings: All Rx rings available on this host controller | |
22 | * @msix_ida: Used to allocate MSI-X vectors for rings | |
bdccf295 MW |
23 | * @going_away: The host controller device is about to disappear so when |
24 | * this flag is set, avoid touching the hardware anymore. | |
046bee1f MW |
25 | * @interrupt_work: Work scheduled to handle ring interrupt when no |
26 | * MSI-X is used. | |
27 | * @hop_count: Number of rings (end point hops) supported by NHI. | |
16603153 AN |
28 | */ |
29 | struct tb_nhi { | |
046bee1f | 30 | struct mutex lock; |
16603153 AN |
31 | struct pci_dev *pdev; |
32 | void __iomem *iobase; | |
33 | struct tb_ring **tx_rings; | |
34 | struct tb_ring **rx_rings; | |
046bee1f | 35 | struct ida msix_ida; |
bdccf295 | 36 | bool going_away; |
16603153 | 37 | struct work_struct interrupt_work; |
046bee1f | 38 | u32 hop_count; |
16603153 AN |
39 | }; |
40 | ||
41 | /** | |
42 | * struct tb_ring - thunderbolt TX or RX ring associated with a NHI | |
046bee1f MW |
43 | * @lock: Lock serializing actions to this ring. Must be acquired after |
44 | * nhi->lock. | |
45 | * @nhi: Pointer to the native host controller interface | |
46 | * @size: Size of the ring | |
47 | * @hop: Hop (DMA channel) associated with this ring | |
48 | * @head: Head of the ring (write next descriptor here) | |
49 | * @tail: Tail of the ring (complete next descriptor here) | |
50 | * @descriptors: Allocated descriptors for this ring | |
51 | * @queue: Queue holding frames to be transferred over this ring | |
52 | * @in_flight: Queue holding frames that are currently in flight | |
53 | * @work: Interrupt work structure | |
54 | * @is_tx: Is the ring Tx or Rx | |
55 | * @running: Is the ring running | |
56 | * @irq: MSI-X irq number if the ring uses MSI-X. %0 otherwise. | |
57 | * @vector: MSI-X vector number the ring uses (only set if @irq is > 0) | |
58 | * @flags: Ring specific flags | |
16603153 AN |
59 | */ |
60 | struct tb_ring { | |
046bee1f | 61 | struct mutex lock; |
16603153 AN |
62 | struct tb_nhi *nhi; |
63 | int size; | |
64 | int hop; | |
046bee1f MW |
65 | int head; |
66 | int tail; | |
16603153 AN |
67 | struct ring_desc *descriptors; |
68 | dma_addr_t descriptors_dma; | |
69 | struct list_head queue; | |
70 | struct list_head in_flight; | |
71 | struct work_struct work; | |
046bee1f | 72 | bool is_tx:1; |
16603153 | 73 | bool running:1; |
046bee1f MW |
74 | int irq; |
75 | u8 vector; | |
76 | unsigned int flags; | |
16603153 AN |
77 | }; |
78 | ||
046bee1f MW |
79 | /* Leave ring interrupt enabled on suspend */ |
80 | #define RING_FLAG_NO_SUSPEND BIT(0) | |
81 | ||
16603153 AN |
82 | struct ring_frame; |
83 | typedef void (*ring_cb)(struct tb_ring*, struct ring_frame*, bool canceled); | |
84 | ||
85 | /** | |
86 | * struct ring_frame - for use with ring_rx/ring_tx | |
87 | */ | |
88 | struct ring_frame { | |
89 | dma_addr_t buffer_phy; | |
90 | ring_cb callback; | |
91 | struct list_head list; | |
92 | u32 size:12; /* TX: in, RX: out*/ | |
93 | u32 flags:12; /* RX: out */ | |
94 | u32 eof:4; /* TX:in, RX: out */ | |
95 | u32 sof:4; /* TX:in, RX: out */ | |
96 | }; | |
97 | ||
98 | #define TB_FRAME_SIZE 0x100 /* minimum size for ring_rx */ | |
99 | ||
046bee1f MW |
100 | struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, |
101 | unsigned int flags); | |
102 | struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, | |
103 | unsigned int flags); | |
16603153 AN |
104 | void ring_start(struct tb_ring *ring); |
105 | void ring_stop(struct tb_ring *ring); | |
106 | void ring_free(struct tb_ring *ring); | |
107 | ||
108 | int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame); | |
109 | ||
110 | /** | |
111 | * ring_rx() - enqueue a frame on an RX ring | |
112 | * | |
113 | * frame->buffer, frame->buffer_phy and frame->callback have to be set. The | |
114 | * buffer must contain at least TB_FRAME_SIZE bytes. | |
115 | * | |
116 | * frame->callback will be invoked with frame->size, frame->flags, frame->eof, | |
117 | * frame->sof set once the frame has been received. | |
118 | * | |
119 | * If ring_stop is called after the packet has been enqueued frame->callback | |
120 | * will be called with canceled set to true. | |
121 | * | |
122 | * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise. | |
123 | */ | |
124 | static inline int ring_rx(struct tb_ring *ring, struct ring_frame *frame) | |
125 | { | |
126 | WARN_ON(ring->is_tx); | |
127 | return __ring_enqueue(ring, frame); | |
128 | } | |
129 | ||
130 | /** | |
131 | * ring_tx() - enqueue a frame on an TX ring | |
132 | * | |
133 | * frame->buffer, frame->buffer_phy, frame->callback, frame->size, frame->eof | |
134 | * and frame->sof have to be set. | |
135 | * | |
136 | * frame->callback will be invoked with once the frame has been transmitted. | |
137 | * | |
138 | * If ring_stop is called after the packet has been enqueued frame->callback | |
139 | * will be called with canceled set to true. | |
140 | * | |
141 | * Return: Returns ESHUTDOWN if ring_stop has been called. Zero otherwise. | |
142 | */ | |
143 | static inline int ring_tx(struct tb_ring *ring, struct ring_frame *frame) | |
144 | { | |
145 | WARN_ON(!ring->is_tx); | |
146 | return __ring_enqueue(ring, frame); | |
147 | } | |
148 | ||
cd446ee2 MW |
149 | enum nhi_fw_mode { |
150 | NHI_FW_SAFE_MODE, | |
151 | NHI_FW_AUTH_MODE, | |
152 | NHI_FW_EP_MODE, | |
153 | NHI_FW_CM_MODE, | |
154 | }; | |
155 | ||
156 | enum nhi_mailbox_cmd { | |
157 | NHI_MAILBOX_SAVE_DEVS = 0x05, | |
e6b245cc | 158 | NHI_MAILBOX_DISCONNECT_PCIE_PATHS = 0x06, |
cd446ee2 MW |
159 | NHI_MAILBOX_DRV_UNLOADS = 0x07, |
160 | NHI_MAILBOX_ALLOW_ALL_DEVS = 0x23, | |
161 | }; | |
162 | ||
163 | int nhi_mailbox_cmd(struct tb_nhi *nhi, enum nhi_mailbox_cmd cmd, u32 data); | |
164 | enum nhi_fw_mode nhi_mailbox_mode(struct tb_nhi *nhi); | |
165 | ||
5e2781bc MW |
166 | /* |
167 | * PCI IDs used in this driver from Win Ridge forward. There is no | |
168 | * need for the PCI quirk anymore as we will use ICM also on Apple | |
169 | * hardware. | |
170 | */ | |
171 | #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d | |
172 | #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e | |
173 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI 0x15bf | |
174 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE 0x15c0 | |
175 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_NHI 0x15d2 | |
176 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE 0x15d3 | |
177 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_NHI 0x15d9 | |
178 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE 0x15da | |
179 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_USBONLY_NHI 0x15dc | |
180 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_USBONLY_NHI 0x15dd | |
181 | #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_USBONLY_NHI 0x15de | |
182 | ||
16603153 | 183 | #endif |