]>
Commit | Line | Data |
---|---|---|
98f44cb0 IM |
1 | /* |
2 | * Copyright (c) 2015-2016 Quantenna Communications, Inc. | |
3 | * All rights reserved. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or | |
6 | * modify it under the terms of the GNU General Public License | |
7 | * as published by the Free Software Foundation; either version 2 | |
8 | * of the License, or (at your option) any later version. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, | |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
13 | * GNU General Public License for more details. | |
14 | * | |
15 | */ | |
16 | ||
17 | #include <linux/kernel.h> | |
18 | #include <linux/module.h> | |
19 | #include <linux/firmware.h> | |
20 | #include <linux/pci.h> | |
21 | #include <linux/vmalloc.h> | |
22 | #include <linux/delay.h> | |
23 | #include <linux/interrupt.h> | |
24 | #include <linux/sched.h> | |
25 | #include <linux/completion.h> | |
26 | #include <linux/crc32.h> | |
27 | #include <linux/spinlock.h> | |
3cbc3a0f | 28 | #include <linux/circ_buf.h> |
97f38011 | 29 | #include <linux/log2.h> |
98f44cb0 IM |
30 | |
31 | #include "qtn_hw_ids.h" | |
32 | #include "pcie_bus_priv.h" | |
33 | #include "core.h" | |
34 | #include "bus.h" | |
35 | #include "debug.h" | |
36 | ||
/* Module parameters */

static bool use_msi = true;
module_param(use_msi, bool, 0644);
MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt");

/* Tx/Rx descriptor ring sizes; validated in qtnf_pcie_init_xfer() */
static unsigned int tx_bd_size_param = 32;
module_param(tx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two");

static unsigned int rx_bd_size_param = 256;
module_param(rx_bd_size_param, uint, 0644);
MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of two");

static u8 flashboot = 1;
module_param(flashboot, byte, 0644);
MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS");

#define DRV_NAME	"qtnfmac_pearl_pcie"
/* Write @val to @basereg, then read it back so the posted write is
 * flushed out to the device before the caller proceeds.
 */
static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg)
{
	writel(val, basereg);

	/* flush posted write */
	readl(basereg);
}
62 | ||
63 | static inline void qtnf_init_hdp_irqs(struct qtnf_pcie_bus_priv *priv) | |
64 | { | |
65 | unsigned long flags; | |
66 | ||
67 | spin_lock_irqsave(&priv->irq_lock, flags); | |
68 | priv->pcie_irq_mask = (PCIE_HDP_INT_RX_BITS | PCIE_HDP_INT_TX_BITS); | |
69 | spin_unlock_irqrestore(&priv->irq_lock, flags); | |
70 | } | |
71 | ||
72 | static inline void qtnf_enable_hdp_irqs(struct qtnf_pcie_bus_priv *priv) | |
73 | { | |
74 | unsigned long flags; | |
75 | ||
76 | spin_lock_irqsave(&priv->irq_lock, flags); | |
77 | writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base)); | |
78 | spin_unlock_irqrestore(&priv->irq_lock, flags); | |
79 | } | |
80 | ||
81 | static inline void qtnf_disable_hdp_irqs(struct qtnf_pcie_bus_priv *priv) | |
82 | { | |
83 | unsigned long flags; | |
84 | ||
85 | spin_lock_irqsave(&priv->irq_lock, flags); | |
86 | writel(0x0, PCIE_HDP_INT_EN(priv->pcie_reg_base)); | |
87 | spin_unlock_irqrestore(&priv->irq_lock, flags); | |
88 | } | |
89 | ||
90 | static inline void qtnf_en_rxdone_irq(struct qtnf_pcie_bus_priv *priv) | |
91 | { | |
92 | unsigned long flags; | |
93 | ||
94 | spin_lock_irqsave(&priv->irq_lock, flags); | |
95 | priv->pcie_irq_mask |= PCIE_HDP_INT_RX_BITS; | |
96 | writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base)); | |
97 | spin_unlock_irqrestore(&priv->irq_lock, flags); | |
98 | } | |
99 | ||
100 | static inline void qtnf_dis_rxdone_irq(struct qtnf_pcie_bus_priv *priv) | |
101 | { | |
102 | unsigned long flags; | |
103 | ||
104 | spin_lock_irqsave(&priv->irq_lock, flags); | |
105 | priv->pcie_irq_mask &= ~PCIE_HDP_INT_RX_BITS; | |
106 | writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base)); | |
107 | spin_unlock_irqrestore(&priv->irq_lock, flags); | |
108 | } | |
109 | ||
110 | static inline void qtnf_en_txdone_irq(struct qtnf_pcie_bus_priv *priv) | |
111 | { | |
112 | unsigned long flags; | |
113 | ||
114 | spin_lock_irqsave(&priv->irq_lock, flags); | |
115 | priv->pcie_irq_mask |= PCIE_HDP_INT_TX_BITS; | |
116 | writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base)); | |
117 | spin_unlock_irqrestore(&priv->irq_lock, flags); | |
118 | } | |
119 | ||
120 | static inline void qtnf_dis_txdone_irq(struct qtnf_pcie_bus_priv *priv) | |
121 | { | |
122 | unsigned long flags; | |
123 | ||
124 | spin_lock_irqsave(&priv->irq_lock, flags); | |
125 | priv->pcie_irq_mask &= ~PCIE_HDP_INT_TX_BITS; | |
126 | writel(priv->pcie_irq_mask, PCIE_HDP_INT_EN(priv->pcie_reg_base)); | |
127 | spin_unlock_irqrestore(&priv->irq_lock, flags); | |
128 | } | |
129 | ||
c3b2f7ca | 130 | static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv) |
98f44cb0 IM |
131 | { |
132 | struct pci_dev *pdev = priv->pdev; | |
133 | ||
134 | /* fall back to legacy INTx interrupts by default */ | |
135 | priv->msi_enabled = 0; | |
136 | ||
137 | /* check if MSI capability is available */ | |
138 | if (use_msi) { | |
139 | if (!pci_enable_msi(pdev)) { | |
140 | pr_debug("MSI interrupt enabled\n"); | |
141 | priv->msi_enabled = 1; | |
142 | } else { | |
143 | pr_warn("failed to enable MSI interrupts"); | |
144 | } | |
145 | } | |
146 | ||
147 | if (!priv->msi_enabled) { | |
148 | pr_warn("legacy PCIE interrupts enabled\n"); | |
149 | pci_intx(pdev, 1); | |
150 | } | |
98f44cb0 IM |
151 | } |
152 | ||
/* De-assert the legacy INTx line by clearing the assert bit in the
 * PEARL PCIe config register; written non-posted so it takes effect
 * before the handler returns.
 */
static void qtnf_deassert_intx(struct qtnf_pcie_bus_priv *priv)
{
	void __iomem *reg = priv->sysctl_bar + PEARL_PCIE_CFG0_OFFSET;
	u32 cfg;

	cfg = readl(reg);
	cfg &= ~PEARL_ASSERT_INTX;
	qtnf_non_posted_write(cfg, reg);
}
162 | ||
/* Request an endpoint reset through the LHOST IRQ doorbell, wait for the
 * card to come back, then restore the previously saved PCI config space.
 */
static void qtnf_reset_card(struct qtnf_pcie_bus_priv *priv)
{
	const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_EP_RESET);
	void __iomem *reg = priv->sysctl_bar +
			    QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;

	qtnf_non_posted_write(data, reg);
	msleep(QTN_EP_RESET_WAIT_MS);
	pci_restore_state(priv->pdev);
}
173 | ||
/* Ring the LHOST IRQ doorbell to raise an interrupt on the endpoint.
 * Used as the interrupt-generation callback of the SHM IPC channel
 * (see qtnf_pcie_init_shm_ipc()).
 */
static void qtnf_ipc_gen_ep_int(void *arg)
{
	const struct qtnf_pcie_bus_priv *priv = arg;
	const u32 data = QTN_PEARL_IPC_IRQ_WORD(QTN_PEARL_LHOST_IPC_IRQ);
	void __iomem *reg = priv->sysctl_bar +
			    QTN_PEARL_SYSCTL_LHOST_IRQ_OFFSET;

	qtnf_non_posted_write(data, reg);
}
183 | ||
/* Map PCI BAR @index using managed (devres) iomap and return its virtual
 * address, or an IOMEM_ERR_PTR() on failure. The mapping is released
 * automatically on driver detach.
 */
static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index)
{
	void __iomem *vaddr;
	dma_addr_t busaddr;
	size_t len;
	int ret;

	ret = pcim_iomap_regions(priv->pdev, 1 << index, DRV_NAME);
	if (ret)
		return IOMEM_ERR_PTR(ret);

	/* busaddr/len are only reported for debugging */
	busaddr = pci_resource_start(priv->pdev, index);
	len = pci_resource_len(priv->pdev, index);
	vaddr = pcim_iomap_table(priv->pdev)[index];
	if (!vaddr)
		return IOMEM_ERR_PTR(-ENOMEM);

	pr_debug("BAR%u vaddr=0x%p busaddr=%pad len=%u\n",
		 index, vaddr, &busaddr, (int)len);

	return vaddr;
}
206 | ||
/* Rx callback of the SHM IPC control channel: copy the raw control
 * payload @buf of @len bytes into a freshly allocated skb and hand it to
 * the transport layer. Zero-length packets and allocation failures are
 * dropped with a log message.
 */
static void qtnf_pcie_control_rx_callback(void *arg, const u8 *buf, size_t len)
{
	struct qtnf_pcie_bus_priv *priv = arg;
	struct qtnf_bus *bus = pci_get_drvdata(priv->pdev);
	struct sk_buff *skb;

	if (unlikely(len == 0)) {
		pr_warn("zero length packet received\n");
		return;
	}

	skb = __dev_alloc_skb(len, GFP_KERNEL);

	if (unlikely(!skb)) {
		pr_err("failed to allocate skb\n");
		return;
	}

	skb_put_data(skb, buf, len);

	qtnf_trans_handle_rx_ctl_packet(bus, skb);
}
229 | ||
/* Set up both directions of the shared-memory IPC control channel over
 * the BDA regions: bda_shm_reg1 is the outbound (host->EP) endpoint,
 * bda_shm_reg2 the inbound (EP->host) one. Always returns 0.
 */
static int qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv)
{
	struct qtnf_shm_ipc_region __iomem *ipc_tx_reg;
	struct qtnf_shm_ipc_region __iomem *ipc_rx_reg;
	const struct qtnf_shm_ipc_int ipc_int = { qtnf_ipc_gen_ep_int, priv };
	const struct qtnf_shm_ipc_rx_callback rx_callback = {
					qtnf_pcie_control_rx_callback, priv };

	ipc_tx_reg = &priv->bda->bda_shm_reg1;
	ipc_rx_reg = &priv->bda->bda_shm_reg2;

	qtnf_shm_ipc_init(&priv->shm_ipc_ep_in, QTNF_SHM_IPC_OUTBOUND,
			  ipc_tx_reg, priv->workqueue,
			  &ipc_int, &rx_callback);
	qtnf_shm_ipc_init(&priv->shm_ipc_ep_out, QTNF_SHM_IPC_INBOUND,
			  ipc_rx_reg, priv->workqueue,
			  &ipc_int, &rx_callback);

	return 0;
}
250 | ||
/* Tear down both SHM IPC endpoints created by qtnf_pcie_init_shm_ipc(). */
static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv)
{
	qtnf_shm_ipc_free(&priv->shm_ipc_ep_in);
	qtnf_shm_ipc_free(&priv->shm_ipc_ep_out);
}
256 | ||
/* Map the three BARs the driver uses (sysctl, DMA registers, shared
 * memory) and publish the host's MSI capability to the EP via the BDA.
 * Returns 0 on success or -ENOMEM if any BAR fails to map.
 */
static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv)
{
	int ret = -ENOMEM;

	priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR);
	if (IS_ERR(priv->sysctl_bar)) {
		pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
		return ret;
	}

	priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR);
	if (IS_ERR(priv->dmareg_bar)) {
		pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
		return ret;
	}

	priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR);
	if (IS_ERR(priv->epmem_bar)) {
		pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
		return ret;
	}

	/* HDP registers live in the DMA BAR; the BDA sits at the start
	 * of the shared-memory BAR
	 */
	priv->pcie_reg_base = priv->dmareg_bar;
	priv->bda = priv->epmem_bar;
	writel(priv->msi_enabled, &priv->bda->bda_rc_msi_enabled);

	return 0;
}
285 | ||
/* Pick the largest PCIe Max Payload Size the device supports, capped by
 * the upstream bridge's current MPS, and program it. On failure the
 * current setting is kept. The chosen value is cached in priv->mps for
 * the debugfs "mps" entry.
 */
static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv)
{
	struct pci_dev *pdev = priv->pdev;
	struct pci_dev *parent;
	int mps_p, mps_o, mps_m, mps;
	int ret;

	/* current mps */
	mps_o = pcie_get_mps(pdev);

	/* maximum supported mps */
	mps_m = 128 << pdev->pcie_mpss;

	/* suggested new mps value */
	mps = mps_m;

	if (pdev->bus && pdev->bus->self) {
		/* parent (bus) mps */
		parent = pdev->bus->self;

		if (pci_is_pcie(parent)) {
			mps_p = pcie_get_mps(parent);
			mps = min(mps_m, mps_p);
		}
	}

	ret = pcie_set_mps(pdev, mps);
	if (ret) {
		pr_err("failed to set mps to %d, keep using current %d\n",
		       mps, mps_o);
		priv->mps = mps_o;
		return;
	}

	pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m);
	priv->mps = mps;
}
323 | ||
324 | static int qtnf_is_state(__le32 __iomem *reg, u32 state) | |
325 | { | |
326 | u32 s = readl(reg); | |
327 | ||
328 | return s & state; | |
329 | } | |
330 | ||
331 | static void qtnf_set_state(__le32 __iomem *reg, u32 state) | |
332 | { | |
333 | u32 s = readl(reg); | |
334 | ||
335 | qtnf_non_posted_write(state | s, reg); | |
336 | } | |
337 | ||
338 | static void qtnf_clear_state(__le32 __iomem *reg, u32 state) | |
339 | { | |
340 | u32 s = readl(reg); | |
341 | ||
342 | qtnf_non_posted_write(s & ~state, reg); | |
343 | } | |
344 | ||
345 | static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms) | |
346 | { | |
347 | u32 timeout = 0; | |
348 | ||
349 | while ((qtnf_is_state(reg, state) == 0)) { | |
350 | usleep_range(1000, 1200); | |
351 | if (++timeout > delay_in_ms) | |
352 | return -1; | |
353 | } | |
354 | ||
355 | return 0; | |
356 | } | |
357 | ||
/* Allocate one devres-managed array holding both skb pointer tables:
 * tx_skb points at the start, rx_skb right after the tx_bd_num Tx slots.
 * Returns 0 on success or -ENOMEM.
 */
static int alloc_skb_array(struct qtnf_pcie_bus_priv *priv)
{
	struct sk_buff **vaddr;
	int len;

	len = priv->tx_bd_num * sizeof(*priv->tx_skb) +
		priv->rx_bd_num * sizeof(*priv->rx_skb);
	vaddr = devm_kzalloc(&priv->pdev->dev, len, GFP_KERNEL);

	if (!vaddr)
		return -ENOMEM;

	priv->tx_skb = vaddr;

	vaddr += priv->tx_bd_num;
	priv->rx_skb = vaddr;

	return 0;
}
377 | ||
/* Allocate a single coherent DMA region holding the Tx descriptor ring
 * followed by the Rx descriptor ring, zero it, record the sub-ring
 * addresses in @priv and program the Rx ring base/size into the EP's HDP
 * registers. Returns 0 on success or -ENOMEM.
 */
static int alloc_bd_table(struct qtnf_pcie_bus_priv *priv)
{
	dma_addr_t paddr;
	void *vaddr;
	int len;

	len = priv->tx_bd_num * sizeof(struct qtnf_tx_bd) +
		priv->rx_bd_num * sizeof(struct qtnf_rx_bd);

	vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* tx bd */

	memset(vaddr, 0, len);

	priv->bd_table_vaddr = vaddr;
	priv->bd_table_paddr = paddr;
	priv->bd_table_len = len;

	priv->tx_bd_vbase = vaddr;
	priv->tx_bd_pbase = paddr;

	pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	priv->tx_bd_r_index = 0;
	priv->tx_bd_w_index = 0;

	/* rx bd */

	/* the Rx ring starts right after the tx_bd_num Tx descriptors */
	vaddr = ((struct qtnf_tx_bd *)vaddr) + priv->tx_bd_num;
	paddr += priv->tx_bd_num * sizeof(struct qtnf_tx_bd);

	priv->rx_bd_vbase = vaddr;
	priv->rx_bd_pbase = paddr;

	/* NOTE(review): "TX_HOST_Q" appears to be EP-side naming for the
	 * queue carrying host Rx buffers — confirm against HDP docs
	 */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(paddr),
	       PCIE_HDP_TX_HOST_Q_BASE_H(priv->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(paddr),
	       PCIE_HDP_TX_HOST_Q_BASE_L(priv->pcie_reg_base));
	writel(priv->rx_bd_num | (sizeof(struct qtnf_rx_bd)) << 16,
	       PCIE_HDP_TX_HOST_Q_SZ_CTRL(priv->pcie_reg_base));

	pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr);

	return 0;
}
428 | ||
867ba964 | 429 | static int skb2rbd_attach(struct qtnf_pcie_bus_priv *priv, u16 index) |
98f44cb0 IM |
430 | { |
431 | struct qtnf_rx_bd *rxbd; | |
432 | struct sk_buff *skb; | |
433 | dma_addr_t paddr; | |
434 | ||
c58730ca | 435 | skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC); |
98f44cb0 | 436 | if (!skb) { |
867ba964 | 437 | priv->rx_skb[index] = NULL; |
98f44cb0 IM |
438 | return -ENOMEM; |
439 | } | |
440 | ||
867ba964 SM |
441 | priv->rx_skb[index] = skb; |
442 | rxbd = &priv->rx_bd_vbase[index]; | |
98f44cb0 IM |
443 | |
444 | paddr = pci_map_single(priv->pdev, skb->data, | |
445 | SKB_BUF_SIZE, PCI_DMA_FROMDEVICE); | |
446 | if (pci_dma_mapping_error(priv->pdev, paddr)) { | |
447 | pr_err("skb DMA mapping error: %pad\n", &paddr); | |
448 | return -ENOMEM; | |
449 | } | |
450 | ||
98f44cb0 IM |
451 | /* keep rx skb paddrs in rx buffer descriptors for cleanup purposes */ |
452 | rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr)); | |
453 | rxbd->addr_h = cpu_to_le32(QTN_HOST_HI32(paddr)); | |
98f44cb0 IM |
454 | rxbd->info = 0x0; |
455 | ||
3cbc3a0f SM |
456 | priv->rx_bd_w_index = index; |
457 | ||
867ba964 SM |
458 | /* sync up all descriptor updates */ |
459 | wmb(); | |
460 | ||
f31039d4 | 461 | #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT |
867ba964 SM |
462 | writel(QTN_HOST_HI32(paddr), |
463 | PCIE_HDP_HHBM_BUF_PTR_H(priv->pcie_reg_base)); | |
f31039d4 | 464 | #endif |
867ba964 SM |
465 | writel(QTN_HOST_LO32(paddr), |
466 | PCIE_HDP_HHBM_BUF_PTR(priv->pcie_reg_base)); | |
467 | ||
468 | writel(index, PCIE_HDP_TX_HOST_Q_WR_PTR(priv->pcie_reg_base)); | |
98f44cb0 IM |
469 | return 0; |
470 | } | |
471 | ||
472 | static int alloc_rx_buffers(struct qtnf_pcie_bus_priv *priv) | |
473 | { | |
474 | u16 i; | |
475 | int ret = 0; | |
476 | ||
477 | memset(priv->rx_bd_vbase, 0x0, | |
478 | priv->rx_bd_num * sizeof(struct qtnf_rx_bd)); | |
479 | ||
480 | for (i = 0; i < priv->rx_bd_num; i++) { | |
481 | ret = skb2rbd_attach(priv, i); | |
482 | if (ret) | |
483 | break; | |
484 | } | |
485 | ||
486 | return ret; | |
487 | } | |
488 | ||
/* all rx/tx activity should have ceased before calling this function */
static void qtnf_free_xfer_buffers(struct qtnf_pcie_bus_priv *priv)
{
	struct qtnf_tx_bd *txbd;
	struct qtnf_rx_bd *rxbd;
	struct sk_buff *skb;
	dma_addr_t paddr;
	int i;

	/* free rx buffers */
	for (i = 0; i < priv->rx_bd_num; i++) {
		if (priv->rx_skb && priv->rx_skb[i]) {
			rxbd = &priv->rx_bd_vbase[i];
			skb = priv->rx_skb[i];
			/* the DMA address was stashed in the descriptor by
			 * skb2rbd_attach() for exactly this cleanup
			 */
			paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
					      le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
			dev_kfree_skb_any(skb);
			priv->rx_skb[i] = NULL;
		}
	}

	/* free tx buffers */
	for (i = 0; i < priv->tx_bd_num; i++) {
		if (priv->tx_skb && priv->tx_skb[i]) {
			txbd = &priv->tx_bd_vbase[i];
			skb = priv->tx_skb[i];
			paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
					      le32_to_cpu(txbd->addr));
			pci_unmap_single(priv->pdev, paddr, skb->len,
					 PCI_DMA_TODEVICE);
			dev_kfree_skb_any(skb);
			priv->tx_skb[i] = NULL;
		}
	}
}
526 | ||
/* Soft-reset the HHBM block, enable 64-bit addressing when the kernel is
 * built with a 64-bit dma_addr_t, and set the buffer queue limit to the
 * Rx ring size. Always returns 0.
 */
static int qtnf_hhbm_init(struct qtnf_pcie_bus_priv *priv)
{
	u32 val;

	val = readl(PCIE_HHBM_CONFIG(priv->pcie_reg_base));
	val |= HHBM_CONFIG_SOFT_RESET;
	writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base));
	usleep_range(50, 100);
	val &= ~HHBM_CONFIG_SOFT_RESET;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	val |= HHBM_64BIT;
#endif
	writel(val, PCIE_HHBM_CONFIG(priv->pcie_reg_base));
	writel(priv->rx_bd_num, PCIE_HHBM_Q_LIMIT_REG(priv->pcie_reg_base));

	return 0;
}
544 | ||
/* Validate the module-parameter ring sizes, then bring up the data path:
 * HHBM hardware queues, skb pointer tables, descriptor rings, and the
 * initial set of Rx buffers. Returns 0 or a negative errno.
 */
static int qtnf_pcie_init_xfer(struct qtnf_pcie_bus_priv *priv)
{
	int ret;
	u32 val;

	priv->tx_bd_num = tx_bd_size_param;
	priv->rx_bd_num = rx_bd_size_param;
	priv->rx_bd_w_index = 0;
	priv->rx_bd_r_index = 0;

	/* ring sizes must be powers of two: indices are wrapped with a
	 * (size - 1) mask (see qtnf_pcie_data_tx_reclaim())
	 */
	if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) {
		pr_err("tx_bd_size_param %u is not power of two\n",
		       priv->tx_bd_num);
		return -EINVAL;
	}

	val = priv->tx_bd_num * sizeof(struct qtnf_tx_bd);
	if (val > PCIE_HHBM_MAX_SIZE) {
		pr_err("tx_bd_size_param %u is too large\n",
		       priv->tx_bd_num);
		return -EINVAL;
	}

	if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) {
		pr_err("rx_bd_size_param %u is not power of two\n",
		       priv->rx_bd_num);
		return -EINVAL;
	}

	val = priv->rx_bd_num * sizeof(dma_addr_t);
	if (val > PCIE_HHBM_MAX_SIZE) {
		pr_err("rx_bd_size_param %u is too large\n",
		       priv->rx_bd_num);
		return -EINVAL;
	}

	ret = qtnf_hhbm_init(priv);
	if (ret) {
		pr_err("failed to init h/w queues\n");
		return ret;
	}

	ret = alloc_skb_array(priv);
	if (ret) {
		pr_err("failed to allocate skb array\n");
		return ret;
	}

	ret = alloc_bd_table(priv);
	if (ret) {
		pr_err("failed to allocate bd table\n");
		return ret;
	}

	ret = alloc_rx_buffers(priv);
	if (ret) {
		pr_err("failed to allocate rx buffers\n");
		return ret;
	}

	return ret;
}
607 | ||
/* Release Tx skbs the EP has finished with: walk the ring from the read
 * index up to the h/w completion index, unmapping and freeing each skb,
 * updating netdev stats and waking queues that were stopped on a full
 * ring. Runs under tx_reclaim_lock (callable from the reclaim tasklet
 * and the tx path).
 */
static void qtnf_pcie_data_tx_reclaim(struct qtnf_pcie_bus_priv *priv)
{
	struct qtnf_tx_bd *txbd;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t paddr;
	u32 tx_done_index;
	int count = 0;
	int i;

	spin_lock_irqsave(&priv->tx_reclaim_lock, flags);

	/* h/w completion counter, wrapped to the (power of two) ring size */
	tx_done_index = readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base))
			& (priv->tx_bd_num - 1);

	i = priv->tx_bd_r_index;

	while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) {
		skb = priv->tx_skb[i];
		if (likely(skb)) {
			txbd = &priv->tx_bd_vbase[i];
			paddr = QTN_HOST_ADDR(le32_to_cpu(txbd->addr_h),
					      le32_to_cpu(txbd->addr));
			pci_unmap_single(priv->pdev, paddr, skb->len,
					 PCI_DMA_TODEVICE);

			if (skb->dev) {
				qtnf_update_tx_stats(skb->dev, skb);
				if (unlikely(priv->tx_stopped)) {
					qtnf_wake_all_queues(skb->dev);
					priv->tx_stopped = 0;
				}
			}

			dev_kfree_skb_any(skb);
		}

		priv->tx_skb[i] = NULL;
		count++;

		if (++i >= priv->tx_bd_num)
			i = 0;
	}

	priv->tx_reclaim_done += count;
	priv->tx_reclaim_req++;
	priv->tx_bd_r_index = i;

	spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags);
}
658 | ||
3cbc3a0f | 659 | static int qtnf_tx_queue_ready(struct qtnf_pcie_bus_priv *priv) |
98f44cb0 | 660 | { |
3cbc3a0f SM |
661 | if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, |
662 | priv->tx_bd_num)) { | |
98f44cb0 IM |
663 | qtnf_pcie_data_tx_reclaim(priv); |
664 | ||
3cbc3a0f SM |
665 | if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, |
666 | priv->tx_bd_num)) { | |
e9931f98 | 667 | pr_warn_ratelimited("reclaim full Tx queue\n"); |
98f44cb0 | 668 | priv->tx_full_count++; |
3cbc3a0f | 669 | return 0; |
98f44cb0 IM |
670 | } |
671 | } | |
672 | ||
3cbc3a0f | 673 | return 1; |
98f44cb0 IM |
674 | } |
675 | ||
/* Bus data_tx callback: DMA-map @skb, fill the next Tx descriptor and
 * kick the EP by writing the descriptor's bus address to the HDP
 * doorbell. Returns NETDEV_TX_BUSY when the ring is full (netdev queues
 * are stopped first); otherwise NETDEV_TX_OK — even when the skb had to
 * be dropped because DMA mapping failed.
 */
static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb)
{
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
	dma_addr_t txbd_paddr, skb_paddr;
	struct qtnf_tx_bd *txbd;
	unsigned long flags;
	int len, i;
	u32 info;
	int ret = 0;

	spin_lock_irqsave(&priv->tx0_lock, flags);

	if (!qtnf_tx_queue_ready(priv)) {
		if (skb->dev) {
			netif_tx_stop_all_queues(skb->dev);
			priv->tx_stopped = 1;
		}

		spin_unlock_irqrestore(&priv->tx0_lock, flags);
		return NETDEV_TX_BUSY;
	}

	i = priv->tx_bd_w_index;
	priv->tx_skb[i] = skb;
	len = skb->len;

	skb_paddr = pci_map_single(priv->pdev, skb->data,
				   skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, skb_paddr)) {
		pr_err("skb DMA mapping error: %pad\n", &skb_paddr);
		ret = -ENOMEM;
		goto tx_done;
	}

	txbd = &priv->tx_bd_vbase[i];
	txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr));
	txbd->addr_h = cpu_to_le32(QTN_HOST_HI32(skb_paddr));

	info = (len & QTN_PCIE_TX_DESC_LEN_MASK) << QTN_PCIE_TX_DESC_LEN_SHIFT;
	txbd->info = cpu_to_le32(info);

	/* sync up all descriptor updates before passing them to EP */
	dma_wmb();

	/* write new TX descriptor to PCIE_RX_FIFO on EP */
	txbd_paddr = priv->tx_bd_pbase + i * sizeof(struct qtnf_tx_bd);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	writel(QTN_HOST_HI32(txbd_paddr),
	       PCIE_HDP_HOST_WR_DESC0_H(priv->pcie_reg_base));
#endif
	writel(QTN_HOST_LO32(txbd_paddr),
	       PCIE_HDP_HOST_WR_DESC0(priv->pcie_reg_base));

	if (++i >= priv->tx_bd_num)
		i = 0;

	priv->tx_bd_w_index = i;

tx_done:
	if (ret && skb) {
		pr_err_ratelimited("drop skb\n");
		if (skb->dev)
			skb->dev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
	}

	priv->tx_done_count++;
	spin_unlock_irqrestore(&priv->tx0_lock, flags);

	/* reclaim outside tx0_lock: it takes tx_reclaim_lock itself */
	qtnf_pcie_data_tx_reclaim(priv);

	return NETDEV_TX_OK;
}
750 | ||
/* Bus control_tx callback: push a control frame through the outbound
 * SHM IPC endpoint; returns whatever qtnf_shm_ipc_send() returns.
 */
static int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb)
{
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);

	return qtnf_shm_ipc_send(&priv->shm_ipc_ep_in, skb->data, skb->len);
}
757 | ||
/* Interrupt handler: dispatch SHM IPC events, account per-source stats,
 * and hand Rx/Tx completion work off to NAPI and the reclaim tasklet
 * with the corresponding HDP sources masked until they finish.
 */
static irqreturn_t qtnf_interrupt(int irq, void *data)
{
	struct qtnf_bus *bus = (struct qtnf_bus *)data;
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
	u32 status;

	priv->pcie_irq_count++;
	status = readl(PCIE_HDP_INT_STATUS(priv->pcie_reg_base));

	/* IPC events are processed unconditionally: they are not gated by
	 * the HDP interrupt mask
	 */
	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in);
	qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out);

	if (!(status & priv->pcie_irq_mask))
		goto irq_done;

	if (status & PCIE_HDP_INT_RX_BITS)
		priv->pcie_irq_rx_count++;

	if (status & PCIE_HDP_INT_TX_BITS)
		priv->pcie_irq_tx_count++;

	if (status & PCIE_HDP_INT_HHBM_UF)
		priv->pcie_irq_uf_count++;

	if (status & PCIE_HDP_INT_RX_BITS) {
		qtnf_dis_rxdone_irq(priv);
		napi_schedule(&bus->mux_napi);
	}

	if (status & PCIE_HDP_INT_TX_BITS) {
		qtnf_dis_txdone_irq(priv);
		tasklet_hi_schedule(&priv->reclaim_tq);
	}

irq_done:
	/* H/W workaround: clean all bits, not only enabled */
	qtnf_non_posted_write(~0U, PCIE_HDP_INT_STATUS(priv->pcie_reg_base));

	if (!priv->msi_enabled)
		qtnf_deassert_intx(priv);

	return IRQ_HANDLED;
}
801 | ||
3cbc3a0f | 802 | static int qtnf_rx_data_ready(struct qtnf_pcie_bus_priv *priv) |
98f44cb0 | 803 | { |
3cbc3a0f SM |
804 | u16 index = priv->rx_bd_r_index; |
805 | struct qtnf_rx_bd *rxbd; | |
806 | u32 descw; | |
98f44cb0 | 807 | |
3cbc3a0f SM |
808 | rxbd = &priv->rx_bd_vbase[index]; |
809 | descw = le32_to_cpu(rxbd->info); | |
98f44cb0 | 810 | |
3cbc3a0f SM |
811 | if (descw & QTN_TXDONE_MASK) |
812 | return 1; | |
98f44cb0 | 813 | |
3cbc3a0f | 814 | return 0; |
98f44cb0 IM |
815 | } |
816 | ||
/* NAPI poll: consume up to @budget completed Rx descriptors, pushing
 * valid frames up through GRO and dropping invalid ones, then refill the
 * ring with fresh skbs. Completes NAPI and re-enables the Rx interrupt
 * when the ring drains before the budget is spent.
 */
static int qtnf_rx_poll(struct napi_struct *napi, int budget)
{
	struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi);
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
	struct net_device *ndev = NULL;
	struct sk_buff *skb = NULL;
	int processed = 0;
	struct qtnf_rx_bd *rxbd;
	dma_addr_t skb_paddr;
	int consume;
	u32 descw;
	u32 psize;
	u16 r_idx;
	u16 w_idx;
	int ret;

	while (processed < budget) {

		if (!qtnf_rx_data_ready(priv))
			goto rx_out;

		r_idx = priv->rx_bd_r_index;
		rxbd = &priv->rx_bd_vbase[r_idx];
		descw = le32_to_cpu(rxbd->info);

		skb = priv->rx_skb[r_idx];
		psize = QTN_GET_LEN(descw);
		consume = 1;

		/* sanity-check descriptor and skb before consuming */
		if (!(descw & QTN_TXDONE_MASK)) {
			pr_warn("skip invalid rxbd[%d]\n", r_idx);
			consume = 0;
		}

		if (!skb) {
			pr_warn("skip missing rx_skb[%d]\n", r_idx);
			consume = 0;
		}

		if (skb && (skb_tailroom(skb) < psize)) {
			pr_err("skip packet with invalid length: %u > %u\n",
			       psize, skb_tailroom(skb));
			consume = 0;
		}

		/* always unmap a present skb, even if it is being dropped */
		if (skb) {
			skb_paddr = QTN_HOST_ADDR(le32_to_cpu(rxbd->addr_h),
						  le32_to_cpu(rxbd->addr));
			pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE,
					 PCI_DMA_FROMDEVICE);
		}

		if (consume) {
			skb_put(skb, psize);
			ndev = qtnf_classify_skb(bus, skb);
			if (likely(ndev)) {
				qtnf_update_rx_stats(ndev, skb);
				skb->protocol = eth_type_trans(skb, ndev);
				napi_gro_receive(napi, skb);
			} else {
				pr_debug("drop untagged skb\n");
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		} else {
			if (skb) {
				bus->mux_dev.stats.rx_dropped++;
				dev_kfree_skb_any(skb);
			}
		}

		priv->rx_skb[r_idx] = NULL;
		if (++r_idx >= priv->rx_bd_num)
			r_idx = 0;

		priv->rx_bd_r_index = r_idx;

		/* replace processed buffer by a new one */
		w_idx = priv->rx_bd_w_index;
		while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index,
				  priv->rx_bd_num) > 0) {
			if (++w_idx >= priv->rx_bd_num)
				w_idx = 0;

			ret = skb2rbd_attach(priv, w_idx);
			if (ret) {
				pr_err("failed to allocate new rx_skb[%d]\n",
				       w_idx);
				break;
			}
		}

		processed++;
	}

rx_out:
	if (processed < budget) {
		napi_complete(napi);
		qtnf_en_rxdone_irq(priv);
	}

	return processed;
}
921 | ||
/* Bus data_tx_timeout callback: kick the reclaim tasklet to free up
 * completed Tx descriptors out of band.
 */
static void
qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev)
{
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);

	tasklet_hi_schedule(&priv->reclaim_tq);
}
929 | ||
/* Bus data_rx_start callback: unmask HDP interrupts and enable NAPI. */
static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus)
{
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);

	qtnf_enable_hdp_irqs(priv);
	napi_enable(&bus->mux_napi);
}
937 | ||
/* Bus op: stop RX processing.  NAPI is disabled first, then HDP interrupts
 * are masked -- reverse order of qtnf_pcie_data_rx_start().
 */
static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus)
{
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);

	napi_disable(&bus->mux_napi);
	qtnf_disable_hdp_irqs(priv);
}
945 | ||
/* Bus method table handed to the qtnfmac core via bus->bus_ops. */
static const struct qtnf_bus_ops qtnf_pcie_bus_ops = {
	/* control path methods */
	.control_tx = qtnf_pcie_control_tx,

	/* data path methods */
	.data_tx = qtnf_pcie_data_tx,
	.data_tx_timeout = qtnf_pcie_data_tx_timeout,
	.data_rx_start = qtnf_pcie_data_rx_start,
	.data_rx_stop = qtnf_pcie_data_rx_stop,
};
956 | ||
c3b2f7ca SM |
957 | static int qtnf_dbg_mps_show(struct seq_file *s, void *data) |
958 | { | |
959 | struct qtnf_bus *bus = dev_get_drvdata(s->private); | |
960 | struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); | |
961 | ||
962 | seq_printf(s, "%d\n", priv->mps); | |
963 | ||
964 | return 0; | |
965 | } | |
966 | ||
967 | static int qtnf_dbg_msi_show(struct seq_file *s, void *data) | |
968 | { | |
969 | struct qtnf_bus *bus = dev_get_drvdata(s->private); | |
970 | struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); | |
971 | ||
972 | seq_printf(s, "%u\n", priv->msi_enabled); | |
973 | ||
974 | return 0; | |
975 | } | |
976 | ||
977 | static int qtnf_dbg_irq_stats(struct seq_file *s, void *data) | |
978 | { | |
979 | struct qtnf_bus *bus = dev_get_drvdata(s->private); | |
980 | struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); | |
981 | u32 reg = readl(PCIE_HDP_INT_EN(priv->pcie_reg_base)); | |
982 | u32 status; | |
983 | ||
984 | seq_printf(s, "pcie_irq_count(%u)\n", priv->pcie_irq_count); | |
985 | seq_printf(s, "pcie_irq_tx_count(%u)\n", priv->pcie_irq_tx_count); | |
986 | status = reg & PCIE_HDP_INT_TX_BITS; | |
987 | seq_printf(s, "pcie_irq_tx_status(%s)\n", | |
988 | (status == PCIE_HDP_INT_TX_BITS) ? "EN" : "DIS"); | |
989 | seq_printf(s, "pcie_irq_rx_count(%u)\n", priv->pcie_irq_rx_count); | |
990 | status = reg & PCIE_HDP_INT_RX_BITS; | |
991 | seq_printf(s, "pcie_irq_rx_status(%s)\n", | |
992 | (status == PCIE_HDP_INT_RX_BITS) ? "EN" : "DIS"); | |
993 | seq_printf(s, "pcie_irq_uf_count(%u)\n", priv->pcie_irq_uf_count); | |
994 | status = reg & PCIE_HDP_INT_HHBM_UF; | |
995 | seq_printf(s, "pcie_irq_hhbm_uf_status(%s)\n", | |
996 | (status == PCIE_HDP_INT_HHBM_UF) ? "EN" : "DIS"); | |
997 | ||
998 | return 0; | |
999 | } | |
1000 | ||
1001 | static int qtnf_dbg_hdp_stats(struct seq_file *s, void *data) | |
1002 | { | |
1003 | struct qtnf_bus *bus = dev_get_drvdata(s->private); | |
1004 | struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); | |
1005 | ||
1006 | seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count); | |
1007 | seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count); | |
1008 | seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done); | |
1009 | seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req); | |
1010 | ||
1011 | seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index); | |
1012 | seq_printf(s, "tx_bd_p_index(%u)\n", | |
1013 | readl(PCIE_HDP_RX0DMA_CNT(priv->pcie_reg_base)) | |
1014 | & (priv->tx_bd_num - 1)); | |
1015 | seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index); | |
1016 | seq_printf(s, "tx queue len(%u)\n", | |
1017 | CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index, | |
1018 | priv->tx_bd_num)); | |
1019 | ||
1020 | seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index); | |
1021 | seq_printf(s, "rx_bd_p_index(%u)\n", | |
1022 | readl(PCIE_HDP_TX0DMA_CNT(priv->pcie_reg_base)) | |
1023 | & (priv->rx_bd_num - 1)); | |
1024 | seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index); | |
1025 | seq_printf(s, "rx alloc queue len(%u)\n", | |
1026 | CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, | |
1027 | priv->rx_bd_num)); | |
1028 | ||
1029 | return 0; | |
1030 | } | |
1031 | ||
1032 | static int qtnf_dbg_shm_stats(struct seq_file *s, void *data) | |
1033 | { | |
1034 | struct qtnf_bus *bus = dev_get_drvdata(s->private); | |
1035 | struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); | |
1036 | ||
1037 | seq_printf(s, "shm_ipc_ep_in.tx_packet_count(%zu)\n", | |
1038 | priv->shm_ipc_ep_in.tx_packet_count); | |
1039 | seq_printf(s, "shm_ipc_ep_in.rx_packet_count(%zu)\n", | |
1040 | priv->shm_ipc_ep_in.rx_packet_count); | |
1041 | seq_printf(s, "shm_ipc_ep_out.tx_packet_count(%zu)\n", | |
1042 | priv->shm_ipc_ep_out.tx_timeout_count); | |
1043 | seq_printf(s, "shm_ipc_ep_out.rx_packet_count(%zu)\n", | |
1044 | priv->shm_ipc_ep_out.rx_packet_count); | |
1045 | ||
1046 | return 0; | |
1047 | } | |
1048 | ||
/* Build one firmware-download frame and queue it on the data TX path.
 *
 * @priv: bus private state
 * @size: total firmware image size in bytes
 * @blk:  sequence number of this block (0 for the first block)
 * @pblk: pointer to the current block inside the image
 * @fw:   start of the firmware image
 *
 * Returns the number of payload bytes accepted for transmission, -ENOMEM on
 * allocation failure, or 0 when the TX path did not take the frame (the
 * caller, qtnf_ep_fw_load(), retries the same block on any value <= 0).
 */
static int qtnf_ep_fw_send(struct qtnf_pcie_bus_priv *priv, uint32_t size,
			   int blk, const u8 *pblk, const u8 *fw)
{
	struct pci_dev *pdev = priv->pdev;
	struct qtnf_bus *bus = pci_get_drvdata(pdev);

	struct qtnf_pcie_fw_hdr *hdr;
	u8 *pdata;

	int hds = sizeof(*hdr);
	struct sk_buff *skb = NULL;
	int len = 0;
	int ret;

	skb = __dev_alloc_skb(QTN_PCIE_FW_BUFSZ, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* The whole fixed-size buffer is sent; skb->len is set directly
	 * rather than via skb_put() -- NOTE(review): skb_put() would be the
	 * conventional way to do this; confirm tail pointer is not relied on
	 * downstream.
	 */
	skb->len = QTN_PCIE_FW_BUFSZ;
	skb->dev = NULL;

	/* Frame layout: qtnf_pcie_fw_hdr followed by the payload bytes. */
	hdr = (struct qtnf_pcie_fw_hdr *)skb->data;
	memcpy(hdr->boardflg, QTN_PCIE_BOARDFLG, strlen(QTN_PCIE_BOARDFLG));
	hdr->fwsize = cpu_to_le32(size);
	hdr->seqnum = cpu_to_le32(blk);

	/* First block opens the transfer; all later blocks are "sub" data. */
	if (blk)
		hdr->type = cpu_to_le32(QTN_FW_DSUB);
	else
		hdr->type = cpu_to_le32(QTN_FW_DBEGIN);

	pdata = skb->data + hds;

	/* If fewer than a full payload's worth of bytes remain, shrink the
	 * length and tag this frame as the final one (QTN_FW_DEND).
	 */
	len = QTN_PCIE_FW_BUFSZ - hds;
	if (pblk >= (fw + size - len)) {
		len = fw + size - pblk;
		hdr->type = cpu_to_le32(QTN_FW_DEND);
	}

	hdr->pktlen = cpu_to_le32(len);
	memcpy(pdata, pblk, len);
	/* CRC covers payload only; inverted per the device's expectation. */
	hdr->crc = cpu_to_le32(~crc32(0, pdata, len));

	ret = qtnf_pcie_data_tx(bus, skb);

	return (ret == NETDEV_TX_OK) ? len : 0;
}
1096 | ||
/* Upload the firmware image to the endpoint block by block.
 *
 * After every QTN_PCIE_FW_DLMASK+1 blocks (and after the final block) the
 * host raises QTN_RC_FW_SYNC and waits for the endpoint's QTN_EP_FW_SYNC
 * acknowledgment.  If the endpoint flags QTN_EP_FW_RETRY, the window of
 * blocks just sent is rewound and retransmitted.  A global attempt counter
 * bounds both send retries and sync retries.
 *
 * Returns 0 on success, -ETIMEDOUT on too many retries or a sync timeout.
 */
static int
qtnf_ep_fw_load(struct qtnf_pcie_bus_priv *priv, const u8 *fw, u32 fw_size)
{
	int blk_size = QTN_PCIE_FW_BUFSZ - sizeof(struct qtnf_pcie_fw_hdr);
	int blk_count = fw_size / blk_size + ((fw_size % blk_size) ? 1 : 0);
	const u8 *pblk = fw;
	int threshold = 0;
	int blk = 0;
	int len;

	pr_debug("FW upload started: fw_addr=0x%p size=%d\n", fw, fw_size);

	while (blk < blk_count) {
		/* Hard cap on total iterations so TX-path stalls or endless
		 * endpoint retries cannot wedge the worker forever.
		 */
		if (++threshold > 10000) {
			pr_err("FW upload failed: too many retries\n");
			return -ETIMEDOUT;
		}

		len = qtnf_ep_fw_send(priv, fw_size, blk, pblk, fw);
		if (len <= 0)
			continue;

		/* Sync point: end of a QTN_PCIE_FW_DLMASK window or the
		 * very last block of the image.
		 */
		if (!((blk + 1) & QTN_PCIE_FW_DLMASK) ||
		    (blk == (blk_count - 1))) {
			qtnf_set_state(&priv->bda->bda_rc_state,
				       QTN_RC_FW_SYNC);
			if (qtnf_poll_state(&priv->bda->bda_ep_state,
					    QTN_EP_FW_SYNC,
					    QTN_FW_DL_TIMEOUT_MS)) {
				pr_err("FW upload failed: SYNC timed out\n");
				return -ETIMEDOUT;
			}

			qtnf_clear_state(&priv->bda->bda_ep_state,
					 QTN_EP_FW_SYNC);

			if (qtnf_is_state(&priv->bda->bda_ep_state,
					  QTN_EP_FW_RETRY)) {
				if (blk == (blk_count - 1)) {
					/* Partial last window: rewind only
					 * the blocks of this (short) round;
					 * the final block's payload was
					 * 'len' bytes, earlier ones full.
					 */
					int last_round =
						blk_count & QTN_PCIE_FW_DLMASK;
					blk -= last_round;
					pblk -= ((last_round - 1) *
						blk_size + len);
				} else {
					/* Full window: rewind a whole
					 * QTN_PCIE_FW_DLMASK round.
					 */
					blk -= QTN_PCIE_FW_DLMASK;
					pblk -= QTN_PCIE_FW_DLMASK * blk_size;
				}

				qtnf_clear_state(&priv->bda->bda_ep_state,
						 QTN_EP_FW_RETRY);

				pr_warn("FW upload retry: block #%d\n", blk);
				continue;
			}

			/* Window acknowledged: recycle the TX descriptors
			 * used by the frames just sent.
			 */
			qtnf_pcie_data_tx_reclaim(priv);
		}

		pblk += len;
		blk++;
	}

	pr_debug("FW upload completed: totally sent %d blocks\n", blk);
	return 0;
}
1163 | ||
c3b2f7ca | 1164 | static void qtnf_fw_work_handler(struct work_struct *work) |
98f44cb0 | 1165 | { |
c3b2f7ca | 1166 | struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work); |
98f44cb0 IM |
1167 | struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus); |
1168 | struct pci_dev *pdev = priv->pdev; | |
c3b2f7ca | 1169 | const struct firmware *fw; |
98f44cb0 IM |
1170 | int ret; |
1171 | u32 state = QTN_RC_FW_LOADRDY | QTN_RC_FW_QLINK; | |
1172 | ||
c3b2f7ca | 1173 | if (flashboot) { |
98f44cb0 | 1174 | state |= QTN_RC_FW_FLASHBOOT; |
c3b2f7ca SM |
1175 | } else { |
1176 | ret = request_firmware(&fw, bus->fwname, &pdev->dev); | |
1177 | if (ret < 0) { | |
1178 | pr_err("failed to get firmware %s\n", bus->fwname); | |
1179 | goto fw_load_fail; | |
1180 | } | |
1181 | } | |
98f44cb0 IM |
1182 | |
1183 | qtnf_set_state(&priv->bda->bda_rc_state, state); | |
1184 | ||
1185 | if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY, | |
1186 | QTN_FW_DL_TIMEOUT_MS)) { | |
1187 | pr_err("card is not ready\n"); | |
c3b2f7ca | 1188 | goto fw_load_fail; |
98f44cb0 IM |
1189 | } |
1190 | ||
1191 | qtnf_clear_state(&priv->bda->bda_ep_state, QTN_EP_FW_LOADRDY); | |
1192 | ||
1193 | if (flashboot) { | |
c3b2f7ca SM |
1194 | pr_info("booting firmware from flash\n"); |
1195 | } else { | |
1196 | pr_info("starting firmware upload: %s\n", bus->fwname); | |
98f44cb0 | 1197 | |
c3b2f7ca SM |
1198 | ret = qtnf_ep_fw_load(priv, fw->data, fw->size); |
1199 | release_firmware(fw); | |
1200 | if (ret) { | |
1201 | pr_err("firmware upload error\n"); | |
1202 | goto fw_load_fail; | |
1203 | } | |
98f44cb0 IM |
1204 | } |
1205 | ||
c3b2f7ca SM |
1206 | if (qtnf_poll_state(&priv->bda->bda_ep_state, QTN_EP_FW_DONE, |
1207 | QTN_FW_DL_TIMEOUT_MS)) { | |
1208 | pr_err("firmware bringup timed out\n"); | |
1209 | goto fw_load_fail; | |
1210 | } | |
98f44cb0 | 1211 | |
c3b2f7ca SM |
1212 | bus->fw_state = QTNF_FW_STATE_FW_DNLD_DONE; |
1213 | pr_info("firmware is up and running\n"); | |
98f44cb0 | 1214 | |
c3b2f7ca SM |
1215 | if (qtnf_poll_state(&priv->bda->bda_ep_state, |
1216 | QTN_EP_FW_QLINK_DONE, QTN_FW_QLINK_TIMEOUT_MS)) { | |
1217 | pr_err("firmware runtime failure\n"); | |
1218 | goto fw_load_fail; | |
1219 | } | |
98f44cb0 | 1220 | |
c3b2f7ca SM |
1221 | ret = qtnf_core_attach(bus); |
1222 | if (ret) { | |
1223 | pr_err("failed to attach core\n"); | |
1224 | goto fw_load_fail; | |
1225 | } | |
98f44cb0 | 1226 | |
c3b2f7ca SM |
1227 | qtnf_debugfs_init(bus, DRV_NAME); |
1228 | qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show); | |
1229 | qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show); | |
1230 | qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats); | |
1231 | qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats); | |
1232 | qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats); | |
98f44cb0 | 1233 | |
c3b2f7ca | 1234 | goto fw_load_exit; |
98f44cb0 | 1235 | |
c3b2f7ca SM |
1236 | fw_load_fail: |
1237 | bus->fw_state = QTNF_FW_STATE_DEAD; | |
98f44cb0 | 1238 | |
c3b2f7ca SM |
1239 | fw_load_exit: |
1240 | complete(&bus->firmware_init_complete); | |
1241 | put_device(&pdev->dev); | |
98f44cb0 IM |
1242 | } |
1243 | ||
/* Schedule asynchronous firmware bringup.  A device reference is taken
 * before the work is queued and is released by qtnf_fw_work_handler() when
 * it finishes (so the device cannot vanish while the work is pending).
 */
static void qtnf_bringup_fw_async(struct qtnf_bus *bus)
{
	struct qtnf_pcie_bus_priv *priv = (void *)get_bus_priv(bus);
	struct pci_dev *pdev = priv->pdev;

	get_device(&pdev->dev);
	INIT_WORK(&bus->fw_work, qtnf_fw_work_handler);
	schedule_work(&bus->fw_work);
}
1253 | ||
/* Tasklet body: reclaim completed TX descriptors, then re-enable the
 * tx-done interrupt (NOTE(review): presumably masked by the IRQ handler
 * before scheduling this tasklet -- confirm against the ISR).
 */
static void qtnf_reclaim_tasklet_fn(unsigned long data)
{
	struct qtnf_pcie_bus_priv *priv = (void *)data;

	qtnf_pcie_data_tx_reclaim(priv);
	qtnf_en_txdone_irq(priv);
}
1261 | ||
/* PCI probe: allocate the bus object (with embedded private state), set up
 * locks, stats, NAPI, workqueue, DMA masks, IRQs, descriptor memory and the
 * SHM IPC channel, then kick off asynchronous firmware bringup.
 *
 * Error labels unwind in reverse order of acquisition; devm_* resources
 * (bus allocation, enabled device, requested IRQ) are released by the
 * device-managed framework and need no explicit unwinding here.
 */
static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct qtnf_pcie_bus_priv *pcie_priv;
	struct qtnf_bus *bus;
	int ret;

	/* Private state is allocated in the same block, directly after the
	 * bus structure (accessed via get_bus_priv()).
	 */
	bus = devm_kzalloc(&pdev->dev,
			   sizeof(*bus) + sizeof(*pcie_priv), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	pcie_priv = get_bus_priv(bus);

	pci_set_drvdata(pdev, bus);
	bus->bus_ops = &qtnf_pcie_bus_ops;
	bus->dev = &pdev->dev;
	bus->fw_state = QTNF_FW_STATE_RESET;
	pcie_priv->pdev = pdev;

	strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
	init_completion(&bus->firmware_init_complete);
	mutex_init(&bus->bus_lock);
	spin_lock_init(&pcie_priv->tx0_lock);
	spin_lock_init(&pcie_priv->irq_lock);
	spin_lock_init(&pcie_priv->tx_reclaim_lock);

	/* init stats */
	pcie_priv->tx_full_count = 0;
	pcie_priv->tx_done_count = 0;
	pcie_priv->pcie_irq_count = 0;
	pcie_priv->pcie_irq_rx_count = 0;
	pcie_priv->pcie_irq_tx_count = 0;
	pcie_priv->pcie_irq_uf_count = 0;
	pcie_priv->tx_reclaim_done = 0;
	pcie_priv->tx_reclaim_req = 0;

	tasklet_init(&pcie_priv->reclaim_tq, qtnf_reclaim_tasklet_fn,
		     (unsigned long)pcie_priv);

	/* NAPI is attached to a dummy netdev since RX is demuxed to real
	 * netdevs later (see qtnf_rx_poll / qtnf_classify_skb).
	 */
	init_dummy_netdev(&bus->mux_dev);
	netif_napi_add(&bus->mux_dev, &bus->mux_napi,
		       qtnf_rx_poll, 10);

	pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PEARL_PCIE");
	if (!pcie_priv->workqueue) {
		pr_err("failed to alloc bus workqueue\n");
		ret = -ENODEV;
		goto err_init;
	}

	if (!pci_is_pcie(pdev)) {
		pr_err("device %s is not PCI Express\n", pci_name(pdev));
		ret = -EIO;
		goto err_base;
	}

	qtnf_tune_pcie_mps(pcie_priv);

	ret = pcim_enable_device(pdev);
	if (ret) {
		pr_err("failed to init PCI device %x\n", pdev->device);
		goto err_base;
	} else {
		pr_debug("successful init of PCI device %x\n", pdev->device);
	}

	/* 64-bit DMA only when dma_addr_t can actually hold 64 bits. */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
#else
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
#endif
	if (ret) {
		pr_err("PCIE DMA coherent mask init failed\n");
		goto err_base;
	}

	pci_set_master(pdev);
	qtnf_pcie_init_irq(pcie_priv);

	ret = qtnf_pcie_init_memory(pcie_priv);
	if (ret < 0) {
		pr_err("PCIE memory init failed\n");
		goto err_base;
	}

	pci_save_state(pdev);

	ret = qtnf_pcie_init_shm_ipc(pcie_priv);
	if (ret < 0) {
		pr_err("PCIE SHM IPC init failed\n");
		goto err_base;
	}

	ret = qtnf_pcie_init_xfer(pcie_priv);
	if (ret) {
		pr_err("PCIE xfer init failed\n");
		goto err_ipc;
	}

	/* init default irq settings */
	qtnf_init_hdp_irqs(pcie_priv);

	/* start with disabled irqs */
	qtnf_disable_hdp_irqs(pcie_priv);

	ret = devm_request_irq(&pdev->dev, pdev->irq, &qtnf_interrupt, 0,
			       "qtnf_pcie_irq", (void *)bus);
	if (ret) {
		pr_err("failed to request pcie irq %d\n", pdev->irq);
		goto err_xfer;
	}

	qtnf_bringup_fw_async(bus);

	return 0;

err_xfer:
	qtnf_free_xfer_buffers(pcie_priv);

err_ipc:
	qtnf_pcie_free_shm_ipc(pcie_priv);

err_base:
	flush_workqueue(pcie_priv->workqueue);
	destroy_workqueue(pcie_priv->workqueue);
	netif_napi_del(&bus->mux_napi);

err_init:
	tasklet_kill(&pcie_priv->reclaim_tq);
	pci_set_drvdata(pdev, NULL);

	return ret;
}
1395 | ||
/* PCI remove: wait for asynchronous firmware bringup to finish, detach the
 * qtnfmac core if it came up, then tear down in reverse order of probe:
 * NAPI, workqueue, tasklet, transfer buffers, debugfs, SHM IPC, and
 * finally reset the card.
 */
static void qtnf_pcie_remove(struct pci_dev *pdev)
{
	struct qtnf_pcie_bus_priv *priv;
	struct qtnf_bus *bus;

	bus = pci_get_drvdata(pdev);
	if (!bus)
		return;

	/* fw work signals this completion in all paths; waiting here makes
	 * the remaining teardown race-free against the worker.
	 */
	wait_for_completion(&bus->firmware_init_complete);

	if (bus->fw_state == QTNF_FW_STATE_ACTIVE)
		qtnf_core_detach(bus);

	priv = get_bus_priv(bus);

	netif_napi_del(&bus->mux_napi);
	flush_workqueue(priv->workqueue);
	destroy_workqueue(priv->workqueue);
	tasklet_kill(&priv->reclaim_tq);

	qtnf_free_xfer_buffers(priv);
	qtnf_debugfs_remove(bus);

	qtnf_pcie_free_shm_ipc(priv);
	qtnf_reset_card(priv);
}
1423 | ||
#ifdef CONFIG_PM_SLEEP
/* Suspend is not supported; returning an error keeps the device active. */
static int qtnf_pcie_suspend(struct device *dev)
{
	return -EOPNOTSUPP;
}

/* Resume is a no-op (suspend never succeeds). */
static int qtnf_pcie_resume(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_SLEEP
/* Power Management Hooks */
static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend,
			 qtnf_pcie_resume);
#endif
1441 | ||
/* PCI IDs this driver binds to: Quantenna Pearl (QSR10g) endpoints. */
static const struct pci_device_id qtnf_pcie_devid_table[] = {
	{
		PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
	},
	{ },
};

MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);
1451 | ||
/* PCI driver descriptor; PM ops are only wired up when sleep is enabled. */
static struct pci_driver qtnf_pcie_drv_data = {
	.name = DRV_NAME,
	.id_table = qtnf_pcie_devid_table,
	.probe = qtnf_pcie_probe,
	.remove = qtnf_pcie_remove,
#ifdef CONFIG_PM_SLEEP
	.driver = {
		.pm = &qtnf_pcie_pm_ops,
	},
#endif
};
1463 | ||
1464 | static int __init qtnf_pcie_register(void) | |
1465 | { | |
1466 | pr_info("register Quantenna QSR10g FullMAC PCIE driver\n"); | |
1467 | return pci_register_driver(&qtnf_pcie_drv_data); | |
1468 | } | |
1469 | ||
/* Module exit point: announce and unregister the PCI driver. */
static void __exit qtnf_pcie_exit(void)
{
	pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n");
	pci_unregister_driver(&qtnf_pcie_drv_data);
}
1475 | ||
module_init(qtnf_pcie_register);
module_exit(qtnf_pcie_exit);

/* Module metadata */
MODULE_AUTHOR("Quantenna Communications");
MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN.");
MODULE_LICENSE("GPL");