// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
			enum mlxsw_pci_cqe_v v;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

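/* Queue element ownership: the ring size is a power of two, so bit
 * log2(count) of the consumer counter flips exactly once per full wrap.
 * Hardware alternates the owner bit value it writes on each pass, so an
 * element is ready for software only when its owner bit matches the
 * current wrap parity of our consumer counter; otherwise it is still
 * owned by hardware.
 */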
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

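/* Doorbells are 16-bit counters written big-endian into BAR0 at
 * per-queue-type offsets relative to the doorbell page reported by
 * firmware. The producer doorbell hands newly built elements to the
 * device; the CQ/EQ consumer doorbell is rung with consumer_counter +
 * count, which is the form the device expects; the arm doorbell
 * re-enables event generation for a CQ/EQ. The wmb() before each ring
 * ensures element contents are visible before the doorbell write.
 */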
static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number of this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 3);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

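/* A WQE carries up to MLXSW_PCI_WQE_SG_ENTRIES scatter/gather entries.
 * The two helpers below map one fragment into a given entry (address +
 * byte count) and unmap it again; a zero byte count marks an unused
 * entry, for which unmap is a no-op.
 */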
static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	pci_unmap_single(pdev, mapaddr, frag_len, direction);
}

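/* RDQ elements are backed by plain skbs sized for the maximum supported
 * MTU, one per element, DMA-mapped towards the host in scatter entry 0
 * of the element's WQE (which is assumed to be zeroed beforehand).
 */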
static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	elem_info->u.rdq.skb = NULL;
	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	/* Assume that wqe was previously zeroed. */

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number of this RDQ with base
	 * above SDQ count as the lower ones are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

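/* Completions can arrive in one of three CQE formats (v0, v1, v2); the
 * highest version supported by both driver and device is cached in
 * max_cqe_ver. CQs serving SDQs are demoted from v2 to v1 here since,
 * as noted below, the v2 format buys nothing for send completions.
 */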
static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->u.cq.v = mlxsw_pci->max_cqe_ver;

	/* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdq_cqs)
		q->u.cq.v = MLXSW_PCI_CQE_V1;
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
	}

	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

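/* A send completion releases one SDQ element: all scatter entries are
 * unmapped, the skb is freed and the consumer counter advances; it is
 * expected to land exactly on the WQE counter reported in the CQE.
 */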
static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

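/* Receive completion: detach the filled skb from the RDQ element, build
 * rx_info (LAG vs. system port, trap ID), trim the FCS if the device
 * included it, and hand the skb to the core. The skb is read through
 * u.sdq.skb, which overlays u.rdq.skb (set at allocation time) in the
 * same union. The element is then refilled with a fresh skb and handed
 * back to hardware.
 */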
static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	struct mlxsw_rx_info rx_info;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	if (!skb)
		return;
	wqe = elem_info->elem;
	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

	memset(wqe, 0, q->elem_size);
	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err)
		dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return;
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

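/* CQ processing: each CQE is copied to ncqe on the stack and the
 * consumer doorbell is rung immediately, letting hardware reuse the CQ
 * slot while the completion is still being handled. Work is capped at
 * half the ring (credits) per tasklet run to bound latency, and the CQ
 * is re-armed only if at least one CQE was processed.
 */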
static void mlxsw_pci_cq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, ncqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items)
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}

static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
					       MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
					       MLXSW_PCI_CQE01_SIZE;
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

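/* EQ processing: EQ0 (MLXSW_PCI_EQ_ASYNC_NUM) carries command interface
 * completions, EQ1 (MLXSW_PCI_EQ_COMP_NUM) carries CQ completion events.
 * Active CQ numbers are first collected into a bitmap; their tasklets
 * are scheduled only after the EQ doorbells have been rung. Work is
 * again capped at half the ring per run.
 */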
static void mlxsw_pci_eq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {

		/* Command interface completion events are always received on
		 * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
		 * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
		 */
		switch (q->num) {
		case MLXSW_PCI_EQ_ASYNC_NUM:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQ_COMP_NUM:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(unsigned long data);
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init	= mlxsw_pci_cq_pre_init,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.elem_count_f	= mlxsw_pci_cq_elem_count,
	.elem_size_f	= mlxsw_pci_cq_elem_size
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE
};

static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	q->num = q_num;
	if (q_ops->pre_init)
		q_ops->pre_init(mlxsw_pci, q);

	spin_lock_init(&q->lock);
	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
					 q_ops->elem_count;
	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
					    q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
					     mem_item->size,
					     &mem_item->mapaddr);
	if (!mem_item->buf)
		return -ENOMEM;
	memset(mem_item->buf, 0, mem_item->size);

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize dma mapped elements info elem_info for
	 * future easy access.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

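/* Async queue bring-up order: EQs first (so command completions can be
 * delivered), then CQs, then the descriptor queues that post to them.
 * CQ numbering follows a fixed convention: CQ i serves SDQ i, and RDQ j
 * is paired with CQ (sdq_count + j). The counts and sizes reported by
 * QUERY_AQ_CAP must match the driver's compiled-in expectations.
 */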
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_sdq_cqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 num_eqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We have to poll in command interface until queues are initialized */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int mlxsw_pci_resources_query(struct mlxsw_pci *mlxsw_pci, char *mbox,
				     struct mlxsw_res *res)
{
	int index, i;
	u64 data;
	u16 id;
	int err;

	if (!res)
		return 0;

	mlxsw_cmd_mbox_zero(mbox);

	for (index = 0; index < MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES;
	     index++) {
		err = mlxsw_cmd_query_resources(mlxsw_pci->core, mbox, index);
		if (err)
			return err;

		for (i = 0; i < MLXSW_CMD_QUERY_RESOURCES_PER_QUERY; i++) {
			id = mlxsw_cmd_mbox_query_resource_id_get(mbox, i);
			data = mlxsw_cmd_mbox_query_resource_data_get(mbox, i);

			if (id == MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID)
				return 0;

			mlxsw_res_parse(res, id, data);
		}
	}

	/* If after MLXSW_CMD_QUERY_RESOURCES_MAX_QUERIES we still didn't get
	 * MLXSW_CMD_QUERY_RESOURCES_TABLE_END_ID, something went bad in the FW.
	 */
	return -EIO;
}

static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}

static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}

static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
						     mem_item->size,
						     &mem_item->mapaddr);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
					 &mbox->mapaddr);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			    mbox->mapaddr);
}

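/* Software reset is requested through the MRSR register. SwitchX-2
 * offers no readiness indication, so we simply sleep out the timeout;
 * newer devices expose a FW_READY word in BAR0 that is polled for the
 * magic value until the timeout expires.
 */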
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
			      const struct pci_device_id *id)
{
	unsigned long end;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	int err;

	mlxsw_reg_mrsr_pack(mrsr_pl);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;
	if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
		msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
		return 0;
	}

	/* We must wait for the HW to become responsive once again. */
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);

		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			return 0;
		cond_resched();
	} while (time_before(jiffies, end));
	return -EBUSY;
}

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}

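/* Bus init sequence: reset the device, set up MSI-X, query firmware
 * (revision, doorbell page, number of firmware pages), map the firmware
 * area, read board info and resources, pick the maximal CQE version,
 * push the config profile, bring up the async queues and finally
 * request the EQ IRQ.
 */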
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto mbox_put;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
	if (err)
		goto err_sw_reset;

	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
	if (err < 0) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_alloc_irq;
	}

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_pci_resources_query(mlxsw_pci, mbox, res);
	if (err)
		goto err_query_resources;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
	} else {
		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
		err = -EINVAL;
		goto err_cqe_v_check;
	}

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(pci_irq_vector(pdev, 0),
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_sw_reset:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}

static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}

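/* TX path: pick an SDQ by local port, linearize the skb if its
 * fragments plus the linear part would not fit the WQE's scatter
 * entries, map the linear part and each page fragment, zero the unused
 * entries' byte counts and ring the producer doorbell. -EAGAIN is
 * returned (with the skb left to the caller) when the queue is full.
 */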
static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

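/* Command interface: parameters travel either inline in the CIR
 * registers or via the DMA-mapped in/out mailboxes. Until the event
 * queues are up (cmd.nopoll clear) completion is detected by polling
 * the GO bit; afterwards the command completion EQE wakes us through
 * mlxsw_pci_eq_cmd_event(). Some commands return output directly in
 * CIR_OUT_PARAM instead of the out mailbox (out_mbox_direct).
 */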
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	if (out_mbox)
		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};

static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = pdev->driver->name;
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

static int __init mlxsw_pci_module_init(void)
{
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");