// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/string.h>

#include "pci_hw.h"
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
#include "resources.h"

#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))

enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};

#define MLXSW_PCI_QUEUE_TYPE_COUNT	4

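/* Doorbell register offsets, indexed by enum mlxsw_pci_queue_type. The arm
 * doorbells, which re-enable event generation for a queue, exist only for
 * CQs and EQs.
 */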
static const u16 mlxsw_pci_doorbell_type_offset[] = {
	MLXSW_PCI_DOORBELL_SDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
	MLXSW_PCI_DOORBELL_RDQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
	MLXSW_PCI_DOORBELL_CQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_EQ_OFFSET,	/* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
	0, /* unused */
	0, /* unused */
	MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
	MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
};

struct mlxsw_pci_mem_item {
	char *buf;
	dma_addr_t mapaddr;
	size_t size;
};

struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};

struct mlxsw_pci_queue {
	spinlock_t lock; /* for queue accesses */
	struct mlxsw_pci_mem_item mem_item;
	struct mlxsw_pci_queue_elem_info *elem_info;
	u16 producer_counter;
	u16 consumer_counter;
	u16 count; /* number of elements in queue */
	u8 num; /* queue number */
	u8 elem_size; /* size of one element */
	enum mlxsw_pci_queue_type type;
	struct tasklet_struct tasklet; /* queue processing tasklet */
	struct mlxsw_pci *pci;
	union {
		struct {
			u32 comp_sdq_count;
			u32 comp_rdq_count;
			enum mlxsw_pci_cqe_v v;
		} cq;
		struct {
			u32 ev_cmd_count;
			u32 ev_comp_count;
			u32 ev_other_count;
		} eq;
	} u;
};

struct mlxsw_pci_queue_type_group {
	struct mlxsw_pci_queue *q;
	u8 count; /* number of queues in group */
};

struct mlxsw_pci {
	struct pci_dev *pdev;
	u8 __iomem *hw_addr;
	u64 free_running_clock_offset;
	struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
	u32 doorbell_offset;
	struct mlxsw_core *core;
	struct {
		struct mlxsw_pci_mem_item *items;
		unsigned int count;
	} fw_area;
	struct {
		struct mlxsw_pci_mem_item out_mbox;
		struct mlxsw_pci_mem_item in_mbox;
		struct mutex lock; /* Lock access to command registers */
		bool nopoll;
		wait_queue_head_t wait;
		bool wait_done;
		struct {
			u8 status;
			u64 out_param;
		} comp;
	} cmd;
	struct mlxsw_bus_info bus_info;
	const struct pci_device_id *id;
	enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */
	u8 num_sdq_cqs; /* Number of CQs used for SDQs */
};

static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
{
	tasklet_schedule(&q->tasklet);
}

static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
					size_t elem_size, int elem_index)
{
	return q->mem_item.buf + (elem_size * elem_index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return &q->elem_info[elem_index];
}

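/* The producer and consumer counters are free-running u16s; an element index
 * is the counter masked by the power-of-two queue size. The ring is full
 * when the two counters are exactly one queue length apart.
 */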
static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
{
	int index = q->producer_counter & (q->count - 1);

	if ((u16) (q->producer_counter - q->consumer_counter) == q->count)
		return NULL;
	return mlxsw_pci_queue_elem_info_get(q, index);
}

static struct mlxsw_pci_queue_elem_info *
mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
{
	int index = q->consumer_counter & (q->count - 1);

	return mlxsw_pci_queue_elem_info_get(q, index);
}

static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
{
	return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
}

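/* Hardware toggles the CQE/EQE owner bit on every pass through the ring. An
 * element still belongs to the hardware while its owner bit differs from the
 * current lap parity, i.e. the bit of the consumer counter just above the
 * index mask.
 */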
static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
{
	return owner_bit != !!(q->consumer_counter & q->count);
}

static struct mlxsw_pci_queue_type_group *
mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
			       enum mlxsw_pci_queue_type q_type)
{
	return &mlxsw_pci->queues[q_type];
}

static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
				  enum mlxsw_pci_queue_type q_type)
{
	struct mlxsw_pci_queue_type_group *queue_group;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
	return queue_group->count;
}

static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
}

static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
{
	return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
}

static struct mlxsw_pci_queue *
__mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
		      enum mlxsw_pci_queue_type q_type, u8 q_num)
{
	return &mlxsw_pci->queues[q_type].q[q_num];
}

static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
						 u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci,
				     MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
}

static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
						u8 q_num)
{
	return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
}

static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q,
					   u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_type_offset[q->type],
				   q->num), val);
}

static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
					       struct mlxsw_pci_queue *q,
					       u16 val)
{
	mlxsw_pci_write32(mlxsw_pci,
			  DOORBELL(mlxsw_pci->doorbell_offset,
				   mlxsw_pci_doorbell_arm_type_offset[q->type],
				   q->num), val);
}

static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
}

static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
						   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
				       q->consumer_counter + q->count);
}

static void
mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
					   struct mlxsw_pci_queue *q)
{
	wmb(); /* ensure all writes are done before we ring a bell */
	__mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
}

static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
					     int page_index)
{
	return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
}

static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	int tclass;
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;
	tclass = q->num == MLXSW_PCI_SDQ_EMAD_INDEX ? MLXSW_PCI_SDQ_EMAD_TC :
						      MLXSW_PCI_SDQ_CTL_TC;

	/* Set CQ of same number of this SDQ. */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
	mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
}

static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
				  int index, char *frag_data, size_t frag_len,
				  int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	dma_addr_t mapaddr;

	mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
	if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
		dev_err_ratelimited(&pdev->dev, "failed to dma map tx frag\n");
		return -EIO;
	}
	mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
	mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
	return 0;
}

static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
				     int index, int direction)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
	dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);

	if (!frag_len)
		return;
	pci_unmap_single(pdev, mapaddr, frag_len, direction);
}

static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	size_t buf_len = MLXSW_PORT_MAX_MTU;
	char *wqe = elem_info->elem;
	struct sk_buff *skb;
	int err;

	elem_info->u.rdq.skb = NULL;
	skb = netdev_alloc_skb_ip_align(NULL, buf_len);
	if (!skb)
		return -ENOMEM;

	/* Assume that wqe was previously zeroed. */

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     buf_len, DMA_FROM_DEVICE);
	if (err)
		goto err_frag_map;

	elem_info->u.rdq.skb = skb;
	return 0;

err_frag_map:
	dev_kfree_skb_any(skb);
	return err;
}

static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
				   struct mlxsw_pci_queue_elem_info *elem_info)
{
	struct sk_buff *skb;
	char *wqe;

	skb = elem_info->u.rdq.skb;
	wqe = elem_info->elem;

	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
}

static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			      struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci);
	int i;
	int err;

	q->producer_counter = 0;
	q->consumer_counter = 0;

	/* Set CQ of same number of this RDQ with base
	 * above SDQ count as the lower ones are assigned to SDQs.
	 */
	mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num);
	mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
	}

	err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;

	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
		BUG_ON(!elem_info);
		err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
		if (err)
			goto rollback;
		/* Everything is set up, ring doorbell to pass elem to HW */
		q->producer_counter++;
		mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
	}

	return 0;

rollback:
	for (i--; i >= 0; i--) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);

	return err;
}

static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
			       struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	int i;

	mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
	for (i = 0; i < q->count; i++) {
		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
	}
}

static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci,
				  struct mlxsw_pci_queue *q)
{
	q->u.cq.v = mlxsw_pci->max_cqe_ver;

	/* For SDQ it is pointless to use CQEv2, so use CQEv1 instead */
	if (q->u.cq.v == MLXSW_PCI_CQE_V2 &&
	    q->num < mlxsw_pci->num_sdq_cqs)
		q->u.cq.v = MLXSW_PCI_CQE_V1;
}

static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1);
	}

	if (q->u.cq.v == MLXSW_PCI_CQE_V1)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1);
	else if (q->u.cq.v == MLXSW_PCI_CQE_V2)
		mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox,
				MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2);

	mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
	mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
	mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	struct mlxsw_tx_info tx_info;
	char *wqe;
	struct sk_buff *skb;
	int i;

	spin_lock(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info;
	skb = elem_info->u.sdq.skb;
	wqe = elem_info->elem;
	for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);

	if (unlikely(!tx_info.is_emad &&
		     skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		mlxsw_core_ptp_transmitted(mlxsw_pci->core, skb,
					   tx_info.local_port);
		skb = NULL;
	}

	if (skb)
		dev_kfree_skb_any(skb);
	elem_info->u.sdq.skb = NULL;

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
	spin_unlock(&q->lock);
}

static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
				     struct mlxsw_pci_queue *q,
				     u16 consumer_counter_limit,
				     enum mlxsw_pci_cqe_v cqe_v, char *cqe)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	struct sk_buff *skb;
	struct mlxsw_rx_info rx_info;
	u16 byte_count;
	int err;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	skb = elem_info->u.sdq.skb;
	if (!skb)
		return;
	wqe = elem_info->elem;
	mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);

	if (q->consumer_counter++ != consumer_counter_limit)
		dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");

	if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) {
		rx_info.is_lag = true;
		rx_info.u.lag_id = mlxsw_pci_cqe_lag_id_get(cqe_v, cqe);
		rx_info.lag_port_index =
			mlxsw_pci_cqe_lag_subport_get(cqe_v, cqe);
	} else {
		rx_info.is_lag = false;
		rx_info.u.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
	}

	rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);

	byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
	if (mlxsw_pci_cqe_crc_get(cqe_v, cqe))
		byte_count -= ETH_FCS_LEN;
	skb_put(skb, byte_count);
	mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);

	memset(wqe, 0, q->elem_size);
	err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
	if (err)
		dev_dbg_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n");
	/* Everything is set up, ring doorbell to pass elem to HW */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
}

static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

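/* The CQ tasklet copies each CQE into a local buffer (ncqe) and rings the
 * consumer doorbell before handling it, so the ring slot is handed back to
 * the hardware while the copy is being processed.
 */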
static void mlxsw_pci_cq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	char *cqe;
	int items = 0;
	int credits = q->count >> 1;

	while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
		u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
		u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe);
		u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe);
		char ncqe[MLXSW_PCI_CQE_SIZE_MAX];

		memcpy(ncqe, cqe, q->elem_size);
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);

		if (sendq) {
			struct mlxsw_pci_queue *sdq;

			sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
						 wqe_counter, ncqe);
			q->u.cq.comp_sdq_count++;
		} else {
			struct mlxsw_pci_queue *rdq;

			rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
			mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
						 wqe_counter, q->u.cq.v, ncqe);
			q->u.cq.comp_rdq_count++;
		}
		if (++items == credits)
			break;
	}
	if (items)
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
}

static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT :
					       MLXSW_PCI_CQE01_COUNT;
}

static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q)
{
	return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE :
					       MLXSW_PCI_CQE01_SIZE;
}

static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
			     struct mlxsw_pci_queue *q)
{
	int i;
	int err;

	q->consumer_counter = 0;

	for (i = 0; i < q->count; i++) {
		char *elem = mlxsw_pci_queue_elem_get(q, i);

		mlxsw_pci_eqe_owner_set(elem, 1);
	}

	mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
	mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
	mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
	for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
		dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);

		mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
	}
	err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
	if (err)
		return err;
	mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
	mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	return 0;
}

static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
			      struct mlxsw_pci_queue *q)
{
	mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
}

static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
{
	mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
	mlxsw_pci->cmd.comp.out_param =
		((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
		mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
	mlxsw_pci->cmd.wait_done = true;
	wake_up(&mlxsw_pci->cmd.wait);
}

static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *elem;
	bool owner_bit;

	elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
	elem = elem_info->elem;
	owner_bit = mlxsw_pci_eqe_owner_get(elem);
	if (mlxsw_pci_elem_hw_owned(q, owner_bit))
		return NULL;
	q->consumer_counter++;
	rmb(); /* make sure we read owned bit before the rest of elem */
	return elem;
}

static void mlxsw_pci_eq_tasklet(unsigned long data)
{
	struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
	struct mlxsw_pci *mlxsw_pci = q->pci;
	u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci);
	unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)];
	char *eqe;
	u8 cqn;
	bool cq_handle = false;
	int items = 0;
	int credits = q->count >> 1;

	memset(&active_cqns, 0, sizeof(active_cqns));

	while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {

		/* Command interface completion events are always received on
		 * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events
		 * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1).
		 */
		switch (q->num) {
		case MLXSW_PCI_EQ_ASYNC_NUM:
			mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
			q->u.eq.ev_cmd_count++;
			break;
		case MLXSW_PCI_EQ_COMP_NUM:
			cqn = mlxsw_pci_eqe_cqn_get(eqe);
			set_bit(cqn, active_cqns);
			cq_handle = true;
			q->u.eq.ev_comp_count++;
			break;
		default:
			q->u.eq.ev_other_count++;
		}
		if (++items == credits)
			break;
	}
	if (items) {
		mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
		mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
	}

	if (!cq_handle)
		return;
	for_each_set_bit(cqn, active_cqns, cq_count) {
		q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
}

struct mlxsw_pci_queue_ops {
	const char *name;
	enum mlxsw_pci_queue_type type;
	void (*pre_init)(struct mlxsw_pci *mlxsw_pci,
			 struct mlxsw_pci_queue *q);
	int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
		    struct mlxsw_pci_queue *q);
	void (*fini)(struct mlxsw_pci *mlxsw_pci,
		     struct mlxsw_pci_queue *q);
	void (*tasklet)(unsigned long data);
	u16 (*elem_count_f)(const struct mlxsw_pci_queue *q);
	u8 (*elem_size_f)(const struct mlxsw_pci_queue *q);
	u16 elem_count;
	u8 elem_size;
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_SDQ,
	.init		= mlxsw_pci_sdq_init,
	.fini		= mlxsw_pci_sdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE,
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_RDQ,
	.init		= mlxsw_pci_rdq_init,
	.fini		= mlxsw_pci_rdq_fini,
	.elem_count	= MLXSW_PCI_WQE_COUNT,
	.elem_size	= MLXSW_PCI_WQE_SIZE
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_CQ,
	.pre_init	= mlxsw_pci_cq_pre_init,
	.init		= mlxsw_pci_cq_init,
	.fini		= mlxsw_pci_cq_fini,
	.tasklet	= mlxsw_pci_cq_tasklet,
	.elem_count_f	= mlxsw_pci_cq_elem_count,
	.elem_size_f	= mlxsw_pci_cq_elem_size
};

static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
	.type		= MLXSW_PCI_QUEUE_TYPE_EQ,
	.init		= mlxsw_pci_eq_init,
	.fini		= mlxsw_pci_eq_fini,
	.tasklet	= mlxsw_pci_eq_tasklet,
	.elem_count	= MLXSW_PCI_EQE_COUNT,
	.elem_size	= MLXSW_PCI_EQE_SIZE
};

static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				const struct mlxsw_pci_queue_ops *q_ops,
				struct mlxsw_pci_queue *q, u8 q_num)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
	int i;
	int err;

	q->num = q_num;
	if (q_ops->pre_init)
		q_ops->pre_init(mlxsw_pci, q);

	spin_lock_init(&q->lock);
	q->count = q_ops->elem_count_f ? q_ops->elem_count_f(q) :
					 q_ops->elem_count;
	q->elem_size = q_ops->elem_size_f ? q_ops->elem_size_f(q) :
					    q_ops->elem_size;
	q->type = q_ops->type;
	q->pci = mlxsw_pci;

	if (q_ops->tasklet)
		tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);

	mem_item->size = MLXSW_PCI_AQ_SIZE;
	mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
					     mem_item->size,
					     &mem_item->mapaddr);
	if (!mem_item->buf)
		return -ENOMEM;

	q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
	if (!q->elem_info) {
		err = -ENOMEM;
		goto err_elem_info_alloc;
	}

	/* Initialize dma mapped elements info elem_info for
	 * future easy access.
	 */
	for (i = 0; i < q->count; i++) {
		struct mlxsw_pci_queue_elem_info *elem_info;

		elem_info = mlxsw_pci_queue_elem_info_get(q, i);
		elem_info->elem =
			__mlxsw_pci_queue_elem_get(q, q->elem_size, i);
	}

	mlxsw_cmd_mbox_zero(mbox);
	err = q_ops->init(mlxsw_pci, mbox, q);
	if (err)
		goto err_q_ops_init;
	return 0;

err_q_ops_init:
	kfree(q->elem_info);
err_elem_info_alloc:
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
	return err;
}

static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
				 const struct mlxsw_pci_queue_ops *q_ops,
				 struct mlxsw_pci_queue *q)
{
	struct mlxsw_pci_mem_item *mem_item = &q->mem_item;

	q_ops->fini(mlxsw_pci, q);
	kfree(q->elem_info);
	pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
			    mem_item->buf, mem_item->mapaddr);
}

static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				      const struct mlxsw_pci_queue_ops *q_ops,
				      u8 num_qs)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;
	int err;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
	if (!queue_group->q)
		return -ENOMEM;

	for (i = 0; i < num_qs; i++) {
		err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
					   &queue_group->q[i], i);
		if (err)
			goto err_queue_init;
	}
	queue_group->count = num_qs;

	return 0;

err_queue_init:
	for (i--; i >= 0; i--)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
	return err;
}

static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
				       const struct mlxsw_pci_queue_ops *q_ops)
{
	struct mlxsw_pci_queue_type_group *queue_group;
	int i;

	queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
	for (i = 0; i < queue_group->count; i++)
		mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
	kfree(queue_group->q);
}

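/* Query the firmware's async queue capabilities, validate them against the
 * sizes this driver was built for, and bring the queue groups up in
 * dependency order: EQs first, then the CQs they serve, then the SDQs and
 * RDQs that complete onto those CQs.
 */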
static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	u8 num_sdqs;
	u8 sdq_log2sz;
	u8 num_rdqs;
	u8 rdq_log2sz;
	u8 num_cqs;
	u8 cq_log2sz;
	u8 cqv2_log2sz;
	u8 num_eqs;
	u8 eq_log2sz;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
	if (err)
		return err;

	num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
	sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
	num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
	rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
	num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
	cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
	cqv2_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cqv2_sz_get(mbox);
	num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
	eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);

	if (num_sdqs + num_rdqs > num_cqs ||
	    num_sdqs < MLXSW_PCI_SDQS_MIN ||
	    num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) {
		dev_err(&pdev->dev, "Unsupported number of queues\n");
		return -EINVAL;
	}

	if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
	    (1 << cq_log2sz != MLXSW_PCI_CQE01_COUNT) ||
	    (mlxsw_pci->max_cqe_ver == MLXSW_PCI_CQE_V2 &&
	     (1 << cqv2_log2sz != MLXSW_PCI_CQE2_COUNT)) ||
	    (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
		dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
		return -EINVAL;
	}

	mlxsw_pci->num_sdq_cqs = num_sdqs;

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
					 num_eqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize event queues\n");
		return err;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
					 num_cqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize completion queues\n");
		goto err_cqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
					 num_sdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
		goto err_sdqs_init;
	}

	err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
					 num_rdqs);
	if (err) {
		dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
		goto err_rdqs_init;
	}

	/* We have to poll in command interface until queues are initialized */
	mlxsw_pci->cmd.nopoll = true;
	return 0;

err_rdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
err_sdqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
err_cqs_init:
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
	return err;
}

static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
{
	mlxsw_pci->cmd.nopoll = false;
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
	mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
}

static void
mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
				     char *mbox, int index,
				     const struct mlxsw_swid_config *swid)
{
	u8 mask = 0;

	if (swid->used_type) {
		mlxsw_cmd_mbox_config_profile_swid_config_type_set(
			mbox, index, swid->type);
		mask |= 1;
	}
	if (swid->used_properties) {
		mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
			mbox, index, swid->properties);
		mask |= 2;
	}
	mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
}

static int
mlxsw_pci_profile_get_kvd_sizes(const struct mlxsw_pci *mlxsw_pci,
				const struct mlxsw_config_profile *profile,
				struct mlxsw_res *res)
{
	u64 single_size, double_size, linear_size;
	int err;

	err = mlxsw_core_kvd_sizes_get(mlxsw_pci->core, profile,
				       &single_size, &double_size,
				       &linear_size);
	if (err)
		return err;

	MLXSW_RES_SET(res, KVD_SINGLE_SIZE, single_size);
	MLXSW_RES_SET(res, KVD_DOUBLE_SIZE, double_size);
	MLXSW_RES_SET(res, KVD_LINEAR_SIZE, linear_size);

	return 0;
}

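/* Translate the "used_*" flags of the driver's config profile into the
 * corresponding set-bit/value pairs of the CONFIG_PROFILE mailbox, so that
 * only the fields a driver actually cares about are pushed to the firmware.
 */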
static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
				    const struct mlxsw_config_profile *profile,
				    struct mlxsw_res *res)
{
	int i;
	int err;

	mlxsw_cmd_mbox_zero(mbox);

	if (profile->used_max_vepa_channels) {
		mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
			mbox, profile->max_vepa_channels);
	}
	if (profile->used_max_mid) {
		mlxsw_cmd_mbox_config_profile_set_max_mid_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_mid_set(
			mbox, profile->max_mid);
	}
	if (profile->used_max_pgt) {
		mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pgt_set(
			mbox, profile->max_pgt);
	}
	if (profile->used_max_system_port) {
		mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_system_port_set(
			mbox, profile->max_system_port);
	}
	if (profile->used_max_vlan_groups) {
		mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
			mbox, profile->max_vlan_groups);
	}
	if (profile->used_max_regions) {
		mlxsw_cmd_mbox_config_profile_set_max_regions_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_regions_set(
			mbox, profile->max_regions);
	}
	if (profile->used_flood_tables) {
		mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
			mbox, profile->max_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
			mbox, profile->max_vid_flood_tables);
		mlxsw_cmd_mbox_config_profile_max_fid_offset_flood_tables_set(
			mbox, profile->max_fid_offset_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_offset_flood_table_size_set(
			mbox, profile->fid_offset_flood_table_size);
		mlxsw_cmd_mbox_config_profile_max_fid_flood_tables_set(
			mbox, profile->max_fid_flood_tables);
		mlxsw_cmd_mbox_config_profile_fid_flood_table_size_set(
			mbox, profile->fid_flood_table_size);
	}
	if (profile->used_flood_mode) {
		mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_flood_mode_set(
			mbox, profile->flood_mode);
	}
	if (profile->used_max_ib_mc) {
		mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
			mbox, profile->max_ib_mc);
	}
	if (profile->used_max_pkey) {
		mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_max_pkey_set(
			mbox, profile->max_pkey);
	}
	if (profile->used_ar_sec) {
		mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_ar_sec_set(
			mbox, profile->ar_sec);
	}
	if (profile->used_adaptive_routing_group_cap) {
		mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
			mbox, profile->adaptive_routing_group_cap);
	}
	if (profile->used_kvd_sizes && MLXSW_RES_VALID(res, KVD_SIZE)) {
		err = mlxsw_pci_profile_get_kvd_sizes(mlxsw_pci, profile, res);
		if (err)
			return err;

		mlxsw_cmd_mbox_config_profile_set_kvd_linear_size_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_linear_size_set(mbox,
					MLXSW_RES_GET(res, KVD_LINEAR_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_single_size_set(mbox,
									   1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_single_size_set(mbox,
					MLXSW_RES_GET(res, KVD_SINGLE_SIZE));
		mlxsw_cmd_mbox_config_profile_set_kvd_hash_double_size_set(
			mbox, 1);
		mlxsw_cmd_mbox_config_profile_kvd_hash_double_size_set(mbox,
					MLXSW_RES_GET(res, KVD_DOUBLE_SIZE));
	}

	for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
		mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
						     &profile->swid_config[i]);

	if (mlxsw_pci->max_cqe_ver > MLXSW_PCI_CQE_V0) {
		mlxsw_cmd_mbox_config_profile_set_cqe_version_set(mbox, 1);
		mlxsw_cmd_mbox_config_profile_cqe_version_set(mbox, 1);
	}

	return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
}

static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
{
	struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
	int err;

	mlxsw_cmd_mbox_zero(mbox);
	err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
	if (err)
		return err;
	mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
	mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
	return 0;
}

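/* Firmware area pages are handed over with MAP_FA commands. A single mailbox
 * holds at most MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX entries, so the pages are
 * mapped in batches.
 */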
static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
				  u16 num_pages)
{
	struct mlxsw_pci_mem_item *mem_item;
	int nent = 0;
	int i;
	int err;

	mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
					   GFP_KERNEL);
	if (!mlxsw_pci->fw_area.items)
		return -ENOMEM;
	mlxsw_pci->fw_area.count = num_pages;

	mlxsw_cmd_mbox_zero(mbox);
	for (i = 0; i < num_pages; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		mem_item->size = MLXSW_PCI_PAGE_SIZE;
		mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
						     mem_item->size,
						     &mem_item->mapaddr);
		if (!mem_item->buf) {
			err = -ENOMEM;
			goto err_alloc;
		}
		mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);
		mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */
		if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {
			err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
			if (err)
				goto err_cmd_map_fa;
			nent = 0;
			mlxsw_cmd_mbox_zero(mbox);
		}
	}

	if (nent) {
		err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);
		if (err)
			goto err_cmd_map_fa;
	}

	return 0;

err_cmd_map_fa:
err_alloc:
	for (i--; i >= 0; i--) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
	return err;
}

static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
{
	struct mlxsw_pci_mem_item *mem_item;
	int i;

	mlxsw_cmd_unmap_fa(mlxsw_pci->core);

	for (i = 0; i < mlxsw_pci->fw_area.count; i++) {
		mem_item = &mlxsw_pci->fw_area.items[i];

		pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
				    mem_item->buf, mem_item->mapaddr);
	}
	kfree(mlxsw_pci->fw_area.items);
}

static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
{
	struct mlxsw_pci *mlxsw_pci = dev_id;
	struct mlxsw_pci_queue *q;
	int i;

	for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
		q = mlxsw_pci_eq_get(mlxsw_pci, i);
		mlxsw_pci_queue_tasklet_schedule(q);
	}
	return IRQ_HANDLED;
}

static int mlxsw_pci_mbox_alloc(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;
	int err = 0;

	mbox->size = MLXSW_CMD_MBOX_SIZE;
	mbox->buf = pci_alloc_consistent(pdev, MLXSW_CMD_MBOX_SIZE,
					 &mbox->mapaddr);
	if (!mbox->buf) {
		dev_err(&pdev->dev, "Failed allocating memory for mailbox\n");
		err = -ENOMEM;
	}

	return err;
}

static void mlxsw_pci_mbox_free(struct mlxsw_pci *mlxsw_pci,
				struct mlxsw_pci_mem_item *mbox)
{
	struct pci_dev *pdev = mlxsw_pci->pdev;

	pci_free_consistent(pdev, MLXSW_CMD_MBOX_SIZE, mbox->buf,
			    mbox->mapaddr);
}

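/* Reset the device through the MRSR register and wait for it to become
 * responsive again. SwitchX-2 provides no FW_READY indication, so a fixed
 * sleep is used there; newer ASICs are polled for the ready magic value.
 */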
static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci,
			      const struct pci_device_id *id)
{
	unsigned long end;
	char mrsr_pl[MLXSW_REG_MRSR_LEN];
	int err;

	mlxsw_reg_mrsr_pack(mrsr_pl);
	err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl);
	if (err)
		return err;
	if (id->device == PCI_DEVICE_ID_MELLANOX_SWITCHX2) {
		msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
		return 0;
	}

	/* We must wait for the HW to become responsive once again. */
	msleep(MLXSW_PCI_SW_RESET_WAIT_MSECS);

	end = jiffies + msecs_to_jiffies(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
	do {
		u32 val = mlxsw_pci_read32(mlxsw_pci, FW_READY);

		if ((val & MLXSW_PCI_FW_READY_MASK) == MLXSW_PCI_FW_READY_MAGIC)
			return 0;
		cond_resched();
	} while (time_before(jiffies, end));
	return -EBUSY;
}

static int mlxsw_pci_alloc_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	int err;

	err = pci_alloc_irq_vectors(mlxsw_pci->pdev, 1, 1, PCI_IRQ_MSIX);
	if (err < 0)
		dev_err(&mlxsw_pci->pdev->dev, "MSI-X init failed\n");
	return err;
}

static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci)
{
	pci_free_irq_vectors(mlxsw_pci->pdev);
}

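/* Bus init sequence: reset the ASIC, query and validate firmware
 * capabilities, hand over firmware area pages, pick the highest CQE version
 * both sides support, push the config profile, and finally bring up the
 * async queues and the EQ interrupt.
 */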
static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_config_profile *profile,
			  struct mlxsw_res *res)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct pci_dev *pdev = mlxsw_pci->pdev;
	char *mbox;
	u16 num_pages;
	int err;

	mutex_init(&mlxsw_pci->cmd.lock);
	init_waitqueue_head(&mlxsw_pci->cmd.wait);

	mlxsw_pci->core = mlxsw_core;

	mbox = mlxsw_cmd_mbox_alloc();
	if (!mbox)
		return -ENOMEM;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
	if (err)
		goto mbox_put;

	err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	if (err)
		goto err_out_mbox_alloc;

	err = mlxsw_pci_sw_reset(mlxsw_pci, mlxsw_pci->id);
	if (err)
		goto err_sw_reset;

	err = mlxsw_pci_alloc_irq_vectors(mlxsw_pci);
	if (err < 0) {
		dev_err(&pdev->dev, "MSI-X init failed\n");
		goto err_alloc_irq;
	}

	err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
	if (err)
		goto err_query_fw;

	mlxsw_pci->bus_info.fw_rev.major =
		mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
	mlxsw_pci->bus_info.fw_rev.minor =
		mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
	mlxsw_pci->bus_info.fw_rev.subminor =
		mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
		dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
		err = -EINVAL;
		goto err_iface_rev;
	}
	if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
		err = -EINVAL;
		goto err_doorbell_page_bar;
	}

	mlxsw_pci->doorbell_offset =
		mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);

	if (mlxsw_cmd_mbox_query_fw_fr_rn_clk_bar_get(mbox) != 0) {
		dev_err(&pdev->dev, "Unsupported free running clock BAR queried from hw\n");
		err = -EINVAL;
		goto err_fr_rn_clk_bar;
	}

	mlxsw_pci->free_running_clock_offset =
		mlxsw_cmd_mbox_query_fw_free_running_clock_offset_get(mbox);

	num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
	err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
	if (err)
		goto err_fw_area_init;

	err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
	if (err)
		goto err_boardinfo;

	err = mlxsw_core_resources_query(mlxsw_core, mbox, res);
	if (err)
		goto err_query_resources;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V2) &&
	    MLXSW_CORE_RES_GET(mlxsw_core, CQE_V2))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V2;
	else if (MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V1) &&
		 MLXSW_CORE_RES_GET(mlxsw_core, CQE_V1))
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V1;
	else if ((MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0) &&
		  MLXSW_CORE_RES_GET(mlxsw_core, CQE_V0)) ||
		 !MLXSW_CORE_RES_VALID(mlxsw_core, CQE_V0)) {
		mlxsw_pci->max_cqe_ver = MLXSW_PCI_CQE_V0;
	} else {
		dev_err(&pdev->dev, "Invalid supported CQE version combination reported\n");
		err = -EINVAL;
		goto err_cqe_v_check;
	}

	err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile, res);
	if (err)
		goto err_config_profile;

	err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
	if (err)
		goto err_aqs_init;

	err = request_irq(pci_irq_vector(pdev, 0),
			  mlxsw_pci_eq_irq_handler, 0,
			  mlxsw_pci->bus_info.device_kind, mlxsw_pci);
	if (err) {
		dev_err(&pdev->dev, "IRQ request failed\n");
		goto err_request_eq_irq;
	}

	goto mbox_put;

err_request_eq_irq:
	mlxsw_pci_aqs_fini(mlxsw_pci);
err_aqs_init:
err_config_profile:
err_cqe_v_check:
err_query_resources:
err_boardinfo:
	mlxsw_pci_fw_area_fini(mlxsw_pci);
err_fw_area_init:
err_fr_rn_clk_bar:
err_doorbell_page_bar:
err_iface_rev:
err_query_fw:
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
err_alloc_irq:
err_sw_reset:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
err_out_mbox_alloc:
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
mbox_put:
	mlxsw_cmd_mbox_free(mbox);
	return err;
}

static void mlxsw_pci_fini(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;

	free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci);
	mlxsw_pci_aqs_fini(mlxsw_pci);
	mlxsw_pci_fw_area_fini(mlxsw_pci);
	mlxsw_pci_free_irq_vectors(mlxsw_pci);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.out_mbox);
	mlxsw_pci_mbox_free(mlxsw_pci, &mlxsw_pci->cmd.in_mbox);
}

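/* SDQ 0 is reserved for EMAD (device management) traffic; all other packets
 * are spread over the remaining control SDQs by local port number.
 */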
static struct mlxsw_pci_queue *
mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
		   const struct mlxsw_tx_info *tx_info)
{
	u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 1;
	u8 sdqn;

	if (tx_info->is_emad) {
		sdqn = MLXSW_PCI_SDQ_EMAD_INDEX;
	} else {
		BUILD_BUG_ON(MLXSW_PCI_SDQ_EMAD_INDEX != 0);
		sdqn = 1 + (tx_info->local_port % ctl_sdq_count);
	}

	return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
}

static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
					const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);

	return !mlxsw_pci_queue_elem_info_producer_get(q);
}

static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	struct mlxsw_pci_queue *q;
	struct mlxsw_pci_queue_elem_info *elem_info;
	char *wqe;
	int i;
	int err;

	if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
		err = skb_linearize(skb);
		if (err)
			return err;
	}

	q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
	spin_lock_bh(&q->lock);
	elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
	if (!elem_info) {
		/* queue is full */
		err = -EAGAIN;
		goto unlock;
	}
	mlxsw_skb_cb(skb)->tx_info = *tx_info;
	elem_info->u.sdq.skb = skb;

	wqe = elem_info->elem;
	mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
	mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
	mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);

	err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
				     skb_headlen(skb), DMA_TO_DEVICE);
	if (err)
		goto unlock;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
					     skb_frag_address(frag),
					     skb_frag_size(frag),
					     DMA_TO_DEVICE);
		if (err)
			goto unmap_frags;
	}

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	/* Set unused sq entries byte count to zero. */
	for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
		mlxsw_pci_wqe_byte_count_set(wqe, i, 0);

	/* Everything is set up, ring producer doorbell to get HW going */
	q->producer_counter++;
	mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);

	goto unlock;

unmap_frags:
	for (; i >= 0; i--)
		mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
unlock:
	spin_unlock_bh(&q->lock);
	return err;
}

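/* Execute a command through the command interface registers (CIR). Until the
 * async queues are up the driver polls the GO bit for completion; afterwards
 * (cmd.nopoll set) completion arrives as a command EQE that wakes the waiter.
 */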
static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
			      u32 in_mod, bool out_mbox_direct,
			      char *in_mbox, size_t in_mbox_size,
			      char *out_mbox, size_t out_mbox_size,
			      u8 *p_status)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	dma_addr_t in_mapaddr = 0, out_mapaddr = 0;
	bool evreq = mlxsw_pci->cmd.nopoll;
	unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
	bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
	int err;

	*p_status = MLXSW_CMD_STATUS_OK;

	err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
	if (err)
		return err;

	if (in_mbox) {
		memcpy(mlxsw_pci->cmd.in_mbox.buf, in_mbox, in_mbox_size);
		in_mapaddr = mlxsw_pci->cmd.in_mbox.mapaddr;
	}
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, upper_32_bits(in_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, lower_32_bits(in_mapaddr));

	if (out_mbox)
		out_mapaddr = mlxsw_pci->cmd.out_mbox.mapaddr;
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, upper_32_bits(out_mapaddr));
	mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, lower_32_bits(out_mapaddr));

	mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
	mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);

	*p_wait_done = false;

	wmb(); /* all needs to be written before we write control register */
	mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
			  MLXSW_PCI_CIR_CTRL_GO_BIT |
			  (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
			  (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
			  opcode);

	if (!evreq) {
		unsigned long end;

		end = jiffies + timeout;
		do {
			u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);

			if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
				*p_wait_done = true;
				*p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, end));
	} else {
		wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
		*p_status = mlxsw_pci->cmd.comp.status;
	}

	err = 0;
	if (*p_wait_done) {
		if (*p_status)
			err = -EIO;
	} else {
		err = -ETIMEDOUT;
	}

	if (!err && out_mbox && out_mbox_direct) {
		/* Some commands don't use output param as address to mailbox
		 * but they store output directly into registers. In that case,
		 * copy registers into mbox buffer.
		 */
		__be32 tmp;

		if (!evreq) {
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_HI));
			memcpy(out_mbox, &tmp, sizeof(tmp));
			tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
							   CIR_OUT_PARAM_LO));
			memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
		}
	} else if (!err && out_mbox) {
		memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size);
	}

	mutex_unlock(&mlxsw_pci->cmd.lock);

	return err;
}

static u32 mlxsw_pci_read_frc_h(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset;

	frc_offset = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_H(frc_offset));
}

static u32 mlxsw_pci_read_frc_l(void *bus_priv)
{
	struct mlxsw_pci *mlxsw_pci = bus_priv;
	u64 frc_offset;

	frc_offset = mlxsw_pci->free_running_clock_offset;
	return mlxsw_pci_read32(mlxsw_pci, FREE_RUNNING_CLOCK_L(frc_offset));
}

static const struct mlxsw_bus mlxsw_pci_bus = {
	.kind			= "pci",
	.init			= mlxsw_pci_init,
	.fini			= mlxsw_pci_fini,
	.skb_transmit_busy	= mlxsw_pci_skb_transmit_busy,
	.skb_transmit		= mlxsw_pci_skb_transmit,
	.cmd_exec		= mlxsw_pci_cmd_exec,
	.read_frc_h		= mlxsw_pci_read_frc_h,
	.read_frc_l		= mlxsw_pci_read_frc_l,
	.features		= MLXSW_BUS_F_TXRX | MLXSW_BUS_F_RESET,
};

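/* PCI-level probe: map BAR0, set a 64-bit DMA mask (falling back to 32-bit)
 * and register with mlxsw_core, which calls back into mlxsw_pci_init()
 * through the bus ops above.
 */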
static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const char *driver_name = pdev->driver->name;
	struct mlxsw_pci *mlxsw_pci;
	int err;

	mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
	if (!mlxsw_pci)
		return -ENOMEM;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device failed\n");
		goto err_pci_enable_device;
	}

	err = pci_request_regions(pdev, driver_name);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto err_pci_request_regions;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err) {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
			goto err_pci_set_dma_mask;
		}
	}

	if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
		dev_err(&pdev->dev, "invalid PCI region size\n");
		err = -EINVAL;
		goto err_pci_resource_len_check;
	}

	mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
				     pci_resource_len(pdev, 0));
	if (!mlxsw_pci->hw_addr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto err_ioremap;
	}
	pci_set_master(pdev);

	mlxsw_pci->pdev = pdev;
	pci_set_drvdata(pdev, mlxsw_pci);

	mlxsw_pci->bus_info.device_kind = driver_name;
	mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
	mlxsw_pci->bus_info.dev = &pdev->dev;
	mlxsw_pci->bus_info.read_frc_capable = true;
	mlxsw_pci->id = id;

	err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
					     &mlxsw_pci_bus, mlxsw_pci, false,
					     NULL, NULL);
	if (err) {
		dev_err(&pdev->dev, "cannot register bus device\n");
		goto err_bus_device_register;
	}

	return 0;

err_bus_device_register:
	iounmap(mlxsw_pci->hw_addr);
err_ioremap:
err_pci_resource_len_check:
err_pci_set_dma_mask:
	pci_release_regions(pdev);
err_pci_request_regions:
	pci_disable_device(pdev);
err_pci_enable_device:
	kfree(mlxsw_pci);
	return err;
}

static void mlxsw_pci_remove(struct pci_dev *pdev)
{
	struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);

	mlxsw_core_bus_device_unregister(mlxsw_pci->core, false);
	iounmap(mlxsw_pci->hw_addr);
	pci_release_regions(mlxsw_pci->pdev);
	pci_disable_device(mlxsw_pci->pdev);
	kfree(mlxsw_pci);
}

int mlxsw_pci_driver_register(struct pci_driver *pci_driver)
{
	pci_driver->probe = mlxsw_pci_probe;
	pci_driver->remove = mlxsw_pci_remove;
	return pci_register_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_register);

void mlxsw_pci_driver_unregister(struct pci_driver *pci_driver)
{
	pci_unregister_driver(pci_driver);
}
EXPORT_SYMBOL(mlxsw_pci_driver_unregister);

static int __init mlxsw_pci_module_init(void)
{
	return 0;
}

static void __exit mlxsw_pci_module_exit(void)
{
}

module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");