]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/pensando/ionic/ionic_lif.c
ionic: use index not pointer for queue tracking
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / pensando / ionic / ionic_lif.c
CommitLineData
1a58e196
SN
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
011c7289
AB
4#include <linux/printk.h>
5#include <linux/dynamic_debug.h>
1a58e196
SN
6#include <linux/netdevice.h>
7#include <linux/etherdevice.h>
4b03b273 8#include <linux/if_vlan.h>
8c15440b 9#include <linux/rtnetlink.h>
1a58e196
SN
10#include <linux/interrupt.h>
11#include <linux/pci.h>
12#include <linux/cpumask.h>
13
14#include "ionic.h"
15#include "ionic_bus.h"
16#include "ionic_lif.h"
0f3154e6 17#include "ionic_txrx.h"
4d03e00a 18#include "ionic_ethtool.h"
1a58e196
SN
19#include "ionic_debugfs.h"
20
5b3f3f2a
SN
21/* queuetype support level */
22static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
23 [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
24 [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
25 [IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */
26 [IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support
27 * 1 = ... with Tx SG version 1
28 */
29};
30
2a654540
SN
31static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
32static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
33static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
8d61aad4 34static void ionic_link_status_check(struct ionic_lif *lif);
c672412f
SN
35static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
36static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
37static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
2a654540 38
49d3b493
SN
39static int ionic_start_queues(struct ionic_lif *lif);
40static void ionic_stop_queues(struct ionic_lif *lif);
5b3f3f2a 41static void ionic_lif_queue_identify(struct ionic_lif *lif);
49d3b493 42
2a654540
SN
43static void ionic_lif_deferred_work(struct work_struct *work)
44{
45 struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
46 struct ionic_deferred *def = &lif->deferred;
47 struct ionic_deferred_work *w = NULL;
48
49 spin_lock_bh(&def->lock);
50 if (!list_empty(&def->list)) {
51 w = list_first_entry(&def->list,
52 struct ionic_deferred_work, list);
53 list_del(&w->list);
54 }
55 spin_unlock_bh(&def->lock);
56
57 if (w) {
58 switch (w->type) {
59 case IONIC_DW_TYPE_RX_MODE:
60 ionic_lif_rx_mode(lif, w->rx_mode);
61 break;
62 case IONIC_DW_TYPE_RX_ADDR_ADD:
63 ionic_lif_addr_add(lif, w->addr);
64 break;
65 case IONIC_DW_TYPE_RX_ADDR_DEL:
66 ionic_lif_addr_del(lif, w->addr);
67 break;
8d61aad4
SN
68 case IONIC_DW_TYPE_LINK_STATUS:
69 ionic_link_status_check(lif);
70 break;
c672412f
SN
71 case IONIC_DW_TYPE_LIF_RESET:
72 if (w->fw_status)
73 ionic_lif_handle_fw_up(lif);
74 else
75 ionic_lif_handle_fw_down(lif);
76 break;
2a654540
SN
77 default:
78 break;
79 }
80 kfree(w);
81 schedule_work(&def->work);
82 }
83}
84
c672412f
SN
85void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
86 struct ionic_deferred_work *work)
2a654540
SN
87{
88 spin_lock_bh(&def->lock);
89 list_add_tail(&work->list, &def->list);
90 spin_unlock_bh(&def->lock);
91 schedule_work(&def->work);
92}
93
8d61aad4
SN
94static void ionic_link_status_check(struct ionic_lif *lif)
95{
96 struct net_device *netdev = lif->netdev;
97 u16 link_status;
98 bool link_up;
99
0925e9db 100 if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
49d3b493
SN
101 return;
102
8d61aad4
SN
103 link_status = le16_to_cpu(lif->info->status.link_status);
104 link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
105
8d61aad4 106 if (link_up) {
aa47b540
SN
107 if (!netif_carrier_ok(netdev)) {
108 u32 link_speed;
8d61aad4 109
aa47b540
SN
110 ionic_port_identify(lif->ionic);
111 link_speed = le32_to_cpu(lif->info->status.link_speed);
112 netdev_info(netdev, "Link up - %d Gbps\n",
113 link_speed / 1000);
0f3154e6
SN
114 netif_carrier_on(netdev);
115 }
aa47b540 116
0925e9db
SN
117 if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
118 mutex_lock(&lif->queue_lock);
49d3b493 119 ionic_start_queues(lif);
0925e9db
SN
120 mutex_unlock(&lif->queue_lock);
121 }
8d61aad4 122 } else {
aa47b540
SN
123 if (netif_carrier_ok(netdev)) {
124 netdev_info(netdev, "Link down\n");
125 netif_carrier_off(netdev);
126 }
8d61aad4 127
0925e9db
SN
128 if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
129 mutex_lock(&lif->queue_lock);
49d3b493 130 ionic_stop_queues(lif);
0925e9db
SN
131 mutex_unlock(&lif->queue_lock);
132 }
8d61aad4
SN
133 }
134
c6d3d73a 135 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
8d61aad4
SN
136}
137
987c0871 138void ionic_link_status_check_request(struct ionic_lif *lif)
8d61aad4
SN
139{
140 struct ionic_deferred_work *work;
141
142 /* we only need one request outstanding at a time */
c6d3d73a 143 if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
8d61aad4
SN
144 return;
145
146 if (in_interrupt()) {
147 work = kzalloc(sizeof(*work), GFP_ATOMIC);
148 if (!work)
149 return;
150
151 work->type = IONIC_DW_TYPE_LINK_STATUS;
152 ionic_lif_deferred_enqueue(&lif->deferred, work);
153 } else {
154 ionic_link_status_check(lif);
155 }
156}
157
1d062b7b
SN
158static irqreturn_t ionic_isr(int irq, void *data)
159{
160 struct napi_struct *napi = data;
161
162 napi_schedule_irqoff(napi);
163
164 return IRQ_HANDLED;
165}
166
167static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
168{
169 struct ionic_intr_info *intr = &qcq->intr;
170 struct device *dev = lif->ionic->dev;
171 struct ionic_queue *q = &qcq->q;
172 const char *name;
173
174 if (lif->registered)
175 name = lif->netdev->name;
176 else
177 name = dev_name(dev);
178
179 snprintf(intr->name, sizeof(intr->name),
180 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
181
182 return devm_request_irq(dev, intr->vector, ionic_isr,
183 0, intr->name, &qcq->napi);
184}
185
186static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
187{
188 struct ionic *ionic = lif->ionic;
189 int index;
190
191 index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
192 if (index == ionic->nintrs) {
193 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
194 __func__, index, ionic->nintrs);
195 return -ENOSPC;
196 }
197
198 set_bit(index, ionic->intrs);
199 ionic_intr_init(&ionic->idev, intr, index);
200
201 return 0;
202}
203
36ac2c50 204static void ionic_intr_free(struct ionic *ionic, int index)
1d062b7b 205{
c06107ca 206 if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
36ac2c50 207 clear_bit(index, ionic->intrs);
1d062b7b
SN
208}
209
0f3154e6
SN
210static int ionic_qcq_enable(struct ionic_qcq *qcq)
211{
212 struct ionic_queue *q = &qcq->q;
213 struct ionic_lif *lif = q->lif;
214 struct ionic_dev *idev;
215 struct device *dev;
216
217 struct ionic_admin_ctx ctx = {
218 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
219 .cmd.q_control = {
220 .opcode = IONIC_CMD_Q_CONTROL,
221 .lif_index = cpu_to_le16(lif->index),
222 .type = q->type,
223 .index = cpu_to_le32(q->index),
224 .oper = IONIC_Q_ENABLE,
225 },
226 };
227
228 idev = &lif->ionic->idev;
229 dev = lif->ionic->dev;
230
231 dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
232 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
233
234 if (qcq->flags & IONIC_QCQ_F_INTR) {
235 irq_set_affinity_hint(qcq->intr.vector,
236 &qcq->intr.affinity_mask);
237 napi_enable(&qcq->napi);
238 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
239 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
240 IONIC_INTR_MASK_CLEAR);
241 }
242
243 return ionic_adminq_post_wait(lif, &ctx);
244}
245
246static int ionic_qcq_disable(struct ionic_qcq *qcq)
247{
248 struct ionic_queue *q = &qcq->q;
249 struct ionic_lif *lif = q->lif;
250 struct ionic_dev *idev;
251 struct device *dev;
252
253 struct ionic_admin_ctx ctx = {
254 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
255 .cmd.q_control = {
256 .opcode = IONIC_CMD_Q_CONTROL,
257 .lif_index = cpu_to_le16(lif->index),
258 .type = q->type,
259 .index = cpu_to_le32(q->index),
260 .oper = IONIC_Q_DISABLE,
261 },
262 };
263
264 idev = &lif->ionic->idev;
265 dev = lif->ionic->dev;
266
267 dev_dbg(dev, "q_disable.index %d q_disable.qtype %d\n",
268 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
269
270 if (qcq->flags & IONIC_QCQ_F_INTR) {
271 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
272 IONIC_INTR_MASK_SET);
273 synchronize_irq(qcq->intr.vector);
274 irq_set_affinity_hint(qcq->intr.vector, NULL);
275 napi_disable(&qcq->napi);
276 }
277
278 return ionic_adminq_post_wait(lif, &ctx);
279}
280
1d062b7b
SN
281static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
282{
283 struct ionic_dev *idev = &lif->ionic->idev;
1d062b7b
SN
284
285 if (!qcq)
286 return;
287
1d062b7b
SN
288 if (!(qcq->flags & IONIC_QCQ_F_INITED))
289 return;
290
291 if (qcq->flags & IONIC_QCQ_F_INTR) {
292 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
293 IONIC_INTR_MASK_SET);
1d062b7b
SN
294 netif_napi_del(&qcq->napi);
295 }
296
297 qcq->flags &= ~IONIC_QCQ_F_INITED;
298}
299
300static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
301{
302 struct device *dev = lif->ionic->dev;
303
304 if (!qcq)
305 return;
306
2a8c2c1a
SN
307 ionic_debugfs_del_qcq(qcq);
308
ea5a8b09
SN
309 if (qcq->q_base) {
310 dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
311 qcq->q_base = NULL;
312 qcq->q_base_pa = 0;
313 }
314
315 if (qcq->cq_base) {
316 dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
317 qcq->cq_base = NULL;
318 qcq->cq_base_pa = 0;
319 }
320
321 if (qcq->sg_base) {
322 dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
323 qcq->sg_base = NULL;
324 qcq->sg_base_pa = 0;
325 }
1d062b7b 326
0b064100
SN
327 if (qcq->flags & IONIC_QCQ_F_INTR) {
328 irq_set_affinity_hint(qcq->intr.vector, NULL);
329 devm_free_irq(dev, qcq->intr.vector, &qcq->napi);
330 qcq->intr.vector = 0;
36ac2c50 331 ionic_intr_free(lif->ionic, qcq->intr.index);
0b064100 332 }
1d062b7b
SN
333
334 devm_kfree(dev, qcq->cq.info);
335 qcq->cq.info = NULL;
336 devm_kfree(dev, qcq->q.info);
337 qcq->q.info = NULL;
338 devm_kfree(dev, qcq);
339}
340
341static void ionic_qcqs_free(struct ionic_lif *lif)
342{
0f3154e6 343 struct device *dev = lif->ionic->dev;
0f3154e6 344
77ceb68e
SN
345 if (lif->notifyqcq) {
346 ionic_qcq_free(lif, lif->notifyqcq);
347 lif->notifyqcq = NULL;
348 }
349
1d062b7b
SN
350 if (lif->adminqcq) {
351 ionic_qcq_free(lif, lif->adminqcq);
352 lif->adminqcq = NULL;
353 }
0f3154e6 354
a4674f34 355 if (lif->rxqcqs) {
34dec947
SN
356 devm_kfree(dev, lif->rxqstats);
357 lif->rxqstats = NULL;
a4674f34
SN
358 devm_kfree(dev, lif->rxqcqs);
359 lif->rxqcqs = NULL;
360 }
0f3154e6 361
a4674f34 362 if (lif->txqcqs) {
34dec947
SN
363 devm_kfree(dev, lif->txqstats);
364 lif->txqstats = NULL;
a4674f34
SN
365 devm_kfree(dev, lif->txqcqs);
366 lif->txqcqs = NULL;
367 }
1d062b7b
SN
368}
369
77ceb68e
SN
370static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
371 struct ionic_qcq *n_qcq)
372{
373 if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
36ac2c50 374 ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
77ceb68e
SN
375 n_qcq->flags &= ~IONIC_QCQ_F_INTR;
376 }
377
378 n_qcq->intr.vector = src_qcq->intr.vector;
379 n_qcq->intr.index = src_qcq->intr.index;
380}
381
1d062b7b
SN
382static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
383 unsigned int index,
384 const char *name, unsigned int flags,
385 unsigned int num_descs, unsigned int desc_size,
386 unsigned int cq_desc_size,
387 unsigned int sg_desc_size,
388 unsigned int pid, struct ionic_qcq **qcq)
389{
390 struct ionic_dev *idev = &lif->ionic->idev;
1d062b7b
SN
391 struct device *dev = lif->ionic->dev;
392 void *q_base, *cq_base, *sg_base;
393 dma_addr_t cq_base_pa = 0;
394 dma_addr_t sg_base_pa = 0;
395 dma_addr_t q_base_pa = 0;
396 struct ionic_qcq *new;
397 int err;
398
399 *qcq = NULL;
400
1d062b7b
SN
401 new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
402 if (!new) {
403 netdev_err(lif->netdev, "Cannot allocate queue structure\n");
404 err = -ENOMEM;
405 goto err_out;
406 }
407
408 new->flags = flags;
409
e7164200 410 new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
1d062b7b
SN
411 GFP_KERNEL);
412 if (!new->q.info) {
413 netdev_err(lif->netdev, "Cannot allocate queue info\n");
414 err = -ENOMEM;
ea5a8b09 415 goto err_out_free_qcq;
1d062b7b
SN
416 }
417
418 new->q.type = type;
419
420 err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
421 desc_size, sg_desc_size, pid);
422 if (err) {
423 netdev_err(lif->netdev, "Cannot initialize queue\n");
ea5a8b09 424 goto err_out_free_q_info;
1d062b7b
SN
425 }
426
427 if (flags & IONIC_QCQ_F_INTR) {
428 err = ionic_intr_alloc(lif, &new->intr);
429 if (err) {
430 netdev_warn(lif->netdev, "no intr for %s: %d\n",
9aa1c152 431 new->q.name, err);
1d062b7b
SN
432 goto err_out;
433 }
434
435 err = ionic_bus_get_irq(lif->ionic, new->intr.index);
436 if (err < 0) {
437 netdev_warn(lif->netdev, "no vector for %s: %d\n",
9aa1c152 438 new->q.name, err);
1d062b7b
SN
439 goto err_out_free_intr;
440 }
441 new->intr.vector = err;
442 ionic_intr_mask_assert(idev->intr_ctrl, new->intr.index,
443 IONIC_INTR_MASK_SET);
444
0b064100
SN
445 err = ionic_request_irq(lif, new);
446 if (err) {
9aa1c152
SN
447 netdev_warn(lif->netdev, "irq request failed for %s: %d\n",
448 new->q.name, err);
0b064100
SN
449 goto err_out_free_intr;
450 }
451
b7f55b81
SN
452 new->intr.cpu = cpumask_local_spread(new->intr.index,
453 dev_to_node(dev));
454 if (new->intr.cpu != -1)
1d062b7b
SN
455 cpumask_set_cpu(new->intr.cpu,
456 &new->intr.affinity_mask);
457 } else {
c06107ca 458 new->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
1d062b7b
SN
459 }
460
e7164200 461 new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
1d062b7b
SN
462 GFP_KERNEL);
463 if (!new->cq.info) {
464 netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
465 err = -ENOMEM;
0b064100 466 goto err_out_free_irq;
1d062b7b
SN
467 }
468
469 err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
470 if (err) {
471 netdev_err(lif->netdev, "Cannot initialize completion queue\n");
ea5a8b09 472 goto err_out_free_cq_info;
1d062b7b
SN
473 }
474
ea5a8b09
SN
475 new->q_size = PAGE_SIZE + (num_descs * desc_size);
476 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
477 GFP_KERNEL);
478 if (!new->q_base) {
1d062b7b
SN
479 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
480 err = -ENOMEM;
ea5a8b09 481 goto err_out_free_cq_info;
1d062b7b 482 }
ea5a8b09
SN
483 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
484 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
485 ionic_q_map(&new->q, q_base, q_base_pa);
1d062b7b 486
ea5a8b09
SN
487 new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
488 new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
489 GFP_KERNEL);
490 if (!new->cq_base) {
491 netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
492 err = -ENOMEM;
493 goto err_out_free_q;
494 }
495 cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
496 cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
497 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
498 ionic_cq_bind(&new->cq, &new->q);
1d062b7b
SN
499
500 if (flags & IONIC_QCQ_F_SG) {
ea5a8b09
SN
501 new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
502 new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
503 GFP_KERNEL);
504 if (!new->sg_base) {
505 netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
506 err = -ENOMEM;
507 goto err_out_free_cq;
508 }
509 sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
510 sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
1d062b7b
SN
511 ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
512 }
513
1d062b7b
SN
514 *qcq = new;
515
516 return 0;
517
ea5a8b09
SN
518err_out_free_cq:
519 dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
520err_out_free_q:
521 dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
522err_out_free_cq_info:
523 devm_kfree(dev, new->cq.info);
0b064100
SN
524err_out_free_irq:
525 if (flags & IONIC_QCQ_F_INTR)
526 devm_free_irq(dev, new->intr.vector, &new->napi);
1d062b7b 527err_out_free_intr:
0b064100 528 if (flags & IONIC_QCQ_F_INTR)
36ac2c50 529 ionic_intr_free(lif->ionic, new->intr.index);
ea5a8b09
SN
530err_out_free_q_info:
531 devm_kfree(dev, new->q.info);
532err_out_free_qcq:
533 devm_kfree(dev, new);
1d062b7b
SN
534err_out:
535 dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
536 return err;
537}
538
539static int ionic_qcqs_alloc(struct ionic_lif *lif)
540{
0f3154e6 541 struct device *dev = lif->ionic->dev;
1d062b7b
SN
542 unsigned int flags;
543 int err;
544
545 flags = IONIC_QCQ_F_INTR;
546 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
547 IONIC_ADMINQ_LENGTH,
548 sizeof(struct ionic_admin_cmd),
549 sizeof(struct ionic_admin_comp),
550 0, lif->kern_pid, &lif->adminqcq);
551 if (err)
552 return err;
2a8c2c1a 553 ionic_debugfs_add_qcq(lif, lif->adminqcq);
1d062b7b 554
77ceb68e
SN
555 if (lif->ionic->nnqs_per_lif) {
556 flags = IONIC_QCQ_F_NOTIFYQ;
557 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
558 flags, IONIC_NOTIFYQ_LENGTH,
559 sizeof(struct ionic_notifyq_cmd),
560 sizeof(union ionic_notifyq_comp),
561 0, lif->kern_pid, &lif->notifyqcq);
562 if (err)
34dec947 563 goto err_out;
2a8c2c1a 564 ionic_debugfs_add_qcq(lif, lif->notifyqcq);
77ceb68e
SN
565
566 /* Let the notifyq ride on the adminq interrupt */
567 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
568 }
569
0f3154e6 570 err = -ENOMEM;
ee205626 571 lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
34dec947 572 sizeof(struct ionic_qcq *), GFP_KERNEL);
0f3154e6 573 if (!lif->txqcqs)
34dec947 574 goto err_out;
ee205626 575 lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
34dec947 576 sizeof(struct ionic_qcq *), GFP_KERNEL);
0f3154e6 577 if (!lif->rxqcqs)
34dec947 578 goto err_out;
0f3154e6 579
34dec947
SN
580 lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
581 sizeof(struct ionic_tx_stats), GFP_KERNEL);
582 if (!lif->txqstats)
583 goto err_out;
584 lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
585 sizeof(struct ionic_rx_stats), GFP_KERNEL);
586 if (!lif->rxqstats)
587 goto err_out;
77ceb68e 588
34dec947 589 return 0;
77ceb68e 590
34dec947
SN
591err_out:
592 ionic_qcqs_free(lif);
77ceb68e
SN
593 return err;
594}
595
0f3154e6
SN
596static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
597{
598 struct device *dev = lif->ionic->dev;
599 struct ionic_queue *q = &qcq->q;
600 struct ionic_cq *cq = &qcq->cq;
601 struct ionic_admin_ctx ctx = {
602 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
603 .cmd.q_init = {
604 .opcode = IONIC_CMD_Q_INIT,
605 .lif_index = cpu_to_le16(lif->index),
606 .type = q->type,
5b3f3f2a 607 .ver = lif->qtype_info[q->type].version,
0f3154e6
SN
608 .index = cpu_to_le32(q->index),
609 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
610 IONIC_QINIT_F_SG),
0f3154e6
SN
611 .pid = cpu_to_le16(q->pid),
612 .ring_size = ilog2(q->num_descs),
613 .ring_base = cpu_to_le64(q->base_pa),
614 .cq_ring_base = cpu_to_le64(cq->base_pa),
615 .sg_ring_base = cpu_to_le64(q->sg_base_pa),
616 },
617 };
fe8c30b5 618 unsigned int intr_index;
0f3154e6
SN
619 int err;
620
fe8c30b5
SN
621 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
622 intr_index = qcq->intr.index;
623 else
34dec947 624 intr_index = lif->rxqcqs[q->index]->intr.index;
fe8c30b5
SN
625 ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
626
0f3154e6
SN
627 dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
628 dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
629 dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
630 dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
5b3f3f2a
SN
631 dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
632 dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
fe8c30b5 633 dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
0f3154e6 634
f1d2e894
SN
635 q->tail_idx = 0;
636 q->head_idx = 0;
637 cq->tail_idx = 0;
49d3b493 638
0f3154e6
SN
639 err = ionic_adminq_post_wait(lif, &ctx);
640 if (err)
641 return err;
642
643 q->hw_type = ctx.comp.q_init.hw_type;
644 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
645 q->dbval = IONIC_DBELL_QID(q->hw_index);
646
647 dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
648 dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
649
fe8c30b5
SN
650 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
651 netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
652 NAPI_POLL_WEIGHT);
653
0f3154e6
SN
654 qcq->flags |= IONIC_QCQ_F_INITED;
655
0f3154e6
SN
656 return 0;
657}
658
659static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
660{
661 struct device *dev = lif->ionic->dev;
662 struct ionic_queue *q = &qcq->q;
663 struct ionic_cq *cq = &qcq->cq;
664 struct ionic_admin_ctx ctx = {
665 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
666 .cmd.q_init = {
667 .opcode = IONIC_CMD_Q_INIT,
668 .lif_index = cpu_to_le16(lif->index),
669 .type = q->type,
5b3f3f2a 670 .ver = lif->qtype_info[q->type].version,
0f3154e6 671 .index = cpu_to_le32(q->index),
08f2e4b2
SN
672 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
673 IONIC_QINIT_F_SG),
0f3154e6
SN
674 .intr_index = cpu_to_le16(cq->bound_intr->index),
675 .pid = cpu_to_le16(q->pid),
676 .ring_size = ilog2(q->num_descs),
677 .ring_base = cpu_to_le64(q->base_pa),
678 .cq_ring_base = cpu_to_le64(cq->base_pa),
08f2e4b2 679 .sg_ring_base = cpu_to_le64(q->sg_base_pa),
0f3154e6
SN
680 },
681 };
682 int err;
683
684 dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
685 dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
686 dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
687 dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
5b3f3f2a
SN
688 dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
689 dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
fe8c30b5 690 dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
0f3154e6 691
f1d2e894
SN
692 q->tail_idx = 0;
693 q->head_idx = 0;
694 cq->tail_idx = 0;
49d3b493 695
0f3154e6
SN
696 err = ionic_adminq_post_wait(lif, &ctx);
697 if (err)
698 return err;
699
700 q->hw_type = ctx.comp.q_init.hw_type;
701 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
702 q->dbval = IONIC_DBELL_QID(q->hw_index);
703
704 dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
705 dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
706
fe8c30b5
SN
707 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
708 netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
709 NAPI_POLL_WEIGHT);
710 else
711 netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
712 NAPI_POLL_WEIGHT);
0f3154e6 713
0f3154e6
SN
714 qcq->flags |= IONIC_QCQ_F_INITED;
715
0f3154e6
SN
716 return 0;
717}
718
77ceb68e
SN
719static bool ionic_notifyq_service(struct ionic_cq *cq,
720 struct ionic_cq_info *cq_info)
721{
722 union ionic_notifyq_comp *comp = cq_info->cq_desc;
c672412f 723 struct ionic_deferred_work *work;
77ceb68e
SN
724 struct net_device *netdev;
725 struct ionic_queue *q;
726 struct ionic_lif *lif;
727 u64 eid;
728
729 q = cq->bound_q;
730 lif = q->info[0].cb_arg;
731 netdev = lif->netdev;
732 eid = le64_to_cpu(comp->event.eid);
733
734 /* Have we run out of new completions to process? */
3fbc9bb6 735 if ((s64)(eid - lif->last_eid) <= 0)
77ceb68e
SN
736 return false;
737
738 lif->last_eid = eid;
739
740 dev_dbg(lif->ionic->dev, "notifyq event:\n");
741 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
742 comp, sizeof(*comp), true);
743
744 switch (le16_to_cpu(comp->event.ecode)) {
745 case IONIC_EVENT_LINK_CHANGE:
8d61aad4 746 ionic_link_status_check_request(lif);
77ceb68e
SN
747 break;
748 case IONIC_EVENT_RESET:
c672412f
SN
749 work = kzalloc(sizeof(*work), GFP_ATOMIC);
750 if (!work) {
751 netdev_err(lif->netdev, "%s OOM\n", __func__);
752 } else {
753 work->type = IONIC_DW_TYPE_LIF_RESET;
754 ionic_lif_deferred_enqueue(&lif->deferred, work);
755 }
77ceb68e
SN
756 break;
757 default:
5b3f3f2a 758 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
77ceb68e
SN
759 comp->event.ecode, eid);
760 break;
761 }
762
763 return true;
764}
765
766static int ionic_notifyq_clean(struct ionic_lif *lif, int budget)
767{
768 struct ionic_dev *idev = &lif->ionic->idev;
769 struct ionic_cq *cq = &lif->notifyqcq->cq;
770 u32 work_done;
771
772 work_done = ionic_cq_service(cq, budget, ionic_notifyq_service,
773 NULL, NULL);
774 if (work_done)
775 ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
776 work_done, IONIC_INTR_CRED_RESET_COALESCE);
777
778 return work_done;
1d062b7b
SN
779}
780
781static bool ionic_adminq_service(struct ionic_cq *cq,
782 struct ionic_cq_info *cq_info)
783{
784 struct ionic_admin_comp *comp = cq_info->cq_desc;
785
786 if (!color_match(comp->color, cq->done_color))
787 return false;
788
789 ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
790
791 return true;
792}
793
794static int ionic_adminq_napi(struct napi_struct *napi, int budget)
795{
77ceb68e
SN
796 struct ionic_lif *lif = napi_to_cq(napi)->lif;
797 int n_work = 0;
798 int a_work = 0;
799
800 if (likely(lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED))
801 n_work = ionic_notifyq_clean(lif, budget);
802 a_work = ionic_napi(napi, budget, ionic_adminq_service, NULL, NULL);
803
804 return max(n_work, a_work);
1d062b7b
SN
805}
806
f64e0c56
SN
807void ionic_get_stats64(struct net_device *netdev,
808 struct rtnl_link_stats64 *ns)
8d61aad4
SN
809{
810 struct ionic_lif *lif = netdev_priv(netdev);
811 struct ionic_lif_stats *ls;
812
813 memset(ns, 0, sizeof(*ns));
814 ls = &lif->info->stats;
815
816 ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
817 le64_to_cpu(ls->rx_mcast_packets) +
818 le64_to_cpu(ls->rx_bcast_packets);
819
820 ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
821 le64_to_cpu(ls->tx_mcast_packets) +
822 le64_to_cpu(ls->tx_bcast_packets);
823
824 ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
825 le64_to_cpu(ls->rx_mcast_bytes) +
826 le64_to_cpu(ls->rx_bcast_bytes);
827
828 ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
829 le64_to_cpu(ls->tx_mcast_bytes) +
830 le64_to_cpu(ls->tx_bcast_bytes);
831
832 ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
833 le64_to_cpu(ls->rx_mcast_drop_packets) +
834 le64_to_cpu(ls->rx_bcast_drop_packets);
835
836 ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
837 le64_to_cpu(ls->tx_mcast_drop_packets) +
838 le64_to_cpu(ls->tx_bcast_drop_packets);
839
840 ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
841
842 ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
843
844 ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
845 le64_to_cpu(ls->rx_queue_disabled) +
846 le64_to_cpu(ls->rx_desc_fetch_error) +
847 le64_to_cpu(ls->rx_desc_data_error);
848
849 ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
850 le64_to_cpu(ls->tx_queue_disabled) +
851 le64_to_cpu(ls->tx_desc_fetch_error) +
852 le64_to_cpu(ls->tx_desc_data_error);
853
854 ns->rx_errors = ns->rx_over_errors +
855 ns->rx_missed_errors;
856
857 ns->tx_errors = ns->tx_aborted_errors;
858}
859
2a654540
SN
860static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
861{
862 struct ionic_admin_ctx ctx = {
863 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
864 .cmd.rx_filter_add = {
865 .opcode = IONIC_CMD_RX_FILTER_ADD,
866 .lif_index = cpu_to_le16(lif->index),
867 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
868 },
869 };
870 struct ionic_rx_filter *f;
871 int err;
872
873 /* don't bother if we already have it */
874 spin_lock_bh(&lif->rx_filters.lock);
875 f = ionic_rx_filter_by_addr(lif, addr);
876 spin_unlock_bh(&lif->rx_filters.lock);
877 if (f)
878 return 0;
879
cbec2153 880 netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);
2a654540
SN
881
882 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
883 err = ionic_adminq_post_wait(lif, &ctx);
53faea3d 884 if (err && err != -EEXIST)
2a654540
SN
885 return err;
886
887 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
888}
889
890static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
891{
892 struct ionic_admin_ctx ctx = {
893 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
894 .cmd.rx_filter_del = {
895 .opcode = IONIC_CMD_RX_FILTER_DEL,
896 .lif_index = cpu_to_le16(lif->index),
897 },
898 };
899 struct ionic_rx_filter *f;
900 int err;
901
902 spin_lock_bh(&lif->rx_filters.lock);
903 f = ionic_rx_filter_by_addr(lif, addr);
904 if (!f) {
905 spin_unlock_bh(&lif->rx_filters.lock);
906 return -ENOENT;
907 }
908
cbec2153
SN
909 netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
910 addr, f->filter_id);
911
2a654540
SN
912 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
913 ionic_rx_filter_free(lif, f);
914 spin_unlock_bh(&lif->rx_filters.lock);
915
916 err = ionic_adminq_post_wait(lif, &ctx);
53faea3d 917 if (err && err != -EEXIST)
2a654540
SN
918 return err;
919
2a654540
SN
920 return 0;
921}
922
923static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add)
924{
925 struct ionic *ionic = lif->ionic;
926 struct ionic_deferred_work *work;
927 unsigned int nmfilters;
928 unsigned int nufilters;
929
930 if (add) {
931 /* Do we have space for this filter? We test the counters
932 * here before checking the need for deferral so that we
933 * can return an overflow error to the stack.
934 */
935 nmfilters = le32_to_cpu(ionic->ident.lif.eth.max_mcast_filters);
936 nufilters = le32_to_cpu(ionic->ident.lif.eth.max_ucast_filters);
937
938 if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
939 lif->nmcast++;
940 else if (!is_multicast_ether_addr(addr) &&
941 lif->nucast < nufilters)
942 lif->nucast++;
943 else
944 return -ENOSPC;
945 } else {
946 if (is_multicast_ether_addr(addr) && lif->nmcast)
947 lif->nmcast--;
948 else if (!is_multicast_ether_addr(addr) && lif->nucast)
949 lif->nucast--;
950 }
951
952 if (in_interrupt()) {
953 work = kzalloc(sizeof(*work), GFP_ATOMIC);
954 if (!work) {
955 netdev_err(lif->netdev, "%s OOM\n", __func__);
956 return -ENOMEM;
957 }
958 work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
959 IONIC_DW_TYPE_RX_ADDR_DEL;
960 memcpy(work->addr, addr, ETH_ALEN);
961 netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
962 add ? "add" : "del", addr);
963 ionic_lif_deferred_enqueue(&lif->deferred, work);
964 } else {
965 netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
966 add ? "add" : "del", addr);
967 if (add)
968 return ionic_lif_addr_add(lif, addr);
969 else
970 return ionic_lif_addr_del(lif, addr);
971 }
972
973 return 0;
974}
975
976static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
977{
978 return ionic_lif_addr(netdev_priv(netdev), addr, true);
979}
980
981static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
982{
983 return ionic_lif_addr(netdev_priv(netdev), addr, false);
984}
985
986static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
987{
988 struct ionic_admin_ctx ctx = {
989 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
990 .cmd.rx_mode_set = {
991 .opcode = IONIC_CMD_RX_MODE_SET,
992 .lif_index = cpu_to_le16(lif->index),
993 .rx_mode = cpu_to_le16(rx_mode),
994 },
995 };
996 char buf[128];
997 int err;
998 int i;
999#define REMAIN(__x) (sizeof(buf) - (__x))
1000
38e0f746
TI
1001 i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
1002 lif->rx_mode, rx_mode);
2a654540 1003 if (rx_mode & IONIC_RX_MODE_F_UNICAST)
38e0f746 1004 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
2a654540 1005 if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
38e0f746 1006 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
2a654540 1007 if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
38e0f746 1008 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
2a654540 1009 if (rx_mode & IONIC_RX_MODE_F_PROMISC)
38e0f746 1010 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
2a654540 1011 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
38e0f746 1012 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
2a654540
SN
1013 netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
1014
1015 err = ionic_adminq_post_wait(lif, &ctx);
1016 if (err)
1017 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
1018 rx_mode, err);
1019 else
1020 lif->rx_mode = rx_mode;
1021}
1022
1023static void _ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
1024{
1025 struct ionic_deferred_work *work;
1026
1027 if (in_interrupt()) {
1028 work = kzalloc(sizeof(*work), GFP_ATOMIC);
1029 if (!work) {
1030 netdev_err(lif->netdev, "%s OOM\n", __func__);
1031 return;
1032 }
1033 work->type = IONIC_DW_TYPE_RX_MODE;
1034 work->rx_mode = rx_mode;
1035 netdev_dbg(lif->netdev, "deferred: rx_mode\n");
1036 ionic_lif_deferred_enqueue(&lif->deferred, work);
1037 } else {
1038 ionic_lif_rx_mode(lif, rx_mode);
1039 }
1040}
1041
1042static void ionic_set_rx_mode(struct net_device *netdev)
1043{
1044 struct ionic_lif *lif = netdev_priv(netdev);
1045 struct ionic_identity *ident;
1046 unsigned int nfilters;
1047 unsigned int rx_mode;
1048
1049 ident = &lif->ionic->ident;
1050
1051 rx_mode = IONIC_RX_MODE_F_UNICAST;
1052 rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
1053 rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
1054 rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
1055 rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;
1056
1057 /* sync unicast addresses
1058 * next check to see if we're in an overflow state
1059 * if so, we track that we overflowed and enable NIC PROMISC
1060 * else if the overflow is set and not needed
1061 * we remove our overflow flag and check the netdev flags
1062 * to see if we can disable NIC PROMISC
1063 */
1064 __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
1065 nfilters = le32_to_cpu(ident->lif.eth.max_ucast_filters);
1066 if (netdev_uc_count(netdev) + 1 > nfilters) {
1067 rx_mode |= IONIC_RX_MODE_F_PROMISC;
1068 lif->uc_overflow = true;
1069 } else if (lif->uc_overflow) {
1070 lif->uc_overflow = false;
1071 if (!(netdev->flags & IFF_PROMISC))
1072 rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
1073 }
1074
1075 /* same for multicast */
1076 __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
1077 nfilters = le32_to_cpu(ident->lif.eth.max_mcast_filters);
1078 if (netdev_mc_count(netdev) > nfilters) {
1079 rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
1080 lif->mc_overflow = true;
1081 } else if (lif->mc_overflow) {
1082 lif->mc_overflow = false;
1083 if (!(netdev->flags & IFF_ALLMULTI))
1084 rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
1085 }
1086
1087 if (lif->rx_mode != rx_mode)
1088 _ionic_lif_rx_mode(lif, rx_mode);
1089}
1090
beead698
SN
1091static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1092{
1093 u64 wanted = 0;
1094
1095 if (features & NETIF_F_HW_VLAN_CTAG_TX)
1096 wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1097 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1098 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1099 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1100 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1101 if (features & NETIF_F_RXHASH)
1102 wanted |= IONIC_ETH_HW_RX_HASH;
1103 if (features & NETIF_F_RXCSUM)
1104 wanted |= IONIC_ETH_HW_RX_CSUM;
1105 if (features & NETIF_F_SG)
1106 wanted |= IONIC_ETH_HW_TX_SG;
1107 if (features & NETIF_F_HW_CSUM)
1108 wanted |= IONIC_ETH_HW_TX_CSUM;
1109 if (features & NETIF_F_TSO)
1110 wanted |= IONIC_ETH_HW_TSO;
1111 if (features & NETIF_F_TSO6)
1112 wanted |= IONIC_ETH_HW_TSO_IPV6;
1113 if (features & NETIF_F_TSO_ECN)
1114 wanted |= IONIC_ETH_HW_TSO_ECN;
1115 if (features & NETIF_F_GSO_GRE)
1116 wanted |= IONIC_ETH_HW_TSO_GRE;
1117 if (features & NETIF_F_GSO_GRE_CSUM)
1118 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1119 if (features & NETIF_F_GSO_IPXIP4)
1120 wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1121 if (features & NETIF_F_GSO_IPXIP6)
1122 wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1123 if (features & NETIF_F_GSO_UDP_TUNNEL)
1124 wanted |= IONIC_ETH_HW_TSO_UDP;
1125 if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1126 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1127
1128 return cpu_to_le64(wanted);
1129}
1130
1131static int ionic_set_nic_features(struct ionic_lif *lif,
1132 netdev_features_t features)
1133{
1134 struct device *dev = lif->ionic->dev;
1135 struct ionic_admin_ctx ctx = {
1136 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1137 .cmd.lif_setattr = {
1138 .opcode = IONIC_CMD_LIF_SETATTR,
1139 .index = cpu_to_le16(lif->index),
1140 .attr = IONIC_LIF_ATTR_FEATURES,
1141 },
1142 };
1143 u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
1144 IONIC_ETH_HW_VLAN_RX_STRIP |
1145 IONIC_ETH_HW_VLAN_RX_FILTER;
75fcb75b 1146 u64 old_hw_features;
beead698
SN
1147 int err;
1148
1149 ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
1150 err = ionic_adminq_post_wait(lif, &ctx);
1151 if (err)
1152 return err;
1153
75fcb75b 1154 old_hw_features = lif->hw_features;
beead698
SN
1155 lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
1156 ctx.comp.lif_setattr.features);
1157
75fcb75b
SN
1158 if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
1159 ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
1160
beead698
SN
1161 if ((vlan_flags & features) &&
1162 !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
1163 dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");
1164
1165 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1166 dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
1167 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1168 dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
1169 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1170 dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
1171 if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1172 dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
1173 if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1174 dev_dbg(dev, "feature ETH_HW_TX_SG\n");
1175 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1176 dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
1177 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1178 dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
1179 if (lif->hw_features & IONIC_ETH_HW_TSO)
1180 dev_dbg(dev, "feature ETH_HW_TSO\n");
1181 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1182 dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
1183 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1184 dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
1185 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1186 dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
1187 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1188 dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
1189 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1190 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
1191 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1192 dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
1193 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1194 dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
1195 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1196 dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");
1197
1198 return 0;
1199}
1200
1201static int ionic_init_nic_features(struct ionic_lif *lif)
1202{
1203 struct net_device *netdev = lif->netdev;
1204 netdev_features_t features;
1205 int err;
1206
1207 /* set up what we expect to support by default */
1208 features = NETIF_F_HW_VLAN_CTAG_TX |
1209 NETIF_F_HW_VLAN_CTAG_RX |
1210 NETIF_F_HW_VLAN_CTAG_FILTER |
1211 NETIF_F_RXHASH |
1212 NETIF_F_SG |
1213 NETIF_F_HW_CSUM |
1214 NETIF_F_RXCSUM |
1215 NETIF_F_TSO |
1216 NETIF_F_TSO6 |
1217 NETIF_F_TSO_ECN;
1218
1219 err = ionic_set_nic_features(lif, features);
1220 if (err)
1221 return err;
1222
1223 /* tell the netdev what we actually can support */
1224 netdev->features |= NETIF_F_HIGHDMA;
1225
1226 if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
1227 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
1228 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
1229 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
1230 if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
1231 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1232 if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
1233 netdev->hw_features |= NETIF_F_RXHASH;
1234 if (lif->hw_features & IONIC_ETH_HW_TX_SG)
1235 netdev->hw_features |= NETIF_F_SG;
1236
1237 if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
1238 netdev->hw_enc_features |= NETIF_F_HW_CSUM;
1239 if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
1240 netdev->hw_enc_features |= NETIF_F_RXCSUM;
1241 if (lif->hw_features & IONIC_ETH_HW_TSO)
1242 netdev->hw_enc_features |= NETIF_F_TSO;
1243 if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
1244 netdev->hw_enc_features |= NETIF_F_TSO6;
1245 if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
1246 netdev->hw_enc_features |= NETIF_F_TSO_ECN;
1247 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
1248 netdev->hw_enc_features |= NETIF_F_GSO_GRE;
1249 if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
1250 netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
1251 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
1252 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
1253 if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
1254 netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
1255 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
1256 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
1257 if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
1258 netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;
1259
1260 netdev->hw_features |= netdev->hw_enc_features;
1261 netdev->features |= netdev->hw_features;
ef7232da 1262 netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;
beead698 1263
c672412f
SN
1264 netdev->priv_flags |= IFF_UNICAST_FLT |
1265 IFF_LIVE_ADDR_CHANGE;
beead698
SN
1266
1267 return 0;
1268}
1269
1270static int ionic_set_features(struct net_device *netdev,
1271 netdev_features_t features)
1272{
1273 struct ionic_lif *lif = netdev_priv(netdev);
1274 int err;
1275
1276 netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1277 __func__, (u64)lif->netdev->features, (u64)features);
1278
1279 err = ionic_set_nic_features(lif, features);
1280
1281 return err;
1282}
1283
1284static int ionic_set_mac_address(struct net_device *netdev, void *sa)
1285{
2a654540
SN
1286 struct sockaddr *addr = sa;
1287 u8 *mac;
1288 int err;
1289
1290 mac = (u8 *)addr->sa_data;
1291 if (ether_addr_equal(netdev->dev_addr, mac))
1292 return 0;
1293
1294 err = eth_prepare_mac_addr_change(netdev, addr);
1295 if (err)
1296 return err;
1297
1298 if (!is_zero_ether_addr(netdev->dev_addr)) {
1299 netdev_info(netdev, "deleting mac addr %pM\n",
1300 netdev->dev_addr);
1301 ionic_addr_del(netdev, netdev->dev_addr);
1302 }
1303
1304 eth_commit_mac_addr_change(netdev, addr);
1305 netdev_info(netdev, "updating mac addr %pM\n", mac);
1306
1307 return ionic_addr_add(netdev, mac);
beead698
SN
1308}
1309
1310static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
1311{
1312 struct ionic_lif *lif = netdev_priv(netdev);
1313 struct ionic_admin_ctx ctx = {
1314 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1315 .cmd.lif_setattr = {
1316 .opcode = IONIC_CMD_LIF_SETATTR,
1317 .index = cpu_to_le16(lif->index),
1318 .attr = IONIC_LIF_ATTR_MTU,
1319 .mtu = cpu_to_le32(new_mtu),
1320 },
1321 };
1322 int err;
1323
1324 err = ionic_adminq_post_wait(lif, &ctx);
1325 if (err)
1326 return err;
1327
1328 netdev->mtu = new_mtu;
086c18f2 1329 err = ionic_reset_queues(lif, NULL, NULL);
beead698
SN
1330
1331 return err;
1332}
1333
8c15440b
SN
1334static void ionic_tx_timeout_work(struct work_struct *ws)
1335{
1336 struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
1337
1338 netdev_info(lif->netdev, "Tx Timeout recovery\n");
1339
1340 rtnl_lock();
086c18f2 1341 ionic_reset_queues(lif, NULL, NULL);
8c15440b
SN
1342 rtnl_unlock();
1343}
1344
0290bd29 1345static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
beead698 1346{
8c15440b
SN
1347 struct ionic_lif *lif = netdev_priv(netdev);
1348
1349 schedule_work(&lif->tx_timeout_work);
beead698
SN
1350}
1351
1352static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
1353 u16 vid)
1354{
2a654540
SN
1355 struct ionic_lif *lif = netdev_priv(netdev);
1356 struct ionic_admin_ctx ctx = {
1357 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1358 .cmd.rx_filter_add = {
1359 .opcode = IONIC_CMD_RX_FILTER_ADD,
1360 .lif_index = cpu_to_le16(lif->index),
1361 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
1362 .vlan.vlan = cpu_to_le16(vid),
1363 },
1364 };
1365 int err;
1366
cbec2153 1367 netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
2a654540
SN
1368 err = ionic_adminq_post_wait(lif, &ctx);
1369 if (err)
1370 return err;
1371
2a654540 1372 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
beead698
SN
1373}
1374
1375static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
1376 u16 vid)
1377{
2a654540
SN
1378 struct ionic_lif *lif = netdev_priv(netdev);
1379 struct ionic_admin_ctx ctx = {
1380 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1381 .cmd.rx_filter_del = {
1382 .opcode = IONIC_CMD_RX_FILTER_DEL,
1383 .lif_index = cpu_to_le16(lif->index),
1384 },
1385 };
1386 struct ionic_rx_filter *f;
1387
1388 spin_lock_bh(&lif->rx_filters.lock);
1389
1390 f = ionic_rx_filter_by_vlan(lif, vid);
1391 if (!f) {
1392 spin_unlock_bh(&lif->rx_filters.lock);
1393 return -ENOENT;
1394 }
1395
cbec2153
SN
1396 netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
1397 vid, f->filter_id);
2a654540
SN
1398
1399 ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
1400 ionic_rx_filter_free(lif, f);
1401 spin_unlock_bh(&lif->rx_filters.lock);
1402
1403 return ionic_adminq_post_wait(lif, &ctx);
beead698
SN
1404}
1405
aa319881
SN
1406int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
1407 const u8 *key, const u32 *indir)
1408{
1409 struct ionic_admin_ctx ctx = {
1410 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1411 .cmd.lif_setattr = {
1412 .opcode = IONIC_CMD_LIF_SETATTR,
1413 .attr = IONIC_LIF_ATTR_RSS,
aa319881
SN
1414 .rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
1415 },
1416 };
1417 unsigned int i, tbl_sz;
1418
75fcb75b
SN
1419 if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
1420 lif->rss_types = types;
1421 ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
1422 }
aa319881
SN
1423
1424 if (key)
1425 memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);
1426
1427 if (indir) {
1428 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1429 for (i = 0; i < tbl_sz; i++)
1430 lif->rss_ind_tbl[i] = indir[i];
1431 }
1432
1433 memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
1434 IONIC_RSS_HASH_KEY_SIZE);
1435
1436 return ionic_adminq_post_wait(lif, &ctx);
1437}
1438
1439static int ionic_lif_rss_init(struct ionic_lif *lif)
1440{
aa319881
SN
1441 unsigned int tbl_sz;
1442 unsigned int i;
1443
aa319881
SN
1444 lif->rss_types = IONIC_RSS_TYPE_IPV4 |
1445 IONIC_RSS_TYPE_IPV4_TCP |
1446 IONIC_RSS_TYPE_IPV4_UDP |
1447 IONIC_RSS_TYPE_IPV6 |
1448 IONIC_RSS_TYPE_IPV6_TCP |
1449 IONIC_RSS_TYPE_IPV6_UDP;
1450
1451 /* Fill indirection table with 'default' values */
1452 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1453 for (i = 0; i < tbl_sz; i++)
1454 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1455
ffac2027 1456 return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
aa319881
SN
1457}
1458
ffac2027 1459static void ionic_lif_rss_deinit(struct ionic_lif *lif)
aa319881 1460{
ffac2027
SN
1461 int tbl_sz;
1462
1463 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1464 memset(lif->rss_ind_tbl, 0, tbl_sz);
1465 memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1466
1467 ionic_lif_rss_config(lif, 0x0, NULL, NULL);
aa319881
SN
1468}
1469
0f3154e6
SN
1470static void ionic_txrx_disable(struct ionic_lif *lif)
1471{
1472 unsigned int i;
4ee7bda4 1473 int err;
0f3154e6 1474
d5eddde5
SN
1475 if (lif->txqcqs) {
1476 for (i = 0; i < lif->nxqs; i++) {
34dec947 1477 err = ionic_qcq_disable(lif->txqcqs[i]);
d5eddde5
SN
1478 if (err == -ETIMEDOUT)
1479 break;
1480 }
1481 }
1482
1483 if (lif->rxqcqs) {
1484 for (i = 0; i < lif->nxqs; i++) {
34dec947 1485 err = ionic_qcq_disable(lif->rxqcqs[i]);
d5eddde5
SN
1486 if (err == -ETIMEDOUT)
1487 break;
1488 }
0f3154e6
SN
1489 }
1490}
1491
1492static void ionic_txrx_deinit(struct ionic_lif *lif)
1493{
1494 unsigned int i;
1495
d5eddde5
SN
1496 if (lif->txqcqs) {
1497 for (i = 0; i < lif->nxqs; i++) {
34dec947
SN
1498 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
1499 ionic_tx_flush(&lif->txqcqs[i]->cq);
1500 ionic_tx_empty(&lif->txqcqs[i]->q);
d5eddde5
SN
1501 }
1502 }
0f3154e6 1503
d5eddde5
SN
1504 if (lif->rxqcqs) {
1505 for (i = 0; i < lif->nxqs; i++) {
34dec947
SN
1506 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
1507 ionic_rx_flush(&lif->rxqcqs[i]->cq);
1508 ionic_rx_empty(&lif->rxqcqs[i]->q);
d5eddde5 1509 }
0f3154e6 1510 }
49d3b493 1511 lif->rx_mode = 0;
0f3154e6
SN
1512}
1513
1514static void ionic_txrx_free(struct ionic_lif *lif)
1515{
1516 unsigned int i;
1517
d5eddde5
SN
1518 if (lif->txqcqs) {
1519 for (i = 0; i < lif->nxqs; i++) {
34dec947
SN
1520 ionic_qcq_free(lif, lif->txqcqs[i]);
1521 lif->txqcqs[i] = NULL;
d5eddde5
SN
1522 }
1523 }
0f3154e6 1524
d5eddde5
SN
1525 if (lif->rxqcqs) {
1526 for (i = 0; i < lif->nxqs; i++) {
34dec947
SN
1527 ionic_qcq_free(lif, lif->rxqcqs[i]);
1528 lif->rxqcqs[i] = NULL;
d5eddde5 1529 }
0f3154e6
SN
1530 }
1531}
1532
1533static int ionic_txrx_alloc(struct ionic_lif *lif)
1534{
5b3f3f2a 1535 unsigned int sg_desc_sz;
0f3154e6
SN
1536 unsigned int flags;
1537 unsigned int i;
1538 int err = 0;
1539
5b3f3f2a
SN
1540 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
1541 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
1542 sizeof(struct ionic_txq_sg_desc_v1))
1543 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
1544 else
1545 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
1546
0f3154e6 1547 flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
fe8c30b5
SN
1548 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
1549 flags |= IONIC_QCQ_F_INTR;
0f3154e6
SN
1550 for (i = 0; i < lif->nxqs; i++) {
1551 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
1552 lif->ntxq_descs,
1553 sizeof(struct ionic_txq_desc),
1554 sizeof(struct ionic_txq_comp),
5b3f3f2a 1555 sg_desc_sz,
34dec947 1556 lif->kern_pid, &lif->txqcqs[i]);
0f3154e6
SN
1557 if (err)
1558 goto err_out;
1559
fe8c30b5
SN
1560 if (flags & IONIC_QCQ_F_INTR)
1561 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
34dec947 1562 lif->txqcqs[i]->intr.index,
fe8c30b5
SN
1563 lif->tx_coalesce_hw);
1564
34dec947 1565 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
0f3154e6
SN
1566 }
1567
08f2e4b2 1568 flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
0f3154e6
SN
1569 for (i = 0; i < lif->nxqs; i++) {
1570 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
1571 lif->nrxq_descs,
1572 sizeof(struct ionic_rxq_desc),
1573 sizeof(struct ionic_rxq_comp),
08f2e4b2 1574 sizeof(struct ionic_rxq_sg_desc),
34dec947 1575 lif->kern_pid, &lif->rxqcqs[i]);
0f3154e6
SN
1576 if (err)
1577 goto err_out;
1578
8c15440b 1579 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
34dec947 1580 lif->rxqcqs[i]->intr.index,
780eded3 1581 lif->rx_coalesce_hw);
fe8c30b5
SN
1582
1583 if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
34dec947
SN
1584 ionic_link_qcq_interrupts(lif->rxqcqs[i],
1585 lif->txqcqs[i]);
fe8c30b5 1586
34dec947 1587 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
0f3154e6
SN
1588 }
1589
1590 return 0;
1591
1592err_out:
1593 ionic_txrx_free(lif);
1594
1595 return err;
1596}
1597
1598static int ionic_txrx_init(struct ionic_lif *lif)
1599{
1600 unsigned int i;
1601 int err;
1602
1603 for (i = 0; i < lif->nxqs; i++) {
34dec947 1604 err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
0f3154e6
SN
1605 if (err)
1606 goto err_out;
1607
34dec947 1608 err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
0f3154e6 1609 if (err) {
34dec947 1610 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
0f3154e6
SN
1611 goto err_out;
1612 }
1613 }
1614
aa319881
SN
1615 if (lif->netdev->features & NETIF_F_RXHASH)
1616 ionic_lif_rss_init(lif);
1617
0f3154e6
SN
1618 ionic_set_rx_mode(lif->netdev);
1619
1620 return 0;
1621
1622err_out:
1623 while (i--) {
34dec947
SN
1624 ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
1625 ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
0f3154e6
SN
1626 }
1627
1628 return err;
1629}
1630
1631static int ionic_txrx_enable(struct ionic_lif *lif)
1632{
1633 int i, err;
1634
1635 for (i = 0; i < lif->nxqs; i++) {
34dec947
SN
1636 ionic_rx_fill(&lif->rxqcqs[i]->q);
1637 err = ionic_qcq_enable(lif->rxqcqs[i]);
0f3154e6
SN
1638 if (err)
1639 goto err_out;
1640
34dec947 1641 err = ionic_qcq_enable(lif->txqcqs[i]);
0f3154e6 1642 if (err) {
4ee7bda4 1643 if (err != -ETIMEDOUT)
34dec947 1644 ionic_qcq_disable(lif->rxqcqs[i]);
0f3154e6
SN
1645 goto err_out;
1646 }
1647 }
1648
1649 return 0;
1650
1651err_out:
1652 while (i--) {
34dec947 1653 err = ionic_qcq_disable(lif->txqcqs[i]);
4ee7bda4
SN
1654 if (err == -ETIMEDOUT)
1655 break;
34dec947 1656 err = ionic_qcq_disable(lif->rxqcqs[i]);
4ee7bda4
SN
1657 if (err == -ETIMEDOUT)
1658 break;
0f3154e6
SN
1659 }
1660
1661 return err;
1662}
1663
49d3b493
SN
1664static int ionic_start_queues(struct ionic_lif *lif)
1665{
1666 int err;
1667
1668 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
1669 return 0;
1670
1671 err = ionic_txrx_enable(lif);
1672 if (err) {
1673 clear_bit(IONIC_LIF_F_UP, lif->state);
1674 return err;
1675 }
1676 netif_tx_wake_all_queues(lif->netdev);
1677
1678 return 0;
1679}
1680
d4881430 1681static int ionic_open(struct net_device *netdev)
beead698
SN
1682{
1683 struct ionic_lif *lif = netdev_priv(netdev);
0f3154e6 1684 int err;
beead698 1685
0f3154e6
SN
1686 err = ionic_txrx_alloc(lif);
1687 if (err)
1688 return err;
1689
1690 err = ionic_txrx_init(lif);
1691 if (err)
49d3b493 1692 goto err_out;
beead698 1693
fa48494c
SN
1694 err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
1695 if (err)
1696 goto err_txrx_deinit;
1697
1698 err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
1699 if (err)
1700 goto err_txrx_deinit;
1701
49d3b493
SN
1702 /* don't start the queues until we have link */
1703 if (netif_carrier_ok(netdev)) {
1704 err = ionic_start_queues(lif);
1705 if (err)
1706 goto err_txrx_deinit;
1707 }
8d61aad4 1708
beead698 1709 return 0;
0f3154e6
SN
1710
1711err_txrx_deinit:
1712 ionic_txrx_deinit(lif);
49d3b493 1713err_out:
0f3154e6
SN
1714 ionic_txrx_free(lif);
1715 return err;
beead698
SN
1716}
1717
49d3b493 1718static void ionic_stop_queues(struct ionic_lif *lif)
beead698 1719{
49d3b493
SN
1720 if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
1721 return;
beead698 1722
49d3b493 1723 netif_tx_disable(lif->netdev);
b59eabd2 1724 ionic_txrx_disable(lif);
49d3b493 1725}
beead698 1726
d4881430 1727static int ionic_stop(struct net_device *netdev)
49d3b493
SN
1728{
1729 struct ionic_lif *lif = netdev_priv(netdev);
0f3154e6 1730
b59eabd2 1731 if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
c672412f
SN
1732 return 0;
1733
49d3b493 1734 ionic_stop_queues(lif);
0f3154e6
SN
1735 ionic_txrx_deinit(lif);
1736 ionic_txrx_free(lif);
beead698 1737
49d3b493 1738 return 0;
beead698
SN
1739}
1740
fbb39807
SN
1741static int ionic_get_vf_config(struct net_device *netdev,
1742 int vf, struct ifla_vf_info *ivf)
1743{
1744 struct ionic_lif *lif = netdev_priv(netdev);
1745 struct ionic *ionic = lif->ionic;
1746 int ret = 0;
1747
a836c352
SN
1748 if (!netif_device_present(netdev))
1749 return -EBUSY;
1750
fbb39807
SN
1751 down_read(&ionic->vf_op_lock);
1752
1753 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1754 ret = -EINVAL;
1755 } else {
1756 ivf->vf = vf;
1757 ivf->vlan = ionic->vfs[vf].vlanid;
1758 ivf->qos = 0;
1759 ivf->spoofchk = ionic->vfs[vf].spoofchk;
1760 ivf->linkstate = ionic->vfs[vf].linkstate;
1761 ivf->max_tx_rate = ionic->vfs[vf].maxrate;
1762 ivf->trusted = ionic->vfs[vf].trusted;
1763 ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
1764 }
1765
1766 up_read(&ionic->vf_op_lock);
1767 return ret;
1768}
1769
1770static int ionic_get_vf_stats(struct net_device *netdev, int vf,
1771 struct ifla_vf_stats *vf_stats)
1772{
1773 struct ionic_lif *lif = netdev_priv(netdev);
1774 struct ionic *ionic = lif->ionic;
1775 struct ionic_lif_stats *vs;
1776 int ret = 0;
1777
a836c352
SN
1778 if (!netif_device_present(netdev))
1779 return -EBUSY;
1780
fbb39807
SN
1781 down_read(&ionic->vf_op_lock);
1782
1783 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1784 ret = -EINVAL;
1785 } else {
1786 memset(vf_stats, 0, sizeof(*vf_stats));
1787 vs = &ionic->vfs[vf].stats;
1788
1789 vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
1790 vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
1791 vf_stats->rx_bytes = le64_to_cpu(vs->rx_ucast_bytes);
1792 vf_stats->tx_bytes = le64_to_cpu(vs->tx_ucast_bytes);
1793 vf_stats->broadcast = le64_to_cpu(vs->rx_bcast_packets);
1794 vf_stats->multicast = le64_to_cpu(vs->rx_mcast_packets);
1795 vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
1796 le64_to_cpu(vs->rx_mcast_drop_packets) +
1797 le64_to_cpu(vs->rx_bcast_drop_packets);
1798 vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
1799 le64_to_cpu(vs->tx_mcast_drop_packets) +
1800 le64_to_cpu(vs->tx_bcast_drop_packets);
1801 }
1802
1803 up_read(&ionic->vf_op_lock);
1804 return ret;
1805}
1806
1807static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1808{
1809 struct ionic_lif *lif = netdev_priv(netdev);
1810 struct ionic *ionic = lif->ionic;
1811 int ret;
1812
1813 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
1814 return -EINVAL;
1815
a836c352
SN
1816 if (!netif_device_present(netdev))
1817 return -EBUSY;
1818
e396ce5f 1819 down_write(&ionic->vf_op_lock);
fbb39807
SN
1820
1821 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1822 ret = -EINVAL;
1823 } else {
1824 ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
1825 if (!ret)
1826 ether_addr_copy(ionic->vfs[vf].macaddr, mac);
1827 }
1828
e396ce5f 1829 up_write(&ionic->vf_op_lock);
fbb39807
SN
1830 return ret;
1831}
1832
1833static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1834 u8 qos, __be16 proto)
1835{
1836 struct ionic_lif *lif = netdev_priv(netdev);
1837 struct ionic *ionic = lif->ionic;
1838 int ret;
1839
1840 /* until someday when we support qos */
1841 if (qos)
1842 return -EINVAL;
1843
1844 if (vlan > 4095)
1845 return -EINVAL;
1846
1847 if (proto != htons(ETH_P_8021Q))
1848 return -EPROTONOSUPPORT;
1849
a836c352
SN
1850 if (!netif_device_present(netdev))
1851 return -EBUSY;
1852
e396ce5f 1853 down_write(&ionic->vf_op_lock);
fbb39807
SN
1854
1855 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1856 ret = -EINVAL;
1857 } else {
1858 ret = ionic_set_vf_config(ionic, vf,
1859 IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
1860 if (!ret)
1861 ionic->vfs[vf].vlanid = vlan;
1862 }
1863
e396ce5f 1864 up_write(&ionic->vf_op_lock);
fbb39807
SN
1865 return ret;
1866}
1867
1868static int ionic_set_vf_rate(struct net_device *netdev, int vf,
1869 int tx_min, int tx_max)
1870{
1871 struct ionic_lif *lif = netdev_priv(netdev);
1872 struct ionic *ionic = lif->ionic;
1873 int ret;
1874
1875 /* setting the min just seems silly */
1876 if (tx_min)
1877 return -EINVAL;
1878
a836c352
SN
1879 if (!netif_device_present(netdev))
1880 return -EBUSY;
1881
fbb39807
SN
1882 down_write(&ionic->vf_op_lock);
1883
1884 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1885 ret = -EINVAL;
1886 } else {
1887 ret = ionic_set_vf_config(ionic, vf,
1888 IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
1889 if (!ret)
1890 lif->ionic->vfs[vf].maxrate = tx_max;
1891 }
1892
1893 up_write(&ionic->vf_op_lock);
1894 return ret;
1895}
1896
1897static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
1898{
1899 struct ionic_lif *lif = netdev_priv(netdev);
1900 struct ionic *ionic = lif->ionic;
1901 u8 data = set; /* convert to u8 for config */
1902 int ret;
1903
a836c352
SN
1904 if (!netif_device_present(netdev))
1905 return -EBUSY;
1906
fbb39807
SN
1907 down_write(&ionic->vf_op_lock);
1908
1909 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1910 ret = -EINVAL;
1911 } else {
1912 ret = ionic_set_vf_config(ionic, vf,
1913 IONIC_VF_ATTR_SPOOFCHK, &data);
1914 if (!ret)
1915 ionic->vfs[vf].spoofchk = data;
1916 }
1917
1918 up_write(&ionic->vf_op_lock);
1919 return ret;
1920}
1921
1922static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
1923{
1924 struct ionic_lif *lif = netdev_priv(netdev);
1925 struct ionic *ionic = lif->ionic;
1926 u8 data = set; /* convert to u8 for config */
1927 int ret;
1928
a836c352
SN
1929 if (!netif_device_present(netdev))
1930 return -EBUSY;
1931
fbb39807
SN
1932 down_write(&ionic->vf_op_lock);
1933
1934 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1935 ret = -EINVAL;
1936 } else {
1937 ret = ionic_set_vf_config(ionic, vf,
1938 IONIC_VF_ATTR_TRUST, &data);
1939 if (!ret)
1940 ionic->vfs[vf].trusted = data;
1941 }
1942
1943 up_write(&ionic->vf_op_lock);
1944 return ret;
1945}
1946
1947static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
1948{
1949 struct ionic_lif *lif = netdev_priv(netdev);
1950 struct ionic *ionic = lif->ionic;
1951 u8 data;
1952 int ret;
1953
1954 switch (set) {
1955 case IFLA_VF_LINK_STATE_ENABLE:
1956 data = IONIC_VF_LINK_STATUS_UP;
1957 break;
1958 case IFLA_VF_LINK_STATE_DISABLE:
1959 data = IONIC_VF_LINK_STATUS_DOWN;
1960 break;
1961 case IFLA_VF_LINK_STATE_AUTO:
1962 data = IONIC_VF_LINK_STATUS_AUTO;
1963 break;
1964 default:
1965 return -EINVAL;
1966 }
1967
a836c352
SN
1968 if (!netif_device_present(netdev))
1969 return -EBUSY;
1970
fbb39807
SN
1971 down_write(&ionic->vf_op_lock);
1972
1973 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1974 ret = -EINVAL;
1975 } else {
1976 ret = ionic_set_vf_config(ionic, vf,
1977 IONIC_VF_ATTR_LINKSTATE, &data);
1978 if (!ret)
1979 ionic->vfs[vf].linkstate = set;
1980 }
1981
1982 up_write(&ionic->vf_op_lock);
1983 return ret;
1984}
1985
beead698
SN
1986static const struct net_device_ops ionic_netdev_ops = {
1987 .ndo_open = ionic_open,
1988 .ndo_stop = ionic_stop,
0f3154e6 1989 .ndo_start_xmit = ionic_start_xmit,
8d61aad4 1990 .ndo_get_stats64 = ionic_get_stats64,
2a654540 1991 .ndo_set_rx_mode = ionic_set_rx_mode,
beead698
SN
1992 .ndo_set_features = ionic_set_features,
1993 .ndo_set_mac_address = ionic_set_mac_address,
1994 .ndo_validate_addr = eth_validate_addr,
1995 .ndo_tx_timeout = ionic_tx_timeout,
1996 .ndo_change_mtu = ionic_change_mtu,
1997 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid,
1998 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid,
fbb39807
SN
1999 .ndo_set_vf_vlan = ionic_set_vf_vlan,
2000 .ndo_set_vf_trust = ionic_set_vf_trust,
2001 .ndo_set_vf_mac = ionic_set_vf_mac,
2002 .ndo_set_vf_rate = ionic_set_vf_rate,
2003 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk,
2004 .ndo_get_vf_config = ionic_get_vf_config,
2005 .ndo_set_vf_link_state = ionic_set_vf_link_state,
2006 .ndo_get_vf_stats = ionic_get_vf_stats,
beead698
SN
2007};
2008
/* Quiesce the queues, run an optional callback while they are down, then
 * bring them back up.  If the netdev was running, it is detached and
 * stopped first and reattached afterwards.  The whole sequence is
 * serialized under lif->queue_lock.  Returns 0 or a negative errno from
 * the stop/open path.
 */
086c18f2 2009int ionic_reset_queues(struct ionic_lif *lif, ionic_reset_cb cb, void *arg)
beead698
SN
2010{
2011 bool running;
2012 int err = 0;
2013
0925e9db 2014 mutex_lock(&lif->queue_lock);
beead698 2015 running = netif_running(lif->netdev);
b59eabd2
SN
2016 if (running) {
2017 netif_device_detach(lif->netdev);
beead698 2018 err = ionic_stop(lif->netdev);
086c18f2 2019 if (err)
59929fbb 2020 goto reset_out;
b59eabd2 2021 }
086c18f2
SN
2022
2023 if (cb)
2024 cb(lif, arg);
2025
2026 if (running) {
2027 err = ionic_open(lif->netdev);
b59eabd2
SN
2028 netif_device_attach(lif->netdev);
2029 }
59929fbb
SN
2030
2031reset_out:
0925e9db 2032 mutex_unlock(&lif->queue_lock);
beead698
SN
2033
2034 return err;
2035}
2036
/* Allocate and pre-initialize the (single) LIF for this ionic device:
 * the lif identity buffer, the netdev (with the LIF embedded as its
 * private data), default descriptor counts and coalesce settings, the
 * DMA-coherent lif info region, the control/txrx qcq arrays, and the
 * RSS indirection table.  On failure everything allocated so far is
 * unwound via the goto ladder.  Returns 0 or a negative errno.
 */
30b87ab4 2037int ionic_lif_alloc(struct ionic *ionic)
1a58e196
SN
2038{
2039 struct device *dev = ionic->dev;
4b03b273 2040 union ionic_lif_identity *lid;
1a58e196
SN
2041 struct net_device *netdev;
2042 struct ionic_lif *lif;
aa319881 2043 int tbl_sz;
1a58e196
SN
2044 int err;
2045
4b03b273
SN
2046 lid = kzalloc(sizeof(*lid), GFP_KERNEL);
2047 if (!lid)
30b87ab4 2048 return -ENOMEM;
4b03b273 2049
1a58e196
SN
 /* lif struct lives in the netdev's private area */
2050 netdev = alloc_etherdev_mqs(sizeof(*lif),
2051 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
2052 if (!netdev) {
2053 dev_err(dev, "Cannot allocate netdev, aborting\n");
4b1debbe
CIK
2054 err = -ENOMEM;
2055 goto err_out_free_lid;
1a58e196
SN
2056 }
2057
2058 SET_NETDEV_DEV(netdev, dev);
2059
2060 lif = netdev_priv(netdev);
2061 lif->netdev = netdev;
30b87ab4 2062 ionic->lif = lif;
beead698 2063 netdev->netdev_ops = &ionic_netdev_ops;
4d03e00a 2064 ionic_ethtool_set_ops(netdev);
beead698
SN
2065
2066 netdev->watchdog_timeo = 2 * HZ;
aa47b540
SN
2067 netif_carrier_off(netdev);
2068
4b03b273
SN
 /* MTU limits come from the device's reported frame size limits */
2069 lif->identity = lid;
2070 lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
2071 ionic_lif_identify(ionic, lif->lif_type, lif->identity);
eba87609
SN
2072 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
2073 le32_to_cpu(lif->identity->eth.min_frame_size));
4b03b273
SN
2074 lif->netdev->max_mtu =
2075 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
1a58e196
SN
2076
2077 lif->neqs = ionic->neqs_per_lif;
2078 lif->nxqs = ionic->ntxqs_per_lif;
2079
2080 lif->ionic = ionic;
30b87ab4 2081 lif->index = 0;
0f3154e6
SN
2082 lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
2083 lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
b14e4e95 2084 lif->tx_budget = IONIC_TX_BUDGET_DEFAULT;
1a58e196 2085
8c15440b 2086 /* Convert the default coalesce value to actual hw resolution */
780eded3 2087 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
ff7ebed9 2088 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
780eded3 2089 lif->rx_coalesce_usecs);
fe8c30b5
SN
2090 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
2091 lif->tx_coalesce_hw = lif->rx_coalesce_hw;
8c15440b 2092
30b87ab4 2093 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
1a58e196 2094
1d062b7b
SN
2095 spin_lock_init(&lif->adminq_lock);
2096
2a654540
SN
2097 spin_lock_init(&lif->deferred.lock);
2098 INIT_LIST_HEAD(&lif->deferred.list);
2099 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
2100
1a58e196
SN
2101 /* allocate lif info */
2102 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
2103 lif->info = dma_alloc_coherent(dev, lif->info_sz,
2104 &lif->info_pa, GFP_KERNEL);
2105 if (!lif->info) {
2106 dev_err(dev, "Failed to allocate lif info, aborting\n");
2107 err = -ENOMEM;
2108 goto err_out_free_netdev;
2109 }
2110
2a8c2c1a
SN
2111 ionic_debugfs_add_lif(lif);
2112
30b87ab4
SN
2113 /* allocate control queues and txrx queue arrays */
2114 ionic_lif_queue_identify(lif);
1d062b7b
SN
2115 err = ionic_qcqs_alloc(lif);
2116 if (err)
2117 goto err_out_free_lif_info;
2118
aa319881
SN
2119 /* allocate rss indirection table */
2120 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
2121 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
2122 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
2123 &lif->rss_ind_tbl_pa,
2124 GFP_KERNEL);
2125
2126 if (!lif->rss_ind_tbl) {
73a63ee9 2127 err = -ENOMEM;
aa319881
SN
2128 dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
2129 goto err_out_free_qcqs;
2130 }
ffac2027 2131 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
aa319881 2132
30b87ab4 2133 return 0;
1a58e196 2134
aa319881
SN
2135err_out_free_qcqs:
2136 ionic_qcqs_free(lif);
1d062b7b
SN
2137err_out_free_lif_info:
2138 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2139 lif->info = NULL;
2140 lif->info_pa = 0;
1a58e196
SN
2141err_out_free_netdev:
2142 free_netdev(lif->netdev);
2143 lif = NULL;
4b1debbe 2144err_out_free_lid:
4b03b273 2145 kfree(lid);
1a58e196 2146
30b87ab4 2147 return err;
1a58e196
SN
2148}
2149
/* Issue a LIF_RESET dev command for this lif's index, serialized under
 * the device command lock.  The command's result is not checked here.
 */
2150static void ionic_lif_reset(struct ionic_lif *lif)
2151{
2152 struct ionic_dev *idev = &lif->ionic->idev;
2153
2154 mutex_lock(&lif->ionic->dev_cmd_lock);
2155 ionic_dev_cmd_lif_reset(idev, lif->index);
2156 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2157 mutex_unlock(&lif->ionic->dev_cmd_lock);
2158}
2159
c672412f
SN
/* Tear the LIF down in response to a firmware-down event: detach the
 * netdev, stop and free the txrx queues if they were up, deinit the
 * LIF, reset the device, and free the qcqs.  The IONIC_LIF_F_FW_RESET
 * state bit makes this idempotent - a second call while the flag is
 * already set returns immediately.
 */
2160static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
2161{
2162 struct ionic *ionic = lif->ionic;
2163
2164 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
2165 return;
2166
2167 dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
2168
2169 netif_device_detach(lif->netdev);
2170
2171 if (test_bit(IONIC_LIF_F_UP, lif->state)) {
2172 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
0925e9db 2173 mutex_lock(&lif->queue_lock);
c672412f 2174 ionic_stop_queues(lif);
0925e9db 2175 mutex_unlock(&lif->queue_lock);
c672412f
SN
2176 }
2177
2178 if (netif_running(lif->netdev)) {
2179 ionic_txrx_deinit(lif);
2180 ionic_txrx_free(lif);
2181 }
30b87ab4 2182 ionic_lif_deinit(lif);
6bc977fa 2183 ionic_reset(ionic);
c672412f
SN
2184 ionic_qcqs_free(lif);
2185
2186 dev_info(ionic->dev, "FW Down: LIFs stopped\n");
2187}
2188
/* Rebuild the LIF after firmware comes back: re-read devinfo, re-init
 * the port, re-allocate and re-init the qcqs and LIF, replay the rx
 * filters, and re-create the txrx queues if the netdev was running.
 * Only acts if IONIC_LIF_F_FW_RESET is set (i.e. the down handler ran);
 * clears that flag on success.  Failures unwind via the goto ladder and
 * are logged, not returned.
 */
2189static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
2190{
2191 struct ionic *ionic = lif->ionic;
2192 int err;
2193
2194 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2195 return;
2196
2197 dev_info(ionic->dev, "FW Up: restarting LIFs\n");
2198
1d53aedc 2199 ionic_init_devinfo(ionic);
ddc5911b 2200 ionic_port_init(ionic);
c672412f
SN
2201 err = ionic_qcqs_alloc(lif);
2202 if (err)
2203 goto err_out;
2204
30b87ab4 2205 err = ionic_lif_init(lif);
c672412f
SN
2206 if (err)
2207 goto err_qcqs_free;
2208
2209 if (lif->registered)
2210 ionic_lif_set_netdev_info(lif);
2211
7e4d4759
SN
2212 ionic_rx_filter_replay(lif);
2213
c672412f
SN
2214 if (netif_running(lif->netdev)) {
2215 err = ionic_txrx_alloc(lif);
2216 if (err)
2217 goto err_lifs_deinit;
2218
2219 err = ionic_txrx_init(lif);
2220 if (err)
2221 goto err_txrx_free;
2222 }
2223
2224 clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
2225 ionic_link_status_check_request(lif);
2226 netif_device_attach(lif->netdev);
2227 dev_info(ionic->dev, "FW Up: LIFs restarted\n");
2228
2229 return;
2230
2231err_txrx_free:
2232 ionic_txrx_free(lif);
2233err_lifs_deinit:
30b87ab4 2234 ionic_lif_deinit(lif);
c672412f
SN
2235err_qcqs_free:
2236 ionic_qcqs_free(lif);
2237err_out:
2238 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
2239}
2240
/* Final teardown of the LIF: free the RSS indirection table, the qcqs,
 * the identity and info buffers, unmap the kernel doorbell page, and
 * free the netdev (which also frees the embedded lif).  A LIF reset is
 * issued only if we are not already in a firmware-reset situation.
 */
30b87ab4 2241void ionic_lif_free(struct ionic_lif *lif)
1a58e196
SN
2242{
2243 struct device *dev = lif->ionic->dev;
2244
aa319881
SN
2245 /* free rss indirection table */
2246 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
2247 lif->rss_ind_tbl_pa);
2248 lif->rss_ind_tbl = NULL;
2249 lif->rss_ind_tbl_pa = 0;
2250
1d062b7b
SN
2251 /* free queues */
2252 ionic_qcqs_free(lif);
c672412f
SN
2253 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2254 ionic_lif_reset(lif);
1a58e196
SN
2255
2256 /* free lif info */
4b03b273 2257 kfree(lif->identity);
1a58e196
SN
2258 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2259 lif->info = NULL;
2260 lif->info_pa = 0;
2261
6461b446
SN
2262 /* unmap doorbell page */
2263 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2264 lif->kern_dbpage = NULL;
2265 kfree(lif->dbid_inuse);
2266 lif->dbid_inuse = NULL;
2267
1a58e196
SN
2268 /* free netdev & lif */
2269 ionic_debugfs_del_lif(lif);
1a58e196
SN
2270 free_netdev(lif->netdev);
2271}
2272
/* Undo ionic_lif_init(): cancel deferred work, deinit rx filters and
 * RSS (skipped during a firmware reset, when the device state is
 * already gone), shut down the admin/notify queues, and reset the LIF.
 * The IONIC_LIF_F_INITED bit guards against double deinit.
 */
30b87ab4 2273void ionic_lif_deinit(struct ionic_lif *lif)
1a58e196 2274{
c672412f 2275 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
1a58e196
SN
2276 return;
2277
c672412f
SN
2278 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2279 cancel_work_sync(&lif->deferred.work);
2280 cancel_work_sync(&lif->tx_timeout_work);
7e4d4759 2281 ionic_rx_filters_deinit(lif);
bdff4666
SN
2282 if (lif->netdev->features & NETIF_F_RXHASH)
2283 ionic_lif_rss_deinit(lif);
c672412f 2284 }
1a58e196 2285
1d062b7b 2286 napi_disable(&lif->adminqcq->napi);
77ceb68e 2287 ionic_lif_qcq_deinit(lif, lif->notifyqcq);
1d062b7b
SN
2288 ionic_lif_qcq_deinit(lif, lif->adminqcq);
2289
0925e9db 2290 mutex_destroy(&lif->queue_lock);
1a58e196
SN
2291 ionic_lif_reset(lif);
2292}
2293
1d062b7b
SN
/* Initialize the admin queue via a dev command (the adminq can't carry
 * its own init), record the hw queue type/index returned in the
 * completion, register and enable its napi handler, and unmask its
 * interrupt.  Returns 0 or a negative errno from the dev command.
 */
2294static int ionic_lif_adminq_init(struct ionic_lif *lif)
2295{
2296 struct device *dev = lif->ionic->dev;
2297 struct ionic_q_init_comp comp;
2298 struct ionic_dev *idev;
2299 struct ionic_qcq *qcq;
2300 struct ionic_queue *q;
2301 int err;
2302
2303 idev = &lif->ionic->idev;
2304 qcq = lif->adminqcq;
2305 q = &qcq->q;
2306
2307 mutex_lock(&lif->ionic->dev_cmd_lock);
2308 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
2309 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2310 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2311 mutex_unlock(&lif->ionic->dev_cmd_lock);
2312 if (err) {
2313 netdev_err(lif->netdev, "adminq init failed %d\n", err);
2314 return err;
2315 }
2316
2317 q->hw_type = comp.hw_type;
2318 q->hw_index = le32_to_cpu(comp.hw_index);
2319 q->dbval = IONIC_DBELL_QID(q->hw_index);
2320
2321 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
2322 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
2323
2324 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
2325 NAPI_POLL_WEIGHT);
2326
1d062b7b
SN
2327 napi_enable(&qcq->napi);
2328
2329 if (qcq->flags & IONIC_QCQ_F_INTR)
2330 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
2331 IONIC_INTR_MASK_CLEAR);
2332
2333 qcq->flags |= IONIC_QCQ_F_INITED;
2334
1d062b7b
SN
2335 return 0;
2336}
2337
77ceb68e
SN
/* Initialize the notify queue with a Q_INIT admin command.  The
 * notifyq shares the adminq's interrupt (intr_index is the adminqcq's)
 * and has no completion ring of its own here; the lif pointer is
 * preset as the cb_arg on descriptor 0.  Returns 0 or a negative errno
 * from the admin command.
 */
2338static int ionic_lif_notifyq_init(struct ionic_lif *lif)
2339{
2340 struct ionic_qcq *qcq = lif->notifyqcq;
2341 struct device *dev = lif->ionic->dev;
2342 struct ionic_queue *q = &qcq->q;
2343 int err;
2344
2345 struct ionic_admin_ctx ctx = {
2346 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2347 .cmd.q_init = {
2348 .opcode = IONIC_CMD_Q_INIT,
2349 .lif_index = cpu_to_le16(lif->index),
2350 .type = q->type,
5b3f3f2a 2351 .ver = lif->qtype_info[q->type].version,
77ceb68e
SN
2352 .index = cpu_to_le32(q->index),
2353 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
2354 IONIC_QINIT_F_ENA),
2355 .intr_index = cpu_to_le16(lif->adminqcq->intr.index),
2356 .pid = cpu_to_le16(q->pid),
2357 .ring_size = ilog2(q->num_descs),
2358 .ring_base = cpu_to_le64(q->base_pa),
2359 }
2360 };
2361
2362 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
2363 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
2364 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
2365 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
2366
2367 err = ionic_adminq_post_wait(lif, &ctx);
2368 if (err)
2369 return err;
2370
c672412f 2371 lif->last_eid = 0;
77ceb68e
SN
2372 q->hw_type = ctx.comp.q_init.hw_type;
2373 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
2374 q->dbval = IONIC_DBELL_QID(q->hw_index);
2375
2376 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
2377 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
2378
2379 /* preset the callback info */
2380 q->info[0].cb_arg = lif;
2381
2382 qcq->flags |= IONIC_QCQ_F_INITED;
2383
77ceb68e
SN
2384 return 0;
2385}
2386
2a654540
SN
/* Fetch the device's station MAC via LIF_GETATTR and reconcile it with
 * the netdev MAC: if the netdev MAC is already set (e.g. after a fw
 * upgrade) just make sure it's in the filter list; otherwise adopt the
 * device MAC as the netdev address.  A zero device MAC is a no-op.
 * Returns 0 (a bad MAC from the NIC is warned about, not fatal) or a
 * negative errno from the admin command.
 */
2387static int ionic_station_set(struct ionic_lif *lif)
2388{
2389 struct net_device *netdev = lif->netdev;
2390 struct ionic_admin_ctx ctx = {
2391 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2392 .cmd.lif_getattr = {
2393 .opcode = IONIC_CMD_LIF_GETATTR,
2394 .index = cpu_to_le16(lif->index),
2395 .attr = IONIC_LIF_ATTR_MAC,
2396 },
2397 };
2398 struct sockaddr addr;
2399 int err;
2400
2401 err = ionic_adminq_post_wait(lif, &ctx);
2402 if (err)
2403 return err;
216902ae
SN
2404 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
2405 ctx.comp.lif_getattr.mac);
fbb39807
SN
2406 if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
2407 return 0;
2408
f20a4d40
SN
2409 if (!is_zero_ether_addr(netdev->dev_addr)) {
2410 /* If the netdev mac is non-zero and doesn't match the default
2411 * device address, it was set by something earlier and we're
2412 * likely here again after a fw-upgrade reset. We need to be
2413 * sure the netdev mac is in our filter list.
2414 */
2415 if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
2416 netdev->dev_addr))
2417 ionic_lif_addr(lif, netdev->dev_addr, true);
2418 } else {
2419 /* Update the netdev mac with the device's mac */
216902ae
SN
2420 memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
2421 addr.sa_family = AF_INET;
2422 err = eth_prepare_mac_addr_change(netdev, &addr);
2423 if (err) {
2424 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
2425 addr.sa_data, err);
2426 return 0;
2427 }
2a654540 2428
216902ae
SN
2429 eth_commit_mac_addr_change(netdev, &addr);
2430 }
fbb39807 2431
2a654540
SN
2432 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
2433 netdev->dev_addr);
2434 ionic_lif_addr(lif, netdev->dev_addr, true);
2435
2436 return 0;
2437}
2438
/* Bring the LIF to an initialized state: issue LIF_INIT to the device,
 * set up the doorbell id bitmap and map the kernel doorbell page, init
 * the admin and (optionally) notify queues, nic features, rx filters
 * (skipped during fw reset), and the station MAC.  Sets
 * IONIC_LIF_F_INITED on success; failures unwind via the goto ladder.
 * Returns 0 or a negative errno.
 */
30b87ab4 2439int ionic_lif_init(struct ionic_lif *lif)
1a58e196
SN
2440{
2441 struct ionic_dev *idev = &lif->ionic->idev;
6461b446 2442 struct device *dev = lif->ionic->dev;
1a58e196 2443 struct ionic_lif_init_comp comp;
6461b446 2444 int dbpage_num;
1a58e196
SN
2445 int err;
2446
1a58e196
SN
2447 mutex_lock(&lif->ionic->dev_cmd_lock);
2448 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
2449 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2450 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2451 mutex_unlock(&lif->ionic->dev_cmd_lock);
2452 if (err)
2453 return err;
2454
2455 lif->hw_index = le16_to_cpu(comp.hw_index);
0925e9db 2456 mutex_init(&lif->queue_lock);
1a58e196 2457
6461b446
SN
2458 /* now that we have the hw_index we can figure out our doorbell page */
2459 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
2460 if (!lif->dbid_count) {
2461 dev_err(dev, "No doorbell pages, aborting\n");
2462 return -EINVAL;
2463 }
2464
2465 lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
2466 if (!lif->dbid_inuse) {
2467 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
2468 return -ENOMEM;
2469 }
2470
2471 /* first doorbell id reserved for kernel (dbid aka pid == zero) */
2472 set_bit(0, lif->dbid_inuse);
2473 lif->kern_pid = 0;
2474
2475 dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
2476 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
2477 if (!lif->kern_dbpage) {
2478 dev_err(dev, "Cannot map dbpage, aborting\n");
2479 err = -ENOMEM;
2480 goto err_out_free_dbid;
2481 }
2482
1d062b7b
SN
2483 err = ionic_lif_adminq_init(lif);
2484 if (err)
2485 goto err_out_adminq_deinit;
2486
77ceb68e
SN
2487 if (lif->ionic->nnqs_per_lif) {
2488 err = ionic_lif_notifyq_init(lif);
2489 if (err)
2490 goto err_out_notifyq_deinit;
2491 }
2492
beead698
SN
2493 err = ionic_init_nic_features(lif);
2494 if (err)
2495 goto err_out_notifyq_deinit;
2496
7e4d4759
SN
2497 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2498 err = ionic_rx_filters_init(lif);
2499 if (err)
2500 goto err_out_notifyq_deinit;
2501 }
c1e329eb 2502
2a654540
SN
2503 err = ionic_station_set(lif);
2504 if (err)
2505 goto err_out_notifyq_deinit;
2506
0f3154e6
SN
2507 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
2508
c6d3d73a 2509 set_bit(IONIC_LIF_F_INITED, lif->state);
1a58e196 2510
8c15440b
SN
2511 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
2512
1a58e196 2513 return 0;
6461b446 2514
77ceb68e
SN
2515err_out_notifyq_deinit:
2516 ionic_lif_qcq_deinit(lif, lif->notifyqcq);
1d062b7b
SN
2517err_out_adminq_deinit:
2518 ionic_lif_qcq_deinit(lif, lif->adminqcq);
2519 ionic_lif_reset(lif);
2520 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2521 lif->kern_dbpage = NULL;
6461b446
SN
2522err_out_free_dbid:
2523 kfree(lif->dbid_inuse);
2524 lif->dbid_inuse = NULL;
2525
2526 return err;
1a58e196
SN
2527}
2528
1a371ea1
SN
/* Intentionally empty work handler; exists so the notifier's work
 * struct has a valid callback to cancel in ionic_lif_unregister().
 */
2529static void ionic_lif_notify_work(struct work_struct *ws)
2530{
2531}
2532
/* Push the current netdev name to the device via LIF_SETATTR so the
 * firmware knows the host-side interface name.  Best effort - the
 * admin command's return value is ignored.
 */
2533static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
2534{
2535 struct ionic_admin_ctx ctx = {
2536 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2537 .cmd.lif_setattr = {
2538 .opcode = IONIC_CMD_LIF_SETATTR,
2539 .index = cpu_to_le16(lif->index),
2540 .attr = IONIC_LIF_ATTR_NAME,
2541 },
2542 };
2543
2544 strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
2545 sizeof(ctx.cmd.lif_setattr.name));
2546
2547 ionic_adminq_post_wait(lif, &ctx);
2548}
2549
/* Return the ionic_lif behind a netdev, or NULL if the netdev isn't
 * ours.  Ownership is detected by comparing the xmit op pointer, since
 * the notifier sees events for all netdevs in the system.
 */
2550static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
2551{
2552 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
2553 return NULL;
2554
2555 return netdev_priv(netdev);
2556}
2557
/* Netdevice notifier callback: on NETDEV_CHANGENAME for one of our own
 * netdevs, forward the new name to the device.  All other events and
 * foreign netdevs fall through to NOTIFY_DONE.
 */
2558static int ionic_lif_notify(struct notifier_block *nb,
2559 unsigned long event, void *info)
2560{
2561 struct net_device *ndev = netdev_notifier_info_to_dev(info);
2562 struct ionic *ionic = container_of(nb, struct ionic, nb);
2563 struct ionic_lif *lif = ionic_netdev_lif(ndev);
2564
2565 if (!lif || lif->ionic != ionic)
2566 return NOTIFY_DONE;
2567
2568 switch (event) {
2569 case NETDEV_CHANGENAME:
2570 ionic_lif_set_netdev_info(lif);
2571 break;
2572 }
2573
2574 return NOTIFY_DONE;
2575}
2576
/* Register the netdevice-event notifier and the LIF's netdev with the
 * stack, then push the netdev name to the device.  A notifier
 * registration failure is tolerated (notifier_call is just cleared);
 * a netdev registration failure is returned.  Returns 0 or a negative
 * errno from register_netdev().
 */
30b87ab4 2577int ionic_lif_register(struct ionic_lif *lif)
beead698
SN
2578{
2579 int err;
2580
30b87ab4 2581 INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
1a371ea1 2582
30b87ab4 2583 lif->ionic->nb.notifier_call = ionic_lif_notify;
1a371ea1 2584
30b87ab4 2585 err = register_netdevice_notifier(&lif->ionic->nb);
1a371ea1 2586 if (err)
30b87ab4 2587 lif->ionic->nb.notifier_call = NULL;
1a371ea1 2588
beead698 2589 /* only register LIF0 for now */
30b87ab4 2590 err = register_netdev(lif->netdev);
beead698 2591 if (err) {
30b87ab4 2592 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
beead698
SN
2593 return err;
2594 }
30b87ab4
SN
2595 lif->registered = true;
2596 ionic_lif_set_netdev_info(lif);
beead698
SN
2597
2598 return 0;
2599}
2600
/* Undo ionic_lif_register(): tear down the netdevice notifier (if it
 * was successfully installed) and unregister the netdev if it is still
 * registered with the stack.
 */
30b87ab4 2601void ionic_lif_unregister(struct ionic_lif *lif)
beead698 2602{
30b87ab4
SN
2603 if (lif->ionic->nb.notifier_call) {
2604 unregister_netdevice_notifier(&lif->ionic->nb);
2605 cancel_work_sync(&lif->ionic->nb_work);
2606 lif->ionic->nb.notifier_call = NULL;
1a371ea1
SN
2607 }
2608
30b87ab4
SN
2609 if (lif->netdev->reg_state == NETREG_REGISTERED)
2610 unregister_netdev(lif->netdev);
2611 lif->registered = false;
beead698
SN
2612}
2613
5b3f3f2a
SN
/* Query the device, per known queue type, for the queue capabilities
 * it supports at the driver's requested version (ionic_qtype_versions)
 * and cache the results in lif->qtype_info[].  -EINVAL for a type means
 * that type isn't supported and is skipped; -EIO means older firmware
 * with no q_ident support at all, so we stop and keep the zeroed
 * defaults; other errors also stop the scan.
 */
2614static void ionic_lif_queue_identify(struct ionic_lif *lif)
2615{
2616 struct ionic *ionic = lif->ionic;
2617 union ionic_q_identity *q_ident;
2618 struct ionic_dev *idev;
2619 int qtype;
2620 int err;
2621
2622 idev = &lif->ionic->idev;
 /* results are read in-place from the dev cmd data registers */
2623 q_ident = (union ionic_q_identity *)&idev->dev_cmd_regs->data;
2624
2625 for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
2626 struct ionic_qtype_info *qti = &lif->qtype_info[qtype];
2627
2628 /* filter out the ones we know about */
2629 switch (qtype) {
2630 case IONIC_QTYPE_ADMINQ:
2631 case IONIC_QTYPE_NOTIFYQ:
2632 case IONIC_QTYPE_RXQ:
2633 case IONIC_QTYPE_TXQ:
2634 break;
2635 default:
2636 continue;
2637 }
2638
2639 memset(qti, 0, sizeof(*qti));
2640
2641 mutex_lock(&ionic->dev_cmd_lock);
2642 ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
2643 ionic_qtype_versions[qtype]);
2644 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2645 if (!err) {
2646 qti->version = q_ident->version;
2647 qti->supported = q_ident->supported;
2648 qti->features = le64_to_cpu(q_ident->features);
2649 qti->desc_sz = le16_to_cpu(q_ident->desc_sz);
2650 qti->comp_sz = le16_to_cpu(q_ident->comp_sz);
2651 qti->sg_desc_sz = le16_to_cpu(q_ident->sg_desc_sz);
2652 qti->max_sg_elems = le16_to_cpu(q_ident->max_sg_elems);
2653 qti->sg_desc_stride = le16_to_cpu(q_ident->sg_desc_stride);
2654 }
2655 mutex_unlock(&ionic->dev_cmd_lock);
2656
2657 if (err == -EINVAL) {
2658 dev_err(ionic->dev, "qtype %d not supported\n", qtype);
2659 continue;
2660 } else if (err == -EIO) {
2661 dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
2662 return;
2663 } else if (err) {
2664 dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
2665 qtype, err);
2666 return;
2667 }
2668
2669 dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
2670 qtype, qti->version);
2671 dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
2672 qtype, qti->supported);
2673 dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
2674 qtype, qti->features);
2675 dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
2676 qtype, qti->desc_sz);
2677 dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
2678 qtype, qti->comp_sz);
2679 dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
2680 qtype, qti->sg_desc_sz);
2681 dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
2682 qtype, qti->max_sg_elems);
2683 dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
2684 qtype, qti->sg_desc_stride);
2685 }
2686}
2687
1a58e196
SN
/* Run LIF_IDENTIFY for the given lif type and copy the identity data
 * out of the dev command registers into *lid (clamped to the register
 * window size).  Logs the interesting capability fields at debug
 * level.  Returns 0 or a negative errno from the dev command.
 */
2688int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
2689 union ionic_lif_identity *lid)
2690{
2691 struct ionic_dev *idev = &ionic->idev;
2692 size_t sz;
2693 int err;
2694
2695 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
2696
2697 mutex_lock(&ionic->dev_cmd_lock);
2698 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
2699 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
2700 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
2701 mutex_unlock(&ionic->dev_cmd_lock);
2702 if (err)
2703 return (err);
2704
2705 dev_dbg(ionic->dev, "capabilities 0x%llx\n",
2706 le64_to_cpu(lid->capabilities));
2707
2708 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
2709 le32_to_cpu(lid->eth.max_ucast_filters));
2710 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
2711 le32_to_cpu(lid->eth.max_mcast_filters));
2712 dev_dbg(ionic->dev, "eth.features 0x%llx\n",
2713 le64_to_cpu(lid->eth.config.features));
2714 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
2715 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
2716 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
2717 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
2718 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
2719 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
2720 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
2721 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
2722 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
2723 dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
2724 dev_dbg(ionic->dev, "eth.config.mtu %d\n",
2725 le32_to_cpu(lid->eth.config.mtu));
2726
2727 return 0;
2728}
2729
/* Size the per-lif queue and interrupt resources against what the
 * device advertises and what the OS will grant.  Starts from the
 * device's queue counts clamped to the number of online CPUs, then
 * iteratively halves notifyqs, then event queues, then txrx pairs
 * until an MSI vector allocation of 1 (adminq/notifyq) + nxqs + neqs
 * succeeds.  Stores the final counts in *ionic.  Returns 0, a negative
 * errno from vector allocation, or -ENOSPC if even the minimum of 2
 * interrupts can't be had.
 */
30b87ab4 2730int ionic_lif_size(struct ionic *ionic)
1a58e196
SN
2731{
2732 struct ionic_identity *ident = &ionic->ident;
2733 unsigned int nintrs, dev_nintrs;
2734 union ionic_lif_config *lc;
2735 unsigned int ntxqs_per_lif;
2736 unsigned int nrxqs_per_lif;
2737 unsigned int neqs_per_lif;
2738 unsigned int nnqs_per_lif;
2739 unsigned int nxqs, neqs;
2740 unsigned int min_intrs;
2741 int err;
2742
2743 lc = &ident->lif.eth.config;
2744 dev_nintrs = le32_to_cpu(ident->dev.nintrs);
2745 neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
2746 nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
2747 ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
2748 nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
2749
2750 nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
2751 nxqs = min(nxqs, num_online_cpus());
2752 neqs = min(neqs_per_lif, num_online_cpus());
2753
2754try_again:
2755 /* interrupt usage:
2756 * 1 for master lif adminq/notifyq
2757 * 1 for each CPU for master lif TxRx queue pairs
2758 * whatever's left is for RDMA queues
2759 */
2760 nintrs = 1 + nxqs + neqs;
2761 min_intrs = 2; /* adminq + 1 TxRx queue pair */
2762
2763 if (nintrs > dev_nintrs)
2764 goto try_fewer;
2765
2766 err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
2767 if (err < 0 && err != -ENOSPC) {
2768 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
2769 return err;
2770 }
2771 if (err == -ENOSPC)
2772 goto try_fewer;
2773
 /* a short grant isn't usable; release and shrink the request */
2774 if (err != nintrs) {
2775 ionic_bus_free_irq_vectors(ionic);
2776 goto try_fewer;
2777 }
2778
2779 ionic->nnqs_per_lif = nnqs_per_lif;
2780 ionic->neqs_per_lif = neqs;
2781 ionic->ntxqs_per_lif = nxqs;
2782 ionic->nrxqs_per_lif = nxqs;
2783 ionic->nintrs = nintrs;
2784
2785 ionic_debugfs_add_sizes(ionic);
2786
2787 return 0;
2788
2789try_fewer:
2790 if (nnqs_per_lif > 1) {
2791 nnqs_per_lif >>= 1;
2792 goto try_again;
2793 }
2794 if (neqs > 1) {
2795 neqs >>= 1;
2796 goto try_again;
2797 }
2798 if (nxqs > 1) {
2799 nxqs >>= 1;
2800 goto try_again;
2801 }
2802 dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
2803 return -ENOSPC;
2804}