]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/pensando/ionic/ionic_lif.c
net: ocelot: support multiple bridges
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / pensando / ionic / ionic_lif.c
CommitLineData
1a58e196
SN
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
cc69837f 4#include <linux/ethtool.h>
011c7289
AB
5#include <linux/printk.h>
6#include <linux/dynamic_debug.h>
1a58e196
SN
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
4b03b273 9#include <linux/if_vlan.h>
8c15440b 10#include <linux/rtnetlink.h>
1a58e196
SN
11#include <linux/interrupt.h>
12#include <linux/pci.h>
13#include <linux/cpumask.h>
14
15#include "ionic.h"
16#include "ionic_bus.h"
17#include "ionic_lif.h"
0f3154e6 18#include "ionic_txrx.h"
4d03e00a 19#include "ionic_ethtool.h"
1a58e196
SN
20#include "ionic_debugfs.h"
21
5b3f3f2a
SN
22/* queuetype support level */
23static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
24 [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
25 [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
26 [IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */
27 [IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support
28 * 1 = ... with Tx SG version 1
29 */
30};
31
2a654540
SN
32static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
33static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
34static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
8d61aad4 35static void ionic_link_status_check(struct ionic_lif *lif);
c672412f
SN
36static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
37static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
38static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
2a654540 39
f053e1f8
SN
40static void ionic_txrx_deinit(struct ionic_lif *lif);
41static int ionic_txrx_init(struct ionic_lif *lif);
49d3b493
SN
42static int ionic_start_queues(struct ionic_lif *lif);
43static void ionic_stop_queues(struct ionic_lif *lif);
5b3f3f2a 44static void ionic_lif_queue_identify(struct ionic_lif *lif);
49d3b493 45
04a83459
SN
46static void ionic_dim_work(struct work_struct *work)
47{
48 struct dim *dim = container_of(work, struct dim, work);
49 struct dim_cq_moder cur_moder;
50 struct ionic_qcq *qcq;
51 u32 new_coal;
52
53 cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
54 qcq = container_of(dim, struct ionic_qcq, dim);
55 new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
56 qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
57 dim->state = DIM_START_MEASURE;
58}
59
2a654540
SN
60static void ionic_lif_deferred_work(struct work_struct *work)
61{
62 struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
63 struct ionic_deferred *def = &lif->deferred;
64 struct ionic_deferred_work *w = NULL;
65
52733cff
SN
66 do {
67 spin_lock_bh(&def->lock);
68 if (!list_empty(&def->list)) {
69 w = list_first_entry(&def->list,
70 struct ionic_deferred_work, list);
71 list_del(&w->list);
72 }
73 spin_unlock_bh(&def->lock);
74
75 if (!w)
76 break;
2a654540 77
2a654540
SN
78 switch (w->type) {
79 case IONIC_DW_TYPE_RX_MODE:
80 ionic_lif_rx_mode(lif, w->rx_mode);
81 break;
82 case IONIC_DW_TYPE_RX_ADDR_ADD:
83 ionic_lif_addr_add(lif, w->addr);
84 break;
85 case IONIC_DW_TYPE_RX_ADDR_DEL:
86 ionic_lif_addr_del(lif, w->addr);
87 break;
8d61aad4
SN
88 case IONIC_DW_TYPE_LINK_STATUS:
89 ionic_link_status_check(lif);
90 break;
c672412f
SN
91 case IONIC_DW_TYPE_LIF_RESET:
92 if (w->fw_status)
93 ionic_lif_handle_fw_up(lif);
94 else
95 ionic_lif_handle_fw_down(lif);
96 break;
2a654540
SN
97 default:
98 break;
99 }
100 kfree(w);
52733cff
SN
101 w = NULL;
102 } while (true);
2a654540
SN
103}
104
c672412f
SN
105void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
106 struct ionic_deferred_work *work)
2a654540
SN
107{
108 spin_lock_bh(&def->lock);
109 list_add_tail(&work->list, &def->list);
110 spin_unlock_bh(&def->lock);
111 schedule_work(&def->work);
112}
113
8d61aad4
SN
114static void ionic_link_status_check(struct ionic_lif *lif)
115{
116 struct net_device *netdev = lif->netdev;
117 u16 link_status;
118 bool link_up;
119
0925e9db 120 if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
49d3b493
SN
121 return;
122
8d61aad4
SN
123 link_status = le16_to_cpu(lif->info->status.link_status);
124 link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
125
8d61aad4 126 if (link_up) {
8f56bc4d
SN
127 if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
128 mutex_lock(&lif->queue_lock);
129 ionic_start_queues(lif);
130 mutex_unlock(&lif->queue_lock);
131 }
132
aa47b540
SN
133 if (!netif_carrier_ok(netdev)) {
134 u32 link_speed;
8d61aad4 135
aa47b540
SN
136 ionic_port_identify(lif->ionic);
137 link_speed = le32_to_cpu(lif->info->status.link_speed);
138 netdev_info(netdev, "Link up - %d Gbps\n",
139 link_speed / 1000);
0f3154e6
SN
140 netif_carrier_on(netdev);
141 }
8d61aad4 142 } else {
aa47b540
SN
143 if (netif_carrier_ok(netdev)) {
144 netdev_info(netdev, "Link down\n");
145 netif_carrier_off(netdev);
146 }
8d61aad4 147
0925e9db
SN
148 if (lif->netdev->flags & IFF_UP && netif_running(lif->netdev)) {
149 mutex_lock(&lif->queue_lock);
49d3b493 150 ionic_stop_queues(lif);
0925e9db
SN
151 mutex_unlock(&lif->queue_lock);
152 }
8d61aad4
SN
153 }
154
c6d3d73a 155 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
8d61aad4
SN
156}
157
1800eee1 158void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
8d61aad4
SN
159{
160 struct ionic_deferred_work *work;
161
162 /* we only need one request outstanding at a time */
c6d3d73a 163 if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
8d61aad4
SN
164 return;
165
1800eee1 166 if (!can_sleep) {
8d61aad4 167 work = kzalloc(sizeof(*work), GFP_ATOMIC);
2c580d77
SN
168 if (!work) {
169 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
8d61aad4 170 return;
2c580d77 171 }
8d61aad4
SN
172
173 work->type = IONIC_DW_TYPE_LINK_STATUS;
174 ionic_lif_deferred_enqueue(&lif->deferred, work);
175 } else {
176 ionic_link_status_check(lif);
177 }
178}
179
1d062b7b
SN
180static irqreturn_t ionic_isr(int irq, void *data)
181{
182 struct napi_struct *napi = data;
183
184 napi_schedule_irqoff(napi);
185
186 return IRQ_HANDLED;
187}
188
189static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
190{
191 struct ionic_intr_info *intr = &qcq->intr;
192 struct device *dev = lif->ionic->dev;
193 struct ionic_queue *q = &qcq->q;
194 const char *name;
195
196 if (lif->registered)
197 name = lif->netdev->name;
198 else
199 name = dev_name(dev);
200
201 snprintf(intr->name, sizeof(intr->name),
202 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
203
204 return devm_request_irq(dev, intr->vector, ionic_isr,
205 0, intr->name, &qcq->napi);
206}
207
208static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
209{
210 struct ionic *ionic = lif->ionic;
211 int index;
212
213 index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
214 if (index == ionic->nintrs) {
215 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
216 __func__, index, ionic->nintrs);
217 return -ENOSPC;
218 }
219
220 set_bit(index, ionic->intrs);
221 ionic_intr_init(&ionic->idev, intr, index);
222
223 return 0;
224}
225
36ac2c50 226static void ionic_intr_free(struct ionic *ionic, int index)
1d062b7b 227{
c06107ca 228 if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
36ac2c50 229 clear_bit(index, ionic->intrs);
1d062b7b
SN
230}
231
0f3154e6
SN
232static int ionic_qcq_enable(struct ionic_qcq *qcq)
233{
234 struct ionic_queue *q = &qcq->q;
235 struct ionic_lif *lif = q->lif;
236 struct ionic_dev *idev;
237 struct device *dev;
238
239 struct ionic_admin_ctx ctx = {
240 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
241 .cmd.q_control = {
242 .opcode = IONIC_CMD_Q_CONTROL,
243 .lif_index = cpu_to_le16(lif->index),
244 .type = q->type,
245 .index = cpu_to_le32(q->index),
246 .oper = IONIC_Q_ENABLE,
247 },
248 };
249
250 idev = &lif->ionic->idev;
251 dev = lif->ionic->dev;
252
253 dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
254 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
255
256 if (qcq->flags & IONIC_QCQ_F_INTR) {
257 irq_set_affinity_hint(qcq->intr.vector,
258 &qcq->intr.affinity_mask);
259 napi_enable(&qcq->napi);
260 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
261 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
262 IONIC_INTR_MASK_CLEAR);
263 }
264
265 return ionic_adminq_post_wait(lif, &ctx);
266}
267
ba6ab8ac 268static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
0f3154e6 269{
7c737fc4
SN
270 struct ionic_queue *q;
271 struct ionic_lif *lif;
ba6ab8ac 272 int err = 0;
0f3154e6
SN
273
274 struct ionic_admin_ctx ctx = {
275 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
276 .cmd.q_control = {
277 .opcode = IONIC_CMD_Q_CONTROL,
0f3154e6
SN
278 .oper = IONIC_Q_DISABLE,
279 },
280 };
281
7c737fc4
SN
282 if (!qcq)
283 return -ENXIO;
0f3154e6 284
7c737fc4
SN
285 q = &qcq->q;
286 lif = q->lif;
0f3154e6
SN
287
288 if (qcq->flags & IONIC_QCQ_F_INTR) {
7c737fc4
SN
289 struct ionic_dev *idev = &lif->ionic->idev;
290
04a83459 291 cancel_work_sync(&qcq->dim.work);
0f3154e6
SN
292 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
293 IONIC_INTR_MASK_SET);
294 synchronize_irq(qcq->intr.vector);
295 irq_set_affinity_hint(qcq->intr.vector, NULL);
296 napi_disable(&qcq->napi);
297 }
298
ba6ab8ac
SN
299 if (send_to_hw) {
300 ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
301 ctx.cmd.q_control.type = q->type;
302 ctx.cmd.q_control.index = cpu_to_le32(q->index);
303 dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
304 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
7c737fc4 305
ba6ab8ac
SN
306 err = ionic_adminq_post_wait(lif, &ctx);
307 }
308
309 return err;
0f3154e6
SN
310}
311
1d062b7b
SN
312static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
313{
314 struct ionic_dev *idev = &lif->ionic->idev;
1d062b7b
SN
315
316 if (!qcq)
317 return;
318
1d062b7b
SN
319 if (!(qcq->flags & IONIC_QCQ_F_INITED))
320 return;
321
322 if (qcq->flags & IONIC_QCQ_F_INTR) {
323 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
324 IONIC_INTR_MASK_SET);
1d062b7b
SN
325 netif_napi_del(&qcq->napi);
326 }
327
328 qcq->flags &= ~IONIC_QCQ_F_INITED;
329}
330
101b40a0
SN
331static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
332{
333 if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
334 return;
335
336 irq_set_affinity_hint(qcq->intr.vector, NULL);
337 devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
338 qcq->intr.vector = 0;
339 ionic_intr_free(lif->ionic, qcq->intr.index);
340 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
341}
342
1d062b7b
SN
343static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
344{
345 struct device *dev = lif->ionic->dev;
346
347 if (!qcq)
348 return;
349
2a8c2c1a
SN
350 ionic_debugfs_del_qcq(qcq);
351
ea5a8b09
SN
352 if (qcq->q_base) {
353 dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
354 qcq->q_base = NULL;
355 qcq->q_base_pa = 0;
356 }
357
358 if (qcq->cq_base) {
359 dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
360 qcq->cq_base = NULL;
361 qcq->cq_base_pa = 0;
362 }
363
364 if (qcq->sg_base) {
365 dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
366 qcq->sg_base = NULL;
367 qcq->sg_base_pa = 0;
368 }
1d062b7b 369
101b40a0 370 ionic_qcq_intr_free(lif, qcq);
1d062b7b 371
a34e25ab
SN
372 if (qcq->cq.info) {
373 devm_kfree(dev, qcq->cq.info);
374 qcq->cq.info = NULL;
375 }
376 if (qcq->q.info) {
377 devm_kfree(dev, qcq->q.info);
378 qcq->q.info = NULL;
379 }
1d062b7b
SN
380}
381
382static void ionic_qcqs_free(struct ionic_lif *lif)
383{
0f3154e6 384 struct device *dev = lif->ionic->dev;
0f3154e6 385
77ceb68e
SN
386 if (lif->notifyqcq) {
387 ionic_qcq_free(lif, lif->notifyqcq);
101b40a0 388 devm_kfree(dev, lif->notifyqcq);
77ceb68e
SN
389 lif->notifyqcq = NULL;
390 }
391
1d062b7b
SN
392 if (lif->adminqcq) {
393 ionic_qcq_free(lif, lif->adminqcq);
101b40a0 394 devm_kfree(dev, lif->adminqcq);
1d062b7b
SN
395 lif->adminqcq = NULL;
396 }
0f3154e6 397
a4674f34 398 if (lif->rxqcqs) {
34dec947
SN
399 devm_kfree(dev, lif->rxqstats);
400 lif->rxqstats = NULL;
a4674f34
SN
401 devm_kfree(dev, lif->rxqcqs);
402 lif->rxqcqs = NULL;
403 }
0f3154e6 404
a4674f34 405 if (lif->txqcqs) {
34dec947
SN
406 devm_kfree(dev, lif->txqstats);
407 lif->txqstats = NULL;
a4674f34
SN
408 devm_kfree(dev, lif->txqcqs);
409 lif->txqcqs = NULL;
410 }
1d062b7b
SN
411}
412
77ceb68e
SN
413static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
414 struct ionic_qcq *n_qcq)
415{
416 if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
36ac2c50 417 ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
77ceb68e
SN
418 n_qcq->flags &= ~IONIC_QCQ_F_INTR;
419 }
420
421 n_qcq->intr.vector = src_qcq->intr.vector;
422 n_qcq->intr.index = src_qcq->intr.index;
423}
424
101b40a0
SN
425static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
426{
427 int err;
428
429 if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
430 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
431 return 0;
432 }
433
434 err = ionic_intr_alloc(lif, &qcq->intr);
435 if (err) {
436 netdev_warn(lif->netdev, "no intr for %s: %d\n",
437 qcq->q.name, err);
438 goto err_out;
439 }
440
441 err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
442 if (err < 0) {
443 netdev_warn(lif->netdev, "no vector for %s: %d\n",
444 qcq->q.name, err);
445 goto err_out_free_intr;
446 }
447 qcq->intr.vector = err;
448 ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
449 IONIC_INTR_MASK_SET);
450
451 err = ionic_request_irq(lif, qcq);
452 if (err) {
453 netdev_warn(lif->netdev, "irq request failed %d\n", err);
454 goto err_out_free_intr;
455 }
456
457 /* try to get the irq on the local numa node first */
458 qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
459 dev_to_node(lif->ionic->dev));
460 if (qcq->intr.cpu != -1)
461 cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
462
463 netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
464 return 0;
465
466err_out_free_intr:
467 ionic_intr_free(lif->ionic, qcq->intr.index);
468err_out:
469 return err;
470}
471
1d062b7b
SN
472static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
473 unsigned int index,
474 const char *name, unsigned int flags,
475 unsigned int num_descs, unsigned int desc_size,
476 unsigned int cq_desc_size,
477 unsigned int sg_desc_size,
478 unsigned int pid, struct ionic_qcq **qcq)
479{
480 struct ionic_dev *idev = &lif->ionic->idev;
1d062b7b
SN
481 struct device *dev = lif->ionic->dev;
482 void *q_base, *cq_base, *sg_base;
483 dma_addr_t cq_base_pa = 0;
484 dma_addr_t sg_base_pa = 0;
485 dma_addr_t q_base_pa = 0;
486 struct ionic_qcq *new;
487 int err;
488
489 *qcq = NULL;
490
1d062b7b
SN
491 new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
492 if (!new) {
493 netdev_err(lif->netdev, "Cannot allocate queue structure\n");
494 err = -ENOMEM;
495 goto err_out;
496 }
497
f37bc346 498 new->q.dev = dev;
1d062b7b
SN
499 new->flags = flags;
500
e7164200 501 new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
1d062b7b
SN
502 GFP_KERNEL);
503 if (!new->q.info) {
504 netdev_err(lif->netdev, "Cannot allocate queue info\n");
505 err = -ENOMEM;
ea5a8b09 506 goto err_out_free_qcq;
1d062b7b
SN
507 }
508
509 new->q.type = type;
f37bc346 510 new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
1d062b7b
SN
511
512 err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
513 desc_size, sg_desc_size, pid);
514 if (err) {
515 netdev_err(lif->netdev, "Cannot initialize queue\n");
ea5a8b09 516 goto err_out_free_q_info;
1d062b7b
SN
517 }
518
101b40a0
SN
519 err = ionic_alloc_qcq_interrupt(lif, new);
520 if (err)
521 goto err_out;
1d062b7b 522
e7164200 523 new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
1d062b7b
SN
524 GFP_KERNEL);
525 if (!new->cq.info) {
526 netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
527 err = -ENOMEM;
0b064100 528 goto err_out_free_irq;
1d062b7b
SN
529 }
530
531 err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
532 if (err) {
533 netdev_err(lif->netdev, "Cannot initialize completion queue\n");
ea5a8b09 534 goto err_out_free_cq_info;
1d062b7b
SN
535 }
536
9576a36c
SN
537 if (flags & IONIC_QCQ_F_NOTIFYQ) {
538 int q_size, cq_size;
539
540 /* q & cq need to be contiguous in case of notifyq */
541 q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
542 cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
543
544 new->q_size = PAGE_SIZE + q_size + cq_size;
545 new->q_base = dma_alloc_coherent(dev, new->q_size,
546 &new->q_base_pa, GFP_KERNEL);
547 if (!new->q_base) {
548 netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
549 err = -ENOMEM;
550 goto err_out_free_cq_info;
551 }
552 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
553 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
554 ionic_q_map(&new->q, q_base, q_base_pa);
555
556 cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
557 cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
558 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
559 ionic_cq_bind(&new->cq, &new->q);
560 } else {
561 new->q_size = PAGE_SIZE + (num_descs * desc_size);
562 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
563 GFP_KERNEL);
564 if (!new->q_base) {
565 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
566 err = -ENOMEM;
567 goto err_out_free_cq_info;
568 }
569 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
570 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
571 ionic_q_map(&new->q, q_base, q_base_pa);
572
573 new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
574 new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
575 GFP_KERNEL);
576 if (!new->cq_base) {
577 netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
578 err = -ENOMEM;
579 goto err_out_free_q;
580 }
581 cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
582 cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
583 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
584 ionic_cq_bind(&new->cq, &new->q);
ea5a8b09 585 }
1d062b7b
SN
586
587 if (flags & IONIC_QCQ_F_SG) {
ea5a8b09
SN
588 new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
589 new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
590 GFP_KERNEL);
591 if (!new->sg_base) {
592 netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
593 err = -ENOMEM;
594 goto err_out_free_cq;
595 }
596 sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
597 sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
1d062b7b
SN
598 ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
599 }
600
04a83459
SN
601 INIT_WORK(&new->dim.work, ionic_dim_work);
602 new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
603
1d062b7b
SN
604 *qcq = new;
605
606 return 0;
607
ea5a8b09
SN
608err_out_free_cq:
609 dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
610err_out_free_q:
611 dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
612err_out_free_cq_info:
613 devm_kfree(dev, new->cq.info);
0b064100 614err_out_free_irq:
101b40a0 615 if (flags & IONIC_QCQ_F_INTR) {
0b064100 616 devm_free_irq(dev, new->intr.vector, &new->napi);
36ac2c50 617 ionic_intr_free(lif->ionic, new->intr.index);
101b40a0 618 }
ea5a8b09
SN
619err_out_free_q_info:
620 devm_kfree(dev, new->q.info);
621err_out_free_qcq:
622 devm_kfree(dev, new);
1d062b7b
SN
623err_out:
624 dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
625 return err;
626}
627
628static int ionic_qcqs_alloc(struct ionic_lif *lif)
629{
0f3154e6 630 struct device *dev = lif->ionic->dev;
1d062b7b
SN
631 unsigned int flags;
632 int err;
633
634 flags = IONIC_QCQ_F_INTR;
635 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
636 IONIC_ADMINQ_LENGTH,
637 sizeof(struct ionic_admin_cmd),
638 sizeof(struct ionic_admin_comp),
639 0, lif->kern_pid, &lif->adminqcq);
640 if (err)
641 return err;
2a8c2c1a 642 ionic_debugfs_add_qcq(lif, lif->adminqcq);
1d062b7b 643
77ceb68e
SN
644 if (lif->ionic->nnqs_per_lif) {
645 flags = IONIC_QCQ_F_NOTIFYQ;
646 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
647 flags, IONIC_NOTIFYQ_LENGTH,
648 sizeof(struct ionic_notifyq_cmd),
649 sizeof(union ionic_notifyq_comp),
650 0, lif->kern_pid, &lif->notifyqcq);
651 if (err)
34dec947 652 goto err_out;
2a8c2c1a 653 ionic_debugfs_add_qcq(lif, lif->notifyqcq);
77ceb68e
SN
654
655 /* Let the notifyq ride on the adminq interrupt */
656 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
657 }
658
0f3154e6 659 err = -ENOMEM;
ee205626 660 lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
34dec947 661 sizeof(struct ionic_qcq *), GFP_KERNEL);
0f3154e6 662 if (!lif->txqcqs)
34dec947 663 goto err_out;
ee205626 664 lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
34dec947 665 sizeof(struct ionic_qcq *), GFP_KERNEL);
0f3154e6 666 if (!lif->rxqcqs)
34dec947 667 goto err_out;
0f3154e6 668
34dec947
SN
669 lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
670 sizeof(struct ionic_tx_stats), GFP_KERNEL);
671 if (!lif->txqstats)
672 goto err_out;
673 lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
674 sizeof(struct ionic_rx_stats), GFP_KERNEL);
675 if (!lif->rxqstats)
676 goto err_out;
77ceb68e 677
34dec947 678 return 0;
77ceb68e 679
34dec947
SN
680err_out:
681 ionic_qcqs_free(lif);
77ceb68e
SN
682 return err;
683}
684
f053e1f8
SN
685static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
686{
687 qcq->q.tail_idx = 0;
688 qcq->q.head_idx = 0;
689 qcq->cq.tail_idx = 0;
690 qcq->cq.done_color = 1;
691 memset(qcq->q_base, 0, qcq->q_size);
692 memset(qcq->cq_base, 0, qcq->cq_size);
693 memset(qcq->sg_base, 0, qcq->sg_size);
694}
695
0f3154e6
SN
696static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
697{
698 struct device *dev = lif->ionic->dev;
699 struct ionic_queue *q = &qcq->q;
700 struct ionic_cq *cq = &qcq->cq;
701 struct ionic_admin_ctx ctx = {
702 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
703 .cmd.q_init = {
704 .opcode = IONIC_CMD_Q_INIT,
705 .lif_index = cpu_to_le16(lif->index),
706 .type = q->type,
5b3f3f2a 707 .ver = lif->qtype_info[q->type].version,
0f3154e6
SN
708 .index = cpu_to_le32(q->index),
709 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
710 IONIC_QINIT_F_SG),
0f3154e6
SN
711 .pid = cpu_to_le16(q->pid),
712 .ring_size = ilog2(q->num_descs),
713 .ring_base = cpu_to_le64(q->base_pa),
714 .cq_ring_base = cpu_to_le64(cq->base_pa),
715 .sg_ring_base = cpu_to_le64(q->sg_base_pa),
716 },
717 };
fe8c30b5 718 unsigned int intr_index;
0f3154e6
SN
719 int err;
720
101b40a0 721 if (qcq->flags & IONIC_QCQ_F_INTR)
fe8c30b5
SN
722 intr_index = qcq->intr.index;
723 else
34dec947 724 intr_index = lif->rxqcqs[q->index]->intr.index;
fe8c30b5
SN
725 ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
726
0f3154e6
SN
727 dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
728 dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
729 dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
730 dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
5b3f3f2a
SN
731 dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
732 dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
fe8c30b5 733 dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
0f3154e6 734
f053e1f8 735 ionic_qcq_sanitize(qcq);
49d3b493 736
0f3154e6
SN
737 err = ionic_adminq_post_wait(lif, &ctx);
738 if (err)
739 return err;
740
741 q->hw_type = ctx.comp.q_init.hw_type;
742 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
743 q->dbval = IONIC_DBELL_QID(q->hw_index);
744
745 dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
746 dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
747
fe8c30b5
SN
748 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
749 netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
750 NAPI_POLL_WEIGHT);
751
0f3154e6
SN
752 qcq->flags |= IONIC_QCQ_F_INITED;
753
0f3154e6
SN
754 return 0;
755}
756
757static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
758{
759 struct device *dev = lif->ionic->dev;
760 struct ionic_queue *q = &qcq->q;
761 struct ionic_cq *cq = &qcq->cq;
762 struct ionic_admin_ctx ctx = {
763 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
764 .cmd.q_init = {
765 .opcode = IONIC_CMD_Q_INIT,
766 .lif_index = cpu_to_le16(lif->index),
767 .type = q->type,
5b3f3f2a 768 .ver = lif->qtype_info[q->type].version,
0f3154e6 769 .index = cpu_to_le32(q->index),
08f2e4b2
SN
770 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
771 IONIC_QINIT_F_SG),
0f3154e6
SN
772 .intr_index = cpu_to_le16(cq->bound_intr->index),
773 .pid = cpu_to_le16(q->pid),
774 .ring_size = ilog2(q->num_descs),
775 .ring_base = cpu_to_le64(q->base_pa),
776 .cq_ring_base = cpu_to_le64(cq->base_pa),
08f2e4b2 777 .sg_ring_base = cpu_to_le64(q->sg_base_pa),
0f3154e6
SN
778 },
779 };
780 int err;
781
782 dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
783 dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
784 dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
785 dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
5b3f3f2a
SN
786 dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
787 dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
fe8c30b5 788 dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
0f3154e6 789
f053e1f8 790 ionic_qcq_sanitize(qcq);
49d3b493 791
0f3154e6
SN
792 err = ionic_adminq_post_wait(lif, &ctx);
793 if (err)
794 return err;
795
796 q->hw_type = ctx.comp.q_init.hw_type;
797 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
798 q->dbval = IONIC_DBELL_QID(q->hw_index);
799
800 dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
801 dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
802
fe8c30b5
SN
803 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
804 netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
805 NAPI_POLL_WEIGHT);
806 else
807 netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
808 NAPI_POLL_WEIGHT);
0f3154e6 809
0f3154e6
SN
810 qcq->flags |= IONIC_QCQ_F_INITED;
811
0f3154e6
SN
812 return 0;
813}
814
77ceb68e
SN
815static bool ionic_notifyq_service(struct ionic_cq *cq,
816 struct ionic_cq_info *cq_info)
817{
818 union ionic_notifyq_comp *comp = cq_info->cq_desc;
c672412f 819 struct ionic_deferred_work *work;
77ceb68e
SN
820 struct net_device *netdev;
821 struct ionic_queue *q;
822 struct ionic_lif *lif;
823 u64 eid;
824
825 q = cq->bound_q;
826 lif = q->info[0].cb_arg;
827 netdev = lif->netdev;
828 eid = le64_to_cpu(comp->event.eid);
829
830 /* Have we run out of new completions to process? */
3fbc9bb6 831 if ((s64)(eid - lif->last_eid) <= 0)
77ceb68e
SN
832 return false;
833
834 lif->last_eid = eid;
835
836 dev_dbg(lif->ionic->dev, "notifyq event:\n");
837 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
838 comp, sizeof(*comp), true);
839
840 switch (le16_to_cpu(comp->event.ecode)) {
841 case IONIC_EVENT_LINK_CHANGE:
1800eee1 842 ionic_link_status_check_request(lif, false);
77ceb68e
SN
843 break;
844 case IONIC_EVENT_RESET:
c672412f
SN
845 work = kzalloc(sizeof(*work), GFP_ATOMIC);
846 if (!work) {
c0c682ee 847 netdev_err(lif->netdev, "Reset event dropped\n");
c672412f
SN
848 } else {
849 work->type = IONIC_DW_TYPE_LIF_RESET;
850 ionic_lif_deferred_enqueue(&lif->deferred, work);
851 }
77ceb68e
SN
852 break;
853 default:
5b3f3f2a 854 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
77ceb68e
SN
855 comp->event.ecode, eid);
856 break;
857 }
858
859 return true;
860}
861
1d062b7b
SN
862static bool ionic_adminq_service(struct ionic_cq *cq,
863 struct ionic_cq_info *cq_info)
864{
865 struct ionic_admin_comp *comp = cq_info->cq_desc;
866
867 if (!color_match(comp->color, cq->done_color))
868 return false;
869
870 ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
871
872 return true;
873}
874
875static int ionic_adminq_napi(struct napi_struct *napi, int budget)
876{
b4280948 877 struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
77ceb68e 878 struct ionic_lif *lif = napi_to_cq(napi)->lif;
b4280948
SN
879 struct ionic_dev *idev = &lif->ionic->idev;
880 unsigned int flags = 0;
77ceb68e
SN
881 int n_work = 0;
882 int a_work = 0;
b4280948
SN
883 int work_done;
884
885 if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
886 n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
887 ionic_notifyq_service, NULL, NULL);
77ceb68e 888
b4280948
SN
889 if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
890 a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
891 ionic_adminq_service, NULL, NULL);
892
893 work_done = max(n_work, a_work);
894 if (work_done < budget && napi_complete_done(napi, work_done)) {
895 flags |= IONIC_INTR_CRED_UNMASK;
04a83459 896 lif->adminqcq->cq.bound_intr->rearm_count++;
b4280948 897 }
77ceb68e 898
b4280948
SN
899 if (work_done || flags) {
900 flags |= IONIC_INTR_CRED_RESET_COALESCE;
901 ionic_intr_credits(idev->intr_ctrl,
902 intr->index,
903 n_work + a_work, flags);
904 }
905
906 return work_done;
1d062b7b
SN
907}
908
f64e0c56
SN
909void ionic_get_stats64(struct net_device *netdev,
910 struct rtnl_link_stats64 *ns)
8d61aad4
SN
911{
912 struct ionic_lif *lif = netdev_priv(netdev);
913 struct ionic_lif_stats *ls;
914
915 memset(ns, 0, sizeof(*ns));
916 ls = &lif->info->stats;
917
918 ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
919 le64_to_cpu(ls->rx_mcast_packets) +
920 le64_to_cpu(ls->rx_bcast_packets);
921
922 ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
923 le64_to_cpu(ls->tx_mcast_packets) +
924 le64_to_cpu(ls->tx_bcast_packets);
925
926 ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
927 le64_to_cpu(ls->rx_mcast_bytes) +
928 le64_to_cpu(ls->rx_bcast_bytes);
929
930 ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
931 le64_to_cpu(ls->tx_mcast_bytes) +
932 le64_to_cpu(ls->tx_bcast_bytes);
933
934 ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
935 le64_to_cpu(ls->rx_mcast_drop_packets) +
936 le64_to_cpu(ls->rx_bcast_drop_packets);
937
938 ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
939 le64_to_cpu(ls->tx_mcast_drop_packets) +
940 le64_to_cpu(ls->tx_bcast_drop_packets);
941
942 ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
943
944 ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
945
946 ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
947 le64_to_cpu(ls->rx_queue_disabled) +
948 le64_to_cpu(ls->rx_desc_fetch_error) +
949 le64_to_cpu(ls->rx_desc_data_error);
950
951 ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
952 le64_to_cpu(ls->tx_queue_disabled) +
953 le64_to_cpu(ls->tx_desc_fetch_error) +
954 le64_to_cpu(ls->tx_desc_data_error);
955
956 ns->rx_errors = ns->rx_over_errors +
957 ns->rx_missed_errors;
958
959 ns->tx_errors = ns->tx_aborted_errors;
960}
961
2a654540
SN
/* Add a unicast/multicast MAC filter on the device and remember it locally.
 * Returns 0 if the filter already exists or was added, else a negative errno.
 * May sleep (AdminQ post-and-wait); callers in atomic context must defer.
 */
static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	/* don't bother if we already have it */
	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	spin_unlock_bh(&lif->rx_filters.lock);
	if (f)
		return 0;

	netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);

	memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
	err = ionic_adminq_post_wait(lif, &ctx);
	/* -EEXIST means the device already has this filter; still save it
	 * locally so our cache agrees with the device.
	 */
	if (err && err != -EEXIST)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}
991
/* Remove a MAC filter from the local cache and from the device.
 * Returns -ENOENT if we have no such filter cached; otherwise the
 * AdminQ result.  May sleep (AdminQ post-and-wait).
 */
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	/* grab the filter_id before freeing the cache entry, then drop the
	 * lock so the (sleeping) AdminQ call happens outside the spinlock
	 */
	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	/* NOTE(review): -EEXIST is tolerated here just as in the add path —
	 * presumably how the device reports an already-resolved filter;
	 * confirm against the firmware error mapping.
	 */
	if (err && err != -EEXIST)
		return err;

	return 0;
}
1024
1800eee1
SAS
/* Common entry for adding or deleting a MAC filter.
 * @add:       true to add the filter, false to delete it
 * @can_sleep: false means we're in atomic context and the actual device
 *             operation must be deferred to the lif's work handler
 *
 * Bookkeeping of the ucast/mcast filter counts is done here, up front,
 * so that an overflow (-ENOSPC) can be reported synchronously to the
 * stack even when the device operation itself is deferred.
 */
static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
			  bool can_sleep)
{
	struct ionic_deferred_work *work;
	unsigned int nmfilters;
	unsigned int nufilters;

	if (add) {
		/* Do we have space for this filter?  We test the counters
		 * here before checking the need for deferral so that we
		 * can return an overflow error to the stack.
		 */
		nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
		nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

		if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
			lif->nmcast++;
		else if (!is_multicast_ether_addr(addr) &&
			 lif->nucast < nufilters)
			lif->nucast++;
		else
			return -ENOSPC;
	} else {
		if (is_multicast_ether_addr(addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(addr) && lif->nucast)
			lif->nucast--;
	}

	if (!can_sleep) {
		/* atomic context: queue the add/del for the deferred worker */
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return -ENOMEM;
		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
				   IONIC_DW_TYPE_RX_ADDR_DEL;
		memcpy(work->addr, addr, ETH_ALEN);
		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		if (add)
			return ionic_lif_addr_add(lif, addr);
		else
			return ionic_lif_addr_del(lif, addr);
	}

	return 0;
}
1075
1076static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
1077{
7c8d008c 1078 return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP);
1800eee1
SAS
1079}
1080
1081static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
1082{
7c8d008c 1083 return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP);
2a654540
SN
1084}
1085
1086static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
1087{
7c8d008c 1088 return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP);
1800eee1
SAS
1089}
1090
1091static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
1092{
7c8d008c 1093 return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP);
2a654540
SN
1094}
1095
1096static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
1097{
1098 struct ionic_admin_ctx ctx = {
1099 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1100 .cmd.rx_mode_set = {
1101 .opcode = IONIC_CMD_RX_MODE_SET,
1102 .lif_index = cpu_to_le16(lif->index),
1103 .rx_mode = cpu_to_le16(rx_mode),
1104 },
1105 };
1106 char buf[128];
1107 int err;
1108 int i;
1109#define REMAIN(__x) (sizeof(buf) - (__x))
1110
38e0f746
TI
1111 i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
1112 lif->rx_mode, rx_mode);
2a654540 1113 if (rx_mode & IONIC_RX_MODE_F_UNICAST)
38e0f746 1114 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
2a654540 1115 if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
38e0f746 1116 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
2a654540 1117 if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
38e0f746 1118 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
2a654540 1119 if (rx_mode & IONIC_RX_MODE_F_PROMISC)
38e0f746 1120 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
2a654540 1121 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
38e0f746 1122 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
2a654540
SN
1123 netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
1124
1125 err = ionic_adminq_post_wait(lif, &ctx);
1126 if (err)
1127 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
1128 rx_mode, err);
1129 else
1130 lif->rx_mode = rx_mode;
1131}
1132
/* Recompute the wanted rx_mode from netdev flags and the current filter
 * population, sync the unicast/multicast address lists, and push the new
 * mode to the device (deferred when !can_sleep).
 */
static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;
	unsigned int nfilters;
	unsigned int rx_mode;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync unicast addresses
	 * next check to see if we're in an overflow state
	 *    if so, we track that we overflowed and enable NIC PROMISC
	 *    else if the overflow is set and not needed
	 *       we remove our overflow flag and check the netdev flags
	 *       to see if we can disable NIC PROMISC
	 */
	if (can_sleep)
		__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	else
		__dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	/* "+ 1" accounts for our own station address filter */
	if (netdev_uc_count(netdev) + 1 > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		lif->uc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		if (!(netdev->flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
	}

	/* same for multicast */
	if (can_sleep)
		__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
	else
		__dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
	nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
	if (netdev_mc_count(netdev) > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->mc_overflow = true;
	} else if (lif->mc_overflow) {
		lif->mc_overflow = false;
		if (!(netdev->flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	if (lif->rx_mode != rx_mode) {
		if (!can_sleep) {
			/* atomic context: hand the mode change to the
			 * deferred worker; on allocation failure the change
			 * is dropped (best effort, logged)
			 */
			work = kzalloc(sizeof(*work), GFP_ATOMIC);
			if (!work) {
				netdev_err(lif->netdev, "rxmode change dropped\n");
				return;
			}
			work->type = IONIC_DW_TYPE_RX_MODE;
			work->rx_mode = rx_mode;
			netdev_dbg(lif->netdev, "deferred: rx_mode\n");
			ionic_lif_deferred_enqueue(&lif->deferred, work);
		} else {
			ionic_lif_rx_mode(lif, rx_mode);
		}
	}
}
1198
1199static void ionic_ndo_set_rx_mode(struct net_device *netdev)
1200{
7c8d008c 1201 ionic_set_rx_mode(netdev, CAN_NOT_SLEEP);
2a654540
SN
1202}
1203
beead698
SN
1204static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1205{
1206 u64 wanted = 0;
1207
1208 if (features & NETIF_F_HW_VLAN_CTAG_TX)
1209 wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1210 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1211 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1212 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1213 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1214 if (features & NETIF_F_RXHASH)
1215 wanted |= IONIC_ETH_HW_RX_HASH;
1216 if (features & NETIF_F_RXCSUM)
1217 wanted |= IONIC_ETH_HW_RX_CSUM;
1218 if (features & NETIF_F_SG)
1219 wanted |= IONIC_ETH_HW_TX_SG;
1220 if (features & NETIF_F_HW_CSUM)
1221 wanted |= IONIC_ETH_HW_TX_CSUM;
1222 if (features & NETIF_F_TSO)
1223 wanted |= IONIC_ETH_HW_TSO;
1224 if (features & NETIF_F_TSO6)
1225 wanted |= IONIC_ETH_HW_TSO_IPV6;
1226 if (features & NETIF_F_TSO_ECN)
1227 wanted |= IONIC_ETH_HW_TSO_ECN;
1228 if (features & NETIF_F_GSO_GRE)
1229 wanted |= IONIC_ETH_HW_TSO_GRE;
1230 if (features & NETIF_F_GSO_GRE_CSUM)
1231 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1232 if (features & NETIF_F_GSO_IPXIP4)
1233 wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1234 if (features & NETIF_F_GSO_IPXIP6)
1235 wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1236 if (features & NETIF_F_GSO_UDP_TUNNEL)
1237 wanted |= IONIC_ETH_HW_TSO_UDP;
1238 if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1239 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1240
1241 return cpu_to_le64(wanted);
1242}
1243
/* Ask the device to enable the requested feature set and record what it
 * actually granted in lif->hw_features.  The device may grant fewer
 * features than requested; callers should consult lif->hw_features.
 * Returns 0 or the AdminQ error.
 */
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* granted = requested AND-ed with what the device completed with;
	 * the bitwise AND is endian-agnostic so it's done before conversion
	 */
	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	/* a change in RX_HASH support requires reprogramming the RSS config */
	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	/* log each granted feature at debug level */
	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");

	return 0;
}
1313
/* Negotiate the default feature set with the device at lif init time and
 * populate netdev->features / hw_features / hw_enc_features with what was
 * actually granted.  Returns 0 or the AdminQ error from the negotiation.
 */
static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_RXHASH |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	/* checksum/TSO features are also valid for encapsulated traffic */
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}
1382
1383static int ionic_set_features(struct net_device *netdev,
1384 netdev_features_t features)
1385{
1386 struct ionic_lif *lif = netdev_priv(netdev);
1387 int err;
1388
1389 netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1390 __func__, (u64)lif->netdev->features, (u64)features);
1391
1392 err = ionic_set_nic_features(lif, features);
1393
1394 return err;
1395}
1396
/* .ndo_set_mac_address: delete the filter for the old station address (if
 * any), commit the new address to the netdev, then add a filter for it.
 * Returns 0 on no-op or success, else a negative errno.
 */
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	/* nothing to do if the address isn't changing */
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	/* a zero dev_addr means no station filter was ever installed */
	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}
1422
f053e1f8
SN
/* Quiesce and tear down the queues ahead of a reconfiguration.
 * NOTE: queue_lock is taken here and deliberately left held; the matching
 * unlock is in ionic_start_queues_reconfig(), so the two must be paired.
 */
static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	mutex_lock(&lif->queue_lock);
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}
1431
/* Bring the queues back up after a reconfiguration and release the
 * queue_lock acquired by ionic_stop_queues_reconfig().
 */
static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken.  There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	mutex_unlock(&lif->queue_lock);
	/* second arg is presumably can_sleep — TODO confirm against the
	 * ionic_link_status_check_request() prototype
	 */
	ionic_link_status_check_request(lif, true);
	netif_device_attach(lif->netdev);

	return err;
}
1451
beead698
SN
/* .ndo_change_mtu: tell the device the new MTU first; if the interface is
 * running, bounce the queues around the netdev->mtu update so the rings
 * are rebuilt for the new size.
 */
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* if we're not running, nothing more to do */
	if (!netif_running(netdev)) {
		netdev->mtu = new_mtu;
		return 0;
	}

	ionic_stop_queues_reconfig(lif);
	netdev->mtu = new_mtu;
	return ionic_start_queues_reconfig(lif);
}
1480
8c15440b
SN
/* Workqueue handler for Tx timeout recovery: rebuild the queues via the
 * stop/start reconfig pair.  Scheduled from ionic_tx_timeout().
 */
static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);

	netdev_info(lif->netdev, "Tx Timeout recovery\n");

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	ionic_stop_queues_reconfig(lif);
	ionic_start_queues_reconfig(lif);
}
1496
/* .ndo_tx_timeout: runs in a context where we can't do the (sleeping)
 * queue rebuild directly, so punt to tx_timeout_work.  @txqueue is unused;
 * recovery rebuilds all queues, not just the stalled one.
 */
static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	schedule_work(&lif->tx_timeout_work);
}
1503
/* .ndo_vlan_rx_add_vid: install a VLAN match filter on the device and
 * save it in the local filter cache.  May sleep (AdminQ post-and-wait).
 */
static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}
1526
/* .ndo_vlan_rx_kill_vid: drop the cached VLAN filter and delete it on the
 * device.  Returns -ENOENT if we never had the filter, else the AdminQ
 * result.
 */
static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
		   vid, f->filter_id);

	/* capture the id and free the cache entry before dropping the lock;
	 * the sleeping AdminQ call happens outside the spinlock
	 */
	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}
1557
aa319881
SN
/* Program the device's RSS configuration.
 * @types: RSS hash type bitmask (stored in lif->rss_types if RX_HASH is on)
 * @key:   optional new hash key (NULL keeps the current lif->rss_hash_key)
 * @indir: optional new indirection table (NULL keeps the current table)
 * The indirection table itself is DMA-shared; only its address is sent.
 * Returns the AdminQ result.
 */
int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	/* only advertise hash types when the device granted RX_HASH */
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}
1590
1591static int ionic_lif_rss_init(struct ionic_lif *lif)
1592{
aa319881
SN
1593 unsigned int tbl_sz;
1594 unsigned int i;
1595
aa319881
SN
1596 lif->rss_types = IONIC_RSS_TYPE_IPV4 |
1597 IONIC_RSS_TYPE_IPV4_TCP |
1598 IONIC_RSS_TYPE_IPV4_UDP |
1599 IONIC_RSS_TYPE_IPV6 |
1600 IONIC_RSS_TYPE_IPV6_TCP |
1601 IONIC_RSS_TYPE_IPV6_UDP;
1602
1603 /* Fill indirection table with 'default' values */
1604 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1605 for (i = 0; i < tbl_sz; i++)
1606 lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);
1607
ffac2027 1608 return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
aa319881
SN
1609}
1610
ffac2027 1611static void ionic_lif_rss_deinit(struct ionic_lif *lif)
aa319881 1612{
ffac2027
SN
1613 int tbl_sz;
1614
1615 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
1616 memset(lif->rss_ind_tbl, 0, tbl_sz);
1617 memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);
1618
1619 ionic_lif_rss_config(lif, 0x0, NULL, NULL);
aa319881
SN
1620}
1621
e7e8e087
SN
/* Ask the device to quiesce the lif (IONIC_LIF_QUIESCE state).  Failure
 * is only logged — by this point in teardown there is nothing better to do.
 */
static void ionic_lif_quiesce(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_STATE,
			.state = IONIC_LIF_QUIESCE,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_err(lif->netdev, "lif quiesce failed %d\n", err);
}
1639
0f3154e6
SN
/* Disable all Tx then Rx qcqs and quiesce the lif.
 * The (err != -ETIMEDOUT) argument tells ionic_qcq_disable whether it is
 * still worth talking to the hardware: once one disable times out, the
 * remaining ones skip the device and just do local cleanup.
 */
static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;
	int err = 0;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
	}

	ionic_lif_quiesce(lif);
}
1657
/* De-init all qcqs and drop any buffered Tx/Rx work.  The loops stop at
 * the first NULL slot, so a partially-initialized set is handled safely.
 * rx_mode is zeroed so the next txrx_init reprograms it from scratch.
 */
static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			ionic_tx_flush(&lif->txqcqs[i]->cq);
			ionic_tx_empty(&lif->txqcqs[i]->q);
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
			ionic_rx_empty(&lif->rxqcqs[i]->q);
		}
	}
	lif->rx_mode = 0;
}
1678
/* Free all allocated Tx/Rx qcqs and their containers.  Note the bounds
 * here are the per-lif maxima (ntxqs_per_lif/nrxqs_per_lif), not nxqs,
 * so qcqs left over from a larger previous configuration are freed too.
 */
static void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->txqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
			lif->txqcqs[i] = NULL;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->rxqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
			lif->rxqcqs[i] = NULL;
		}
	}
}
1699
/* Allocate the Tx and Rx qcqs for the lif and set up their interrupt
 * coalescing.  Rx queues always get their own interrupt; Tx queues get
 * one only in split-interrupt mode, otherwise they share the Rx
 * interrupt via ionic_link_qcq_interrupts().  On any failure everything
 * allocated so far is freed.  Returns 0 or a negative errno.
 */
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int sg_desc_sz;
	unsigned int flags;
	unsigned int i;
	int err = 0;

	/* pick the Tx SG descriptor size the device's TXQ version supports */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		flags |= IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      lif->ntxq_descs,
				      sizeof(struct ionic_txq_desc),
				      sizeof(struct ionic_txq_comp),
				      sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i]);
		if (err)
			goto err_out;

		if (flags & IONIC_QCQ_F_INTR) {
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->txqcqs[i]->intr.index,
					     lif->tx_coalesce_hw);
			if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
				lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
		}

		ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
	}

	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      lif->nrxq_descs,
				      sizeof(struct ionic_rxq_desc),
				      sizeof(struct ionic_rxq_comp),
				      sizeof(struct ionic_rxq_sg_desc),
				      lif->kern_pid, &lif->rxqcqs[i]);
		if (err)
			goto err_out;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i]->intr.index,
				     lif->rx_coalesce_hw);
		if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
			lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;

		/* shared-interrupt mode: Tx completions ride the Rx vector */
		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
			ionic_link_qcq_interrupts(lif->rxqcqs[i],
						  lif->txqcqs[i]);

		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}
1769
/* Initialize the already-allocated Tx/Rx qcqs on the device, then set up
 * RSS (if enabled) and the rx_mode.  On failure, every pair initialized
 * so far is de-initialized in reverse.  Returns 0 or a negative errno.
 */
static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
		if (err) {
			/* unwind the txq of this pair before bailing */
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_set_rx_mode(lif->netdev, CAN_SLEEP);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
	}

	return err;
}
1802
/* Enable each Rx/Tx queue pair: fill the Rx ring, enable Rx, then enable
 * Tx.  On failure, previously enabled pairs are disabled in reverse; the
 * (derr != -ETIMEDOUT) argument lets the disables skip dead hardware once
 * one of them has timed out.  Returns 0 or the first enable error.
 */
static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int derr = 0;
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
			dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
			err = -ENXIO;
			goto err_out;
		}

		ionic_rx_fill(&lif->rxqcqs[i]->q);
		err = ionic_qcq_enable(lif->rxqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_qcq_enable(lif->txqcqs[i]);
		if (err) {
			derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
			goto err_out;
		}
	}

	return 0;

err_out:
	while (i--) {
		derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
		derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
	}

	return err;
}
1837
49d3b493
SN
1838static int ionic_start_queues(struct ionic_lif *lif)
1839{
1840 int err;
1841
1842 if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
1843 return 0;
1844
1845 err = ionic_txrx_enable(lif);
1846 if (err) {
1847 clear_bit(IONIC_LIF_F_UP, lif->state);
1848 return err;
1849 }
1850 netif_tx_wake_all_queues(lif->netdev);
1851
1852 return 0;
1853}
1854
/* .ndo_open: allocate and initialize the queues, publish the real queue
 * counts to the stack, and start the queues if link is already up (else
 * they start from the link-check path when carrier appears).
 */
static int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_out;

	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_out:
	ionic_txrx_free(lif);
	return err;
}
1891
/* Stop Tx at the stack and disable the hardware queues.  Counterpart of
 * ionic_start_queues(); the IONIC_LIF_F_UP bit makes it idempotent.
 */
static void ionic_stop_queues(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
		return;

	netif_tx_disable(lif->netdev);
	ionic_txrx_disable(lif);
}
beead698 1900
/* .ndo_stop: tear down the queues.  Skipped entirely during FW reset —
 * the reset path owns the teardown then.
 */
static int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return 0;

	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);

	return 0;
}
1914
fbb39807
SN
/* .ndo_get_vf_config: report the cached per-VF settings.  Reads are done
 * under the vf_op_lock read side; -EBUSY if the device is detached,
 * -EINVAL for an out-of-range vf index.
 */
static int ionic_get_vf_config(struct net_device *netdev,
			       int vf, struct ifla_vf_info *ivf)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ivf->vf           = vf;
		ivf->vlan         = le16_to_cpu(ionic->vfs[vf].vlanid);
		ivf->qos	  = 0;
		ivf->spoofchk     = ionic->vfs[vf].spoofchk;
		ivf->linkstate    = ionic->vfs[vf].linkstate;
		ivf->max_tx_rate  = le32_to_cpu(ionic->vfs[vf].maxrate);
		ivf->trusted      = ionic->vfs[vf].trusted;
		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}
1943
/* .ndo_get_vf_stats: translate the VF's device stats block into
 * ifla_vf_stats.  Same locking and error conventions as
 * ionic_get_vf_config().
 */
static int ionic_get_vf_stats(struct net_device *netdev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	struct ionic_lif_stats *vs;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		memset(vf_stats, 0, sizeof(*vf_stats));
		vs = &ionic->vfs[vf].stats;

		/* rx/tx packet and byte counts report unicast only;
		 * mcast/bcast appear via the dedicated fields below
		 */
		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
				       le64_to_cpu(vs->rx_mcast_drop_packets) +
				       le64_to_cpu(vs->rx_bcast_drop_packets);
		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
				       le64_to_cpu(vs->tx_mcast_drop_packets) +
				       le64_to_cpu(vs->tx_bcast_drop_packets);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}
1980
1981static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1982{
1983 struct ionic_lif *lif = netdev_priv(netdev);
1984 struct ionic *ionic = lif->ionic;
1985 int ret;
1986
1987 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
1988 return -EINVAL;
1989
a836c352
SN
1990 if (!netif_device_present(netdev))
1991 return -EBUSY;
1992
e396ce5f 1993 down_write(&ionic->vf_op_lock);
fbb39807
SN
1994
1995 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1996 ret = -EINVAL;
1997 } else {
1998 ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
1999 if (!ret)
2000 ether_addr_copy(ionic->vfs[vf].macaddr, mac);
2001 }
2002
e396ce5f 2003 up_write(&ionic->vf_op_lock);
fbb39807
SN
2004 return ret;
2005}
2006
2007static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
2008 u8 qos, __be16 proto)
2009{
2010 struct ionic_lif *lif = netdev_priv(netdev);
2011 struct ionic *ionic = lif->ionic;
2012 int ret;
2013
2014 /* until someday when we support qos */
2015 if (qos)
2016 return -EINVAL;
2017
2018 if (vlan > 4095)
2019 return -EINVAL;
2020
2021 if (proto != htons(ETH_P_8021Q))
2022 return -EPROTONOSUPPORT;
2023
a836c352
SN
2024 if (!netif_device_present(netdev))
2025 return -EBUSY;
2026
e396ce5f 2027 down_write(&ionic->vf_op_lock);
fbb39807
SN
2028
2029 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2030 ret = -EINVAL;
2031 } else {
2032 ret = ionic_set_vf_config(ionic, vf,
2033 IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
2034 if (!ret)
d701ec32 2035 ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
fbb39807
SN
2036 }
2037
e396ce5f 2038 up_write(&ionic->vf_op_lock);
fbb39807
SN
2039 return ret;
2040}
2041
2042static int ionic_set_vf_rate(struct net_device *netdev, int vf,
2043 int tx_min, int tx_max)
2044{
2045 struct ionic_lif *lif = netdev_priv(netdev);
2046 struct ionic *ionic = lif->ionic;
2047 int ret;
2048
2049 /* setting the min just seems silly */
2050 if (tx_min)
2051 return -EINVAL;
2052
a836c352
SN
2053 if (!netif_device_present(netdev))
2054 return -EBUSY;
2055
fbb39807
SN
2056 down_write(&ionic->vf_op_lock);
2057
2058 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2059 ret = -EINVAL;
2060 } else {
2061 ret = ionic_set_vf_config(ionic, vf,
2062 IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
2063 if (!ret)
d701ec32 2064 lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
fbb39807
SN
2065 }
2066
2067 up_write(&ionic->vf_op_lock);
2068 return ret;
2069}
2070
2071static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
2072{
2073 struct ionic_lif *lif = netdev_priv(netdev);
2074 struct ionic *ionic = lif->ionic;
2075 u8 data = set; /* convert to u8 for config */
2076 int ret;
2077
a836c352
SN
2078 if (!netif_device_present(netdev))
2079 return -EBUSY;
2080
fbb39807
SN
2081 down_write(&ionic->vf_op_lock);
2082
2083 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2084 ret = -EINVAL;
2085 } else {
2086 ret = ionic_set_vf_config(ionic, vf,
2087 IONIC_VF_ATTR_SPOOFCHK, &data);
2088 if (!ret)
2089 ionic->vfs[vf].spoofchk = data;
2090 }
2091
2092 up_write(&ionic->vf_op_lock);
2093 return ret;
2094}
2095
2096static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
2097{
2098 struct ionic_lif *lif = netdev_priv(netdev);
2099 struct ionic *ionic = lif->ionic;
2100 u8 data = set; /* convert to u8 for config */
2101 int ret;
2102
a836c352
SN
2103 if (!netif_device_present(netdev))
2104 return -EBUSY;
2105
fbb39807
SN
2106 down_write(&ionic->vf_op_lock);
2107
2108 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2109 ret = -EINVAL;
2110 } else {
2111 ret = ionic_set_vf_config(ionic, vf,
2112 IONIC_VF_ATTR_TRUST, &data);
2113 if (!ret)
2114 ionic->vfs[vf].trusted = data;
2115 }
2116
2117 up_write(&ionic->vf_op_lock);
2118 return ret;
2119}
2120
2121static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
2122{
2123 struct ionic_lif *lif = netdev_priv(netdev);
2124 struct ionic *ionic = lif->ionic;
2125 u8 data;
2126 int ret;
2127
2128 switch (set) {
2129 case IFLA_VF_LINK_STATE_ENABLE:
2130 data = IONIC_VF_LINK_STATUS_UP;
2131 break;
2132 case IFLA_VF_LINK_STATE_DISABLE:
2133 data = IONIC_VF_LINK_STATUS_DOWN;
2134 break;
2135 case IFLA_VF_LINK_STATE_AUTO:
2136 data = IONIC_VF_LINK_STATUS_AUTO;
2137 break;
2138 default:
2139 return -EINVAL;
2140 }
2141
a836c352
SN
2142 if (!netif_device_present(netdev))
2143 return -EBUSY;
2144
fbb39807
SN
2145 down_write(&ionic->vf_op_lock);
2146
2147 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2148 ret = -EINVAL;
2149 } else {
2150 ret = ionic_set_vf_config(ionic, vf,
2151 IONIC_VF_ATTR_LINKSTATE, &data);
2152 if (!ret)
2153 ionic->vfs[vf].linkstate = set;
2154 }
2155
2156 up_write(&ionic->vf_op_lock);
2157 return ret;
2158}
2159
beead698
SN
/* netdev callbacks for the LIF's net_device; the ndo_set_vf_* entries
 * are only meaningful when this (PF) interface has SR-IOV VFs enabled.
 */
static const struct net_device_ops ionic_netdev_ops = {
	.ndo_open               = ionic_open,
	.ndo_stop               = ionic_stop,
	.ndo_start_xmit		= ionic_start_xmit,
	.ndo_get_stats64	= ionic_get_stats64,
	.ndo_set_rx_mode	= ionic_ndo_set_rx_mode,
	.ndo_set_features	= ionic_set_features,
	.ndo_set_mac_address	= ionic_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout         = ionic_tx_timeout,
	.ndo_change_mtu         = ionic_change_mtu,
	.ndo_vlan_rx_add_vid    = ionic_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid   = ionic_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= ionic_set_vf_vlan,
	.ndo_set_vf_trust	= ionic_set_vf_trust,
	.ndo_set_vf_mac		= ionic_set_vf_mac,
	.ndo_set_vf_rate	= ionic_set_vf_rate,
	.ndo_set_vf_spoofchk	= ionic_set_vf_spoofchk,
	.ndo_get_vf_config	= ionic_get_vf_config,
	.ndo_set_vf_link_state	= ionic_set_vf_link_state,
	.ndo_get_vf_stats       = ionic_get_vf_stats,
};
2182
a34e25ab
SN
/* Swap the descriptor ring storage of two qcqs in place.
 *
 * Used by ionic_reconfigure_queues() to move freshly allocated rings
 * into the live qcq shells.  Deliberately does NOT touch napi, flags,
 * or interrupt assignments, so the live context remains valid.
 */
static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
{
	/* only swapping the queues, not the napi, flags, or other stuff */
	swap(a->q.num_descs, b->q.num_descs);
	swap(a->q.base, b->q.base);
	swap(a->q.base_pa, b->q.base_pa);
	swap(a->q.info, b->q.info);
	swap(a->q_base, b->q_base);
	swap(a->q_base_pa, b->q_base_pa);
	swap(a->q_size, b->q_size);

	swap(a->q.sg_base, b->q.sg_base);
	swap(a->q.sg_base_pa, b->q.sg_base_pa);
	swap(a->sg_base, b->sg_base);
	swap(a->sg_base_pa, b->sg_base_pa);
	swap(a->sg_size, b->sg_size);

	swap(a->cq.num_descs, b->cq.num_descs);
	swap(a->cq.base, b->cq.base);
	swap(a->cq.base_pa, b->cq.base_pa);
	swap(a->cq.info, b->cq.info);
	swap(a->cq_base, b->cq_base);
	swap(a->cq_base_pa, b->cq_base_pa);
	swap(a->cq_size, b->cq_size);

	/* re-create the debugfs entry so it reflects the new storage */
	ionic_debugfs_del_qcq(a);
	ionic_debugfs_add_qcq(a->q.lif, a);
}
2211
/* Rebuild the tx/rx queues with new counts/sizes/interrupt layout.
 *
 * Strategy: allocate the replacement rings first while traffic is
 * still running, then stop the queues for the shortest possible
 * window, swap ring storage into the live qcq shells, rework the
 * interrupt layout if requested, and restart.  On any failure the
 * labels fall through so the old rings are restored/freed and the
 * queues are restarted.
 */
int ionic_reconfigure_queues(struct ionic_lif *lif,
			     struct ionic_queue_params *qparam)
{
	struct ionic_qcq **tx_qcqs = NULL;
	struct ionic_qcq **rx_qcqs = NULL;
	unsigned int sg_desc_sz;
	unsigned int flags;
	int err = -ENOMEM;
	unsigned int i;

	/* allocate temporary qcq arrays to hold new queue structs */
	if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
		tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!tx_qcqs)
			goto err_out;
	}
	if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) {
		rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
				       sizeof(struct ionic_qcq *), GFP_KERNEL);
		if (!rx_qcqs)
			goto err_out;
	}

	/* allocate new desc_info and rings, but leave the interrupt setup
	 * until later so as to not mess with the still-running queues
	 */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	if (tx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
					      qparam->ntxq_descs,
					      sizeof(struct ionic_txq_desc),
					      sizeof(struct ionic_txq_comp),
					      sg_desc_sz,
					      lif->kern_pid, &tx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	if (rx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
			err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
					      qparam->nrxq_descs,
					      sizeof(struct ionic_rxq_desc),
					      sizeof(struct ionic_rxq_comp),
					      sizeof(struct ionic_rxq_sg_desc),
					      lif->kern_pid, &rx_qcqs[i]);
			if (err)
				goto err_out;
		}
	}

	/* stop and clean the queues */
	ionic_stop_queues_reconfig(lif);

	if (qparam->nxqs != lif->nxqs) {
		err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
		if (err)
			goto err_out_reinit_unlock;
		err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
		if (err) {
			/* roll back the tx count so tx and rx stay in step */
			netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
			goto err_out_reinit_unlock;
		}
	}

	/* swap new desc_info and rings, keeping existing interrupt config */
	if (tx_qcqs) {
		lif->ntxq_descs = qparam->ntxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
	}

	if (rx_qcqs) {
		lif->nrxq_descs = qparam->nrxq_descs;
		for (i = 0; i < qparam->nxqs; i++)
			ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
	}

	/* if we need to change the interrupt layout, this is the time */
	if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
	    qparam->nxqs != lif->nxqs) {
		if (qparam->intr_split) {
			set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
		} else {
			clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
			/* shared interrupt: tx adopts the rx coalesce values */
			lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
			lif->tx_coalesce_hw = lif->rx_coalesce_hw;
		}

		/* clear existing interrupt assignments */
		for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
			ionic_qcq_intr_free(lif, lif->txqcqs[i]);
			ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
		}

		/* re-assign the interrupts */
		for (i = 0; i < qparam->nxqs; i++) {
			lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
			/* NOTE(review): err from ionic_alloc_qcq_interrupt()
			 * is assigned but not checked here (and below for tx);
			 * a failure is only noticed if it persists into the
			 * final start_queues_reconfig - confirm intended.
			 */
			err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->rxqcqs[i]->intr.index,
					     lif->rx_coalesce_hw);

			if (qparam->intr_split) {
				lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
				err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
				ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
						     lif->txqcqs[i]->intr.index,
						     lif->tx_coalesce_hw);
				if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
					lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
			} else {
				lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
				ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
			}
		}
	}

	/* now we can rework the debugfs mappings */
	if (tx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->txqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
		}
	}

	if (rx_qcqs) {
		for (i = 0; i < qparam->nxqs; i++) {
			ionic_debugfs_del_qcq(lif->rxqcqs[i]);
			ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
		}
	}

	swap(lif->nxqs, qparam->nxqs);

err_out_reinit_unlock:
	/* re-init the queues, but don't lose an error code */
	if (err)
		ionic_start_queues_reconfig(lif);
	else
		err = ionic_start_queues_reconfig(lif);

err_out:
	/* free old allocs without cleaning intr */
	for (i = 0; i < qparam->nxqs; i++) {
		if (tx_qcqs && tx_qcqs[i]) {
			tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, tx_qcqs[i]);
			devm_kfree(lif->ionic->dev, tx_qcqs[i]);
			tx_qcqs[i] = NULL;
		}
		if (rx_qcqs && rx_qcqs[i]) {
			rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
			ionic_qcq_free(lif, rx_qcqs[i]);
			devm_kfree(lif->ionic->dev, rx_qcqs[i]);
			rx_qcqs[i] = NULL;
		}
	}

	/* free q array */
	if (rx_qcqs) {
		devm_kfree(lif->ionic->dev, rx_qcqs);
		rx_qcqs = NULL;
	}
	if (tx_qcqs) {
		devm_kfree(lif->ionic->dev, tx_qcqs);
		tx_qcqs = NULL;
	}

	/* clean the unused dma and info allocations when new set is smaller
	 * than the full array, but leave the qcq shells in place
	 */
	for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
		lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->txqcqs[i]);

		lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
		ionic_qcq_free(lif, lif->rxqcqs[i]);
	}

	return err;
}
2405
/* Allocate the LIF and its netdev plus the DMA/queue resources that
 * survive FW resets: identity info, lif info block, control and txrx
 * qcq arrays, and the RSS indirection table.
 *
 * Return: 0 on success, negative errno on failure; all partial
 * allocations are unwound through the goto chain.
 */
int ionic_lif_alloc(struct ionic *ionic)
{
	struct device *dev = ionic->dev;
	union ionic_lif_identity *lid;
	struct net_device *netdev;
	struct ionic_lif *lif;
	int tbl_sz;
	int err;

	lid = kzalloc(sizeof(*lid), GFP_KERNEL);
	if (!lid)
		return -ENOMEM;

	/* note: rx queue count uses ntxqs_per_lif too - tx and rx are
	 * allocated in equal numbers here
	 */
	netdev = alloc_etherdev_mqs(sizeof(*lif),
				    ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
	if (!netdev) {
		dev_err(dev, "Cannot allocate netdev, aborting\n");
		err = -ENOMEM;
		goto err_out_free_lid;
	}

	SET_NETDEV_DEV(netdev, dev);

	lif = netdev_priv(netdev);
	lif->netdev = netdev;
	ionic->lif = lif;
	netdev->netdev_ops = &ionic_netdev_ops;
	ionic_ethtool_set_ops(netdev);

	netdev->watchdog_timeo = 2 * HZ;
	/* carrier stays off until the link check turns it on */
	netif_carrier_off(netdev);

	lif->identity = lid;
	lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
	err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
	if (err) {
		dev_err(ionic->dev, "Cannot identify type %d: %d\n",
			lif->lif_type, err);
		goto err_out_free_netdev;
	}
	/* MTU limits come from the device's identity data */
	lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
				     le32_to_cpu(lif->identity->eth.min_frame_size));
	lif->netdev->max_mtu =
		le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;

	lif->neqs = ionic->neqs_per_lif;
	lif->nxqs = ionic->ntxqs_per_lif;

	lif->ionic = ionic;
	lif->index = 0;
	lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
	lif->nrxq_descs = IONIC_DEF_TXRX_DESC;

	/* Convert the default coalesce value to actual hw resolution */
	lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
	lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
						    lif->rx_coalesce_usecs);
	lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
	lif->tx_coalesce_hw = lif->rx_coalesce_hw;
	/* dynamic interrupt moderation enabled by default on both sides */
	set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
	set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);

	snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);

	spin_lock_init(&lif->adminq_lock);

	spin_lock_init(&lif->deferred.lock);
	INIT_LIST_HEAD(&lif->deferred.list);
	INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);

	/* allocate lif info */
	lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
	lif->info = dma_alloc_coherent(dev, lif->info_sz,
				       &lif->info_pa, GFP_KERNEL);
	if (!lif->info) {
		dev_err(dev, "Failed to allocate lif info, aborting\n");
		err = -ENOMEM;
		goto err_out_free_netdev;
	}

	ionic_debugfs_add_lif(lif);

	/* allocate control queues and txrx queue arrays */
	ionic_lif_queue_identify(lif);
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out_free_lif_info;

	/* allocate rss indirection table */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
	lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
					      &lif->rss_ind_tbl_pa,
					      GFP_KERNEL);

	if (!lif->rss_ind_tbl) {
		err = -ENOMEM;
		dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
		goto err_out_free_qcqs;
	}
	netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);

	return 0;

err_out_free_qcqs:
	ionic_qcqs_free(lif);
err_out_free_lif_info:
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;
err_out_free_netdev:
	/* frees the lif too, since it lives in the netdev priv area */
	free_netdev(lif->netdev);
	lif = NULL;
err_out_free_lid:
	kfree(lid);

	return err;
}
2524
/* Ask the device to reset this LIF; dev_cmd_lock serializes access to
 * the shared device command registers.
 */
static void ionic_lif_reset(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_reset(idev, lif->index);
	ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
}
2534
c672412f
SN
/* Tear the LIF down after the FW has gone away.
 *
 * Sets IONIC_LIF_F_FW_RESET (idempotent: a second call returns
 * immediately), detaches the netdev so userspace ops fail fast, then
 * frees everything that must be rebuilt by ionic_lif_handle_fw_up().
 */
static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;

	if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Down: Stopping LIFs\n");

	netif_device_detach(lif->netdev);

	if (test_bit(IONIC_LIF_F_UP, lif->state)) {
		dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
		/* queue_lock keeps this from racing a concurrent reconfig */
		mutex_lock(&lif->queue_lock);
		ionic_stop_queues(lif);
		mutex_unlock(&lif->queue_lock);
	}

	if (netif_running(lif->netdev)) {
		ionic_txrx_deinit(lif);
		ionic_txrx_free(lif);
	}
	ionic_lif_deinit(lif);
	ionic_reset(ionic);
	ionic_qcqs_free(lif);

	dev_info(ionic->dev, "FW Down: LIFs stopped\n");
}
2563
/* Rebuild the LIF after the FW has come back.
 *
 * Mirror of ionic_lif_handle_fw_down(): re-identify the device,
 * re-create queues/filters, and re-attach the netdev.  Only runs if
 * the FW_RESET flag is set; on failure the partial state is unwound
 * and an error is logged (no retry here).
 */
static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
{
	struct ionic *ionic = lif->ionic;
	int err;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return;

	dev_info(ionic->dev, "FW Up: restarting LIFs\n");

	ionic_init_devinfo(ionic);
	err = ionic_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_identify(ionic);
	if (err)
		goto err_out;
	err = ionic_port_init(ionic);
	if (err)
		goto err_out;
	err = ionic_qcqs_alloc(lif);
	if (err)
		goto err_out;

	err = ionic_lif_init(lif);
	if (err)
		goto err_qcqs_free;

	if (lif->registered)
		ionic_lif_set_netdev_info(lif);

	/* replay the rx filters that were saved across the reset */
	ionic_rx_filter_replay(lif);

	if (netif_running(lif->netdev)) {
		err = ionic_txrx_alloc(lif);
		if (err)
			goto err_lifs_deinit;

		err = ionic_txrx_init(lif);
		if (err)
			goto err_txrx_free;
	}

	clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
	ionic_link_status_check_request(lif, true);
	netif_device_attach(lif->netdev);
	dev_info(ionic->dev, "FW Up: LIFs restarted\n");

	return;

err_txrx_free:
	ionic_txrx_free(lif);
err_lifs_deinit:
	ionic_lif_deinit(lif);
err_qcqs_free:
	ionic_qcqs_free(lif);
err_out:
	dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
}
2623
/* Free everything allocated by ionic_lif_alloc(), in reverse order.
 *
 * A LIF reset is issued only when the FW is alive; during an FW reset
 * the device is gone and the command would be pointless.
 */
void ionic_lif_free(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;

	/* free rss indirection table */
	dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
			  lif->rss_ind_tbl_pa);
	lif->rss_ind_tbl = NULL;
	lif->rss_ind_tbl_pa = 0;

	/* free queues */
	ionic_qcqs_free(lif);
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		ionic_lif_reset(lif);

	/* free lif info */
	kfree(lif->identity);
	dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
	lif->info = NULL;
	lif->info_pa = 0;

	/* unmap doorbell page */
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	/* free netdev & lif (the lif lives in the netdev priv area) */
	ionic_debugfs_del_lif(lif);
	free_netdev(lif->netdev);
}
2655
/* Undo ionic_lif_init(); safe to call more than once (guarded by the
 * INITED bit).  During an FW reset the device-side state is already
 * gone, so only the local/software pieces are cleaned up.
 */
void ionic_lif_deinit(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
		return;

	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		cancel_work_sync(&lif->deferred.work);
		cancel_work_sync(&lif->tx_timeout_work);
		ionic_rx_filters_deinit(lif);
		if (lif->netdev->features & NETIF_F_RXHASH)
			ionic_lif_rss_deinit(lif);
	}

	napi_disable(&lif->adminqcq->napi);
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
	ionic_lif_qcq_deinit(lif, lif->adminqcq);

	mutex_destroy(&lif->queue_lock);
	ionic_lif_reset(lif);
}
2676
1d062b7b
SN
/* Initialize the admin queue via a device command (the adminq cannot
 * bootstrap itself), then hook up and enable its napi/interrupt so
 * subsequent driver commands can go through the adminq.
 */
static int ionic_lif_adminq_init(struct ionic_lif *lif)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_q_init_comp comp;
	struct ionic_dev *idev;
	struct ionic_qcq *qcq;
	struct ionic_queue *q;
	int err;

	idev = &lif->ionic->idev;
	qcq = lif->adminqcq;
	q = &qcq->q;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err) {
		netdev_err(lif->netdev, "adminq init failed %d\n", err);
		return err;
	}

	/* the completion tells us where the device placed the queue */
	q->hw_type = comp.hw_type;
	q->hw_index = le32_to_cpu(comp.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);

	netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
		       NAPI_POLL_WEIGHT);

	napi_enable(&qcq->napi);

	if (qcq->flags & IONIC_QCQ_F_INTR)
		ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
				IONIC_INTR_MASK_CLEAR);

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
2720
77ceb68e
SN
/* Initialize the notify queue through the (already running) adminq.
 *
 * The notifyq shares the adminq's interrupt (see intr_index below) and
 * carries asynchronous event notifications from the device.
 */
static int ionic_lif_notifyq_init(struct ionic_lif *lif)
{
	struct ionic_qcq *qcq = lif->notifyqcq;
	struct device *dev = lif->ionic->dev;
	struct ionic_queue *q = &qcq->q;
	int err;

	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.q_init = {
			.opcode = IONIC_CMD_Q_INIT,
			.lif_index = cpu_to_le16(lif->index),
			.type = q->type,
			.ver = lif->qtype_info[q->type].version,
			.index = cpu_to_le32(q->index),
			.flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
					     IONIC_QINIT_F_ENA),
			.intr_index = cpu_to_le16(lif->adminqcq->intr.index),
			.pid = cpu_to_le16(q->pid),
			.ring_size = ilog2(q->num_descs),
			.ring_base = cpu_to_le64(q->base_pa),
		}
	};

	dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
	dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
	dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
	dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* restart event id tracking from scratch for the new queue */
	lif->last_eid = 0;
	q->hw_type = ctx.comp.q_init.hw_type;
	q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
	q->dbval = IONIC_DBELL_QID(q->hw_index);

	dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
	dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);

	/* preset the callback info */
	q->info[0].cb_arg = lif;

	qcq->flags |= IONIC_QCQ_F_INITED;

	return 0;
}
2769
2a654540
SN
/* Fetch the device's station MAC address and reconcile it with the
 * netdev's address:
 *  - device has no MAC: keep whatever the netdev has, done;
 *  - netdev already has a (possibly user-set) MAC: just make sure it
 *    is in the device filter list (fw-upgrade restart case);
 *  - otherwise adopt the device MAC as the netdev address.
 * Finally add the netdev address as a filter.
 */
static int ionic_station_set(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_getattr = {
			.opcode = IONIC_CMD_LIF_GETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MAC,
		},
	};
	struct sockaddr addr;
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;
	netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
		   ctx.comp.lif_getattr.mac);
	if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
		return 0;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		/* If the netdev mac is non-zero and doesn't match the default
		 * device address, it was set by something earlier and we're
		 * likely here again after a fw-upgrade reset. We need to be
		 * sure the netdev mac is in our filter list.
		 */
		if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
				      netdev->dev_addr))
			ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
	} else {
		/* Update the netdev mac with the device's mac */
		memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
		/* NOTE(review): sa_family is set to AF_INET rather than the
		 * netdev's own address family; eth_prepare_mac_addr_change()
		 * does not appear to inspect it - confirm this is harmless.
		 */
		addr.sa_family = AF_INET;
		err = eth_prepare_mac_addr_change(netdev, &addr);
		if (err) {
			netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
				    addr.sa_data, err);
			/* deliberately not fatal: keep the existing address */
			return 0;
		}

		eth_commit_mac_addr_change(netdev, &addr);
	}

	netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
		   netdev->dev_addr);
	ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);

	return 0;
}
2821
/* Initialize the LIF on the device: LIF_INIT dev command, doorbell
 * page mapping, admin/notify queues, nic features, rx filters, and
 * the station MAC.  Counterpart of ionic_lif_deinit().
 *
 * Return: 0 on success, negative errno on failure; partially
 * initialized state is unwound through the goto chain.
 */
int ionic_lif_init(struct ionic_lif *lif)
{
	struct ionic_dev *idev = &lif->ionic->idev;
	struct device *dev = lif->ionic->dev;
	struct ionic_lif_init_comp comp;
	int dbpage_num;
	int err;

	mutex_lock(&lif->ionic->dev_cmd_lock);
	ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
	err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
	ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
	mutex_unlock(&lif->ionic->dev_cmd_lock);
	if (err)
		return err;

	lif->hw_index = le16_to_cpu(comp.hw_index);
	mutex_init(&lif->queue_lock);

	/* now that we have the hw_index we can figure out our doorbell page */
	lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
	if (!lif->dbid_count) {
		dev_err(dev, "No doorbell pages, aborting\n");
		return -EINVAL;
	}

	lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
	if (!lif->dbid_inuse) {
		dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
		return -ENOMEM;
	}

	/* first doorbell id reserved for kernel (dbid aka pid == zero) */
	set_bit(0, lif->dbid_inuse);
	lif->kern_pid = 0;

	dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
	lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
	if (!lif->kern_dbpage) {
		dev_err(dev, "Cannot map dbpage, aborting\n");
		err = -ENOMEM;
		goto err_out_free_dbid;
	}

	err = ionic_lif_adminq_init(lif);
	if (err)
		goto err_out_adminq_deinit;

	if (lif->ionic->nnqs_per_lif) {
		err = ionic_lif_notifyq_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_init_nic_features(lif);
	if (err)
		goto err_out_notifyq_deinit;

	/* after a FW reset the filters are replayed by the fw_up path,
	 * so only build them fresh on a normal init
	 */
	if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
		err = ionic_rx_filters_init(lif);
		if (err)
			goto err_out_notifyq_deinit;
	}

	err = ionic_station_set(lif);
	if (err)
		goto err_out_notifyq_deinit;

	lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;

	set_bit(IONIC_LIF_F_INITED, lif->state);

	INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);

	return 0;

err_out_notifyq_deinit:
	ionic_lif_qcq_deinit(lif, lif->notifyqcq);
err_out_adminq_deinit:
	ionic_lif_qcq_deinit(lif, lif->adminqcq);
	ionic_lif_reset(lif);
	ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
	lif->kern_dbpage = NULL;
err_out_free_dbid:
	kfree(lif->dbid_inuse);
	lif->dbid_inuse = NULL;

	return err;
}
2911
1a371ea1
SN
/* Intentionally empty: the netdev notifier needs no deferred work
 * today, but the work struct is still INIT_WORK'd in
 * ionic_lif_register() and cancel_work_sync'd on unregister.
 */
static void ionic_lif_notify_work(struct work_struct *ws)
{
}
2915
/* Push the current netdev name down to the device so FW-side tooling
 * can identify the interface.  Best-effort: the adminq status is
 * deliberately ignored.
 */
static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_NAME,
		},
	};

	strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
		sizeof(ctx.cmd.lif_setattr.name));

	ionic_adminq_post_wait(lif, &ctx);
}
2932
/* Map a netdev back to its ionic_lif, or NULL if the netdev does not
 * belong to this driver (recognized by its ndo_start_xmit op).
 */
static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
{
	if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
		return NULL;

	return netdev_priv(netdev);
}
2940
/* netdevice notifier callback: keep the device-side name in sync when
 * our own netdev is renamed; all other events (and other drivers'
 * netdevs) are ignored.
 */
static int ionic_lif_notify(struct notifier_block *nb,
			    unsigned long event, void *info)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(info);
	struct ionic *ionic = container_of(nb, struct ionic, nb);
	struct ionic_lif *lif = ionic_netdev_lif(ndev);

	if (!lif || lif->ionic != ionic)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_CHANGENAME:
		ionic_lif_set_netdev_info(lif);
		break;
	}

	return NOTIFY_DONE;
}
2959
/* Register the LIF's netdev and our netdevice notifier.
 *
 * A notifier registration failure is treated as non-fatal (the
 * notifier_call is simply cleared so unregister skips it).
 */
int ionic_lif_register(struct ionic_lif *lif)
{
	int err;

	INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);

	lif->ionic->nb.notifier_call = ionic_lif_notify;

	err = register_netdevice_notifier(&lif->ionic->nb);
	if (err)
		lif->ionic->nb.notifier_call = NULL;

	/* only register LIF0 for now */
	err = register_netdev(lif->netdev);
	if (err) {
		dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
		/* NOTE(review): the notifier registered above is left in
		 * place on this error path; ionic_lif_unregister() would
		 * clean it up, but confirm it is always called after a
		 * failed register.
		 */
		return err;
	}

	ionic_link_status_check_request(lif, true);
	lif->registered = true;
	ionic_lif_set_netdev_info(lif);

	return 0;
}
2985
30b87ab4 2986void ionic_lif_unregister(struct ionic_lif *lif)
beead698 2987{
30b87ab4
SN
2988 if (lif->ionic->nb.notifier_call) {
2989 unregister_netdevice_notifier(&lif->ionic->nb);
2990 cancel_work_sync(&lif->ionic->nb_work);
2991 lif->ionic->nb.notifier_call = NULL;
1a371ea1
SN
2992 }
2993
30b87ab4
SN
2994 if (lif->netdev->reg_state == NETREG_REGISTERED)
2995 unregister_netdev(lif->netdev);
2996 lif->registered = false;
beead698
SN
2997}
2998
5b3f3f2a
SN
/* ionic_lif_queue_identify - query firmware for per-queue-type details
 * (version, features, descriptor sizes, SG limits) and cache them in
 * lif->qtype_info[] for use when queues are created.
 *
 * Results are read back byte/word-wise from the dev cmd register window,
 * so the reads must stay under dev_cmd_lock along with the command itself.
 * On unsupported types or older firmware this fails softly, leaving the
 * zeroed qtype_info as "base version" defaults.
 */
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	union ionic_q_identity __iomem *q_ident;
	struct ionic *ionic = lif->ionic;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	/* the identify results land in the dev cmd data registers */
	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* filter out the ones we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		/* zeroed info == base version if the query fails below */
		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			/* MMIO readback must happen before dropping the lock,
			 * while the result is still in the data registers
			 */
			qti->version = readb(&q_ident->version);
			qti->supported = readb(&q_ident->supported);
			qti->features = readq(&q_ident->features);
			qti->desc_sz = readw(&q_ident->desc_sz);
			qti->comp_sz = readw(&q_ident->comp_sz);
			qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		/* -EINVAL: this qtype unknown to FW, try the next one;
		 * -EIO: FW predates the queue-identify command entirely,
		 *       so no point asking about further qtypes
		 */
		if (err == -EINVAL) {
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);
	}
}
3072
1a58e196
SN
3073int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
3074 union ionic_lif_identity *lid)
3075{
3076 struct ionic_dev *idev = &ionic->idev;
3077 size_t sz;
3078 int err;
3079
3080 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
3081
3082 mutex_lock(&ionic->dev_cmd_lock);
3083 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
3084 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3085 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
3086 mutex_unlock(&ionic->dev_cmd_lock);
3087 if (err)
3088 return (err);
3089
3090 dev_dbg(ionic->dev, "capabilities 0x%llx\n",
3091 le64_to_cpu(lid->capabilities));
3092
3093 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
3094 le32_to_cpu(lid->eth.max_ucast_filters));
3095 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
3096 le32_to_cpu(lid->eth.max_mcast_filters));
3097 dev_dbg(ionic->dev, "eth.features 0x%llx\n",
3098 le64_to_cpu(lid->eth.config.features));
3099 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
3100 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
3101 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
3102 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
3103 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
3104 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
3105 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
3106 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
3107 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
3108 dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
3109 dev_dbg(ionic->dev, "eth.config.mtu %d\n",
3110 le32_to_cpu(lid->eth.config.mtu));
3111
3112 return 0;
3113}
3114
30b87ab4 3115int ionic_lif_size(struct ionic *ionic)
1a58e196
SN
3116{
3117 struct ionic_identity *ident = &ionic->ident;
3118 unsigned int nintrs, dev_nintrs;
3119 union ionic_lif_config *lc;
3120 unsigned int ntxqs_per_lif;
3121 unsigned int nrxqs_per_lif;
3122 unsigned int neqs_per_lif;
3123 unsigned int nnqs_per_lif;
3124 unsigned int nxqs, neqs;
3125 unsigned int min_intrs;
3126 int err;
3127
3128 lc = &ident->lif.eth.config;
3129 dev_nintrs = le32_to_cpu(ident->dev.nintrs);
3130 neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
3131 nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
3132 ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
3133 nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);
3134
3135 nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
3136 nxqs = min(nxqs, num_online_cpus());
3137 neqs = min(neqs_per_lif, num_online_cpus());
3138
3139try_again:
3140 /* interrupt usage:
3141 * 1 for master lif adminq/notifyq
3142 * 1 for each CPU for master lif TxRx queue pairs
3143 * whatever's left is for RDMA queues
3144 */
3145 nintrs = 1 + nxqs + neqs;
3146 min_intrs = 2; /* adminq + 1 TxRx queue pair */
3147
3148 if (nintrs > dev_nintrs)
3149 goto try_fewer;
3150
3151 err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
3152 if (err < 0 && err != -ENOSPC) {
3153 dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
3154 return err;
3155 }
3156 if (err == -ENOSPC)
3157 goto try_fewer;
3158
3159 if (err != nintrs) {
3160 ionic_bus_free_irq_vectors(ionic);
3161 goto try_fewer;
3162 }
3163
3164 ionic->nnqs_per_lif = nnqs_per_lif;
3165 ionic->neqs_per_lif = neqs;
3166 ionic->ntxqs_per_lif = nxqs;
3167 ionic->nrxqs_per_lif = nxqs;
3168 ionic->nintrs = nintrs;
3169
3170 ionic_debugfs_add_sizes(ionic);
3171
3172 return 0;
3173
3174try_fewer:
3175 if (nnqs_per_lif > 1) {
3176 nnqs_per_lif >>= 1;
3177 goto try_again;
3178 }
3179 if (neqs > 1) {
3180 neqs >>= 1;
3181 goto try_again;
3182 }
3183 if (nxqs > 1) {
3184 nxqs >>= 1;
3185 goto try_again;
3186 }
3187 dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
3188 return -ENOSPC;
3189}