]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/net/ethernet/pensando/ionic/ionic_lif.c
ionic: simplify the intr_index use in txq_init
[mirror_ubuntu-jammy-kernel.git] / drivers / net / ethernet / pensando / ionic / ionic_lif.c
CommitLineData
1a58e196
SN
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */
3
cc69837f 4#include <linux/ethtool.h>
011c7289
AB
5#include <linux/printk.h>
6#include <linux/dynamic_debug.h>
1a58e196
SN
7#include <linux/netdevice.h>
8#include <linux/etherdevice.h>
4b03b273 9#include <linux/if_vlan.h>
8c15440b 10#include <linux/rtnetlink.h>
1a58e196
SN
11#include <linux/interrupt.h>
12#include <linux/pci.h>
13#include <linux/cpumask.h>
14
15#include "ionic.h"
16#include "ionic_bus.h"
17#include "ionic_lif.h"
0f3154e6 18#include "ionic_txrx.h"
4d03e00a 19#include "ionic_ethtool.h"
1a58e196
SN
20#include "ionic_debugfs.h"
21
5b3f3f2a
SN
22/* queuetype support level */
23static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = {
24 [IONIC_QTYPE_ADMINQ] = 0, /* 0 = Base version with CQ support */
25 [IONIC_QTYPE_NOTIFYQ] = 0, /* 0 = Base version */
26 [IONIC_QTYPE_RXQ] = 0, /* 0 = Base version with CQ+SG support */
27 [IONIC_QTYPE_TXQ] = 1, /* 0 = Base version with CQ+SG support
28 * 1 = ... with Tx SG version 1
29 */
30};
31
2a654540
SN
32static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode);
33static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr);
34static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr);
8d61aad4 35static void ionic_link_status_check(struct ionic_lif *lif);
c672412f
SN
36static void ionic_lif_handle_fw_down(struct ionic_lif *lif);
37static void ionic_lif_handle_fw_up(struct ionic_lif *lif);
38static void ionic_lif_set_netdev_info(struct ionic_lif *lif);
2a654540 39
f053e1f8
SN
40static void ionic_txrx_deinit(struct ionic_lif *lif);
41static int ionic_txrx_init(struct ionic_lif *lif);
49d3b493
SN
42static int ionic_start_queues(struct ionic_lif *lif);
43static void ionic_stop_queues(struct ionic_lif *lif);
5b3f3f2a 44static void ionic_lif_queue_identify(struct ionic_lif *lif);
49d3b493 45
04a83459
SN
46static void ionic_dim_work(struct work_struct *work)
47{
48 struct dim *dim = container_of(work, struct dim, work);
49 struct dim_cq_moder cur_moder;
50 struct ionic_qcq *qcq;
51 u32 new_coal;
52
53 cur_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
54 qcq = container_of(dim, struct ionic_qcq, dim);
55 new_coal = ionic_coal_usec_to_hw(qcq->q.lif->ionic, cur_moder.usec);
56 qcq->intr.dim_coal_hw = new_coal ? new_coal : 1;
57 dim->state = DIM_START_MEASURE;
58}
59
2a654540
SN
60static void ionic_lif_deferred_work(struct work_struct *work)
61{
62 struct ionic_lif *lif = container_of(work, struct ionic_lif, deferred.work);
63 struct ionic_deferred *def = &lif->deferred;
64 struct ionic_deferred_work *w = NULL;
65
52733cff
SN
66 do {
67 spin_lock_bh(&def->lock);
68 if (!list_empty(&def->list)) {
69 w = list_first_entry(&def->list,
70 struct ionic_deferred_work, list);
71 list_del(&w->list);
72 }
73 spin_unlock_bh(&def->lock);
74
75 if (!w)
76 break;
2a654540 77
2a654540
SN
78 switch (w->type) {
79 case IONIC_DW_TYPE_RX_MODE:
80 ionic_lif_rx_mode(lif, w->rx_mode);
81 break;
82 case IONIC_DW_TYPE_RX_ADDR_ADD:
83 ionic_lif_addr_add(lif, w->addr);
84 break;
85 case IONIC_DW_TYPE_RX_ADDR_DEL:
86 ionic_lif_addr_del(lif, w->addr);
87 break;
8d61aad4
SN
88 case IONIC_DW_TYPE_LINK_STATUS:
89 ionic_link_status_check(lif);
90 break;
c672412f
SN
91 case IONIC_DW_TYPE_LIF_RESET:
92 if (w->fw_status)
93 ionic_lif_handle_fw_up(lif);
94 else
95 ionic_lif_handle_fw_down(lif);
96 break;
2a654540
SN
97 default:
98 break;
99 }
100 kfree(w);
52733cff
SN
101 w = NULL;
102 } while (true);
2a654540
SN
103}
104
c672412f
SN
105void ionic_lif_deferred_enqueue(struct ionic_deferred *def,
106 struct ionic_deferred_work *work)
2a654540
SN
107{
108 spin_lock_bh(&def->lock);
109 list_add_tail(&work->list, &def->list);
110 spin_unlock_bh(&def->lock);
111 schedule_work(&def->work);
112}
113
8d61aad4
SN
114static void ionic_link_status_check(struct ionic_lif *lif)
115{
116 struct net_device *netdev = lif->netdev;
117 u16 link_status;
118 bool link_up;
119
0925e9db 120 if (!test_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
49d3b493
SN
121 return;
122
8d61aad4
SN
123 link_status = le16_to_cpu(lif->info->status.link_status);
124 link_up = link_status == IONIC_PORT_OPER_STATUS_UP;
125
8d61aad4 126 if (link_up) {
25cc5a5f 127 if (netdev->flags & IFF_UP && netif_running(netdev)) {
8f56bc4d
SN
128 mutex_lock(&lif->queue_lock);
129 ionic_start_queues(lif);
130 mutex_unlock(&lif->queue_lock);
131 }
132
aa47b540 133 if (!netif_carrier_ok(netdev)) {
aa47b540 134 ionic_port_identify(lif->ionic);
aa47b540 135 netdev_info(netdev, "Link up - %d Gbps\n",
25cc5a5f 136 le32_to_cpu(lif->info->status.link_speed) / 1000);
0f3154e6
SN
137 netif_carrier_on(netdev);
138 }
8d61aad4 139 } else {
aa47b540
SN
140 if (netif_carrier_ok(netdev)) {
141 netdev_info(netdev, "Link down\n");
142 netif_carrier_off(netdev);
143 }
8d61aad4 144
25cc5a5f 145 if (netdev->flags & IFF_UP && netif_running(netdev)) {
0925e9db 146 mutex_lock(&lif->queue_lock);
49d3b493 147 ionic_stop_queues(lif);
0925e9db
SN
148 mutex_unlock(&lif->queue_lock);
149 }
8d61aad4
SN
150 }
151
c6d3d73a 152 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
8d61aad4
SN
153}
154
1800eee1 155void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep)
8d61aad4
SN
156{
157 struct ionic_deferred_work *work;
158
159 /* we only need one request outstanding at a time */
c6d3d73a 160 if (test_and_set_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state))
8d61aad4
SN
161 return;
162
1800eee1 163 if (!can_sleep) {
8d61aad4 164 work = kzalloc(sizeof(*work), GFP_ATOMIC);
2c580d77
SN
165 if (!work) {
166 clear_bit(IONIC_LIF_F_LINK_CHECK_REQUESTED, lif->state);
8d61aad4 167 return;
2c580d77 168 }
8d61aad4
SN
169
170 work->type = IONIC_DW_TYPE_LINK_STATUS;
171 ionic_lif_deferred_enqueue(&lif->deferred, work);
172 } else {
173 ionic_link_status_check(lif);
174 }
175}
176
1d062b7b
SN
177static irqreturn_t ionic_isr(int irq, void *data)
178{
179 struct napi_struct *napi = data;
180
181 napi_schedule_irqoff(napi);
182
183 return IRQ_HANDLED;
184}
185
186static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq)
187{
188 struct ionic_intr_info *intr = &qcq->intr;
189 struct device *dev = lif->ionic->dev;
190 struct ionic_queue *q = &qcq->q;
191 const char *name;
192
193 if (lif->registered)
194 name = lif->netdev->name;
195 else
196 name = dev_name(dev);
197
198 snprintf(intr->name, sizeof(intr->name),
199 "%s-%s-%s", IONIC_DRV_NAME, name, q->name);
200
201 return devm_request_irq(dev, intr->vector, ionic_isr,
202 0, intr->name, &qcq->napi);
203}
204
205static int ionic_intr_alloc(struct ionic_lif *lif, struct ionic_intr_info *intr)
206{
207 struct ionic *ionic = lif->ionic;
208 int index;
209
210 index = find_first_zero_bit(ionic->intrs, ionic->nintrs);
211 if (index == ionic->nintrs) {
212 netdev_warn(lif->netdev, "%s: no intr, index=%d nintrs=%d\n",
213 __func__, index, ionic->nintrs);
214 return -ENOSPC;
215 }
216
217 set_bit(index, ionic->intrs);
218 ionic_intr_init(&ionic->idev, intr, index);
219
220 return 0;
221}
222
36ac2c50 223static void ionic_intr_free(struct ionic *ionic, int index)
1d062b7b 224{
c06107ca 225 if (index != IONIC_INTR_INDEX_NOT_ASSIGNED && index < ionic->nintrs)
36ac2c50 226 clear_bit(index, ionic->intrs);
1d062b7b
SN
227}
228
0f3154e6
SN
229static int ionic_qcq_enable(struct ionic_qcq *qcq)
230{
231 struct ionic_queue *q = &qcq->q;
232 struct ionic_lif *lif = q->lif;
233 struct ionic_dev *idev;
234 struct device *dev;
235
236 struct ionic_admin_ctx ctx = {
237 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
238 .cmd.q_control = {
239 .opcode = IONIC_CMD_Q_CONTROL,
240 .lif_index = cpu_to_le16(lif->index),
241 .type = q->type,
242 .index = cpu_to_le32(q->index),
243 .oper = IONIC_Q_ENABLE,
244 },
245 };
246
247 idev = &lif->ionic->idev;
248 dev = lif->ionic->dev;
249
250 dev_dbg(dev, "q_enable.index %d q_enable.qtype %d\n",
251 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
252
253 if (qcq->flags & IONIC_QCQ_F_INTR) {
254 irq_set_affinity_hint(qcq->intr.vector,
255 &qcq->intr.affinity_mask);
256 napi_enable(&qcq->napi);
257 ionic_intr_clean(idev->intr_ctrl, qcq->intr.index);
258 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
259 IONIC_INTR_MASK_CLEAR);
260 }
261
262 return ionic_adminq_post_wait(lif, &ctx);
263}
264
ba6ab8ac 265static int ionic_qcq_disable(struct ionic_qcq *qcq, bool send_to_hw)
0f3154e6 266{
7c737fc4
SN
267 struct ionic_queue *q;
268 struct ionic_lif *lif;
ba6ab8ac 269 int err = 0;
0f3154e6
SN
270
271 struct ionic_admin_ctx ctx = {
272 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
273 .cmd.q_control = {
274 .opcode = IONIC_CMD_Q_CONTROL,
0f3154e6
SN
275 .oper = IONIC_Q_DISABLE,
276 },
277 };
278
7c737fc4
SN
279 if (!qcq)
280 return -ENXIO;
0f3154e6 281
7c737fc4
SN
282 q = &qcq->q;
283 lif = q->lif;
0f3154e6
SN
284
285 if (qcq->flags & IONIC_QCQ_F_INTR) {
7c737fc4
SN
286 struct ionic_dev *idev = &lif->ionic->idev;
287
04a83459 288 cancel_work_sync(&qcq->dim.work);
0f3154e6
SN
289 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
290 IONIC_INTR_MASK_SET);
291 synchronize_irq(qcq->intr.vector);
292 irq_set_affinity_hint(qcq->intr.vector, NULL);
293 napi_disable(&qcq->napi);
294 }
295
ba6ab8ac
SN
296 if (send_to_hw) {
297 ctx.cmd.q_control.lif_index = cpu_to_le16(lif->index);
298 ctx.cmd.q_control.type = q->type;
299 ctx.cmd.q_control.index = cpu_to_le32(q->index);
300 dev_dbg(lif->ionic->dev, "q_disable.index %d q_disable.qtype %d\n",
301 ctx.cmd.q_control.index, ctx.cmd.q_control.type);
7c737fc4 302
ba6ab8ac
SN
303 err = ionic_adminq_post_wait(lif, &ctx);
304 }
305
306 return err;
0f3154e6
SN
307}
308
1d062b7b
SN
309static void ionic_lif_qcq_deinit(struct ionic_lif *lif, struct ionic_qcq *qcq)
310{
311 struct ionic_dev *idev = &lif->ionic->idev;
1d062b7b
SN
312
313 if (!qcq)
314 return;
315
1d062b7b
SN
316 if (!(qcq->flags & IONIC_QCQ_F_INITED))
317 return;
318
319 if (qcq->flags & IONIC_QCQ_F_INTR) {
320 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
321 IONIC_INTR_MASK_SET);
1d062b7b
SN
322 netif_napi_del(&qcq->napi);
323 }
324
325 qcq->flags &= ~IONIC_QCQ_F_INITED;
326}
327
101b40a0
SN
328static void ionic_qcq_intr_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
329{
330 if (!(qcq->flags & IONIC_QCQ_F_INTR) || qcq->intr.vector == 0)
331 return;
332
333 irq_set_affinity_hint(qcq->intr.vector, NULL);
334 devm_free_irq(lif->ionic->dev, qcq->intr.vector, &qcq->napi);
335 qcq->intr.vector = 0;
336 ionic_intr_free(lif->ionic, qcq->intr.index);
337 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
338}
339
1d062b7b
SN
340static void ionic_qcq_free(struct ionic_lif *lif, struct ionic_qcq *qcq)
341{
342 struct device *dev = lif->ionic->dev;
343
344 if (!qcq)
345 return;
346
2a8c2c1a
SN
347 ionic_debugfs_del_qcq(qcq);
348
ea5a8b09
SN
349 if (qcq->q_base) {
350 dma_free_coherent(dev, qcq->q_size, qcq->q_base, qcq->q_base_pa);
351 qcq->q_base = NULL;
352 qcq->q_base_pa = 0;
353 }
354
355 if (qcq->cq_base) {
356 dma_free_coherent(dev, qcq->cq_size, qcq->cq_base, qcq->cq_base_pa);
357 qcq->cq_base = NULL;
358 qcq->cq_base_pa = 0;
359 }
360
361 if (qcq->sg_base) {
362 dma_free_coherent(dev, qcq->sg_size, qcq->sg_base, qcq->sg_base_pa);
363 qcq->sg_base = NULL;
364 qcq->sg_base_pa = 0;
365 }
1d062b7b 366
101b40a0 367 ionic_qcq_intr_free(lif, qcq);
1d062b7b 368
a34e25ab
SN
369 if (qcq->cq.info) {
370 devm_kfree(dev, qcq->cq.info);
371 qcq->cq.info = NULL;
372 }
373 if (qcq->q.info) {
374 devm_kfree(dev, qcq->q.info);
375 qcq->q.info = NULL;
376 }
1d062b7b
SN
377}
378
379static void ionic_qcqs_free(struct ionic_lif *lif)
380{
0f3154e6 381 struct device *dev = lif->ionic->dev;
0f3154e6 382
77ceb68e
SN
383 if (lif->notifyqcq) {
384 ionic_qcq_free(lif, lif->notifyqcq);
101b40a0 385 devm_kfree(dev, lif->notifyqcq);
77ceb68e
SN
386 lif->notifyqcq = NULL;
387 }
388
1d062b7b
SN
389 if (lif->adminqcq) {
390 ionic_qcq_free(lif, lif->adminqcq);
101b40a0 391 devm_kfree(dev, lif->adminqcq);
1d062b7b
SN
392 lif->adminqcq = NULL;
393 }
0f3154e6 394
a4674f34 395 if (lif->rxqcqs) {
34dec947
SN
396 devm_kfree(dev, lif->rxqstats);
397 lif->rxqstats = NULL;
a4674f34
SN
398 devm_kfree(dev, lif->rxqcqs);
399 lif->rxqcqs = NULL;
400 }
0f3154e6 401
a4674f34 402 if (lif->txqcqs) {
34dec947
SN
403 devm_kfree(dev, lif->txqstats);
404 lif->txqstats = NULL;
a4674f34
SN
405 devm_kfree(dev, lif->txqcqs);
406 lif->txqcqs = NULL;
407 }
1d062b7b
SN
408}
409
77ceb68e
SN
410static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq,
411 struct ionic_qcq *n_qcq)
412{
413 if (WARN_ON(n_qcq->flags & IONIC_QCQ_F_INTR)) {
36ac2c50 414 ionic_intr_free(n_qcq->cq.lif->ionic, n_qcq->intr.index);
77ceb68e
SN
415 n_qcq->flags &= ~IONIC_QCQ_F_INTR;
416 }
417
418 n_qcq->intr.vector = src_qcq->intr.vector;
419 n_qcq->intr.index = src_qcq->intr.index;
420}
421
101b40a0
SN
422static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq)
423{
424 int err;
425
426 if (!(qcq->flags & IONIC_QCQ_F_INTR)) {
427 qcq->intr.index = IONIC_INTR_INDEX_NOT_ASSIGNED;
428 return 0;
429 }
430
431 err = ionic_intr_alloc(lif, &qcq->intr);
432 if (err) {
433 netdev_warn(lif->netdev, "no intr for %s: %d\n",
434 qcq->q.name, err);
435 goto err_out;
436 }
437
438 err = ionic_bus_get_irq(lif->ionic, qcq->intr.index);
439 if (err < 0) {
440 netdev_warn(lif->netdev, "no vector for %s: %d\n",
441 qcq->q.name, err);
442 goto err_out_free_intr;
443 }
444 qcq->intr.vector = err;
445 ionic_intr_mask_assert(lif->ionic->idev.intr_ctrl, qcq->intr.index,
446 IONIC_INTR_MASK_SET);
447
448 err = ionic_request_irq(lif, qcq);
449 if (err) {
450 netdev_warn(lif->netdev, "irq request failed %d\n", err);
451 goto err_out_free_intr;
452 }
453
454 /* try to get the irq on the local numa node first */
455 qcq->intr.cpu = cpumask_local_spread(qcq->intr.index,
456 dev_to_node(lif->ionic->dev));
457 if (qcq->intr.cpu != -1)
458 cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask);
459
460 netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index);
461 return 0;
462
463err_out_free_intr:
464 ionic_intr_free(lif->ionic, qcq->intr.index);
465err_out:
466 return err;
467}
468
1d062b7b
SN
469static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type,
470 unsigned int index,
471 const char *name, unsigned int flags,
472 unsigned int num_descs, unsigned int desc_size,
473 unsigned int cq_desc_size,
474 unsigned int sg_desc_size,
475 unsigned int pid, struct ionic_qcq **qcq)
476{
477 struct ionic_dev *idev = &lif->ionic->idev;
1d062b7b
SN
478 struct device *dev = lif->ionic->dev;
479 void *q_base, *cq_base, *sg_base;
480 dma_addr_t cq_base_pa = 0;
481 dma_addr_t sg_base_pa = 0;
482 dma_addr_t q_base_pa = 0;
483 struct ionic_qcq *new;
484 int err;
485
486 *qcq = NULL;
487
1d062b7b
SN
488 new = devm_kzalloc(dev, sizeof(*new), GFP_KERNEL);
489 if (!new) {
490 netdev_err(lif->netdev, "Cannot allocate queue structure\n");
491 err = -ENOMEM;
492 goto err_out;
493 }
494
f37bc346 495 new->q.dev = dev;
1d062b7b
SN
496 new->flags = flags;
497
e7164200 498 new->q.info = devm_kcalloc(dev, num_descs, sizeof(*new->q.info),
1d062b7b
SN
499 GFP_KERNEL);
500 if (!new->q.info) {
501 netdev_err(lif->netdev, "Cannot allocate queue info\n");
502 err = -ENOMEM;
ea5a8b09 503 goto err_out_free_qcq;
1d062b7b
SN
504 }
505
506 new->q.type = type;
f37bc346 507 new->q.max_sg_elems = lif->qtype_info[type].max_sg_elems;
1d062b7b
SN
508
509 err = ionic_q_init(lif, idev, &new->q, index, name, num_descs,
510 desc_size, sg_desc_size, pid);
511 if (err) {
512 netdev_err(lif->netdev, "Cannot initialize queue\n");
ea5a8b09 513 goto err_out_free_q_info;
1d062b7b
SN
514 }
515
101b40a0
SN
516 err = ionic_alloc_qcq_interrupt(lif, new);
517 if (err)
518 goto err_out;
1d062b7b 519
e7164200 520 new->cq.info = devm_kcalloc(dev, num_descs, sizeof(*new->cq.info),
1d062b7b
SN
521 GFP_KERNEL);
522 if (!new->cq.info) {
523 netdev_err(lif->netdev, "Cannot allocate completion queue info\n");
524 err = -ENOMEM;
0b064100 525 goto err_out_free_irq;
1d062b7b
SN
526 }
527
528 err = ionic_cq_init(lif, &new->cq, &new->intr, num_descs, cq_desc_size);
529 if (err) {
530 netdev_err(lif->netdev, "Cannot initialize completion queue\n");
ea5a8b09 531 goto err_out_free_cq_info;
1d062b7b
SN
532 }
533
9576a36c
SN
534 if (flags & IONIC_QCQ_F_NOTIFYQ) {
535 int q_size, cq_size;
536
537 /* q & cq need to be contiguous in case of notifyq */
538 q_size = ALIGN(num_descs * desc_size, PAGE_SIZE);
539 cq_size = ALIGN(num_descs * cq_desc_size, PAGE_SIZE);
540
541 new->q_size = PAGE_SIZE + q_size + cq_size;
542 new->q_base = dma_alloc_coherent(dev, new->q_size,
543 &new->q_base_pa, GFP_KERNEL);
544 if (!new->q_base) {
545 netdev_err(lif->netdev, "Cannot allocate qcq DMA memory\n");
546 err = -ENOMEM;
547 goto err_out_free_cq_info;
548 }
549 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
550 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
551 ionic_q_map(&new->q, q_base, q_base_pa);
552
553 cq_base = PTR_ALIGN(q_base + q_size, PAGE_SIZE);
554 cq_base_pa = ALIGN(new->q_base_pa + q_size, PAGE_SIZE);
555 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
556 ionic_cq_bind(&new->cq, &new->q);
557 } else {
558 new->q_size = PAGE_SIZE + (num_descs * desc_size);
559 new->q_base = dma_alloc_coherent(dev, new->q_size, &new->q_base_pa,
560 GFP_KERNEL);
561 if (!new->q_base) {
562 netdev_err(lif->netdev, "Cannot allocate queue DMA memory\n");
563 err = -ENOMEM;
564 goto err_out_free_cq_info;
565 }
566 q_base = PTR_ALIGN(new->q_base, PAGE_SIZE);
567 q_base_pa = ALIGN(new->q_base_pa, PAGE_SIZE);
568 ionic_q_map(&new->q, q_base, q_base_pa);
569
570 new->cq_size = PAGE_SIZE + (num_descs * cq_desc_size);
571 new->cq_base = dma_alloc_coherent(dev, new->cq_size, &new->cq_base_pa,
572 GFP_KERNEL);
573 if (!new->cq_base) {
574 netdev_err(lif->netdev, "Cannot allocate cq DMA memory\n");
575 err = -ENOMEM;
576 goto err_out_free_q;
577 }
578 cq_base = PTR_ALIGN(new->cq_base, PAGE_SIZE);
579 cq_base_pa = ALIGN(new->cq_base_pa, PAGE_SIZE);
580 ionic_cq_map(&new->cq, cq_base, cq_base_pa);
581 ionic_cq_bind(&new->cq, &new->q);
ea5a8b09 582 }
1d062b7b
SN
583
584 if (flags & IONIC_QCQ_F_SG) {
ea5a8b09
SN
585 new->sg_size = PAGE_SIZE + (num_descs * sg_desc_size);
586 new->sg_base = dma_alloc_coherent(dev, new->sg_size, &new->sg_base_pa,
587 GFP_KERNEL);
588 if (!new->sg_base) {
589 netdev_err(lif->netdev, "Cannot allocate sg DMA memory\n");
590 err = -ENOMEM;
591 goto err_out_free_cq;
592 }
593 sg_base = PTR_ALIGN(new->sg_base, PAGE_SIZE);
594 sg_base_pa = ALIGN(new->sg_base_pa, PAGE_SIZE);
1d062b7b
SN
595 ionic_q_sg_map(&new->q, sg_base, sg_base_pa);
596 }
597
04a83459
SN
598 INIT_WORK(&new->dim.work, ionic_dim_work);
599 new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
600
1d062b7b
SN
601 *qcq = new;
602
603 return 0;
604
ea5a8b09
SN
605err_out_free_cq:
606 dma_free_coherent(dev, new->cq_size, new->cq_base, new->cq_base_pa);
607err_out_free_q:
608 dma_free_coherent(dev, new->q_size, new->q_base, new->q_base_pa);
609err_out_free_cq_info:
610 devm_kfree(dev, new->cq.info);
0b064100 611err_out_free_irq:
101b40a0 612 if (flags & IONIC_QCQ_F_INTR) {
0b064100 613 devm_free_irq(dev, new->intr.vector, &new->napi);
36ac2c50 614 ionic_intr_free(lif->ionic, new->intr.index);
101b40a0 615 }
ea5a8b09
SN
616err_out_free_q_info:
617 devm_kfree(dev, new->q.info);
618err_out_free_qcq:
619 devm_kfree(dev, new);
1d062b7b
SN
620err_out:
621 dev_err(dev, "qcq alloc of %s%d failed %d\n", name, index, err);
622 return err;
623}
624
625static int ionic_qcqs_alloc(struct ionic_lif *lif)
626{
0f3154e6 627 struct device *dev = lif->ionic->dev;
1d062b7b
SN
628 unsigned int flags;
629 int err;
630
631 flags = IONIC_QCQ_F_INTR;
632 err = ionic_qcq_alloc(lif, IONIC_QTYPE_ADMINQ, 0, "admin", flags,
633 IONIC_ADMINQ_LENGTH,
634 sizeof(struct ionic_admin_cmd),
635 sizeof(struct ionic_admin_comp),
636 0, lif->kern_pid, &lif->adminqcq);
637 if (err)
638 return err;
2a8c2c1a 639 ionic_debugfs_add_qcq(lif, lif->adminqcq);
1d062b7b 640
77ceb68e
SN
641 if (lif->ionic->nnqs_per_lif) {
642 flags = IONIC_QCQ_F_NOTIFYQ;
643 err = ionic_qcq_alloc(lif, IONIC_QTYPE_NOTIFYQ, 0, "notifyq",
644 flags, IONIC_NOTIFYQ_LENGTH,
645 sizeof(struct ionic_notifyq_cmd),
646 sizeof(union ionic_notifyq_comp),
647 0, lif->kern_pid, &lif->notifyqcq);
648 if (err)
34dec947 649 goto err_out;
2a8c2c1a 650 ionic_debugfs_add_qcq(lif, lif->notifyqcq);
77ceb68e
SN
651
652 /* Let the notifyq ride on the adminq interrupt */
653 ionic_link_qcq_interrupts(lif->adminqcq, lif->notifyqcq);
654 }
655
0f3154e6 656 err = -ENOMEM;
ee205626 657 lif->txqcqs = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
34dec947 658 sizeof(struct ionic_qcq *), GFP_KERNEL);
0f3154e6 659 if (!lif->txqcqs)
34dec947 660 goto err_out;
ee205626 661 lif->rxqcqs = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
34dec947 662 sizeof(struct ionic_qcq *), GFP_KERNEL);
0f3154e6 663 if (!lif->rxqcqs)
34dec947 664 goto err_out;
0f3154e6 665
34dec947
SN
666 lif->txqstats = devm_kcalloc(dev, lif->ionic->ntxqs_per_lif,
667 sizeof(struct ionic_tx_stats), GFP_KERNEL);
668 if (!lif->txqstats)
669 goto err_out;
670 lif->rxqstats = devm_kcalloc(dev, lif->ionic->nrxqs_per_lif,
671 sizeof(struct ionic_rx_stats), GFP_KERNEL);
672 if (!lif->rxqstats)
673 goto err_out;
77ceb68e 674
34dec947 675 return 0;
77ceb68e 676
34dec947
SN
677err_out:
678 ionic_qcqs_free(lif);
77ceb68e
SN
679 return err;
680}
681
f053e1f8
SN
682static void ionic_qcq_sanitize(struct ionic_qcq *qcq)
683{
684 qcq->q.tail_idx = 0;
685 qcq->q.head_idx = 0;
686 qcq->cq.tail_idx = 0;
687 qcq->cq.done_color = 1;
688 memset(qcq->q_base, 0, qcq->q_size);
689 memset(qcq->cq_base, 0, qcq->cq_size);
690 memset(qcq->sg_base, 0, qcq->sg_size);
691}
692
0f3154e6
SN
693static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
694{
695 struct device *dev = lif->ionic->dev;
696 struct ionic_queue *q = &qcq->q;
697 struct ionic_cq *cq = &qcq->cq;
698 struct ionic_admin_ctx ctx = {
699 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
700 .cmd.q_init = {
701 .opcode = IONIC_CMD_Q_INIT,
702 .lif_index = cpu_to_le16(lif->index),
703 .type = q->type,
5b3f3f2a 704 .ver = lif->qtype_info[q->type].version,
0f3154e6
SN
705 .index = cpu_to_le32(q->index),
706 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
707 IONIC_QINIT_F_SG),
0f3154e6
SN
708 .pid = cpu_to_le16(q->pid),
709 .ring_size = ilog2(q->num_descs),
710 .ring_base = cpu_to_le64(q->base_pa),
711 .cq_ring_base = cpu_to_le64(cq->base_pa),
712 .sg_ring_base = cpu_to_le64(q->sg_base_pa),
713 },
714 };
fe8c30b5 715 unsigned int intr_index;
0f3154e6
SN
716 int err;
717
2103ed2f
SN
718 intr_index = qcq->intr.index;
719
fe8c30b5
SN
720 ctx.cmd.q_init.intr_index = cpu_to_le16(intr_index);
721
0f3154e6
SN
722 dev_dbg(dev, "txq_init.pid %d\n", ctx.cmd.q_init.pid);
723 dev_dbg(dev, "txq_init.index %d\n", ctx.cmd.q_init.index);
724 dev_dbg(dev, "txq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
725 dev_dbg(dev, "txq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
5b3f3f2a
SN
726 dev_dbg(dev, "txq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
727 dev_dbg(dev, "txq_init.ver %d\n", ctx.cmd.q_init.ver);
fe8c30b5 728 dev_dbg(dev, "txq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
0f3154e6 729
f053e1f8 730 ionic_qcq_sanitize(qcq);
49d3b493 731
0f3154e6
SN
732 err = ionic_adminq_post_wait(lif, &ctx);
733 if (err)
734 return err;
735
736 q->hw_type = ctx.comp.q_init.hw_type;
737 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
738 q->dbval = IONIC_DBELL_QID(q->hw_index);
739
740 dev_dbg(dev, "txq->hw_type %d\n", q->hw_type);
741 dev_dbg(dev, "txq->hw_index %d\n", q->hw_index);
742
fe8c30b5
SN
743 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
744 netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi,
745 NAPI_POLL_WEIGHT);
746
0f3154e6
SN
747 qcq->flags |= IONIC_QCQ_F_INITED;
748
0f3154e6
SN
749 return 0;
750}
751
752static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq)
753{
754 struct device *dev = lif->ionic->dev;
755 struct ionic_queue *q = &qcq->q;
756 struct ionic_cq *cq = &qcq->cq;
757 struct ionic_admin_ctx ctx = {
758 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
759 .cmd.q_init = {
760 .opcode = IONIC_CMD_Q_INIT,
761 .lif_index = cpu_to_le16(lif->index),
762 .type = q->type,
5b3f3f2a 763 .ver = lif->qtype_info[q->type].version,
0f3154e6 764 .index = cpu_to_le32(q->index),
08f2e4b2
SN
765 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
766 IONIC_QINIT_F_SG),
0f3154e6
SN
767 .intr_index = cpu_to_le16(cq->bound_intr->index),
768 .pid = cpu_to_le16(q->pid),
769 .ring_size = ilog2(q->num_descs),
770 .ring_base = cpu_to_le64(q->base_pa),
771 .cq_ring_base = cpu_to_le64(cq->base_pa),
08f2e4b2 772 .sg_ring_base = cpu_to_le64(q->sg_base_pa),
0f3154e6
SN
773 },
774 };
775 int err;
776
777 dev_dbg(dev, "rxq_init.pid %d\n", ctx.cmd.q_init.pid);
778 dev_dbg(dev, "rxq_init.index %d\n", ctx.cmd.q_init.index);
779 dev_dbg(dev, "rxq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
780 dev_dbg(dev, "rxq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
5b3f3f2a
SN
781 dev_dbg(dev, "rxq_init.flags 0x%x\n", ctx.cmd.q_init.flags);
782 dev_dbg(dev, "rxq_init.ver %d\n", ctx.cmd.q_init.ver);
fe8c30b5 783 dev_dbg(dev, "rxq_init.intr_index %d\n", ctx.cmd.q_init.intr_index);
0f3154e6 784
f053e1f8 785 ionic_qcq_sanitize(qcq);
49d3b493 786
0f3154e6
SN
787 err = ionic_adminq_post_wait(lif, &ctx);
788 if (err)
789 return err;
790
791 q->hw_type = ctx.comp.q_init.hw_type;
792 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
793 q->dbval = IONIC_DBELL_QID(q->hw_index);
794
795 dev_dbg(dev, "rxq->hw_type %d\n", q->hw_type);
796 dev_dbg(dev, "rxq->hw_index %d\n", q->hw_index);
797
fe8c30b5
SN
798 if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
799 netif_napi_add(lif->netdev, &qcq->napi, ionic_rx_napi,
800 NAPI_POLL_WEIGHT);
801 else
802 netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi,
803 NAPI_POLL_WEIGHT);
0f3154e6 804
0f3154e6
SN
805 qcq->flags |= IONIC_QCQ_F_INITED;
806
0f3154e6
SN
807 return 0;
808}
809
77ceb68e
SN
810static bool ionic_notifyq_service(struct ionic_cq *cq,
811 struct ionic_cq_info *cq_info)
812{
813 union ionic_notifyq_comp *comp = cq_info->cq_desc;
c672412f 814 struct ionic_deferred_work *work;
77ceb68e
SN
815 struct net_device *netdev;
816 struct ionic_queue *q;
817 struct ionic_lif *lif;
818 u64 eid;
819
820 q = cq->bound_q;
821 lif = q->info[0].cb_arg;
822 netdev = lif->netdev;
823 eid = le64_to_cpu(comp->event.eid);
824
825 /* Have we run out of new completions to process? */
3fbc9bb6 826 if ((s64)(eid - lif->last_eid) <= 0)
77ceb68e
SN
827 return false;
828
829 lif->last_eid = eid;
830
831 dev_dbg(lif->ionic->dev, "notifyq event:\n");
832 dynamic_hex_dump("event ", DUMP_PREFIX_OFFSET, 16, 1,
833 comp, sizeof(*comp), true);
834
835 switch (le16_to_cpu(comp->event.ecode)) {
836 case IONIC_EVENT_LINK_CHANGE:
25cc5a5f 837 ionic_link_status_check_request(lif, CAN_NOT_SLEEP);
77ceb68e
SN
838 break;
839 case IONIC_EVENT_RESET:
c672412f
SN
840 work = kzalloc(sizeof(*work), GFP_ATOMIC);
841 if (!work) {
c0c682ee 842 netdev_err(lif->netdev, "Reset event dropped\n");
c672412f
SN
843 } else {
844 work->type = IONIC_DW_TYPE_LIF_RESET;
845 ionic_lif_deferred_enqueue(&lif->deferred, work);
846 }
77ceb68e
SN
847 break;
848 default:
5b3f3f2a 849 netdev_warn(netdev, "Notifyq event ecode=%d eid=%lld\n",
77ceb68e
SN
850 comp->event.ecode, eid);
851 break;
852 }
853
854 return true;
855}
856
1d062b7b
SN
857static bool ionic_adminq_service(struct ionic_cq *cq,
858 struct ionic_cq_info *cq_info)
859{
860 struct ionic_admin_comp *comp = cq_info->cq_desc;
861
862 if (!color_match(comp->color, cq->done_color))
863 return false;
864
865 ionic_q_service(cq->bound_q, cq_info, le16_to_cpu(comp->comp_index));
866
867 return true;
868}
869
870static int ionic_adminq_napi(struct napi_struct *napi, int budget)
871{
b4280948 872 struct ionic_intr_info *intr = napi_to_cq(napi)->bound_intr;
77ceb68e 873 struct ionic_lif *lif = napi_to_cq(napi)->lif;
b4280948
SN
874 struct ionic_dev *idev = &lif->ionic->idev;
875 unsigned int flags = 0;
77ceb68e
SN
876 int n_work = 0;
877 int a_work = 0;
b4280948
SN
878 int work_done;
879
880 if (lif->notifyqcq && lif->notifyqcq->flags & IONIC_QCQ_F_INITED)
881 n_work = ionic_cq_service(&lif->notifyqcq->cq, budget,
882 ionic_notifyq_service, NULL, NULL);
77ceb68e 883
b4280948
SN
884 if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED)
885 a_work = ionic_cq_service(&lif->adminqcq->cq, budget,
886 ionic_adminq_service, NULL, NULL);
887
888 work_done = max(n_work, a_work);
889 if (work_done < budget && napi_complete_done(napi, work_done)) {
890 flags |= IONIC_INTR_CRED_UNMASK;
04a83459 891 lif->adminqcq->cq.bound_intr->rearm_count++;
b4280948 892 }
77ceb68e 893
b4280948
SN
894 if (work_done || flags) {
895 flags |= IONIC_INTR_CRED_RESET_COALESCE;
896 ionic_intr_credits(idev->intr_ctrl,
897 intr->index,
898 n_work + a_work, flags);
899 }
900
901 return work_done;
1d062b7b
SN
902}
903
f64e0c56
SN
904void ionic_get_stats64(struct net_device *netdev,
905 struct rtnl_link_stats64 *ns)
8d61aad4
SN
906{
907 struct ionic_lif *lif = netdev_priv(netdev);
908 struct ionic_lif_stats *ls;
909
910 memset(ns, 0, sizeof(*ns));
911 ls = &lif->info->stats;
912
913 ns->rx_packets = le64_to_cpu(ls->rx_ucast_packets) +
914 le64_to_cpu(ls->rx_mcast_packets) +
915 le64_to_cpu(ls->rx_bcast_packets);
916
917 ns->tx_packets = le64_to_cpu(ls->tx_ucast_packets) +
918 le64_to_cpu(ls->tx_mcast_packets) +
919 le64_to_cpu(ls->tx_bcast_packets);
920
921 ns->rx_bytes = le64_to_cpu(ls->rx_ucast_bytes) +
922 le64_to_cpu(ls->rx_mcast_bytes) +
923 le64_to_cpu(ls->rx_bcast_bytes);
924
925 ns->tx_bytes = le64_to_cpu(ls->tx_ucast_bytes) +
926 le64_to_cpu(ls->tx_mcast_bytes) +
927 le64_to_cpu(ls->tx_bcast_bytes);
928
929 ns->rx_dropped = le64_to_cpu(ls->rx_ucast_drop_packets) +
930 le64_to_cpu(ls->rx_mcast_drop_packets) +
931 le64_to_cpu(ls->rx_bcast_drop_packets);
932
933 ns->tx_dropped = le64_to_cpu(ls->tx_ucast_drop_packets) +
934 le64_to_cpu(ls->tx_mcast_drop_packets) +
935 le64_to_cpu(ls->tx_bcast_drop_packets);
936
937 ns->multicast = le64_to_cpu(ls->rx_mcast_packets);
938
939 ns->rx_over_errors = le64_to_cpu(ls->rx_queue_empty);
940
941 ns->rx_missed_errors = le64_to_cpu(ls->rx_dma_error) +
942 le64_to_cpu(ls->rx_queue_disabled) +
943 le64_to_cpu(ls->rx_desc_fetch_error) +
944 le64_to_cpu(ls->rx_desc_data_error);
945
946 ns->tx_aborted_errors = le64_to_cpu(ls->tx_dma_error) +
947 le64_to_cpu(ls->tx_queue_disabled) +
948 le64_to_cpu(ls->tx_desc_fetch_error) +
949 le64_to_cpu(ls->tx_desc_data_error);
950
951 ns->rx_errors = ns->rx_over_errors +
952 ns->rx_missed_errors;
953
954 ns->tx_errors = ns->tx_aborted_errors;
955}
956
2a654540
SN
957static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr)
958{
959 struct ionic_admin_ctx ctx = {
960 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
961 .cmd.rx_filter_add = {
962 .opcode = IONIC_CMD_RX_FILTER_ADD,
963 .lif_index = cpu_to_le16(lif->index),
964 .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC),
965 },
966 };
967 struct ionic_rx_filter *f;
968 int err;
969
970 /* don't bother if we already have it */
971 spin_lock_bh(&lif->rx_filters.lock);
972 f = ionic_rx_filter_by_addr(lif, addr);
973 spin_unlock_bh(&lif->rx_filters.lock);
974 if (f)
975 return 0;
976
cbec2153 977 netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr);
2a654540
SN
978
979 memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN);
980 err = ionic_adminq_post_wait(lif, &ctx);
53faea3d 981 if (err && err != -EEXIST)
2a654540
SN
982 return err;
983
984 return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
985}
986
/* Remove a MAC address rx filter from the NIC for this LIF.
 *
 * The local filter entry is looked up and freed under rx_filters.lock
 * before the delete command is posted; the filter_id is captured into
 * the command first so the AdminQ wait can happen without the lock.
 */
static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;
	int err;

	spin_lock_bh(&lif->rx_filters.lock);
	f = ionic_rx_filter_by_addr(lif, addr);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n",
		   addr, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	err = ionic_adminq_post_wait(lif, &ctx);
	/* NOTE(review): tolerating -EEXIST on a delete mirrors the add
	 * path; presumably the firmware uses it for "filter already
	 * gone" as well — confirm against the device error semantics.
	 */
	if (err && err != -EEXIST)
		return err;

	return 0;
}
1019
1800eee1
SAS
/* Add or delete a MAC filter, either directly (can_sleep) or via the
 * deferred work queue when called from an atomic context.
 *
 * The ucast/mcast counters are adjusted up front so that an overflow
 * can be reported to the stack immediately, even when the actual
 * filter operation is deferred.
 * NOTE(review): if a deferred add later fails in the worker, the
 * counter bumped here is not rolled back — verify against the
 * deferred-work handler.
 */
static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add,
			  bool can_sleep)
{
	struct ionic_deferred_work *work;
	unsigned int nmfilters;
	unsigned int nufilters;

	if (add) {
		/* Do we have space for this filter? We test the counters
		 * here before checking the need for deferral so that we
		 * can return an overflow error to the stack.
		 */
		nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
		nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);

		if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters))
			lif->nmcast++;
		else if (!is_multicast_ether_addr(addr) &&
			 lif->nucast < nufilters)
			lif->nucast++;
		else
			return -ENOSPC;
	} else {
		if (is_multicast_ether_addr(addr) && lif->nmcast)
			lif->nmcast--;
		else if (!is_multicast_ether_addr(addr) && lif->nucast)
			lif->nucast--;
	}

	if (!can_sleep) {
		/* atomic context: queue the work for the deferred handler */
		work = kzalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return -ENOMEM;
		work->type = add ? IONIC_DW_TYPE_RX_ADDR_ADD :
				   IONIC_DW_TYPE_RX_ADDR_DEL;
		memcpy(work->addr, addr, ETH_ALEN);
		netdev_dbg(lif->netdev, "deferred: rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		ionic_lif_deferred_enqueue(&lif->deferred, work);
	} else {
		netdev_dbg(lif->netdev, "rx_filter %s %pM\n",
			   add ? "add" : "del", addr);
		if (add)
			return ionic_lif_addr_add(lif, addr);
		else
			return ionic_lif_addr_del(lif, addr);
	}

	return 0;
}
1070
1071static int ionic_addr_add(struct net_device *netdev, const u8 *addr)
1072{
7c8d008c 1073 return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_SLEEP);
1800eee1
SAS
1074}
1075
1076static int ionic_ndo_addr_add(struct net_device *netdev, const u8 *addr)
1077{
7c8d008c 1078 return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR, CAN_NOT_SLEEP);
2a654540
SN
1079}
1080
1081static int ionic_addr_del(struct net_device *netdev, const u8 *addr)
1082{
7c8d008c 1083 return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_SLEEP);
1800eee1
SAS
1084}
1085
1086static int ionic_ndo_addr_del(struct net_device *netdev, const u8 *addr)
1087{
7c8d008c 1088 return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR, CAN_NOT_SLEEP);
2a654540
SN
1089}
1090
1091static void ionic_lif_rx_mode(struct ionic_lif *lif, unsigned int rx_mode)
1092{
1093 struct ionic_admin_ctx ctx = {
1094 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
1095 .cmd.rx_mode_set = {
1096 .opcode = IONIC_CMD_RX_MODE_SET,
1097 .lif_index = cpu_to_le16(lif->index),
1098 .rx_mode = cpu_to_le16(rx_mode),
1099 },
1100 };
1101 char buf[128];
1102 int err;
1103 int i;
1104#define REMAIN(__x) (sizeof(buf) - (__x))
1105
38e0f746
TI
1106 i = scnprintf(buf, sizeof(buf), "rx_mode 0x%04x -> 0x%04x:",
1107 lif->rx_mode, rx_mode);
2a654540 1108 if (rx_mode & IONIC_RX_MODE_F_UNICAST)
38e0f746 1109 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_UNICAST");
2a654540 1110 if (rx_mode & IONIC_RX_MODE_F_MULTICAST)
38e0f746 1111 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_MULTICAST");
2a654540 1112 if (rx_mode & IONIC_RX_MODE_F_BROADCAST)
38e0f746 1113 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_BROADCAST");
2a654540 1114 if (rx_mode & IONIC_RX_MODE_F_PROMISC)
38e0f746 1115 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_PROMISC");
2a654540 1116 if (rx_mode & IONIC_RX_MODE_F_ALLMULTI)
38e0f746 1117 i += scnprintf(&buf[i], REMAIN(i), " RX_MODE_F_ALLMULTI");
2a654540
SN
1118 netdev_dbg(lif->netdev, "lif%d %s\n", lif->index, buf);
1119
1120 err = ionic_adminq_post_wait(lif, &ctx);
1121 if (err)
1122 netdev_warn(lif->netdev, "set rx_mode 0x%04x failed: %d\n",
1123 rx_mode, err);
1124 else
1125 lif->rx_mode = rx_mode;
1126}
1127
/* Recompute the wanted rx_mode from netdev flags and filter counts,
 * sync the unicast/multicast address lists, and push any mode change
 * to the device (deferred to a worker when !can_sleep).
 *
 * When the address lists exceed what the device can filter, we fall
 * back to PROMISC/ALLMULTI and remember that we overflowed so the
 * fallback can be undone once the lists shrink again.
 */
static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_deferred_work *work;
	unsigned int nfilters;
	unsigned int rx_mode;

	rx_mode = IONIC_RX_MODE_F_UNICAST;
	rx_mode |= (netdev->flags & IFF_MULTICAST) ? IONIC_RX_MODE_F_MULTICAST : 0;
	rx_mode |= (netdev->flags & IFF_BROADCAST) ? IONIC_RX_MODE_F_BROADCAST : 0;
	rx_mode |= (netdev->flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0;
	rx_mode |= (netdev->flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0;

	/* sync unicast addresses
	 * next check to see if we're in an overflow state
	 * if so, we track that we overflowed and enable NIC PROMISC
	 * else if the overflow is set and not needed
	 * we remove our overflow flag and check the netdev flags
	 * to see if we can disable NIC PROMISC
	 */
	if (can_sleep)
		__dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del);
	else
		__dev_uc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
	/* +1 accounts for the LIF's own station MAC filter */
	nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters);
	if (netdev_uc_count(netdev) + 1 > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_PROMISC;
		lif->uc_overflow = true;
	} else if (lif->uc_overflow) {
		lif->uc_overflow = false;
		if (!(netdev->flags & IFF_PROMISC))
			rx_mode &= ~IONIC_RX_MODE_F_PROMISC;
	}

	/* same for multicast */
	if (can_sleep)
		__dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del);
	else
		__dev_mc_sync(netdev, ionic_ndo_addr_add, ionic_ndo_addr_del);
	nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters);
	if (netdev_mc_count(netdev) > nfilters) {
		rx_mode |= IONIC_RX_MODE_F_ALLMULTI;
		lif->mc_overflow = true;
	} else if (lif->mc_overflow) {
		lif->mc_overflow = false;
		if (!(netdev->flags & IFF_ALLMULTI))
			rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI;
	}

	/* only talk to the device if the mode actually changed */
	if (lif->rx_mode != rx_mode) {
		if (!can_sleep) {
			work = kzalloc(sizeof(*work), GFP_ATOMIC);
			if (!work) {
				netdev_err(lif->netdev, "rxmode change dropped\n");
				return;
			}
			work->type = IONIC_DW_TYPE_RX_MODE;
			work->rx_mode = rx_mode;
			netdev_dbg(lif->netdev, "deferred: rx_mode\n");
			ionic_lif_deferred_enqueue(&lif->deferred, work);
		} else {
			ionic_lif_rx_mode(lif, rx_mode);
		}
	}
}
1193
/* .ndo_set_rx_mode callback: runs with the addr_list lock held,
 * so any device update must take the non-sleeping (deferred) path.
 */
static void ionic_ndo_set_rx_mode(struct net_device *netdev)
{
	ionic_set_rx_mode(netdev, CAN_NOT_SLEEP);
}
1198
beead698
SN
1199static __le64 ionic_netdev_features_to_nic(netdev_features_t features)
1200{
1201 u64 wanted = 0;
1202
1203 if (features & NETIF_F_HW_VLAN_CTAG_TX)
1204 wanted |= IONIC_ETH_HW_VLAN_TX_TAG;
1205 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1206 wanted |= IONIC_ETH_HW_VLAN_RX_STRIP;
1207 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
1208 wanted |= IONIC_ETH_HW_VLAN_RX_FILTER;
1209 if (features & NETIF_F_RXHASH)
1210 wanted |= IONIC_ETH_HW_RX_HASH;
1211 if (features & NETIF_F_RXCSUM)
1212 wanted |= IONIC_ETH_HW_RX_CSUM;
1213 if (features & NETIF_F_SG)
1214 wanted |= IONIC_ETH_HW_TX_SG;
1215 if (features & NETIF_F_HW_CSUM)
1216 wanted |= IONIC_ETH_HW_TX_CSUM;
1217 if (features & NETIF_F_TSO)
1218 wanted |= IONIC_ETH_HW_TSO;
1219 if (features & NETIF_F_TSO6)
1220 wanted |= IONIC_ETH_HW_TSO_IPV6;
1221 if (features & NETIF_F_TSO_ECN)
1222 wanted |= IONIC_ETH_HW_TSO_ECN;
1223 if (features & NETIF_F_GSO_GRE)
1224 wanted |= IONIC_ETH_HW_TSO_GRE;
1225 if (features & NETIF_F_GSO_GRE_CSUM)
1226 wanted |= IONIC_ETH_HW_TSO_GRE_CSUM;
1227 if (features & NETIF_F_GSO_IPXIP4)
1228 wanted |= IONIC_ETH_HW_TSO_IPXIP4;
1229 if (features & NETIF_F_GSO_IPXIP6)
1230 wanted |= IONIC_ETH_HW_TSO_IPXIP6;
1231 if (features & NETIF_F_GSO_UDP_TUNNEL)
1232 wanted |= IONIC_ETH_HW_TSO_UDP;
1233 if (features & NETIF_F_GSO_UDP_TUNNEL_CSUM)
1234 wanted |= IONIC_ETH_HW_TSO_UDP_CSUM;
1235
1236 return cpu_to_le64(wanted);
1237}
1238
/* Ask the device to enable the requested feature set.
 *
 * The device replies with the subset it actually enabled; that subset
 * becomes lif->hw_features.  If the RX_HASH capability toggled, the
 * RSS configuration is re-pushed to match.
 */
static int ionic_set_nic_features(struct ionic_lif *lif,
				  netdev_features_t features)
{
	struct device *dev = lif->ionic->dev;
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_FEATURES,
		},
	};
	u64 vlan_flags = IONIC_ETH_HW_VLAN_TX_TAG |
			 IONIC_ETH_HW_VLAN_RX_STRIP |
			 IONIC_ETH_HW_VLAN_RX_FILTER;
	u64 old_hw_features;
	int err;

	ctx.cmd.lif_setattr.features = ionic_netdev_features_to_nic(features);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* keep only the features the device agreed to */
	old_hw_features = lif->hw_features;
	lif->hw_features = le64_to_cpu(ctx.cmd.lif_setattr.features &
				       ctx.comp.lif_setattr.features);

	if ((old_hw_features ^ lif->hw_features) & IONIC_ETH_HW_RX_HASH)
		ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);

	if ((vlan_flags & features) &&
	    !(vlan_flags & le64_to_cpu(ctx.comp.lif_setattr.features)))
		dev_info_once(lif->ionic->dev, "NIC is not supporting vlan offload, likely in SmartNIC mode\n");

	/* log the features that actually took effect */
	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		dev_dbg(dev, "feature ETH_HW_VLAN_TX_TAG\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_STRIP\n");
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		dev_dbg(dev, "feature ETH_HW_VLAN_RX_FILTER\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		dev_dbg(dev, "feature ETH_HW_RX_HASH\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		dev_dbg(dev, "feature ETH_HW_TX_SG\n");
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		dev_dbg(dev, "feature ETH_HW_TX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		dev_dbg(dev, "feature ETH_HW_RX_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		dev_dbg(dev, "feature ETH_HW_TSO\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPV6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		dev_dbg(dev, "feature ETH_HW_TSO_ECN\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_GRE_CSUM\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP4\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		dev_dbg(dev, "feature ETH_HW_TSO_IPXIP6\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP\n");
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		dev_dbg(dev, "feature ETH_HW_TSO_UDP_CSUM\n");

	return 0;
}
1308
/* Negotiate the default feature set with the device at init time and
 * advertise to the netdev only what the device confirmed it supports.
 */
static int ionic_init_nic_features(struct ionic_lif *lif)
{
	struct net_device *netdev = lif->netdev;
	netdev_features_t features;
	int err;

	/* set up what we expect to support by default */
	features = NETIF_F_HW_VLAN_CTAG_TX |
		   NETIF_F_HW_VLAN_CTAG_RX |
		   NETIF_F_HW_VLAN_CTAG_FILTER |
		   NETIF_F_RXHASH |
		   NETIF_F_SG |
		   NETIF_F_HW_CSUM |
		   NETIF_F_RXCSUM |
		   NETIF_F_TSO |
		   NETIF_F_TSO6 |
		   NETIF_F_TSO_ECN;

	err = ionic_set_nic_features(lif, features);
	if (err)
		return err;

	/* tell the netdev what we actually can support */
	netdev->features |= NETIF_F_HIGHDMA;

	if (lif->hw_features & IONIC_ETH_HW_VLAN_TX_TAG)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_STRIP)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (lif->hw_features & IONIC_ETH_HW_VLAN_RX_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (lif->hw_features & IONIC_ETH_HW_TX_SG)
		netdev->hw_features |= NETIF_F_SG;

	/* checksum/TSO features also apply to encapsulated traffic */
	if (lif->hw_features & IONIC_ETH_HW_TX_CSUM)
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_RX_CSUM)
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO)
		netdev->hw_enc_features |= NETIF_F_TSO;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPV6)
		netdev->hw_enc_features |= NETIF_F_TSO6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_ECN)
		netdev->hw_enc_features |= NETIF_F_TSO_ECN;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE;
	if (lif->hw_features & IONIC_ETH_HW_TSO_GRE_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_GRE_CSUM;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP4)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP4;
	if (lif->hw_features & IONIC_ETH_HW_TSO_IPXIP6)
		netdev->hw_enc_features |= NETIF_F_GSO_IPXIP6;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	if (lif->hw_features & IONIC_ETH_HW_TSO_UDP_CSUM)
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= netdev->hw_enc_features;
	netdev->features |= netdev->hw_features;
	netdev->vlan_features |= netdev->features & ~NETIF_F_VLAN_FEATURES;

	netdev->priv_flags |= IFF_UNICAST_FLT |
			      IFF_LIVE_ADDR_CHANGE;

	return 0;
}
1377
1378static int ionic_set_features(struct net_device *netdev,
1379 netdev_features_t features)
1380{
1381 struct ionic_lif *lif = netdev_priv(netdev);
1382 int err;
1383
1384 netdev_dbg(netdev, "%s: lif->features=0x%08llx new_features=0x%08llx\n",
1385 __func__, (u64)lif->netdev->features, (u64)features);
1386
1387 err = ionic_set_nic_features(lif, features);
1388
1389 return err;
1390}
1391
/* .ndo_set_mac_address callback: swap the station MAC filter.
 *
 * The old address's rx filter is removed (best effort) before the new
 * address is committed to the netdev and its filter installed.
 */
static int ionic_set_mac_address(struct net_device *netdev, void *sa)
{
	struct sockaddr *addr = sa;
	u8 *mac;
	int err;

	mac = (u8 *)addr->sa_data;
	/* no-op if the address isn't actually changing */
	if (ether_addr_equal(netdev->dev_addr, mac))
		return 0;

	err = eth_prepare_mac_addr_change(netdev, addr);
	if (err)
		return err;

	if (!is_zero_ether_addr(netdev->dev_addr)) {
		netdev_info(netdev, "deleting mac addr %pM\n",
			    netdev->dev_addr);
		ionic_addr_del(netdev, netdev->dev_addr);
	}

	eth_commit_mac_addr_change(netdev, addr);
	netdev_info(netdev, "updating mac addr %pM\n", mac);

	return ionic_addr_add(netdev, mac);
}
1417
f053e1f8
SN
/* Quiesce and tear down the queues ahead of a reconfiguration.
 *
 * Note: queue_lock is deliberately left held on return; the matching
 * ionic_start_queues_reconfig() releases it after re-init.
 */
static void ionic_stop_queues_reconfig(struct ionic_lif *lif)
{
	/* Stop and clean the queues before reconfiguration */
	mutex_lock(&lif->queue_lock);
	netif_device_detach(lif->netdev);
	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
}
1426
/* Bring the queues back up after a reconfiguration and drop the
 * queue_lock taken by ionic_stop_queues_reconfig().
 */
static int ionic_start_queues_reconfig(struct ionic_lif *lif)
{
	int err;

	/* Re-init the queues after reconfiguration */

	/* The only way txrx_init can fail here is if communication
	 * with FW is suddenly broken. There's not much we can do
	 * at this point - error messages have already been printed,
	 * so we can continue on and the user can eventually do a
	 * DOWN and UP to try to reset and clear the issue.
	 */
	err = ionic_txrx_init(lif);
	mutex_unlock(&lif->queue_lock);
	ionic_link_status_check_request(lif, CAN_SLEEP);
	netif_device_attach(lif->netdev);

	return err;
}
1446
beead698
SN
/* .ndo_change_mtu callback: inform the device of the new MTU, then
 * restart the queues (if running) so rx buffers are resized to match.
 */
static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_MTU,
			.mtu = cpu_to_le32(new_mtu),
		},
	};
	int err;

	/* tell the device first; only commit locally on success */
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	/* if we're not running, nothing more to do */
	if (!netif_running(netdev)) {
		netdev->mtu = new_mtu;
		return 0;
	}

	ionic_stop_queues_reconfig(lif);
	netdev->mtu = new_mtu;
	return ionic_start_queues_reconfig(lif);
}
1475
8c15440b
SN
/* Worker for Tx timeout recovery: bounce the queues to clear the
 * stall, unless the interface has already been brought down.
 */
static void ionic_tx_timeout_work(struct work_struct *ws)
{
	struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);

	netdev_info(lif->netdev, "Tx Timeout recovery\n");

	/* if we were stopped before this scheduled job was launched,
	 * don't bother the queues as they are already stopped.
	 */
	if (!netif_running(lif->netdev))
		return;

	ionic_stop_queues_reconfig(lif);
	ionic_start_queues_reconfig(lif);
}
1491
/* .ndo_tx_timeout callback: runs in a context where we can't sleep,
 * so hand the actual recovery off to a workqueue.
 */
static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	schedule_work(&lif->tx_timeout_work);
}
1498
/* .ndo_vlan_rx_add_vid callback: install a VLAN rx filter on the
 * device and record it in the local filter list.
 */
static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
				 u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_add = {
			.opcode = IONIC_CMD_RX_FILTER_ADD,
			.lif_index = cpu_to_le16(lif->index),
			.match = cpu_to_le16(IONIC_RX_FILTER_MATCH_VLAN),
			.vlan.vlan = cpu_to_le16(vid),
		},
	};
	int err;

	netdev_dbg(netdev, "rx_filter add VLAN %d\n", vid);
	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		return err;

	return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx);
}
1521
/* .ndo_vlan_rx_kill_vid callback: remove a VLAN rx filter.
 *
 * The local entry is freed under rx_filters.lock after its filter_id
 * has been copied into the delete command.
 */
static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
				  u16 vid)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.rx_filter_del = {
			.opcode = IONIC_CMD_RX_FILTER_DEL,
			.lif_index = cpu_to_le16(lif->index),
		},
	};
	struct ionic_rx_filter *f;

	spin_lock_bh(&lif->rx_filters.lock);

	f = ionic_rx_filter_by_vlan(lif, vid);
	if (!f) {
		spin_unlock_bh(&lif->rx_filters.lock);
		return -ENOENT;
	}

	netdev_dbg(netdev, "rx_filter del VLAN %d (id %d)\n",
		   vid, f->filter_id);

	ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id);
	ionic_rx_filter_free(lif, f);
	spin_unlock_bh(&lif->rx_filters.lock);

	return ionic_adminq_post_wait(lif, &ctx);
}
1552
aa319881
SN
/* Push RSS configuration (hash types, hash key, indirection table) to
 * the device.  NULL key or indir means "keep the cached values".
 * The indirection table is shared with the device via DMA at
 * rss_ind_tbl_pa, so updating lif->rss_ind_tbl updates what the
 * device reads.
 */
int ionic_lif_rss_config(struct ionic_lif *lif, const u16 types,
			 const u8 *key, const u32 *indir)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.attr = IONIC_LIF_ATTR_RSS,
			.rss.addr = cpu_to_le64(lif->rss_ind_tbl_pa),
		},
	};
	unsigned int i, tbl_sz;

	/* only program hash types if the device can actually hash */
	if (lif->hw_features & IONIC_ETH_HW_RX_HASH) {
		lif->rss_types = types;
		ctx.cmd.lif_setattr.rss.types = cpu_to_le16(types);
	}

	if (key)
		memcpy(lif->rss_hash_key, key, IONIC_RSS_HASH_KEY_SIZE);

	if (indir) {
		tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
		for (i = 0; i < tbl_sz; i++)
			lif->rss_ind_tbl[i] = indir[i];
	}

	memcpy(ctx.cmd.lif_setattr.rss.key, lif->rss_hash_key,
	       IONIC_RSS_HASH_KEY_SIZE);

	return ionic_adminq_post_wait(lif, &ctx);
}
1585
/* Set up default RSS state: hash all IPv4/IPv6 TCP/UDP flows and
 * spread them across the queues with the standard default spread.
 */
static int ionic_lif_rss_init(struct ionic_lif *lif)
{
	unsigned int tbl_sz;
	unsigned int i;

	lif->rss_types = IONIC_RSS_TYPE_IPV4 |
			 IONIC_RSS_TYPE_IPV4_TCP |
			 IONIC_RSS_TYPE_IPV4_UDP |
			 IONIC_RSS_TYPE_IPV6 |
			 IONIC_RSS_TYPE_IPV6_TCP |
			 IONIC_RSS_TYPE_IPV6_UDP;

	/* Fill indirection table with 'default' values */
	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	for (i = 0; i < tbl_sz; i++)
		lif->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, lif->nxqs);

	return ionic_lif_rss_config(lif, lif->rss_types, NULL, NULL);
}
1605
/* Clear RSS state locally and disable hashing on the device
 * (types = 0 with the zeroed key/table).
 */
static void ionic_lif_rss_deinit(struct ionic_lif *lif)
{
	int tbl_sz;

	tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
	memset(lif->rss_ind_tbl, 0, tbl_sz);
	memset(lif->rss_hash_key, 0, IONIC_RSS_HASH_KEY_SIZE);

	ionic_lif_rss_config(lif, 0x0, NULL, NULL);
}
1616
e7e8e087
SN
/* Ask the device to quiesce the LIF; failure is only logged since
 * this is used on the teardown path where we continue regardless.
 */
static void ionic_lif_quiesce(struct ionic_lif *lif)
{
	struct ionic_admin_ctx ctx = {
		.work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
		.cmd.lif_setattr = {
			.opcode = IONIC_CMD_LIF_SETATTR,
			.index = cpu_to_le16(lif->index),
			.attr = IONIC_LIF_ATTR_STATE,
			.state = IONIC_LIF_QUIESCE,
		},
	};
	int err;

	err = ionic_adminq_post_wait(lif, &ctx);
	if (err)
		netdev_err(lif->netdev, "lif quiesce failed %d\n", err);
}
1634
0f3154e6
SN
/* Disable all tx and rx qcqs, then quiesce the LIF.
 *
 * The previous iteration's error is fed into each disable call: once
 * a disable has timed out (firmware unresponsive), the remaining
 * disables are told not to wait on the device.
 */
static void ionic_txrx_disable(struct ionic_lif *lif)
{
	unsigned int i;
	int err = 0;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->txqcqs[i], (err != -ETIMEDOUT));
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs; i++)
			err = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
	}

	ionic_lif_quiesce(lif);
}
1652
/* Deinit all tx/rx qcqs and drain any packets still held in them.
 * Loops stop at the first NULL slot since the arrays are filled in
 * order.  rx_mode is reset so the next init re-pushes it.
 */
static void ionic_txrx_deinit(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->nxqs && lif->txqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			ionic_tx_flush(&lif->txqcqs[i]->cq);
			ionic_tx_empty(&lif->txqcqs[i]->q);
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->nxqs && lif->rxqcqs[i]; i++) {
			ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
			ionic_rx_empty(&lif->rxqcqs[i]->q);
		}
	}
	lif->rx_mode = 0;
}
1673
/* Free all allocated tx/rx qcqs and their containing structures.
 * Iterates over the full per-lif capacity (not just nxqs) so any
 * extra qcqs left over from a reconfig are freed too; slots are
 * NULLed to make the free idempotent.
 */
static void ionic_txrx_free(struct ionic_lif *lif)
{
	unsigned int i;

	if (lif->txqcqs) {
		for (i = 0; i < lif->ionic->ntxqs_per_lif && lif->txqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->txqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->txqcqs[i]);
			lif->txqcqs[i] = NULL;
		}
	}

	if (lif->rxqcqs) {
		for (i = 0; i < lif->ionic->nrxqs_per_lif && lif->rxqcqs[i]; i++) {
			ionic_qcq_free(lif, lif->rxqcqs[i]);
			devm_kfree(lif->ionic->dev, lif->rxqcqs[i]);
			lif->rxqcqs[i] = NULL;
		}
	}
}
1694
/* Allocate the tx and rx queue/completion pairs for this LIF.
 *
 * Tx queues get their own interrupt only in split-interrupt mode;
 * otherwise each tx qcq is linked to share its partner rx qcq's
 * interrupt.  On any failure everything allocated so far is freed.
 */
static int ionic_txrx_alloc(struct ionic_lif *lif)
{
	unsigned int sg_desc_sz;
	unsigned int flags;
	unsigned int i;
	int err = 0;

	/* use the v1 tx SG descriptor if the device supports it */
	if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
	    lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
					  sizeof(struct ionic_txq_sg_desc_v1))
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
	else
		sg_desc_sz = sizeof(struct ionic_txq_sg_desc);

	flags = IONIC_QCQ_F_TX_STATS | IONIC_QCQ_F_SG;
	if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
		flags |= IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
				      lif->ntxq_descs,
				      sizeof(struct ionic_txq_desc),
				      sizeof(struct ionic_txq_comp),
				      sg_desc_sz,
				      lif->kern_pid, &lif->txqcqs[i]);
		if (err)
			goto err_out;

		if (flags & IONIC_QCQ_F_INTR) {
			ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
					     lif->txqcqs[i]->intr.index,
					     lif->tx_coalesce_hw);
			if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
				lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
		}

		ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
	}

	/* rx queues always have their own interrupt */
	flags = IONIC_QCQ_F_RX_STATS | IONIC_QCQ_F_SG | IONIC_QCQ_F_INTR;
	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
				      lif->nrxq_descs,
				      sizeof(struct ionic_rxq_desc),
				      sizeof(struct ionic_rxq_comp),
				      sizeof(struct ionic_rxq_sg_desc),
				      lif->kern_pid, &lif->rxqcqs[i]);
		if (err)
			goto err_out;

		ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
				     lif->rxqcqs[i]->intr.index,
				     lif->rx_coalesce_hw);
		if (test_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state))
			lif->rxqcqs[i]->intr.dim_coal_hw = lif->rx_coalesce_hw;

		/* shared-interrupt mode: tx rides on the rx interrupt */
		if (!test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state))
			ionic_link_qcq_interrupts(lif->rxqcqs[i],
						  lif->txqcqs[i]);

		ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
	}

	return 0;

err_out:
	ionic_txrx_free(lif);

	return err;
}
1764
/* Initialize all tx/rx qcq pairs with the device, then set up RSS
 * and push the current rx mode.  On failure, queues initialized so
 * far are unwound in reverse.
 */
static int ionic_txrx_init(struct ionic_lif *lif)
{
	unsigned int i;
	int err;

	for (i = 0; i < lif->nxqs; i++) {
		err = ionic_lif_txq_init(lif, lif->txqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_lif_rxq_init(lif, lif->rxqcqs[i]);
		if (err) {
			/* undo the partner txq init before unwinding */
			ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
			goto err_out;
		}
	}

	if (lif->netdev->features & NETIF_F_RXHASH)
		ionic_lif_rss_init(lif);

	ionic_set_rx_mode(lif->netdev, CAN_SLEEP);

	return 0;

err_out:
	while (i--) {
		ionic_lif_qcq_deinit(lif, lif->txqcqs[i]);
		ionic_lif_qcq_deinit(lif, lif->rxqcqs[i]);
	}

	return err;
}
1797
/* Enable all rx/tx queue pairs: fill each rx ring before enabling it,
 * then enable its tx partner.  On failure, everything enabled so far
 * is disabled again; a disable timeout (-ETIMEDOUT) tells subsequent
 * disables not to wait on the device.
 */
static int ionic_txrx_enable(struct ionic_lif *lif)
{
	int derr = 0;
	int i, err;

	for (i = 0; i < lif->nxqs; i++) {
		if (!(lif->rxqcqs[i] && lif->txqcqs[i])) {
			dev_err(lif->ionic->dev, "%s: bad qcq %d\n", __func__, i);
			err = -ENXIO;
			goto err_out;
		}

		ionic_rx_fill(&lif->rxqcqs[i]->q);
		err = ionic_qcq_enable(lif->rxqcqs[i]);
		if (err)
			goto err_out;

		err = ionic_qcq_enable(lif->txqcqs[i]);
		if (err) {
			derr = ionic_qcq_disable(lif->rxqcqs[i], (err != -ETIMEDOUT));
			goto err_out;
		}
	}

	return 0;

err_out:
	while (i--) {
		derr = ionic_qcq_disable(lif->txqcqs[i], (derr != -ETIMEDOUT));
		derr = ionic_qcq_disable(lif->rxqcqs[i], (derr != -ETIMEDOUT));
	}

	return err;
}
1832
49d3b493
SN
/* Enable datapath traffic.  The IONIC_LIF_F_UP bit makes this
 * idempotent: a second call while already up is a no-op.
 */
static int ionic_start_queues(struct ionic_lif *lif)
{
	int err;

	if (test_and_set_bit(IONIC_LIF_F_UP, lif->state))
		return 0;

	err = ionic_txrx_enable(lif);
	if (err) {
		clear_bit(IONIC_LIF_F_UP, lif->state);
		return err;
	}
	netif_tx_wake_all_queues(lif->netdev);

	return 0;
}
1849
/* .ndo_open callback: allocate and initialize the queues, size the
 * netdev's real queue counts, and start traffic if link is up.
 * (If there is no carrier yet, the link check path starts the queues
 * when link comes up.)
 */
static int ionic_open(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	int err;

	err = ionic_txrx_alloc(lif);
	if (err)
		return err;

	err = ionic_txrx_init(lif);
	if (err)
		goto err_txrx_free;

	err = netif_set_real_num_tx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	err = netif_set_real_num_rx_queues(netdev, lif->nxqs);
	if (err)
		goto err_txrx_deinit;

	/* don't start the queues until we have link */
	if (netif_carrier_ok(netdev)) {
		err = ionic_start_queues(lif);
		if (err)
			goto err_txrx_deinit;
	}

	return 0;

err_txrx_deinit:
	ionic_txrx_deinit(lif);
err_txrx_free:
	ionic_txrx_free(lif);
	return err;
}
1886
/* Stop datapath traffic; paired with ionic_start_queues() via the
 * IONIC_LIF_F_UP bit, so it is safe to call when already stopped.
 */
static void ionic_stop_queues(struct ionic_lif *lif)
{
	if (!test_and_clear_bit(IONIC_LIF_F_UP, lif->state))
		return;

	netif_tx_disable(lif->netdev);
	ionic_txrx_disable(lif);
}
beead698 1895
/* .ndo_stop callback: stop traffic and tear down the queues.
 * During an FW reset the queues are already being handled by the
 * reset path, so return early.
 */
static int ionic_stop(struct net_device *netdev)
{
	struct ionic_lif *lif = netdev_priv(netdev);

	if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
		return 0;

	ionic_stop_queues(lif);
	ionic_txrx_deinit(lif);
	ionic_txrx_free(lif);

	return 0;
}
1909
fbb39807
SN
/* .ndo_get_vf_config callback: report the cached configuration of a
 * VF.  Reads are done under the vf_op_lock read side; -EBUSY if the
 * device has been detached (e.g. during reset).
 */
static int ionic_get_vf_config(struct net_device *netdev,
			       int vf, struct ifla_vf_info *ivf)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		ivf->vf           = vf;
		ivf->vlan         = le16_to_cpu(ionic->vfs[vf].vlanid);
		ivf->qos	  = 0;
		ivf->spoofchk     = ionic->vfs[vf].spoofchk;
		ivf->linkstate    = ionic->vfs[vf].linkstate;
		ivf->max_tx_rate  = le32_to_cpu(ionic->vfs[vf].maxrate);
		ivf->trusted      = ionic->vfs[vf].trusted;
		ether_addr_copy(ivf->mac, ionic->vfs[vf].macaddr);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}
1938
/* .ndo_get_vf_stats callback: fill vf_stats from the device-updated
 * per-VF stats block, converting from little-endian.
 */
static int ionic_get_vf_stats(struct net_device *netdev, int vf,
			      struct ifla_vf_stats *vf_stats)
{
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic *ionic = lif->ionic;
	struct ionic_lif_stats *vs;
	int ret = 0;

	if (!netif_device_present(netdev))
		return -EBUSY;

	down_read(&ionic->vf_op_lock);

	if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
		ret = -EINVAL;
	} else {
		memset(vf_stats, 0, sizeof(*vf_stats));
		vs = &ionic->vfs[vf].stats;

		vf_stats->rx_packets = le64_to_cpu(vs->rx_ucast_packets);
		vf_stats->tx_packets = le64_to_cpu(vs->tx_ucast_packets);
		vf_stats->rx_bytes   = le64_to_cpu(vs->rx_ucast_bytes);
		vf_stats->tx_bytes   = le64_to_cpu(vs->tx_ucast_bytes);
		vf_stats->broadcast  = le64_to_cpu(vs->rx_bcast_packets);
		vf_stats->multicast  = le64_to_cpu(vs->rx_mcast_packets);
		vf_stats->rx_dropped = le64_to_cpu(vs->rx_ucast_drop_packets) +
				       le64_to_cpu(vs->rx_mcast_drop_packets) +
				       le64_to_cpu(vs->rx_bcast_drop_packets);
		vf_stats->tx_dropped = le64_to_cpu(vs->tx_ucast_drop_packets) +
				       le64_to_cpu(vs->tx_mcast_drop_packets) +
				       le64_to_cpu(vs->tx_bcast_drop_packets);
	}

	up_read(&ionic->vf_op_lock);
	return ret;
}
1975
1976static int ionic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1977{
1978 struct ionic_lif *lif = netdev_priv(netdev);
1979 struct ionic *ionic = lif->ionic;
1980 int ret;
1981
1982 if (!(is_zero_ether_addr(mac) || is_valid_ether_addr(mac)))
1983 return -EINVAL;
1984
a836c352
SN
1985 if (!netif_device_present(netdev))
1986 return -EBUSY;
1987
e396ce5f 1988 down_write(&ionic->vf_op_lock);
fbb39807
SN
1989
1990 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
1991 ret = -EINVAL;
1992 } else {
1993 ret = ionic_set_vf_config(ionic, vf, IONIC_VF_ATTR_MAC, mac);
1994 if (!ret)
1995 ether_addr_copy(ionic->vfs[vf].macaddr, mac);
1996 }
1997
e396ce5f 1998 up_write(&ionic->vf_op_lock);
fbb39807
SN
1999 return ret;
2000}
2001
2002static int ionic_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
2003 u8 qos, __be16 proto)
2004{
2005 struct ionic_lif *lif = netdev_priv(netdev);
2006 struct ionic *ionic = lif->ionic;
2007 int ret;
2008
2009 /* until someday when we support qos */
2010 if (qos)
2011 return -EINVAL;
2012
2013 if (vlan > 4095)
2014 return -EINVAL;
2015
2016 if (proto != htons(ETH_P_8021Q))
2017 return -EPROTONOSUPPORT;
2018
a836c352
SN
2019 if (!netif_device_present(netdev))
2020 return -EBUSY;
2021
e396ce5f 2022 down_write(&ionic->vf_op_lock);
fbb39807
SN
2023
2024 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2025 ret = -EINVAL;
2026 } else {
2027 ret = ionic_set_vf_config(ionic, vf,
2028 IONIC_VF_ATTR_VLAN, (u8 *)&vlan);
2029 if (!ret)
d701ec32 2030 ionic->vfs[vf].vlanid = cpu_to_le16(vlan);
fbb39807
SN
2031 }
2032
e396ce5f 2033 up_write(&ionic->vf_op_lock);
fbb39807
SN
2034 return ret;
2035}
2036
2037static int ionic_set_vf_rate(struct net_device *netdev, int vf,
2038 int tx_min, int tx_max)
2039{
2040 struct ionic_lif *lif = netdev_priv(netdev);
2041 struct ionic *ionic = lif->ionic;
2042 int ret;
2043
2044 /* setting the min just seems silly */
2045 if (tx_min)
2046 return -EINVAL;
2047
a836c352
SN
2048 if (!netif_device_present(netdev))
2049 return -EBUSY;
2050
fbb39807
SN
2051 down_write(&ionic->vf_op_lock);
2052
2053 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2054 ret = -EINVAL;
2055 } else {
2056 ret = ionic_set_vf_config(ionic, vf,
2057 IONIC_VF_ATTR_RATE, (u8 *)&tx_max);
2058 if (!ret)
d701ec32 2059 lif->ionic->vfs[vf].maxrate = cpu_to_le32(tx_max);
fbb39807
SN
2060 }
2061
2062 up_write(&ionic->vf_op_lock);
2063 return ret;
2064}
2065
2066static int ionic_set_vf_spoofchk(struct net_device *netdev, int vf, bool set)
2067{
2068 struct ionic_lif *lif = netdev_priv(netdev);
2069 struct ionic *ionic = lif->ionic;
2070 u8 data = set; /* convert to u8 for config */
2071 int ret;
2072
a836c352
SN
2073 if (!netif_device_present(netdev))
2074 return -EBUSY;
2075
fbb39807
SN
2076 down_write(&ionic->vf_op_lock);
2077
2078 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2079 ret = -EINVAL;
2080 } else {
2081 ret = ionic_set_vf_config(ionic, vf,
2082 IONIC_VF_ATTR_SPOOFCHK, &data);
2083 if (!ret)
2084 ionic->vfs[vf].spoofchk = data;
2085 }
2086
2087 up_write(&ionic->vf_op_lock);
2088 return ret;
2089}
2090
2091static int ionic_set_vf_trust(struct net_device *netdev, int vf, bool set)
2092{
2093 struct ionic_lif *lif = netdev_priv(netdev);
2094 struct ionic *ionic = lif->ionic;
2095 u8 data = set; /* convert to u8 for config */
2096 int ret;
2097
a836c352
SN
2098 if (!netif_device_present(netdev))
2099 return -EBUSY;
2100
fbb39807
SN
2101 down_write(&ionic->vf_op_lock);
2102
2103 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2104 ret = -EINVAL;
2105 } else {
2106 ret = ionic_set_vf_config(ionic, vf,
2107 IONIC_VF_ATTR_TRUST, &data);
2108 if (!ret)
2109 ionic->vfs[vf].trusted = data;
2110 }
2111
2112 up_write(&ionic->vf_op_lock);
2113 return ret;
2114}
2115
2116static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set)
2117{
2118 struct ionic_lif *lif = netdev_priv(netdev);
2119 struct ionic *ionic = lif->ionic;
2120 u8 data;
2121 int ret;
2122
2123 switch (set) {
2124 case IFLA_VF_LINK_STATE_ENABLE:
2125 data = IONIC_VF_LINK_STATUS_UP;
2126 break;
2127 case IFLA_VF_LINK_STATE_DISABLE:
2128 data = IONIC_VF_LINK_STATUS_DOWN;
2129 break;
2130 case IFLA_VF_LINK_STATE_AUTO:
2131 data = IONIC_VF_LINK_STATUS_AUTO;
2132 break;
2133 default:
2134 return -EINVAL;
2135 }
2136
a836c352
SN
2137 if (!netif_device_present(netdev))
2138 return -EBUSY;
2139
fbb39807
SN
2140 down_write(&ionic->vf_op_lock);
2141
2142 if (vf >= pci_num_vf(ionic->pdev) || !ionic->vfs) {
2143 ret = -EINVAL;
2144 } else {
2145 ret = ionic_set_vf_config(ionic, vf,
2146 IONIC_VF_ATTR_LINKSTATE, &data);
2147 if (!ret)
2148 ionic->vfs[vf].linkstate = set;
2149 }
2150
2151 up_write(&ionic->vf_op_lock);
2152 return ret;
2153}
2154
beead698
SN
2155static const struct net_device_ops ionic_netdev_ops = {
2156 .ndo_open = ionic_open,
2157 .ndo_stop = ionic_stop,
0f3154e6 2158 .ndo_start_xmit = ionic_start_xmit,
8d61aad4 2159 .ndo_get_stats64 = ionic_get_stats64,
1800eee1 2160 .ndo_set_rx_mode = ionic_ndo_set_rx_mode,
beead698
SN
2161 .ndo_set_features = ionic_set_features,
2162 .ndo_set_mac_address = ionic_set_mac_address,
2163 .ndo_validate_addr = eth_validate_addr,
2164 .ndo_tx_timeout = ionic_tx_timeout,
2165 .ndo_change_mtu = ionic_change_mtu,
2166 .ndo_vlan_rx_add_vid = ionic_vlan_rx_add_vid,
2167 .ndo_vlan_rx_kill_vid = ionic_vlan_rx_kill_vid,
fbb39807
SN
2168 .ndo_set_vf_vlan = ionic_set_vf_vlan,
2169 .ndo_set_vf_trust = ionic_set_vf_trust,
2170 .ndo_set_vf_mac = ionic_set_vf_mac,
2171 .ndo_set_vf_rate = ionic_set_vf_rate,
2172 .ndo_set_vf_spoofchk = ionic_set_vf_spoofchk,
2173 .ndo_get_vf_config = ionic_get_vf_config,
2174 .ndo_set_vf_link_state = ionic_set_vf_link_state,
2175 .ndo_get_vf_stats = ionic_get_vf_stats,
beead698
SN
2176};
2177
a34e25ab
SN
2178static void ionic_swap_queues(struct ionic_qcq *a, struct ionic_qcq *b)
2179{
2180 /* only swapping the queues, not the napi, flags, or other stuff */
2181 swap(a->q.num_descs, b->q.num_descs);
2182 swap(a->q.base, b->q.base);
2183 swap(a->q.base_pa, b->q.base_pa);
2184 swap(a->q.info, b->q.info);
2185 swap(a->q_base, b->q_base);
2186 swap(a->q_base_pa, b->q_base_pa);
2187 swap(a->q_size, b->q_size);
2188
2189 swap(a->q.sg_base, b->q.sg_base);
2190 swap(a->q.sg_base_pa, b->q.sg_base_pa);
2191 swap(a->sg_base, b->sg_base);
2192 swap(a->sg_base_pa, b->sg_base_pa);
2193 swap(a->sg_size, b->sg_size);
2194
2195 swap(a->cq.num_descs, b->cq.num_descs);
2196 swap(a->cq.base, b->cq.base);
2197 swap(a->cq.base_pa, b->cq.base_pa);
2198 swap(a->cq.info, b->cq.info);
2199 swap(a->cq_base, b->cq_base);
2200 swap(a->cq_base_pa, b->cq_base_pa);
2201 swap(a->cq_size, b->cq_size);
55eda6bb
SN
2202
2203 ionic_debugfs_del_qcq(a);
2204 ionic_debugfs_add_qcq(a->q.lif, a);
a34e25ab
SN
2205}
2206
2207int ionic_reconfigure_queues(struct ionic_lif *lif,
2208 struct ionic_queue_params *qparam)
2209{
2210 struct ionic_qcq **tx_qcqs = NULL;
2211 struct ionic_qcq **rx_qcqs = NULL;
2212 unsigned int sg_desc_sz;
2213 unsigned int flags;
2214 int err = -ENOMEM;
2215 unsigned int i;
2216
2217 /* allocate temporary qcq arrays to hold new queue structs */
101b40a0
SN
2218 if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) {
2219 tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif,
a34e25ab
SN
2220 sizeof(struct ionic_qcq *), GFP_KERNEL);
2221 if (!tx_qcqs)
2222 goto err_out;
2223 }
101b40a0
SN
2224 if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs) {
2225 rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif,
a34e25ab
SN
2226 sizeof(struct ionic_qcq *), GFP_KERNEL);
2227 if (!rx_qcqs)
2228 goto err_out;
2229 }
2230
101b40a0
SN
2231 /* allocate new desc_info and rings, but leave the interrupt setup
2232 * until later so as to not mess with the still-running queues
2233 */
a34e25ab
SN
2234 if (lif->qtype_info[IONIC_QTYPE_TXQ].version >= 1 &&
2235 lif->qtype_info[IONIC_QTYPE_TXQ].sg_desc_sz ==
2236 sizeof(struct ionic_txq_sg_desc_v1))
2237 sg_desc_sz = sizeof(struct ionic_txq_sg_desc_v1);
2238 else
2239 sg_desc_sz = sizeof(struct ionic_txq_sg_desc);
2240
2241 if (tx_qcqs) {
101b40a0 2242 for (i = 0; i < qparam->nxqs; i++) {
a34e25ab
SN
2243 flags = lif->txqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2244 err = ionic_qcq_alloc(lif, IONIC_QTYPE_TXQ, i, "tx", flags,
2245 qparam->ntxq_descs,
2246 sizeof(struct ionic_txq_desc),
2247 sizeof(struct ionic_txq_comp),
2248 sg_desc_sz,
2249 lif->kern_pid, &tx_qcqs[i]);
2250 if (err)
2251 goto err_out;
2252 }
2253 }
2254
2255 if (rx_qcqs) {
101b40a0 2256 for (i = 0; i < qparam->nxqs; i++) {
a34e25ab
SN
2257 flags = lif->rxqcqs[i]->flags & ~IONIC_QCQ_F_INTR;
2258 err = ionic_qcq_alloc(lif, IONIC_QTYPE_RXQ, i, "rx", flags,
2259 qparam->nrxq_descs,
2260 sizeof(struct ionic_rxq_desc),
2261 sizeof(struct ionic_rxq_comp),
2262 sizeof(struct ionic_rxq_sg_desc),
2263 lif->kern_pid, &rx_qcqs[i]);
2264 if (err)
2265 goto err_out;
2266 }
2267 }
2268
2269 /* stop and clean the queues */
2270 ionic_stop_queues_reconfig(lif);
2271
101b40a0
SN
2272 if (qparam->nxqs != lif->nxqs) {
2273 err = netif_set_real_num_tx_queues(lif->netdev, qparam->nxqs);
2274 if (err)
2275 goto err_out_reinit_unlock;
2276 err = netif_set_real_num_rx_queues(lif->netdev, qparam->nxqs);
2277 if (err) {
2278 netif_set_real_num_tx_queues(lif->netdev, lif->nxqs);
2279 goto err_out_reinit_unlock;
2280 }
2281 }
2282
a34e25ab
SN
2283 /* swap new desc_info and rings, keeping existing interrupt config */
2284 if (tx_qcqs) {
2285 lif->ntxq_descs = qparam->ntxq_descs;
101b40a0 2286 for (i = 0; i < qparam->nxqs; i++)
a34e25ab
SN
2287 ionic_swap_queues(lif->txqcqs[i], tx_qcqs[i]);
2288 }
2289
2290 if (rx_qcqs) {
2291 lif->nrxq_descs = qparam->nrxq_descs;
101b40a0 2292 for (i = 0; i < qparam->nxqs; i++)
a34e25ab
SN
2293 ionic_swap_queues(lif->rxqcqs[i], rx_qcqs[i]);
2294 }
2295
101b40a0
SN
2296 /* if we need to change the interrupt layout, this is the time */
2297 if (qparam->intr_split != test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state) ||
2298 qparam->nxqs != lif->nxqs) {
2299 if (qparam->intr_split) {
2300 set_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
2301 } else {
2302 clear_bit(IONIC_LIF_F_SPLIT_INTR, lif->state);
2303 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
2304 lif->tx_coalesce_hw = lif->rx_coalesce_hw;
2305 }
2306
2307 /* clear existing interrupt assignments */
2308 for (i = 0; i < lif->ionic->ntxqs_per_lif; i++) {
2309 ionic_qcq_intr_free(lif, lif->txqcqs[i]);
2310 ionic_qcq_intr_free(lif, lif->rxqcqs[i]);
2311 }
2312
2313 /* re-assign the interrupts */
2314 for (i = 0; i < qparam->nxqs; i++) {
2315 lif->rxqcqs[i]->flags |= IONIC_QCQ_F_INTR;
2316 err = ionic_alloc_qcq_interrupt(lif, lif->rxqcqs[i]);
2317 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2318 lif->rxqcqs[i]->intr.index,
2319 lif->rx_coalesce_hw);
2320
2321 if (qparam->intr_split) {
2322 lif->txqcqs[i]->flags |= IONIC_QCQ_F_INTR;
2323 err = ionic_alloc_qcq_interrupt(lif, lif->txqcqs[i]);
2324 ionic_intr_coal_init(lif->ionic->idev.intr_ctrl,
2325 lif->txqcqs[i]->intr.index,
2326 lif->tx_coalesce_hw);
04a83459
SN
2327 if (test_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state))
2328 lif->txqcqs[i]->intr.dim_coal_hw = lif->tx_coalesce_hw;
101b40a0
SN
2329 } else {
2330 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2331 ionic_link_qcq_interrupts(lif->rxqcqs[i], lif->txqcqs[i]);
2332 }
2333 }
2334 }
2335
ed6d9b02
SN
2336 /* now we can rework the debugfs mappings */
2337 if (tx_qcqs) {
2338 for (i = 0; i < qparam->nxqs; i++) {
2339 ionic_debugfs_del_qcq(lif->txqcqs[i]);
2340 ionic_debugfs_add_qcq(lif, lif->txqcqs[i]);
2341 }
2342 }
2343
2344 if (rx_qcqs) {
2345 for (i = 0; i < qparam->nxqs; i++) {
2346 ionic_debugfs_del_qcq(lif->rxqcqs[i]);
2347 ionic_debugfs_add_qcq(lif, lif->rxqcqs[i]);
2348 }
2349 }
2350
101b40a0
SN
2351 swap(lif->nxqs, qparam->nxqs);
2352
2353err_out_reinit_unlock:
25cc5a5f 2354 /* re-init the queues, but don't lose an error code */
101b40a0
SN
2355 if (err)
2356 ionic_start_queues_reconfig(lif);
2357 else
2358 err = ionic_start_queues_reconfig(lif);
a34e25ab
SN
2359
2360err_out:
2361 /* free old allocs without cleaning intr */
101b40a0 2362 for (i = 0; i < qparam->nxqs; i++) {
a34e25ab
SN
2363 if (tx_qcqs && tx_qcqs[i]) {
2364 tx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2365 ionic_qcq_free(lif, tx_qcqs[i]);
101b40a0 2366 devm_kfree(lif->ionic->dev, tx_qcqs[i]);
a34e25ab
SN
2367 tx_qcqs[i] = NULL;
2368 }
2369 if (rx_qcqs && rx_qcqs[i]) {
2370 rx_qcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2371 ionic_qcq_free(lif, rx_qcqs[i]);
101b40a0 2372 devm_kfree(lif->ionic->dev, rx_qcqs[i]);
a34e25ab
SN
2373 rx_qcqs[i] = NULL;
2374 }
2375 }
2376
2377 /* free q array */
2378 if (rx_qcqs) {
2379 devm_kfree(lif->ionic->dev, rx_qcqs);
2380 rx_qcqs = NULL;
2381 }
2382 if (tx_qcqs) {
2383 devm_kfree(lif->ionic->dev, tx_qcqs);
2384 tx_qcqs = NULL;
2385 }
2386
101b40a0
SN
2387 /* clean the unused dma and info allocations when new set is smaller
2388 * than the full array, but leave the qcq shells in place
2389 */
2390 for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
2391 lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2392 ionic_qcq_free(lif, lif->txqcqs[i]);
2393
2394 lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
2395 ionic_qcq_free(lif, lif->rxqcqs[i]);
2396 }
2397
a34e25ab
SN
2398 return err;
2399}
2400
30b87ab4 2401int ionic_lif_alloc(struct ionic *ionic)
1a58e196
SN
2402{
2403 struct device *dev = ionic->dev;
4b03b273 2404 union ionic_lif_identity *lid;
1a58e196
SN
2405 struct net_device *netdev;
2406 struct ionic_lif *lif;
aa319881 2407 int tbl_sz;
1a58e196
SN
2408 int err;
2409
4b03b273
SN
2410 lid = kzalloc(sizeof(*lid), GFP_KERNEL);
2411 if (!lid)
30b87ab4 2412 return -ENOMEM;
4b03b273 2413
1a58e196
SN
2414 netdev = alloc_etherdev_mqs(sizeof(*lif),
2415 ionic->ntxqs_per_lif, ionic->ntxqs_per_lif);
2416 if (!netdev) {
2417 dev_err(dev, "Cannot allocate netdev, aborting\n");
4b1debbe
CIK
2418 err = -ENOMEM;
2419 goto err_out_free_lid;
1a58e196
SN
2420 }
2421
2422 SET_NETDEV_DEV(netdev, dev);
2423
2424 lif = netdev_priv(netdev);
2425 lif->netdev = netdev;
30b87ab4 2426 ionic->lif = lif;
beead698 2427 netdev->netdev_ops = &ionic_netdev_ops;
4d03e00a 2428 ionic_ethtool_set_ops(netdev);
beead698
SN
2429
2430 netdev->watchdog_timeo = 2 * HZ;
aa47b540
SN
2431 netif_carrier_off(netdev);
2432
4b03b273
SN
2433 lif->identity = lid;
2434 lif->lif_type = IONIC_LIF_TYPE_CLASSIC;
bb9f80f3
SN
2435 err = ionic_lif_identify(ionic, lif->lif_type, lif->identity);
2436 if (err) {
2437 dev_err(ionic->dev, "Cannot identify type %d: %d\n",
2438 lif->lif_type, err);
2439 goto err_out_free_netdev;
2440 }
eba87609
SN
2441 lif->netdev->min_mtu = max_t(unsigned int, ETH_MIN_MTU,
2442 le32_to_cpu(lif->identity->eth.min_frame_size));
4b03b273
SN
2443 lif->netdev->max_mtu =
2444 le32_to_cpu(lif->identity->eth.max_frame_size) - ETH_HLEN - VLAN_HLEN;
1a58e196
SN
2445
2446 lif->neqs = ionic->neqs_per_lif;
2447 lif->nxqs = ionic->ntxqs_per_lif;
2448
2449 lif->ionic = ionic;
30b87ab4 2450 lif->index = 0;
0f3154e6
SN
2451 lif->ntxq_descs = IONIC_DEF_TXRX_DESC;
2452 lif->nrxq_descs = IONIC_DEF_TXRX_DESC;
1a58e196 2453
8c15440b 2454 /* Convert the default coalesce value to actual hw resolution */
780eded3 2455 lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT;
ff7ebed9 2456 lif->rx_coalesce_hw = ionic_coal_usec_to_hw(lif->ionic,
780eded3 2457 lif->rx_coalesce_usecs);
fe8c30b5
SN
2458 lif->tx_coalesce_usecs = lif->rx_coalesce_usecs;
2459 lif->tx_coalesce_hw = lif->rx_coalesce_hw;
04a83459
SN
2460 set_bit(IONIC_LIF_F_RX_DIM_INTR, lif->state);
2461 set_bit(IONIC_LIF_F_TX_DIM_INTR, lif->state);
8c15440b 2462
30b87ab4 2463 snprintf(lif->name, sizeof(lif->name), "lif%u", lif->index);
1a58e196 2464
1d062b7b
SN
2465 spin_lock_init(&lif->adminq_lock);
2466
2a654540
SN
2467 spin_lock_init(&lif->deferred.lock);
2468 INIT_LIST_HEAD(&lif->deferred.list);
2469 INIT_WORK(&lif->deferred.work, ionic_lif_deferred_work);
2470
1a58e196
SN
2471 /* allocate lif info */
2472 lif->info_sz = ALIGN(sizeof(*lif->info), PAGE_SIZE);
2473 lif->info = dma_alloc_coherent(dev, lif->info_sz,
2474 &lif->info_pa, GFP_KERNEL);
2475 if (!lif->info) {
2476 dev_err(dev, "Failed to allocate lif info, aborting\n");
2477 err = -ENOMEM;
2478 goto err_out_free_netdev;
2479 }
2480
2a8c2c1a
SN
2481 ionic_debugfs_add_lif(lif);
2482
30b87ab4
SN
2483 /* allocate control queues and txrx queue arrays */
2484 ionic_lif_queue_identify(lif);
1d062b7b
SN
2485 err = ionic_qcqs_alloc(lif);
2486 if (err)
2487 goto err_out_free_lif_info;
2488
aa319881
SN
2489 /* allocate rss indirection table */
2490 tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz);
2491 lif->rss_ind_tbl_sz = sizeof(*lif->rss_ind_tbl) * tbl_sz;
2492 lif->rss_ind_tbl = dma_alloc_coherent(dev, lif->rss_ind_tbl_sz,
2493 &lif->rss_ind_tbl_pa,
2494 GFP_KERNEL);
2495
2496 if (!lif->rss_ind_tbl) {
73a63ee9 2497 err = -ENOMEM;
aa319881
SN
2498 dev_err(dev, "Failed to allocate rss indirection table, aborting\n");
2499 goto err_out_free_qcqs;
2500 }
ffac2027 2501 netdev_rss_key_fill(lif->rss_hash_key, IONIC_RSS_HASH_KEY_SIZE);
aa319881 2502
30b87ab4 2503 return 0;
1a58e196 2504
aa319881
SN
2505err_out_free_qcqs:
2506 ionic_qcqs_free(lif);
1d062b7b
SN
2507err_out_free_lif_info:
2508 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2509 lif->info = NULL;
2510 lif->info_pa = 0;
1a58e196
SN
2511err_out_free_netdev:
2512 free_netdev(lif->netdev);
2513 lif = NULL;
4b1debbe 2514err_out_free_lid:
4b03b273 2515 kfree(lid);
1a58e196 2516
30b87ab4 2517 return err;
1a58e196
SN
2518}
2519
2520static void ionic_lif_reset(struct ionic_lif *lif)
2521{
2522 struct ionic_dev *idev = &lif->ionic->idev;
2523
2524 mutex_lock(&lif->ionic->dev_cmd_lock);
2525 ionic_dev_cmd_lif_reset(idev, lif->index);
2526 ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2527 mutex_unlock(&lif->ionic->dev_cmd_lock);
2528}
2529
c672412f
SN
2530static void ionic_lif_handle_fw_down(struct ionic_lif *lif)
2531{
2532 struct ionic *ionic = lif->ionic;
2533
2534 if (test_and_set_bit(IONIC_LIF_F_FW_RESET, lif->state))
2535 return;
2536
2537 dev_info(ionic->dev, "FW Down: Stopping LIFs\n");
2538
2539 netif_device_detach(lif->netdev);
2540
2541 if (test_bit(IONIC_LIF_F_UP, lif->state)) {
2542 dev_info(ionic->dev, "Surprise FW stop, stopping queues\n");
0925e9db 2543 mutex_lock(&lif->queue_lock);
c672412f 2544 ionic_stop_queues(lif);
0925e9db 2545 mutex_unlock(&lif->queue_lock);
c672412f
SN
2546 }
2547
2548 if (netif_running(lif->netdev)) {
2549 ionic_txrx_deinit(lif);
2550 ionic_txrx_free(lif);
2551 }
30b87ab4 2552 ionic_lif_deinit(lif);
6bc977fa 2553 ionic_reset(ionic);
c672412f
SN
2554 ionic_qcqs_free(lif);
2555
2556 dev_info(ionic->dev, "FW Down: LIFs stopped\n");
2557}
2558
2559static void ionic_lif_handle_fw_up(struct ionic_lif *lif)
2560{
2561 struct ionic *ionic = lif->ionic;
2562 int err;
2563
2564 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2565 return;
2566
2567 dev_info(ionic->dev, "FW Up: restarting LIFs\n");
2568
1d53aedc 2569 ionic_init_devinfo(ionic);
a21b5d49
SN
2570 err = ionic_identify(ionic);
2571 if (err)
2572 goto err_out;
2573 err = ionic_port_identify(ionic);
2574 if (err)
2575 goto err_out;
2576 err = ionic_port_init(ionic);
2577 if (err)
2578 goto err_out;
c672412f
SN
2579 err = ionic_qcqs_alloc(lif);
2580 if (err)
2581 goto err_out;
2582
30b87ab4 2583 err = ionic_lif_init(lif);
c672412f
SN
2584 if (err)
2585 goto err_qcqs_free;
2586
2587 if (lif->registered)
2588 ionic_lif_set_netdev_info(lif);
2589
7e4d4759
SN
2590 ionic_rx_filter_replay(lif);
2591
c672412f
SN
2592 if (netif_running(lif->netdev)) {
2593 err = ionic_txrx_alloc(lif);
2594 if (err)
2595 goto err_lifs_deinit;
2596
2597 err = ionic_txrx_init(lif);
2598 if (err)
2599 goto err_txrx_free;
2600 }
2601
2602 clear_bit(IONIC_LIF_F_FW_RESET, lif->state);
25cc5a5f 2603 ionic_link_status_check_request(lif, CAN_SLEEP);
c672412f
SN
2604 netif_device_attach(lif->netdev);
2605 dev_info(ionic->dev, "FW Up: LIFs restarted\n");
2606
2607 return;
2608
2609err_txrx_free:
2610 ionic_txrx_free(lif);
2611err_lifs_deinit:
30b87ab4 2612 ionic_lif_deinit(lif);
c672412f
SN
2613err_qcqs_free:
2614 ionic_qcqs_free(lif);
2615err_out:
2616 dev_err(ionic->dev, "FW Up: LIFs restart failed - err %d\n", err);
2617}
2618
30b87ab4 2619void ionic_lif_free(struct ionic_lif *lif)
1a58e196
SN
2620{
2621 struct device *dev = lif->ionic->dev;
2622
aa319881
SN
2623 /* free rss indirection table */
2624 dma_free_coherent(dev, lif->rss_ind_tbl_sz, lif->rss_ind_tbl,
2625 lif->rss_ind_tbl_pa);
2626 lif->rss_ind_tbl = NULL;
2627 lif->rss_ind_tbl_pa = 0;
2628
1d062b7b
SN
2629 /* free queues */
2630 ionic_qcqs_free(lif);
c672412f
SN
2631 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state))
2632 ionic_lif_reset(lif);
1a58e196
SN
2633
2634 /* free lif info */
4b03b273 2635 kfree(lif->identity);
1a58e196
SN
2636 dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa);
2637 lif->info = NULL;
2638 lif->info_pa = 0;
2639
6461b446
SN
2640 /* unmap doorbell page */
2641 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2642 lif->kern_dbpage = NULL;
2643 kfree(lif->dbid_inuse);
2644 lif->dbid_inuse = NULL;
2645
1a58e196
SN
2646 /* free netdev & lif */
2647 ionic_debugfs_del_lif(lif);
1a58e196
SN
2648 free_netdev(lif->netdev);
2649}
2650
30b87ab4 2651void ionic_lif_deinit(struct ionic_lif *lif)
1a58e196 2652{
c672412f 2653 if (!test_and_clear_bit(IONIC_LIF_F_INITED, lif->state))
1a58e196
SN
2654 return;
2655
c672412f
SN
2656 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2657 cancel_work_sync(&lif->deferred.work);
2658 cancel_work_sync(&lif->tx_timeout_work);
7e4d4759 2659 ionic_rx_filters_deinit(lif);
bdff4666
SN
2660 if (lif->netdev->features & NETIF_F_RXHASH)
2661 ionic_lif_rss_deinit(lif);
c672412f 2662 }
1a58e196 2663
1d062b7b 2664 napi_disable(&lif->adminqcq->napi);
77ceb68e 2665 ionic_lif_qcq_deinit(lif, lif->notifyqcq);
1d062b7b
SN
2666 ionic_lif_qcq_deinit(lif, lif->adminqcq);
2667
0925e9db 2668 mutex_destroy(&lif->queue_lock);
1a58e196
SN
2669 ionic_lif_reset(lif);
2670}
2671
1d062b7b
SN
2672static int ionic_lif_adminq_init(struct ionic_lif *lif)
2673{
2674 struct device *dev = lif->ionic->dev;
2675 struct ionic_q_init_comp comp;
2676 struct ionic_dev *idev;
2677 struct ionic_qcq *qcq;
2678 struct ionic_queue *q;
2679 int err;
2680
2681 idev = &lif->ionic->idev;
2682 qcq = lif->adminqcq;
2683 q = &qcq->q;
2684
2685 mutex_lock(&lif->ionic->dev_cmd_lock);
2686 ionic_dev_cmd_adminq_init(idev, qcq, lif->index, qcq->intr.index);
2687 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2688 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2689 mutex_unlock(&lif->ionic->dev_cmd_lock);
2690 if (err) {
2691 netdev_err(lif->netdev, "adminq init failed %d\n", err);
2692 return err;
2693 }
2694
2695 q->hw_type = comp.hw_type;
2696 q->hw_index = le32_to_cpu(comp.hw_index);
2697 q->dbval = IONIC_DBELL_QID(q->hw_index);
2698
2699 dev_dbg(dev, "adminq->hw_type %d\n", q->hw_type);
2700 dev_dbg(dev, "adminq->hw_index %d\n", q->hw_index);
2701
2702 netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi,
2703 NAPI_POLL_WEIGHT);
2704
1d062b7b
SN
2705 napi_enable(&qcq->napi);
2706
2707 if (qcq->flags & IONIC_QCQ_F_INTR)
2708 ionic_intr_mask(idev->intr_ctrl, qcq->intr.index,
2709 IONIC_INTR_MASK_CLEAR);
2710
2711 qcq->flags |= IONIC_QCQ_F_INITED;
2712
1d062b7b
SN
2713 return 0;
2714}
2715
77ceb68e
SN
2716static int ionic_lif_notifyq_init(struct ionic_lif *lif)
2717{
2718 struct ionic_qcq *qcq = lif->notifyqcq;
2719 struct device *dev = lif->ionic->dev;
2720 struct ionic_queue *q = &qcq->q;
2721 int err;
2722
2723 struct ionic_admin_ctx ctx = {
2724 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2725 .cmd.q_init = {
2726 .opcode = IONIC_CMD_Q_INIT,
2727 .lif_index = cpu_to_le16(lif->index),
2728 .type = q->type,
5b3f3f2a 2729 .ver = lif->qtype_info[q->type].version,
77ceb68e
SN
2730 .index = cpu_to_le32(q->index),
2731 .flags = cpu_to_le16(IONIC_QINIT_F_IRQ |
2732 IONIC_QINIT_F_ENA),
2733 .intr_index = cpu_to_le16(lif->adminqcq->intr.index),
2734 .pid = cpu_to_le16(q->pid),
2735 .ring_size = ilog2(q->num_descs),
2736 .ring_base = cpu_to_le64(q->base_pa),
2737 }
2738 };
2739
2740 dev_dbg(dev, "notifyq_init.pid %d\n", ctx.cmd.q_init.pid);
2741 dev_dbg(dev, "notifyq_init.index %d\n", ctx.cmd.q_init.index);
2742 dev_dbg(dev, "notifyq_init.ring_base 0x%llx\n", ctx.cmd.q_init.ring_base);
2743 dev_dbg(dev, "notifyq_init.ring_size %d\n", ctx.cmd.q_init.ring_size);
2744
2745 err = ionic_adminq_post_wait(lif, &ctx);
2746 if (err)
2747 return err;
2748
c672412f 2749 lif->last_eid = 0;
77ceb68e
SN
2750 q->hw_type = ctx.comp.q_init.hw_type;
2751 q->hw_index = le32_to_cpu(ctx.comp.q_init.hw_index);
2752 q->dbval = IONIC_DBELL_QID(q->hw_index);
2753
2754 dev_dbg(dev, "notifyq->hw_type %d\n", q->hw_type);
2755 dev_dbg(dev, "notifyq->hw_index %d\n", q->hw_index);
2756
2757 /* preset the callback info */
2758 q->info[0].cb_arg = lif;
2759
2760 qcq->flags |= IONIC_QCQ_F_INITED;
2761
77ceb68e
SN
2762 return 0;
2763}
2764
2a654540
SN
2765static int ionic_station_set(struct ionic_lif *lif)
2766{
2767 struct net_device *netdev = lif->netdev;
2768 struct ionic_admin_ctx ctx = {
2769 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2770 .cmd.lif_getattr = {
2771 .opcode = IONIC_CMD_LIF_GETATTR,
2772 .index = cpu_to_le16(lif->index),
2773 .attr = IONIC_LIF_ATTR_MAC,
2774 },
2775 };
2776 struct sockaddr addr;
2777 int err;
2778
2779 err = ionic_adminq_post_wait(lif, &ctx);
2780 if (err)
2781 return err;
216902ae
SN
2782 netdev_dbg(lif->netdev, "found initial MAC addr %pM\n",
2783 ctx.comp.lif_getattr.mac);
fbb39807
SN
2784 if (is_zero_ether_addr(ctx.comp.lif_getattr.mac))
2785 return 0;
2786
f20a4d40
SN
2787 if (!is_zero_ether_addr(netdev->dev_addr)) {
2788 /* If the netdev mac is non-zero and doesn't match the default
2789 * device address, it was set by something earlier and we're
2790 * likely here again after a fw-upgrade reset. We need to be
2791 * sure the netdev mac is in our filter list.
2792 */
2793 if (!ether_addr_equal(ctx.comp.lif_getattr.mac,
2794 netdev->dev_addr))
7c8d008c 2795 ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
f20a4d40
SN
2796 } else {
2797 /* Update the netdev mac with the device's mac */
216902ae
SN
2798 memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len);
2799 addr.sa_family = AF_INET;
2800 err = eth_prepare_mac_addr_change(netdev, &addr);
2801 if (err) {
2802 netdev_warn(lif->netdev, "ignoring bad MAC addr from NIC %pM - err %d\n",
2803 addr.sa_data, err);
2804 return 0;
2805 }
2a654540 2806
216902ae
SN
2807 eth_commit_mac_addr_change(netdev, &addr);
2808 }
fbb39807 2809
2a654540
SN
2810 netdev_dbg(lif->netdev, "adding station MAC addr %pM\n",
2811 netdev->dev_addr);
7c8d008c 2812 ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR, CAN_SLEEP);
2a654540
SN
2813
2814 return 0;
2815}
2816
30b87ab4 2817int ionic_lif_init(struct ionic_lif *lif)
1a58e196
SN
2818{
2819 struct ionic_dev *idev = &lif->ionic->idev;
6461b446 2820 struct device *dev = lif->ionic->dev;
1a58e196 2821 struct ionic_lif_init_comp comp;
6461b446 2822 int dbpage_num;
1a58e196
SN
2823 int err;
2824
1a58e196
SN
2825 mutex_lock(&lif->ionic->dev_cmd_lock);
2826 ionic_dev_cmd_lif_init(idev, lif->index, lif->info_pa);
2827 err = ionic_dev_cmd_wait(lif->ionic, DEVCMD_TIMEOUT);
2828 ionic_dev_cmd_comp(idev, (union ionic_dev_cmd_comp *)&comp);
2829 mutex_unlock(&lif->ionic->dev_cmd_lock);
2830 if (err)
2831 return err;
2832
2833 lif->hw_index = le16_to_cpu(comp.hw_index);
0925e9db 2834 mutex_init(&lif->queue_lock);
1a58e196 2835
6461b446
SN
2836 /* now that we have the hw_index we can figure out our doorbell page */
2837 lif->dbid_count = le32_to_cpu(lif->ionic->ident.dev.ndbpgs_per_lif);
2838 if (!lif->dbid_count) {
2839 dev_err(dev, "No doorbell pages, aborting\n");
2840 return -EINVAL;
2841 }
2842
2843 lif->dbid_inuse = bitmap_alloc(lif->dbid_count, GFP_KERNEL);
2844 if (!lif->dbid_inuse) {
2845 dev_err(dev, "Failed alloc doorbell id bitmap, aborting\n");
2846 return -ENOMEM;
2847 }
2848
2849 /* first doorbell id reserved for kernel (dbid aka pid == zero) */
2850 set_bit(0, lif->dbid_inuse);
2851 lif->kern_pid = 0;
2852
2853 dbpage_num = ionic_db_page_num(lif, lif->kern_pid);
2854 lif->kern_dbpage = ionic_bus_map_dbpage(lif->ionic, dbpage_num);
2855 if (!lif->kern_dbpage) {
2856 dev_err(dev, "Cannot map dbpage, aborting\n");
2857 err = -ENOMEM;
2858 goto err_out_free_dbid;
2859 }
2860
1d062b7b
SN
2861 err = ionic_lif_adminq_init(lif);
2862 if (err)
2863 goto err_out_adminq_deinit;
2864
77ceb68e
SN
2865 if (lif->ionic->nnqs_per_lif) {
2866 err = ionic_lif_notifyq_init(lif);
2867 if (err)
2868 goto err_out_notifyq_deinit;
2869 }
2870
beead698
SN
2871 err = ionic_init_nic_features(lif);
2872 if (err)
2873 goto err_out_notifyq_deinit;
2874
7e4d4759
SN
2875 if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) {
2876 err = ionic_rx_filters_init(lif);
2877 if (err)
2878 goto err_out_notifyq_deinit;
2879 }
c1e329eb 2880
2a654540
SN
2881 err = ionic_station_set(lif);
2882 if (err)
2883 goto err_out_notifyq_deinit;
2884
0f3154e6
SN
2885 lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT;
2886
c6d3d73a 2887 set_bit(IONIC_LIF_F_INITED, lif->state);
1a58e196 2888
8c15440b
SN
2889 INIT_WORK(&lif->tx_timeout_work, ionic_tx_timeout_work);
2890
1a58e196 2891 return 0;
6461b446 2892
77ceb68e
SN
2893err_out_notifyq_deinit:
2894 ionic_lif_qcq_deinit(lif, lif->notifyqcq);
1d062b7b
SN
2895err_out_adminq_deinit:
2896 ionic_lif_qcq_deinit(lif, lif->adminqcq);
2897 ionic_lif_reset(lif);
2898 ionic_bus_unmap_dbpage(lif->ionic, lif->kern_dbpage);
2899 lif->kern_dbpage = NULL;
6461b446
SN
2900err_out_free_dbid:
2901 kfree(lif->dbid_inuse);
2902 lif->dbid_inuse = NULL;
2903
2904 return err;
1a58e196
SN
2905}
2906
/* Placeholder work handler; the notifier's work struct must exist so
 * it can be cancelled at unregister time, but there is nothing to do.
 */
static void ionic_lif_notify_work(struct work_struct *ws)
{
}
2910
2911static void ionic_lif_set_netdev_info(struct ionic_lif *lif)
2912{
2913 struct ionic_admin_ctx ctx = {
2914 .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work),
2915 .cmd.lif_setattr = {
2916 .opcode = IONIC_CMD_LIF_SETATTR,
2917 .index = cpu_to_le16(lif->index),
2918 .attr = IONIC_LIF_ATTR_NAME,
2919 },
2920 };
2921
2922 strlcpy(ctx.cmd.lif_setattr.name, lif->netdev->name,
2923 sizeof(ctx.cmd.lif_setattr.name));
2924
2925 ionic_adminq_post_wait(lif, &ctx);
2926}
2927
2928static struct ionic_lif *ionic_netdev_lif(struct net_device *netdev)
2929{
2930 if (!netdev || netdev->netdev_ops->ndo_start_xmit != ionic_start_xmit)
2931 return NULL;
2932
2933 return netdev_priv(netdev);
2934}
2935
2936static int ionic_lif_notify(struct notifier_block *nb,
2937 unsigned long event, void *info)
2938{
2939 struct net_device *ndev = netdev_notifier_info_to_dev(info);
2940 struct ionic *ionic = container_of(nb, struct ionic, nb);
2941 struct ionic_lif *lif = ionic_netdev_lif(ndev);
2942
2943 if (!lif || lif->ionic != ionic)
2944 return NOTIFY_DONE;
2945
2946 switch (event) {
2947 case NETDEV_CHANGENAME:
2948 ionic_lif_set_netdev_info(lif);
2949 break;
2950 }
2951
2952 return NOTIFY_DONE;
2953}
2954
30b87ab4 2955int ionic_lif_register(struct ionic_lif *lif)
beead698
SN
2956{
2957 int err;
2958
30b87ab4 2959 INIT_WORK(&lif->ionic->nb_work, ionic_lif_notify_work);
1a371ea1 2960
30b87ab4 2961 lif->ionic->nb.notifier_call = ionic_lif_notify;
1a371ea1 2962
30b87ab4 2963 err = register_netdevice_notifier(&lif->ionic->nb);
1a371ea1 2964 if (err)
30b87ab4 2965 lif->ionic->nb.notifier_call = NULL;
1a371ea1 2966
beead698 2967 /* only register LIF0 for now */
30b87ab4 2968 err = register_netdev(lif->netdev);
beead698 2969 if (err) {
30b87ab4 2970 dev_err(lif->ionic->dev, "Cannot register net device, aborting\n");
beead698
SN
2971 return err;
2972 }
f6e428b2 2973
25cc5a5f 2974 ionic_link_status_check_request(lif, CAN_SLEEP);
30b87ab4
SN
2975 lif->registered = true;
2976 ionic_lif_set_netdev_info(lif);
beead698
SN
2977
2978 return 0;
2979}
2980
30b87ab4 2981void ionic_lif_unregister(struct ionic_lif *lif)
beead698 2982{
30b87ab4
SN
2983 if (lif->ionic->nb.notifier_call) {
2984 unregister_netdevice_notifier(&lif->ionic->nb);
2985 cancel_work_sync(&lif->ionic->nb_work);
2986 lif->ionic->nb.notifier_call = NULL;
1a371ea1
SN
2987 }
2988
30b87ab4
SN
2989 if (lif->netdev->reg_state == NETREG_REGISTERED)
2990 unregister_netdev(lif->netdev);
2991 lif->registered = false;
beead698
SN
2992}
2993
5b3f3f2a
SN
/* Query the device for the capabilities of each queue type we use
 * (adminq, notifyq, rxq, txq) at the support level advertised in
 * ionic_qtype_versions[], and cache the results in lif->qtype_info[].
 *
 * Results are read from the dev_cmd data registers under dev_cmd_lock,
 * so the register reads must stay inside the locked region.
 */
static void ionic_lif_queue_identify(struct ionic_lif *lif)
{
	union ionic_q_identity __iomem *q_ident;
	struct ionic *ionic = lif->ionic;
	struct ionic_dev *idev;
	int qtype;
	int err;

	idev = &lif->ionic->idev;
	/* identify results land in the dev_cmd data area (MMIO) */
	q_ident = (union ionic_q_identity __iomem *)&idev->dev_cmd_regs->data;

	for (qtype = 0; qtype < ARRAY_SIZE(ionic_qtype_versions); qtype++) {
		struct ionic_qtype_info *qti = &lif->qtype_info[qtype];

		/* filter out the ones we know about */
		switch (qtype) {
		case IONIC_QTYPE_ADMINQ:
		case IONIC_QTYPE_NOTIFYQ:
		case IONIC_QTYPE_RXQ:
		case IONIC_QTYPE_TXQ:
			break;
		default:
			continue;
		}

		/* start from a clean slate so a failed identify leaves
		 * the qtype marked unsupported rather than stale
		 */
		memset(qti, 0, sizeof(*qti));

		mutex_lock(&ionic->dev_cmd_lock);
		ionic_dev_cmd_queue_identify(idev, lif->lif_type, qtype,
					     ionic_qtype_versions[qtype]);
		err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
		if (!err) {
			/* copy out of MMIO while still holding the lock */
			qti->version = readb(&q_ident->version);
			qti->supported = readb(&q_ident->supported);
			qti->features = readq(&q_ident->features);
			qti->desc_sz = readw(&q_ident->desc_sz);
			qti->comp_sz = readw(&q_ident->comp_sz);
			qti->sg_desc_sz = readw(&q_ident->sg_desc_sz);
			qti->max_sg_elems = readw(&q_ident->max_sg_elems);
			qti->sg_desc_stride = readw(&q_ident->sg_desc_stride);
		}
		mutex_unlock(&ionic->dev_cmd_lock);

		if (err == -EINVAL) {
			/* this qtype isn't supported - try the next one */
			dev_err(ionic->dev, "qtype %d not supported\n", qtype);
			continue;
		} else if (err == -EIO) {
			/* older FW doesn't implement q_ident at all - give up */
			dev_err(ionic->dev, "q_ident failed, not supported on older FW\n");
			return;
		} else if (err) {
			dev_err(ionic->dev, "q_ident failed, qtype %d: %d\n",
				qtype, err);
			return;
		}

		dev_dbg(ionic->dev, " qtype[%d].version = %d\n",
			qtype, qti->version);
		dev_dbg(ionic->dev, " qtype[%d].supported = 0x%02x\n",
			qtype, qti->supported);
		dev_dbg(ionic->dev, " qtype[%d].features = 0x%04llx\n",
			qtype, qti->features);
		dev_dbg(ionic->dev, " qtype[%d].desc_sz = %d\n",
			qtype, qti->desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].comp_sz = %d\n",
			qtype, qti->comp_sz);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_sz = %d\n",
			qtype, qti->sg_desc_sz);
		dev_dbg(ionic->dev, " qtype[%d].max_sg_elems = %d\n",
			qtype, qti->max_sg_elems);
		dev_dbg(ionic->dev, " qtype[%d].sg_desc_stride = %d\n",
			qtype, qti->sg_desc_stride);
	}
}
3067
1a58e196
SN
3068int ionic_lif_identify(struct ionic *ionic, u8 lif_type,
3069 union ionic_lif_identity *lid)
3070{
3071 struct ionic_dev *idev = &ionic->idev;
3072 size_t sz;
3073 int err;
3074
3075 sz = min(sizeof(*lid), sizeof(idev->dev_cmd_regs->data));
3076
3077 mutex_lock(&ionic->dev_cmd_lock);
3078 ionic_dev_cmd_lif_identify(idev, lif_type, IONIC_IDENTITY_VERSION_1);
3079 err = ionic_dev_cmd_wait(ionic, DEVCMD_TIMEOUT);
3080 memcpy_fromio(lid, &idev->dev_cmd_regs->data, sz);
3081 mutex_unlock(&ionic->dev_cmd_lock);
3082 if (err)
3083 return (err);
3084
3085 dev_dbg(ionic->dev, "capabilities 0x%llx\n",
3086 le64_to_cpu(lid->capabilities));
3087
3088 dev_dbg(ionic->dev, "eth.max_ucast_filters %d\n",
3089 le32_to_cpu(lid->eth.max_ucast_filters));
3090 dev_dbg(ionic->dev, "eth.max_mcast_filters %d\n",
3091 le32_to_cpu(lid->eth.max_mcast_filters));
3092 dev_dbg(ionic->dev, "eth.features 0x%llx\n",
3093 le64_to_cpu(lid->eth.config.features));
3094 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_ADMINQ] %d\n",
3095 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_ADMINQ]));
3096 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_NOTIFYQ] %d\n",
3097 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_NOTIFYQ]));
3098 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_RXQ] %d\n",
3099 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_RXQ]));
3100 dev_dbg(ionic->dev, "eth.queue_count[IONIC_QTYPE_TXQ] %d\n",
3101 le32_to_cpu(lid->eth.config.queue_count[IONIC_QTYPE_TXQ]));
3102 dev_dbg(ionic->dev, "eth.config.name %s\n", lid->eth.config.name);
3103 dev_dbg(ionic->dev, "eth.config.mac %pM\n", lid->eth.config.mac);
3104 dev_dbg(ionic->dev, "eth.config.mtu %d\n",
3105 le32_to_cpu(lid->eth.config.mtu));
3106
3107 return 0;
3108}
3109
/* Decide how many queues and interrupt vectors the LIF will use, then
 * allocate the vectors from the bus layer.
 *
 * Starts from the device-advertised limits capped by the online CPU
 * count, and on any shortfall (device limit or OS vector allocation)
 * halves one of notifyq / event-queue / TxRx counts and retries until
 * either the allocation fits or everything is down to 1.
 *
 * Returns 0 on success, -ENOSPC when even the minimum can't be met,
 * or another negative errno from the vector allocation.
 */
int ionic_lif_size(struct ionic *ionic)
{
	struct ionic_identity *ident = &ionic->ident;
	unsigned int nintrs, dev_nintrs;
	union ionic_lif_config *lc;
	unsigned int ntxqs_per_lif;
	unsigned int nrxqs_per_lif;
	unsigned int neqs_per_lif;
	unsigned int nnqs_per_lif;
	unsigned int nxqs, neqs;
	unsigned int min_intrs;
	int err;

	/* device-advertised upper bounds */
	lc = &ident->lif.eth.config;
	dev_nintrs = le32_to_cpu(ident->dev.nintrs);
	neqs_per_lif = le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count);
	nnqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_NOTIFYQ]);
	ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]);
	nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]);

	/* no more TxRx queue pairs than CPUs, and Tx/Rx counts must match */
	nxqs = min(ntxqs_per_lif, nrxqs_per_lif);
	nxqs = min(nxqs, num_online_cpus());
	neqs = min(neqs_per_lif, num_online_cpus());

try_again:
	/* interrupt usage:
	 * 1 for master lif adminq/notifyq
	 * 1 for each CPU for master lif TxRx queue pairs
	 * whatever's left is for RDMA queues
	 */
	nintrs = 1 + nxqs + neqs;
	min_intrs = 2; /* adminq + 1 TxRx queue pair */

	if (nintrs > dev_nintrs)
		goto try_fewer;

	err = ionic_bus_alloc_irq_vectors(ionic, nintrs);
	if (err < 0 && err != -ENOSPC) {
		dev_err(ionic->dev, "Can't get intrs from OS: %d\n", err);
		return err;
	}
	/* -ENOSPC just means the OS couldn't give us this many; back off */
	if (err == -ENOSPC)
		goto try_fewer;

	/* partial grant: release what we got and retry with fewer */
	if (err != nintrs) {
		ionic_bus_free_irq_vectors(ionic);
		goto try_fewer;
	}

	ionic->nnqs_per_lif = nnqs_per_lif;
	ionic->neqs_per_lif = neqs;
	ionic->ntxqs_per_lif = nxqs;
	ionic->nrxqs_per_lif = nxqs;
	ionic->nintrs = nintrs;

	ionic_debugfs_add_sizes(ionic);

	return 0;

try_fewer:
	/* shed notifyqs first, then event queues, then TxRx pairs */
	if (nnqs_per_lif > 1) {
		nnqs_per_lif >>= 1;
		goto try_again;
	}
	if (neqs > 1) {
		neqs >>= 1;
		goto try_again;
	}
	if (nxqs > 1) {
		nxqs >>= 1;
		goto try_again;
	}
	dev_err(ionic->dev, "Can't get minimum %d intrs from OS\n", min_intrs);
	return -ENOSPC;
}