2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include <linux/mlx5/flow_table.h>
36 struct mlx5e_rq_param
{
37 u32 rqc
[MLX5_ST_SZ_DW(rqc
)];
38 struct mlx5_wq_param wq
;
41 struct mlx5e_sq_param
{
42 u32 sqc
[MLX5_ST_SZ_DW(sqc
)];
43 struct mlx5_wq_param wq
;
47 struct mlx5e_cq_param
{
48 u32 cqc
[MLX5_ST_SZ_DW(cqc
)];
49 struct mlx5_wq_param wq
;
53 struct mlx5e_channel_param
{
54 struct mlx5e_rq_param rq
;
55 struct mlx5e_sq_param sq
;
56 struct mlx5e_cq_param rx_cq
;
57 struct mlx5e_cq_param tx_cq
;
60 static void mlx5e_update_carrier(struct mlx5e_priv
*priv
)
62 struct mlx5_core_dev
*mdev
= priv
->mdev
;
65 port_state
= mlx5_query_vport_state(mdev
,
66 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT
);
68 if (port_state
== VPORT_STATE_UP
)
69 netif_carrier_on(priv
->netdev
);
71 netif_carrier_off(priv
->netdev
);
74 static void mlx5e_update_carrier_work(struct work_struct
*work
)
76 struct mlx5e_priv
*priv
= container_of(work
, struct mlx5e_priv
,
79 mutex_lock(&priv
->state_lock
);
80 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
81 mlx5e_update_carrier(priv
);
82 mutex_unlock(&priv
->state_lock
);
85 static void mlx5e_update_pport_counters(struct mlx5e_priv
*priv
)
87 struct mlx5_core_dev
*mdev
= priv
->mdev
;
88 struct mlx5e_pport_stats
*s
= &priv
->stats
.pport
;
91 int sz
= MLX5_ST_SZ_BYTES(ppcnt_reg
);
93 in
= mlx5_vzalloc(sz
);
94 out
= mlx5_vzalloc(sz
);
98 MLX5_SET(ppcnt_reg
, in
, local_port
, 1);
100 MLX5_SET(ppcnt_reg
, in
, grp
, MLX5_IEEE_802_3_COUNTERS_GROUP
);
101 mlx5_core_access_reg(mdev
, in
, sz
, out
,
102 sz
, MLX5_REG_PPCNT
, 0, 0);
103 memcpy(s
->IEEE_802_3_counters
,
104 MLX5_ADDR_OF(ppcnt_reg
, out
, counter_set
),
105 sizeof(s
->IEEE_802_3_counters
));
107 MLX5_SET(ppcnt_reg
, in
, grp
, MLX5_RFC_2863_COUNTERS_GROUP
);
108 mlx5_core_access_reg(mdev
, in
, sz
, out
,
109 sz
, MLX5_REG_PPCNT
, 0, 0);
110 memcpy(s
->RFC_2863_counters
,
111 MLX5_ADDR_OF(ppcnt_reg
, out
, counter_set
),
112 sizeof(s
->RFC_2863_counters
));
114 MLX5_SET(ppcnt_reg
, in
, grp
, MLX5_RFC_2819_COUNTERS_GROUP
);
115 mlx5_core_access_reg(mdev
, in
, sz
, out
,
116 sz
, MLX5_REG_PPCNT
, 0, 0);
117 memcpy(s
->RFC_2819_counters
,
118 MLX5_ADDR_OF(ppcnt_reg
, out
, counter_set
),
119 sizeof(s
->RFC_2819_counters
));
126 void mlx5e_update_stats(struct mlx5e_priv
*priv
)
128 struct mlx5_core_dev
*mdev
= priv
->mdev
;
129 struct mlx5e_vport_stats
*s
= &priv
->stats
.vport
;
130 struct mlx5e_rq_stats
*rq_stats
;
131 struct mlx5e_sq_stats
*sq_stats
;
132 u32 in
[MLX5_ST_SZ_DW(query_vport_counter_in
)];
134 int outlen
= MLX5_ST_SZ_BYTES(query_vport_counter_out
);
138 out
= mlx5_vzalloc(outlen
);
142 /* Collect firts the SW counters and then HW for consistency */
145 s
->tx_queue_stopped
= 0;
146 s
->tx_queue_wake
= 0;
147 s
->tx_queue_dropped
= 0;
154 for (i
= 0; i
< priv
->params
.num_channels
; i
++) {
155 rq_stats
= &priv
->channel
[i
]->rq
.stats
;
157 s
->lro_packets
+= rq_stats
->lro_packets
;
158 s
->lro_bytes
+= rq_stats
->lro_bytes
;
159 s
->rx_csum_none
+= rq_stats
->csum_none
;
160 s
->rx_csum_sw
+= rq_stats
->csum_sw
;
161 s
->rx_wqe_err
+= rq_stats
->wqe_err
;
163 for (j
= 0; j
< priv
->params
.num_tc
; j
++) {
164 sq_stats
= &priv
->channel
[i
]->sq
[j
].stats
;
166 s
->tso_packets
+= sq_stats
->tso_packets
;
167 s
->tso_bytes
+= sq_stats
->tso_bytes
;
168 s
->tx_queue_stopped
+= sq_stats
->stopped
;
169 s
->tx_queue_wake
+= sq_stats
->wake
;
170 s
->tx_queue_dropped
+= sq_stats
->dropped
;
171 tx_offload_none
+= sq_stats
->csum_offload_none
;
176 memset(in
, 0, sizeof(in
));
178 MLX5_SET(query_vport_counter_in
, in
, opcode
,
179 MLX5_CMD_OP_QUERY_VPORT_COUNTER
);
180 MLX5_SET(query_vport_counter_in
, in
, op_mod
, 0);
181 MLX5_SET(query_vport_counter_in
, in
, other_vport
, 0);
183 memset(out
, 0, outlen
);
185 if (mlx5_cmd_exec(mdev
, in
, sizeof(in
), out
, outlen
))
188 #define MLX5_GET_CTR(p, x) \
189 MLX5_GET64(query_vport_counter_out, p, x)
191 s
->rx_error_packets
=
192 MLX5_GET_CTR(out
, received_errors
.packets
);
194 MLX5_GET_CTR(out
, received_errors
.octets
);
195 s
->tx_error_packets
=
196 MLX5_GET_CTR(out
, transmit_errors
.packets
);
198 MLX5_GET_CTR(out
, transmit_errors
.octets
);
200 s
->rx_unicast_packets
=
201 MLX5_GET_CTR(out
, received_eth_unicast
.packets
);
202 s
->rx_unicast_bytes
=
203 MLX5_GET_CTR(out
, received_eth_unicast
.octets
);
204 s
->tx_unicast_packets
=
205 MLX5_GET_CTR(out
, transmitted_eth_unicast
.packets
);
206 s
->tx_unicast_bytes
=
207 MLX5_GET_CTR(out
, transmitted_eth_unicast
.octets
);
209 s
->rx_multicast_packets
=
210 MLX5_GET_CTR(out
, received_eth_multicast
.packets
);
211 s
->rx_multicast_bytes
=
212 MLX5_GET_CTR(out
, received_eth_multicast
.octets
);
213 s
->tx_multicast_packets
=
214 MLX5_GET_CTR(out
, transmitted_eth_multicast
.packets
);
215 s
->tx_multicast_bytes
=
216 MLX5_GET_CTR(out
, transmitted_eth_multicast
.octets
);
218 s
->rx_broadcast_packets
=
219 MLX5_GET_CTR(out
, received_eth_broadcast
.packets
);
220 s
->rx_broadcast_bytes
=
221 MLX5_GET_CTR(out
, received_eth_broadcast
.octets
);
222 s
->tx_broadcast_packets
=
223 MLX5_GET_CTR(out
, transmitted_eth_broadcast
.packets
);
224 s
->tx_broadcast_bytes
=
225 MLX5_GET_CTR(out
, transmitted_eth_broadcast
.octets
);
228 s
->rx_unicast_packets
+
229 s
->rx_multicast_packets
+
230 s
->rx_broadcast_packets
;
232 s
->rx_unicast_bytes
+
233 s
->rx_multicast_bytes
+
234 s
->rx_broadcast_bytes
;
236 s
->tx_unicast_packets
+
237 s
->tx_multicast_packets
+
238 s
->tx_broadcast_packets
;
240 s
->tx_unicast_bytes
+
241 s
->tx_multicast_bytes
+
242 s
->tx_broadcast_bytes
;
244 /* Update calculated offload counters */
245 s
->tx_csum_offload
= s
->tx_packets
- tx_offload_none
;
246 s
->rx_csum_good
= s
->rx_packets
- s
->rx_csum_none
-
249 mlx5e_update_pport_counters(priv
);
254 static void mlx5e_update_stats_work(struct work_struct
*work
)
256 struct delayed_work
*dwork
= to_delayed_work(work
);
257 struct mlx5e_priv
*priv
= container_of(dwork
, struct mlx5e_priv
,
259 mutex_lock(&priv
->state_lock
);
260 if (test_bit(MLX5E_STATE_OPENED
, &priv
->state
)) {
261 mlx5e_update_stats(priv
);
262 schedule_delayed_work(dwork
,
264 MLX5E_UPDATE_STATS_INTERVAL
));
266 mutex_unlock(&priv
->state_lock
);
269 static void __mlx5e_async_event(struct mlx5e_priv
*priv
,
270 enum mlx5_dev_event event
)
273 case MLX5_DEV_EVENT_PORT_UP
:
274 case MLX5_DEV_EVENT_PORT_DOWN
:
275 schedule_work(&priv
->update_carrier_work
);
283 static void mlx5e_async_event(struct mlx5_core_dev
*mdev
, void *vpriv
,
284 enum mlx5_dev_event event
, unsigned long param
)
286 struct mlx5e_priv
*priv
= vpriv
;
288 spin_lock(&priv
->async_events_spinlock
);
289 if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE
, &priv
->state
))
290 __mlx5e_async_event(priv
, event
);
291 spin_unlock(&priv
->async_events_spinlock
);
294 static void mlx5e_enable_async_events(struct mlx5e_priv
*priv
)
296 set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE
, &priv
->state
);
299 static void mlx5e_disable_async_events(struct mlx5e_priv
*priv
)
301 spin_lock_irq(&priv
->async_events_spinlock
);
302 clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE
, &priv
->state
);
303 spin_unlock_irq(&priv
->async_events_spinlock
);
/* Convert between hardware MTU (includes L2 header + VLAN tag + FCS) and the
 * software MTU the netdev reports. Arguments are parenthesized so that
 * expression arguments (e.g. ternaries or sums) expand correctly.
 */
#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
309 static int mlx5e_create_rq(struct mlx5e_channel
*c
,
310 struct mlx5e_rq_param
*param
,
313 struct mlx5e_priv
*priv
= c
->priv
;
314 struct mlx5_core_dev
*mdev
= priv
->mdev
;
315 void *rqc
= param
->rqc
;
316 void *rqc_wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
321 param
->wq
.db_numa_node
= cpu_to_node(c
->cpu
);
323 err
= mlx5_wq_ll_create(mdev
, ¶m
->wq
, rqc_wq
, &rq
->wq
,
328 rq
->wq
.db
= &rq
->wq
.db
[MLX5_RCV_DBR
];
330 wq_sz
= mlx5_wq_ll_get_size(&rq
->wq
);
331 rq
->skb
= kzalloc_node(wq_sz
* sizeof(*rq
->skb
), GFP_KERNEL
,
332 cpu_to_node(c
->cpu
));
335 goto err_rq_wq_destroy
;
338 rq
->wqe_sz
= (priv
->params
.lro_en
) ? priv
->params
.lro_wqe_sz
:
339 MLX5E_SW2HW_MTU(priv
->netdev
->mtu
);
340 rq
->wqe_sz
= SKB_DATA_ALIGN(rq
->wqe_sz
+ MLX5E_NET_IP_ALIGN
);
342 for (i
= 0; i
< wq_sz
; i
++) {
343 struct mlx5e_rx_wqe
*wqe
= mlx5_wq_ll_get_wqe(&rq
->wq
, i
);
344 u32 byte_count
= rq
->wqe_sz
- MLX5E_NET_IP_ALIGN
;
346 wqe
->data
.lkey
= c
->mkey_be
;
347 wqe
->data
.byte_count
=
348 cpu_to_be32(byte_count
| MLX5_HW_START_PADDING
);
352 rq
->netdev
= c
->netdev
;
360 mlx5_wq_destroy(&rq
->wq_ctrl
);
365 static void mlx5e_destroy_rq(struct mlx5e_rq
*rq
)
368 mlx5_wq_destroy(&rq
->wq_ctrl
);
371 static int mlx5e_enable_rq(struct mlx5e_rq
*rq
, struct mlx5e_rq_param
*param
)
373 struct mlx5e_priv
*priv
= rq
->priv
;
374 struct mlx5_core_dev
*mdev
= priv
->mdev
;
382 inlen
= MLX5_ST_SZ_BYTES(create_rq_in
) +
383 sizeof(u64
) * rq
->wq_ctrl
.buf
.npages
;
384 in
= mlx5_vzalloc(inlen
);
388 rqc
= MLX5_ADDR_OF(create_rq_in
, in
, ctx
);
389 wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
391 memcpy(rqc
, param
->rqc
, sizeof(param
->rqc
));
393 MLX5_SET(rqc
, rqc
, cqn
, rq
->cq
.mcq
.cqn
);
394 MLX5_SET(rqc
, rqc
, state
, MLX5_RQC_STATE_RST
);
395 MLX5_SET(rqc
, rqc
, flush_in_error_en
, 1);
396 MLX5_SET(wq
, wq
, log_wq_pg_sz
, rq
->wq_ctrl
.buf
.page_shift
-
397 MLX5_ADAPTER_PAGE_SHIFT
);
398 MLX5_SET64(wq
, wq
, dbr_addr
, rq
->wq_ctrl
.db
.dma
);
400 mlx5_fill_page_array(&rq
->wq_ctrl
.buf
,
401 (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
));
403 err
= mlx5_core_create_rq(mdev
, in
, inlen
, &rq
->rqn
);
410 static int mlx5e_modify_rq(struct mlx5e_rq
*rq
, int curr_state
, int next_state
)
412 struct mlx5e_channel
*c
= rq
->channel
;
413 struct mlx5e_priv
*priv
= c
->priv
;
414 struct mlx5_core_dev
*mdev
= priv
->mdev
;
421 inlen
= MLX5_ST_SZ_BYTES(modify_rq_in
);
422 in
= mlx5_vzalloc(inlen
);
426 rqc
= MLX5_ADDR_OF(modify_rq_in
, in
, ctx
);
428 MLX5_SET(modify_rq_in
, in
, rq_state
, curr_state
);
429 MLX5_SET(rqc
, rqc
, state
, next_state
);
431 err
= mlx5_core_modify_rq(mdev
, rq
->rqn
, in
, inlen
);
438 static void mlx5e_disable_rq(struct mlx5e_rq
*rq
)
440 mlx5_core_destroy_rq(rq
->priv
->mdev
, rq
->rqn
);
443 static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq
*rq
)
445 unsigned long exp_time
= jiffies
+ msecs_to_jiffies(20000);
446 struct mlx5e_channel
*c
= rq
->channel
;
447 struct mlx5e_priv
*priv
= c
->priv
;
448 struct mlx5_wq_ll
*wq
= &rq
->wq
;
450 while (time_before(jiffies
, exp_time
)) {
451 if (wq
->cur_sz
>= priv
->params
.min_rx_wqes
)
460 static int mlx5e_open_rq(struct mlx5e_channel
*c
,
461 struct mlx5e_rq_param
*param
,
466 err
= mlx5e_create_rq(c
, param
, rq
);
470 err
= mlx5e_enable_rq(rq
, param
);
474 err
= mlx5e_modify_rq(rq
, MLX5_RQC_STATE_RST
, MLX5_RQC_STATE_RDY
);
478 set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE
, &rq
->state
);
479 mlx5e_send_nop(&c
->sq
[0], true); /* trigger mlx5e_post_rx_wqes() */
484 mlx5e_disable_rq(rq
);
486 mlx5e_destroy_rq(rq
);
491 static void mlx5e_close_rq(struct mlx5e_rq
*rq
)
493 clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE
, &rq
->state
);
494 napi_synchronize(&rq
->channel
->napi
); /* prevent mlx5e_post_rx_wqes */
496 mlx5e_modify_rq(rq
, MLX5_RQC_STATE_RDY
, MLX5_RQC_STATE_ERR
);
497 while (!mlx5_wq_ll_is_empty(&rq
->wq
))
500 /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
501 napi_synchronize(&rq
->channel
->napi
);
503 mlx5e_disable_rq(rq
);
504 mlx5e_destroy_rq(rq
);
507 static void mlx5e_free_sq_db(struct mlx5e_sq
*sq
)
513 static int mlx5e_alloc_sq_db(struct mlx5e_sq
*sq
, int numa
)
515 int wq_sz
= mlx5_wq_cyc_get_size(&sq
->wq
);
516 int df_sz
= wq_sz
* MLX5_SEND_WQEBB_NUM_DS
;
518 sq
->skb
= kzalloc_node(wq_sz
* sizeof(*sq
->skb
), GFP_KERNEL
, numa
);
519 sq
->dma_fifo
= kzalloc_node(df_sz
* sizeof(*sq
->dma_fifo
), GFP_KERNEL
,
522 if (!sq
->skb
|| !sq
->dma_fifo
) {
523 mlx5e_free_sq_db(sq
);
527 sq
->dma_fifo_mask
= df_sz
- 1;
532 static int mlx5e_create_sq(struct mlx5e_channel
*c
,
534 struct mlx5e_sq_param
*param
,
537 struct mlx5e_priv
*priv
= c
->priv
;
538 struct mlx5_core_dev
*mdev
= priv
->mdev
;
540 void *sqc
= param
->sqc
;
541 void *sqc_wq
= MLX5_ADDR_OF(sqc
, sqc
, wq
);
545 err
= mlx5_alloc_map_uar(mdev
, &sq
->uar
);
549 param
->wq
.db_numa_node
= cpu_to_node(c
->cpu
);
551 err
= mlx5_wq_cyc_create(mdev
, ¶m
->wq
, sqc_wq
, &sq
->wq
,
554 goto err_unmap_free_uar
;
556 sq
->wq
.db
= &sq
->wq
.db
[MLX5_SND_DBR
];
557 sq
->uar_map
= sq
->uar
.map
;
558 sq
->uar_bf_map
= sq
->uar
.bf_map
;
559 sq
->bf_buf_size
= (1 << MLX5_CAP_GEN(mdev
, log_bf_reg_size
)) / 2;
560 sq
->max_inline
= param
->max_inline
;
562 err
= mlx5e_alloc_sq_db(sq
, cpu_to_node(c
->cpu
));
564 goto err_sq_wq_destroy
;
566 txq_ix
= c
->ix
+ tc
* priv
->params
.num_channels
;
567 sq
->txq
= netdev_get_tx_queue(priv
->netdev
, txq_ix
);
570 sq
->mkey_be
= c
->mkey_be
;
573 sq
->edge
= (sq
->wq
.sz_m1
+ 1) - MLX5_SEND_WQE_MAX_WQEBBS
;
574 sq
->bf_budget
= MLX5E_SQ_BF_BUDGET
;
575 priv
->txq_to_sq_map
[txq_ix
] = sq
;
580 mlx5_wq_destroy(&sq
->wq_ctrl
);
583 mlx5_unmap_free_uar(mdev
, &sq
->uar
);
588 static void mlx5e_destroy_sq(struct mlx5e_sq
*sq
)
590 struct mlx5e_channel
*c
= sq
->channel
;
591 struct mlx5e_priv
*priv
= c
->priv
;
593 mlx5e_free_sq_db(sq
);
594 mlx5_wq_destroy(&sq
->wq_ctrl
);
595 mlx5_unmap_free_uar(priv
->mdev
, &sq
->uar
);
598 static int mlx5e_enable_sq(struct mlx5e_sq
*sq
, struct mlx5e_sq_param
*param
)
600 struct mlx5e_channel
*c
= sq
->channel
;
601 struct mlx5e_priv
*priv
= c
->priv
;
602 struct mlx5_core_dev
*mdev
= priv
->mdev
;
610 inlen
= MLX5_ST_SZ_BYTES(create_sq_in
) +
611 sizeof(u64
) * sq
->wq_ctrl
.buf
.npages
;
612 in
= mlx5_vzalloc(inlen
);
616 sqc
= MLX5_ADDR_OF(create_sq_in
, in
, ctx
);
617 wq
= MLX5_ADDR_OF(sqc
, sqc
, wq
);
619 memcpy(sqc
, param
->sqc
, sizeof(param
->sqc
));
621 MLX5_SET(sqc
, sqc
, tis_num_0
, priv
->tisn
[sq
->tc
]);
622 MLX5_SET(sqc
, sqc
, cqn
, c
->sq
[sq
->tc
].cq
.mcq
.cqn
);
623 MLX5_SET(sqc
, sqc
, state
, MLX5_SQC_STATE_RST
);
624 MLX5_SET(sqc
, sqc
, tis_lst_sz
, 1);
625 MLX5_SET(sqc
, sqc
, flush_in_error_en
, 1);
627 MLX5_SET(wq
, wq
, wq_type
, MLX5_WQ_TYPE_CYCLIC
);
628 MLX5_SET(wq
, wq
, uar_page
, sq
->uar
.index
);
629 MLX5_SET(wq
, wq
, log_wq_pg_sz
, sq
->wq_ctrl
.buf
.page_shift
-
630 MLX5_ADAPTER_PAGE_SHIFT
);
631 MLX5_SET64(wq
, wq
, dbr_addr
, sq
->wq_ctrl
.db
.dma
);
633 mlx5_fill_page_array(&sq
->wq_ctrl
.buf
,
634 (__be64
*)MLX5_ADDR_OF(wq
, wq
, pas
));
636 err
= mlx5_core_create_sq(mdev
, in
, inlen
, &sq
->sqn
);
643 static int mlx5e_modify_sq(struct mlx5e_sq
*sq
, int curr_state
, int next_state
)
645 struct mlx5e_channel
*c
= sq
->channel
;
646 struct mlx5e_priv
*priv
= c
->priv
;
647 struct mlx5_core_dev
*mdev
= priv
->mdev
;
654 inlen
= MLX5_ST_SZ_BYTES(modify_sq_in
);
655 in
= mlx5_vzalloc(inlen
);
659 sqc
= MLX5_ADDR_OF(modify_sq_in
, in
, ctx
);
661 MLX5_SET(modify_sq_in
, in
, sq_state
, curr_state
);
662 MLX5_SET(sqc
, sqc
, state
, next_state
);
664 err
= mlx5_core_modify_sq(mdev
, sq
->sqn
, in
, inlen
);
671 static void mlx5e_disable_sq(struct mlx5e_sq
*sq
)
673 struct mlx5e_channel
*c
= sq
->channel
;
674 struct mlx5e_priv
*priv
= c
->priv
;
675 struct mlx5_core_dev
*mdev
= priv
->mdev
;
677 mlx5_core_destroy_sq(mdev
, sq
->sqn
);
680 static int mlx5e_open_sq(struct mlx5e_channel
*c
,
682 struct mlx5e_sq_param
*param
,
687 err
= mlx5e_create_sq(c
, tc
, param
, sq
);
691 err
= mlx5e_enable_sq(sq
, param
);
695 err
= mlx5e_modify_sq(sq
, MLX5_SQC_STATE_RST
, MLX5_SQC_STATE_RDY
);
699 set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE
, &sq
->state
);
700 netdev_tx_reset_queue(sq
->txq
);
701 netif_tx_start_queue(sq
->txq
);
706 mlx5e_disable_sq(sq
);
708 mlx5e_destroy_sq(sq
);
/* Stop a single TX queue under its xmit lock, so no transmit can race the
 * state change (per-queue analogue of netif_tx_disable()).
 */
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}
720 static void mlx5e_close_sq(struct mlx5e_sq
*sq
)
722 clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE
, &sq
->state
);
723 napi_synchronize(&sq
->channel
->napi
); /* prevent netif_tx_wake_queue */
724 netif_tx_disable_queue(sq
->txq
);
726 /* ensure hw is notified of all pending wqes */
727 if (mlx5e_sq_has_room_for(sq
, 1))
728 mlx5e_send_nop(sq
, true);
730 mlx5e_modify_sq(sq
, MLX5_SQC_STATE_RDY
, MLX5_SQC_STATE_ERR
);
731 while (sq
->cc
!= sq
->pc
) /* wait till sq is empty */
734 /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
735 napi_synchronize(&sq
->channel
->napi
);
737 mlx5e_disable_sq(sq
);
738 mlx5e_destroy_sq(sq
);
741 static int mlx5e_create_cq(struct mlx5e_channel
*c
,
742 struct mlx5e_cq_param
*param
,
745 struct mlx5e_priv
*priv
= c
->priv
;
746 struct mlx5_core_dev
*mdev
= priv
->mdev
;
747 struct mlx5_core_cq
*mcq
= &cq
->mcq
;
753 param
->wq
.buf_numa_node
= cpu_to_node(c
->cpu
);
754 param
->wq
.db_numa_node
= cpu_to_node(c
->cpu
);
755 param
->eq_ix
= c
->ix
;
757 err
= mlx5_cqwq_create(mdev
, ¶m
->wq
, param
->cqc
, &cq
->wq
,
762 mlx5_vector2eqn(mdev
, param
->eq_ix
, &eqn_not_used
, &irqn
);
767 mcq
->set_ci_db
= cq
->wq_ctrl
.db
.db
;
768 mcq
->arm_db
= cq
->wq_ctrl
.db
.db
+ 1;
771 mcq
->vector
= param
->eq_ix
;
772 mcq
->comp
= mlx5e_completion_event
;
773 mcq
->event
= mlx5e_cq_error_event
;
775 mcq
->uar
= &priv
->cq_uar
;
777 for (i
= 0; i
< mlx5_cqwq_get_size(&cq
->wq
); i
++) {
778 struct mlx5_cqe64
*cqe
= mlx5_cqwq_get_wqe(&cq
->wq
, i
);
789 static void mlx5e_destroy_cq(struct mlx5e_cq
*cq
)
791 mlx5_wq_destroy(&cq
->wq_ctrl
);
794 static int mlx5e_enable_cq(struct mlx5e_cq
*cq
, struct mlx5e_cq_param
*param
)
796 struct mlx5e_priv
*priv
= cq
->priv
;
797 struct mlx5_core_dev
*mdev
= priv
->mdev
;
798 struct mlx5_core_cq
*mcq
= &cq
->mcq
;
807 inlen
= MLX5_ST_SZ_BYTES(create_cq_in
) +
808 sizeof(u64
) * cq
->wq_ctrl
.buf
.npages
;
809 in
= mlx5_vzalloc(inlen
);
813 cqc
= MLX5_ADDR_OF(create_cq_in
, in
, cq_context
);
815 memcpy(cqc
, param
->cqc
, sizeof(param
->cqc
));
817 mlx5_fill_page_array(&cq
->wq_ctrl
.buf
,
818 (__be64
*)MLX5_ADDR_OF(create_cq_in
, in
, pas
));
820 mlx5_vector2eqn(mdev
, param
->eq_ix
, &eqn
, &irqn_not_used
);
822 MLX5_SET(cqc
, cqc
, c_eqn
, eqn
);
823 MLX5_SET(cqc
, cqc
, uar_page
, mcq
->uar
->index
);
824 MLX5_SET(cqc
, cqc
, log_page_size
, cq
->wq_ctrl
.buf
.page_shift
-
825 MLX5_ADAPTER_PAGE_SHIFT
);
826 MLX5_SET64(cqc
, cqc
, dbr_addr
, cq
->wq_ctrl
.db
.dma
);
828 err
= mlx5_core_create_cq(mdev
, mcq
, in
, inlen
);
840 static void mlx5e_disable_cq(struct mlx5e_cq
*cq
)
842 struct mlx5e_priv
*priv
= cq
->priv
;
843 struct mlx5_core_dev
*mdev
= priv
->mdev
;
845 mlx5_core_destroy_cq(mdev
, &cq
->mcq
);
848 static int mlx5e_open_cq(struct mlx5e_channel
*c
,
849 struct mlx5e_cq_param
*param
,
851 u16 moderation_usecs
,
852 u16 moderation_frames
)
855 struct mlx5e_priv
*priv
= c
->priv
;
856 struct mlx5_core_dev
*mdev
= priv
->mdev
;
858 err
= mlx5e_create_cq(c
, param
, cq
);
862 err
= mlx5e_enable_cq(cq
, param
);
866 err
= mlx5_core_modify_cq_moderation(mdev
, &cq
->mcq
,
875 mlx5e_destroy_cq(cq
);
/* Release a CQ: firmware object first, then software resources. */
static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}
886 static int mlx5e_get_cpu(struct mlx5e_priv
*priv
, int ix
)
888 return cpumask_first(priv
->mdev
->priv
.irq_info
[ix
].mask
);
891 static int mlx5e_open_tx_cqs(struct mlx5e_channel
*c
,
892 struct mlx5e_channel_param
*cparam
)
894 struct mlx5e_priv
*priv
= c
->priv
;
898 for (tc
= 0; tc
< c
->num_tc
; tc
++) {
899 err
= mlx5e_open_cq(c
, &cparam
->tx_cq
, &c
->sq
[tc
].cq
,
900 priv
->params
.tx_cq_moderation_usec
,
901 priv
->params
.tx_cq_moderation_pkts
);
903 goto err_close_tx_cqs
;
909 for (tc
--; tc
>= 0; tc
--)
910 mlx5e_close_cq(&c
->sq
[tc
].cq
);
915 static void mlx5e_close_tx_cqs(struct mlx5e_channel
*c
)
919 for (tc
= 0; tc
< c
->num_tc
; tc
++)
920 mlx5e_close_cq(&c
->sq
[tc
].cq
);
923 static int mlx5e_open_sqs(struct mlx5e_channel
*c
,
924 struct mlx5e_channel_param
*cparam
)
929 for (tc
= 0; tc
< c
->num_tc
; tc
++) {
930 err
= mlx5e_open_sq(c
, tc
, &cparam
->sq
, &c
->sq
[tc
]);
938 for (tc
--; tc
>= 0; tc
--)
939 mlx5e_close_sq(&c
->sq
[tc
]);
944 static void mlx5e_close_sqs(struct mlx5e_channel
*c
)
948 for (tc
= 0; tc
< c
->num_tc
; tc
++)
949 mlx5e_close_sq(&c
->sq
[tc
]);
952 static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv
*priv
, int ix
)
956 for (i
= 0; i
< MLX5E_MAX_NUM_TC
; i
++)
957 priv
->channeltc_to_txq_map
[ix
][i
] =
958 ix
+ i
* priv
->params
.num_channels
;
961 static int mlx5e_open_channel(struct mlx5e_priv
*priv
, int ix
,
962 struct mlx5e_channel_param
*cparam
,
963 struct mlx5e_channel
**cp
)
965 struct net_device
*netdev
= priv
->netdev
;
966 int cpu
= mlx5e_get_cpu(priv
, ix
);
967 struct mlx5e_channel
*c
;
970 c
= kzalloc_node(sizeof(*c
), GFP_KERNEL
, cpu_to_node(cpu
));
977 c
->pdev
= &priv
->mdev
->pdev
->dev
;
978 c
->netdev
= priv
->netdev
;
979 c
->mkey_be
= cpu_to_be32(priv
->mr
.key
);
980 c
->num_tc
= priv
->params
.num_tc
;
982 mlx5e_build_channeltc_to_txq_map(priv
, ix
);
984 netif_napi_add(netdev
, &c
->napi
, mlx5e_napi_poll
, 64);
986 err
= mlx5e_open_tx_cqs(c
, cparam
);
990 err
= mlx5e_open_cq(c
, &cparam
->rx_cq
, &c
->rq
.cq
,
991 priv
->params
.rx_cq_moderation_usec
,
992 priv
->params
.rx_cq_moderation_pkts
);
994 goto err_close_tx_cqs
;
996 napi_enable(&c
->napi
);
998 err
= mlx5e_open_sqs(c
, cparam
);
1000 goto err_disable_napi
;
1002 err
= mlx5e_open_rq(c
, &cparam
->rq
, &c
->rq
);
1006 netif_set_xps_queue(netdev
, get_cpu_mask(c
->cpu
), ix
);
1015 napi_disable(&c
->napi
);
1016 mlx5e_close_cq(&c
->rq
.cq
);
1019 mlx5e_close_tx_cqs(c
);
1022 netif_napi_del(&c
->napi
);
1028 static void mlx5e_close_channel(struct mlx5e_channel
*c
)
1030 mlx5e_close_rq(&c
->rq
);
1032 napi_disable(&c
->napi
);
1033 mlx5e_close_cq(&c
->rq
.cq
);
1034 mlx5e_close_tx_cqs(c
);
1035 netif_napi_del(&c
->napi
);
1039 static void mlx5e_build_rq_param(struct mlx5e_priv
*priv
,
1040 struct mlx5e_rq_param
*param
)
1042 void *rqc
= param
->rqc
;
1043 void *wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
1045 MLX5_SET(wq
, wq
, wq_type
, MLX5_WQ_TYPE_LINKED_LIST
);
1046 MLX5_SET(wq
, wq
, end_padding_mode
, MLX5_WQ_END_PAD_MODE_ALIGN
);
1047 MLX5_SET(wq
, wq
, log_wq_stride
, ilog2(sizeof(struct mlx5e_rx_wqe
)));
1048 MLX5_SET(wq
, wq
, log_wq_sz
, priv
->params
.log_rq_size
);
1049 MLX5_SET(wq
, wq
, pd
, priv
->pdn
);
1051 param
->wq
.buf_numa_node
= dev_to_node(&priv
->mdev
->pdev
->dev
);
1052 param
->wq
.linear
= 1;
1055 static void mlx5e_build_sq_param(struct mlx5e_priv
*priv
,
1056 struct mlx5e_sq_param
*param
)
1058 void *sqc
= param
->sqc
;
1059 void *wq
= MLX5_ADDR_OF(sqc
, sqc
, wq
);
1061 MLX5_SET(wq
, wq
, log_wq_sz
, priv
->params
.log_sq_size
);
1062 MLX5_SET(wq
, wq
, log_wq_stride
, ilog2(MLX5_SEND_WQE_BB
));
1063 MLX5_SET(wq
, wq
, pd
, priv
->pdn
);
1065 param
->wq
.buf_numa_node
= dev_to_node(&priv
->mdev
->pdev
->dev
);
1066 param
->max_inline
= priv
->params
.tx_max_inline
;
1069 static void mlx5e_build_common_cq_param(struct mlx5e_priv
*priv
,
1070 struct mlx5e_cq_param
*param
)
1072 void *cqc
= param
->cqc
;
1074 MLX5_SET(cqc
, cqc
, uar_page
, priv
->cq_uar
.index
);
1077 static void mlx5e_build_rx_cq_param(struct mlx5e_priv
*priv
,
1078 struct mlx5e_cq_param
*param
)
1080 void *cqc
= param
->cqc
;
1082 MLX5_SET(cqc
, cqc
, log_cq_size
, priv
->params
.log_rq_size
);
1084 mlx5e_build_common_cq_param(priv
, param
);
1087 static void mlx5e_build_tx_cq_param(struct mlx5e_priv
*priv
,
1088 struct mlx5e_cq_param
*param
)
1090 void *cqc
= param
->cqc
;
1092 MLX5_SET(cqc
, cqc
, log_cq_size
, priv
->params
.log_sq_size
);
1094 mlx5e_build_common_cq_param(priv
, param
);
1097 static void mlx5e_build_channel_param(struct mlx5e_priv
*priv
,
1098 struct mlx5e_channel_param
*cparam
)
1100 memset(cparam
, 0, sizeof(*cparam
));
1102 mlx5e_build_rq_param(priv
, &cparam
->rq
);
1103 mlx5e_build_sq_param(priv
, &cparam
->sq
);
1104 mlx5e_build_rx_cq_param(priv
, &cparam
->rx_cq
);
1105 mlx5e_build_tx_cq_param(priv
, &cparam
->tx_cq
);
1108 static int mlx5e_open_channels(struct mlx5e_priv
*priv
)
1110 struct mlx5e_channel_param cparam
;
1111 int nch
= priv
->params
.num_channels
;
1116 priv
->channel
= kcalloc(nch
, sizeof(struct mlx5e_channel
*),
1119 priv
->txq_to_sq_map
= kcalloc(nch
* priv
->params
.num_tc
,
1120 sizeof(struct mlx5e_sq
*), GFP_KERNEL
);
1122 if (!priv
->channel
|| !priv
->txq_to_sq_map
)
1123 goto err_free_txq_to_sq_map
;
1125 mlx5e_build_channel_param(priv
, &cparam
);
1126 for (i
= 0; i
< nch
; i
++) {
1127 err
= mlx5e_open_channel(priv
, i
, &cparam
, &priv
->channel
[i
]);
1129 goto err_close_channels
;
1132 for (j
= 0; j
< nch
; j
++) {
1133 err
= mlx5e_wait_for_min_rx_wqes(&priv
->channel
[j
]->rq
);
1135 goto err_close_channels
;
1141 for (i
--; i
>= 0; i
--)
1142 mlx5e_close_channel(priv
->channel
[i
]);
1144 err_free_txq_to_sq_map
:
1145 kfree(priv
->txq_to_sq_map
);
1146 kfree(priv
->channel
);
1151 static void mlx5e_close_channels(struct mlx5e_priv
*priv
)
1155 for (i
= 0; i
< priv
->params
.num_channels
; i
++)
1156 mlx5e_close_channel(priv
->channel
[i
]);
1158 kfree(priv
->txq_to_sq_map
);
1159 kfree(priv
->channel
);
1162 static int mlx5e_rx_hash_fn(int hfunc
)
1164 return (hfunc
== ETH_RSS_HASH_TOP
) ?
1165 MLX5_RX_HASH_FN_TOEPLITZ
:
1166 MLX5_RX_HASH_FN_INVERTED_XOR8
;
/* Reverse the low @size bits of @a (bit 0 <-> bit size-1). */
static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
1180 static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv
*priv
, void *rqtc
)
1184 for (i
= 0; i
< MLX5E_INDIR_RQT_SIZE
; i
++) {
1187 if (priv
->params
.rss_hfunc
== ETH_RSS_HASH_XOR
)
1188 ix
= mlx5e_bits_invert(i
, MLX5E_LOG_INDIR_RQT_SIZE
);
1190 ix
= priv
->params
.indirection_rqt
[ix
];
1191 ix
= ix
% priv
->params
.num_channels
;
1192 MLX5_SET(rqtc
, rqtc
, rq_num
[i
],
1193 test_bit(MLX5E_STATE_OPENED
, &priv
->state
) ?
1194 priv
->channel
[ix
]->rq
.rqn
:
1199 static void mlx5e_fill_rqt_rqns(struct mlx5e_priv
*priv
, void *rqtc
,
1200 enum mlx5e_rqt_ix rqt_ix
)
1204 case MLX5E_INDIRECTION_RQT
:
1205 mlx5e_fill_indir_rqt_rqns(priv
, rqtc
);
1209 default: /* MLX5E_SINGLE_RQ_RQT */
1210 MLX5_SET(rqtc
, rqtc
, rq_num
[0],
1211 test_bit(MLX5E_STATE_OPENED
, &priv
->state
) ?
1212 priv
->channel
[0]->rq
.rqn
:
1219 static int mlx5e_create_rqt(struct mlx5e_priv
*priv
, enum mlx5e_rqt_ix rqt_ix
)
1221 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1228 sz
= (rqt_ix
== MLX5E_SINGLE_RQ_RQT
) ? 1 : MLX5E_INDIR_RQT_SIZE
;
1230 inlen
= MLX5_ST_SZ_BYTES(create_rqt_in
) + sizeof(u32
) * sz
;
1231 in
= mlx5_vzalloc(inlen
);
1235 rqtc
= MLX5_ADDR_OF(create_rqt_in
, in
, rqt_context
);
1237 MLX5_SET(rqtc
, rqtc
, rqt_actual_size
, sz
);
1238 MLX5_SET(rqtc
, rqtc
, rqt_max_size
, sz
);
1240 mlx5e_fill_rqt_rqns(priv
, rqtc
, rqt_ix
);
1242 err
= mlx5_core_create_rqt(mdev
, in
, inlen
, &priv
->rqtn
[rqt_ix
]);
1249 int mlx5e_redirect_rqt(struct mlx5e_priv
*priv
, enum mlx5e_rqt_ix rqt_ix
)
1251 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1258 sz
= (rqt_ix
== MLX5E_SINGLE_RQ_RQT
) ? 1 : MLX5E_INDIR_RQT_SIZE
;
1260 inlen
= MLX5_ST_SZ_BYTES(modify_rqt_in
) + sizeof(u32
) * sz
;
1261 in
= mlx5_vzalloc(inlen
);
1265 rqtc
= MLX5_ADDR_OF(modify_rqt_in
, in
, ctx
);
1267 MLX5_SET(rqtc
, rqtc
, rqt_actual_size
, sz
);
1269 mlx5e_fill_rqt_rqns(priv
, rqtc
, rqt_ix
);
1271 MLX5_SET(modify_rqt_in
, in
, bitmask
.rqn_list
, 1);
1273 err
= mlx5_core_modify_rqt(mdev
, priv
->rqtn
[rqt_ix
], in
, inlen
);
1280 static void mlx5e_destroy_rqt(struct mlx5e_priv
*priv
, enum mlx5e_rqt_ix rqt_ix
)
1282 mlx5_core_destroy_rqt(priv
->mdev
, priv
->rqtn
[rqt_ix
]);
1285 static void mlx5e_redirect_rqts(struct mlx5e_priv
*priv
)
1287 mlx5e_redirect_rqt(priv
, MLX5E_INDIRECTION_RQT
);
1288 mlx5e_redirect_rqt(priv
, MLX5E_SINGLE_RQ_RQT
);
1291 static void mlx5e_build_tir_ctx_lro(void *tirc
, struct mlx5e_priv
*priv
)
1293 if (!priv
->params
.lro_en
)
1296 #define ROUGH_MAX_L2_L3_HDR_SZ 256
1298 MLX5_SET(tirc
, tirc
, lro_enable_mask
,
1299 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO
|
1300 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO
);
1301 MLX5_SET(tirc
, tirc
, lro_max_ip_payload_size
,
1302 (priv
->params
.lro_wqe_sz
-
1303 ROUGH_MAX_L2_L3_HDR_SZ
) >> 8);
1304 MLX5_SET(tirc
, tirc
, lro_timeout_period_usecs
,
1305 MLX5_CAP_ETH(priv
->mdev
,
1306 lro_timer_supported_periods
[2]));
1309 static int mlx5e_modify_tir_lro(struct mlx5e_priv
*priv
, int tt
)
1311 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1318 inlen
= MLX5_ST_SZ_BYTES(modify_tir_in
);
1319 in
= mlx5_vzalloc(inlen
);
1323 MLX5_SET(modify_tir_in
, in
, bitmask
.lro
, 1);
1324 tirc
= MLX5_ADDR_OF(modify_tir_in
, in
, ctx
);
1326 mlx5e_build_tir_ctx_lro(tirc
, priv
);
1328 err
= mlx5_core_modify_tir(mdev
, priv
->tirn
[tt
], in
, inlen
);
1335 static int mlx5e_set_dev_port_mtu(struct net_device
*netdev
)
1337 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1338 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1342 err
= mlx5_set_port_mtu(mdev
, MLX5E_SW2HW_MTU(netdev
->mtu
), 1);
1346 mlx5_query_port_oper_mtu(mdev
, &hw_mtu
, 1);
1348 if (MLX5E_HW2SW_MTU(hw_mtu
) != netdev
->mtu
)
1349 netdev_warn(netdev
, "%s: Port MTU %d is different than netdev mtu %d\n",
1350 __func__
, MLX5E_HW2SW_MTU(hw_mtu
), netdev
->mtu
);
1352 netdev
->mtu
= MLX5E_HW2SW_MTU(hw_mtu
);
1356 int mlx5e_open_locked(struct net_device
*netdev
)
1358 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1362 set_bit(MLX5E_STATE_OPENED
, &priv
->state
);
1364 num_txqs
= priv
->params
.num_channels
* priv
->params
.num_tc
;
1365 netif_set_real_num_tx_queues(netdev
, num_txqs
);
1366 netif_set_real_num_rx_queues(netdev
, priv
->params
.num_channels
);
1368 err
= mlx5e_set_dev_port_mtu(netdev
);
1370 goto err_clear_state_opened_flag
;
1372 err
= mlx5e_open_channels(priv
);
1374 netdev_err(netdev
, "%s: mlx5e_open_channels failed, %d\n",
1376 goto err_clear_state_opened_flag
;
1379 mlx5e_update_carrier(priv
);
1380 mlx5e_redirect_rqts(priv
);
1382 schedule_delayed_work(&priv
->update_stats_work
, 0);
1386 err_clear_state_opened_flag
:
1387 clear_bit(MLX5E_STATE_OPENED
, &priv
->state
);
1391 static int mlx5e_open(struct net_device
*netdev
)
1393 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1396 mutex_lock(&priv
->state_lock
);
1397 err
= mlx5e_open_locked(netdev
);
1398 mutex_unlock(&priv
->state_lock
);
1403 int mlx5e_close_locked(struct net_device
*netdev
)
1405 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1407 /* May already be CLOSED in case a previous configuration operation
1408 * (e.g RX/TX queue size change) that involves close&open failed.
1410 if (!test_bit(MLX5E_STATE_OPENED
, &priv
->state
))
1413 clear_bit(MLX5E_STATE_OPENED
, &priv
->state
);
1415 mlx5e_redirect_rqts(priv
);
1416 netif_carrier_off(priv
->netdev
);
1417 mlx5e_close_channels(priv
);
1422 static int mlx5e_close(struct net_device
*netdev
)
1424 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1427 mutex_lock(&priv
->state_lock
);
1428 err
= mlx5e_close_locked(netdev
);
1429 mutex_unlock(&priv
->state_lock
);
1434 static int mlx5e_create_drop_rq(struct mlx5e_priv
*priv
,
1435 struct mlx5e_rq
*rq
,
1436 struct mlx5e_rq_param
*param
)
1438 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1439 void *rqc
= param
->rqc
;
1440 void *rqc_wq
= MLX5_ADDR_OF(rqc
, rqc
, wq
);
1443 param
->wq
.db_numa_node
= param
->wq
.buf_numa_node
;
1445 err
= mlx5_wq_ll_create(mdev
, ¶m
->wq
, rqc_wq
, &rq
->wq
,
1455 static int mlx5e_create_drop_cq(struct mlx5e_priv
*priv
,
1456 struct mlx5e_cq
*cq
,
1457 struct mlx5e_cq_param
*param
)
1459 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1460 struct mlx5_core_cq
*mcq
= &cq
->mcq
;
1465 err
= mlx5_cqwq_create(mdev
, ¶m
->wq
, param
->cqc
, &cq
->wq
,
1470 mlx5_vector2eqn(mdev
, param
->eq_ix
, &eqn_not_used
, &irqn
);
1473 mcq
->set_ci_db
= cq
->wq_ctrl
.db
.db
;
1474 mcq
->arm_db
= cq
->wq_ctrl
.db
.db
+ 1;
1475 *mcq
->set_ci_db
= 0;
1477 mcq
->vector
= param
->eq_ix
;
1478 mcq
->comp
= mlx5e_completion_event
;
1479 mcq
->event
= mlx5e_cq_error_event
;
1481 mcq
->uar
= &priv
->cq_uar
;
1488 static int mlx5e_open_drop_rq(struct mlx5e_priv
*priv
)
1490 struct mlx5e_cq_param cq_param
;
1491 struct mlx5e_rq_param rq_param
;
1492 struct mlx5e_rq
*rq
= &priv
->drop_rq
;
1493 struct mlx5e_cq
*cq
= &priv
->drop_rq
.cq
;
1496 memset(&cq_param
, 0, sizeof(cq_param
));
1497 memset(&rq_param
, 0, sizeof(rq_param
));
1498 mlx5e_build_rx_cq_param(priv
, &cq_param
);
1499 mlx5e_build_rq_param(priv
, &rq_param
);
1501 err
= mlx5e_create_drop_cq(priv
, cq
, &cq_param
);
1505 err
= mlx5e_enable_cq(cq
, &cq_param
);
1507 goto err_destroy_cq
;
1509 err
= mlx5e_create_drop_rq(priv
, rq
, &rq_param
);
1511 goto err_disable_cq
;
1513 err
= mlx5e_enable_rq(rq
, &rq_param
);
1515 goto err_destroy_rq
;
1520 mlx5e_destroy_rq(&priv
->drop_rq
);
1523 mlx5e_disable_cq(&priv
->drop_rq
.cq
);
1526 mlx5e_destroy_cq(&priv
->drop_rq
.cq
);
1531 static void mlx5e_close_drop_rq(struct mlx5e_priv
*priv
)
1533 mlx5e_disable_rq(&priv
->drop_rq
);
1534 mlx5e_destroy_rq(&priv
->drop_rq
);
1535 mlx5e_disable_cq(&priv
->drop_rq
.cq
);
1536 mlx5e_destroy_cq(&priv
->drop_rq
.cq
);
1539 static int mlx5e_create_tis(struct mlx5e_priv
*priv
, int tc
)
1541 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1542 u32 in
[MLX5_ST_SZ_DW(create_tis_in
)];
1543 void *tisc
= MLX5_ADDR_OF(create_tis_in
, in
, ctx
);
1545 memset(in
, 0, sizeof(in
));
1547 MLX5_SET(tisc
, tisc
, prio
, tc
);
1548 MLX5_SET(tisc
, tisc
, transport_domain
, priv
->tdn
);
1550 return mlx5_core_create_tis(mdev
, in
, sizeof(in
), &priv
->tisn
[tc
]);
1553 static void mlx5e_destroy_tis(struct mlx5e_priv
*priv
, int tc
)
1555 mlx5_core_destroy_tis(priv
->mdev
, priv
->tisn
[tc
]);
1558 static int mlx5e_create_tises(struct mlx5e_priv
*priv
)
1563 for (tc
= 0; tc
< priv
->params
.num_tc
; tc
++) {
1564 err
= mlx5e_create_tis(priv
, tc
);
1566 goto err_close_tises
;
1572 for (tc
--; tc
>= 0; tc
--)
1573 mlx5e_destroy_tis(priv
, tc
);
1578 static void mlx5e_destroy_tises(struct mlx5e_priv
*priv
)
1582 for (tc
= 0; tc
< priv
->params
.num_tc
; tc
++)
1583 mlx5e_destroy_tis(priv
, tc
);
1586 static void mlx5e_build_tir_ctx(struct mlx5e_priv
*priv
, u32
*tirc
, int tt
)
1588 void *hfso
= MLX5_ADDR_OF(tirc
, tirc
, rx_hash_field_selector_outer
);
1590 MLX5_SET(tirc
, tirc
, transport_domain
, priv
->tdn
);
1592 #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
1593 MLX5_HASH_FIELD_SEL_DST_IP)
1595 #define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
1596 MLX5_HASH_FIELD_SEL_DST_IP |\
1597 MLX5_HASH_FIELD_SEL_L4_SPORT |\
1598 MLX5_HASH_FIELD_SEL_L4_DPORT)
1600 #define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
1601 MLX5_HASH_FIELD_SEL_DST_IP |\
1602 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
1604 mlx5e_build_tir_ctx_lro(tirc
, priv
);
1606 MLX5_SET(tirc
, tirc
, disp_type
, MLX5_TIRC_DISP_TYPE_INDIRECT
);
1610 MLX5_SET(tirc
, tirc
, indirect_table
,
1611 priv
->rqtn
[MLX5E_SINGLE_RQ_RQT
]);
1612 MLX5_SET(tirc
, tirc
, rx_hash_fn
, MLX5_RX_HASH_FN_INVERTED_XOR8
);
1615 MLX5_SET(tirc
, tirc
, indirect_table
,
1616 priv
->rqtn
[MLX5E_INDIRECTION_RQT
]);
1617 MLX5_SET(tirc
, tirc
, rx_hash_fn
,
1618 mlx5e_rx_hash_fn(priv
->params
.rss_hfunc
));
1619 if (priv
->params
.rss_hfunc
== ETH_RSS_HASH_TOP
) {
1620 void *rss_key
= MLX5_ADDR_OF(tirc
, tirc
,
1621 rx_hash_toeplitz_key
);
1622 size_t len
= MLX5_FLD_SZ_BYTES(tirc
,
1623 rx_hash_toeplitz_key
);
1625 MLX5_SET(tirc
, tirc
, rx_hash_symmetric
, 1);
1626 memcpy(rss_key
, priv
->params
.toeplitz_hash_key
, len
);
1632 case MLX5E_TT_IPV4_TCP
:
1633 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1634 MLX5_L3_PROT_TYPE_IPV4
);
1635 MLX5_SET(rx_hash_field_select
, hfso
, l4_prot_type
,
1636 MLX5_L4_PROT_TYPE_TCP
);
1637 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
,
1638 MLX5_HASH_IP_L4PORTS
);
1641 case MLX5E_TT_IPV6_TCP
:
1642 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1643 MLX5_L3_PROT_TYPE_IPV6
);
1644 MLX5_SET(rx_hash_field_select
, hfso
, l4_prot_type
,
1645 MLX5_L4_PROT_TYPE_TCP
);
1646 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
,
1647 MLX5_HASH_IP_L4PORTS
);
1650 case MLX5E_TT_IPV4_UDP
:
1651 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1652 MLX5_L3_PROT_TYPE_IPV4
);
1653 MLX5_SET(rx_hash_field_select
, hfso
, l4_prot_type
,
1654 MLX5_L4_PROT_TYPE_UDP
);
1655 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
,
1656 MLX5_HASH_IP_L4PORTS
);
1659 case MLX5E_TT_IPV6_UDP
:
1660 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1661 MLX5_L3_PROT_TYPE_IPV6
);
1662 MLX5_SET(rx_hash_field_select
, hfso
, l4_prot_type
,
1663 MLX5_L4_PROT_TYPE_UDP
);
1664 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
,
1665 MLX5_HASH_IP_L4PORTS
);
1668 case MLX5E_TT_IPV4_IPSEC_AH
:
1669 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1670 MLX5_L3_PROT_TYPE_IPV4
);
1671 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
,
1672 MLX5_HASH_IP_IPSEC_SPI
);
1675 case MLX5E_TT_IPV6_IPSEC_AH
:
1676 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1677 MLX5_L3_PROT_TYPE_IPV6
);
1678 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
,
1679 MLX5_HASH_IP_IPSEC_SPI
);
1682 case MLX5E_TT_IPV4_IPSEC_ESP
:
1683 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1684 MLX5_L3_PROT_TYPE_IPV4
);
1685 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
,
1686 MLX5_HASH_IP_IPSEC_SPI
);
1689 case MLX5E_TT_IPV6_IPSEC_ESP
:
1690 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1691 MLX5_L3_PROT_TYPE_IPV6
);
1692 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
,
1693 MLX5_HASH_IP_IPSEC_SPI
);
1697 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1698 MLX5_L3_PROT_TYPE_IPV4
);
1699 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
,
1704 MLX5_SET(rx_hash_field_select
, hfso
, l3_prot_type
,
1705 MLX5_L3_PROT_TYPE_IPV6
);
1706 MLX5_SET(rx_hash_field_select
, hfso
, selected_fields
,
1712 static int mlx5e_create_tir(struct mlx5e_priv
*priv
, int tt
)
1714 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1720 inlen
= MLX5_ST_SZ_BYTES(create_tir_in
);
1721 in
= mlx5_vzalloc(inlen
);
1725 tirc
= MLX5_ADDR_OF(create_tir_in
, in
, ctx
);
1727 mlx5e_build_tir_ctx(priv
, tirc
, tt
);
1729 err
= mlx5_core_create_tir(mdev
, in
, inlen
, &priv
->tirn
[tt
]);
1736 static void mlx5e_destroy_tir(struct mlx5e_priv
*priv
, int tt
)
1738 mlx5_core_destroy_tir(priv
->mdev
, priv
->tirn
[tt
]);
1741 static int mlx5e_create_tirs(struct mlx5e_priv
*priv
)
1746 for (i
= 0; i
< MLX5E_NUM_TT
; i
++) {
1747 err
= mlx5e_create_tir(priv
, i
);
1749 goto err_destroy_tirs
;
1755 for (i
--; i
>= 0; i
--)
1756 mlx5e_destroy_tir(priv
, i
);
1761 static void mlx5e_destroy_tirs(struct mlx5e_priv
*priv
)
1765 for (i
= 0; i
< MLX5E_NUM_TT
; i
++)
1766 mlx5e_destroy_tir(priv
, i
);
1769 static struct rtnl_link_stats64
*
1770 mlx5e_get_stats(struct net_device
*dev
, struct rtnl_link_stats64
*stats
)
1772 struct mlx5e_priv
*priv
= netdev_priv(dev
);
1773 struct mlx5e_vport_stats
*vstats
= &priv
->stats
.vport
;
1775 stats
->rx_packets
= vstats
->rx_packets
;
1776 stats
->rx_bytes
= vstats
->rx_bytes
;
1777 stats
->tx_packets
= vstats
->tx_packets
;
1778 stats
->tx_bytes
= vstats
->tx_bytes
;
1779 stats
->multicast
= vstats
->rx_multicast_packets
+
1780 vstats
->tx_multicast_packets
;
1781 stats
->tx_errors
= vstats
->tx_error_packets
;
1782 stats
->rx_errors
= vstats
->rx_error_packets
;
1783 stats
->tx_dropped
= vstats
->tx_queue_dropped
;
1784 stats
->rx_crc_errors
= 0;
1785 stats
->rx_length_errors
= 0;
1790 static void mlx5e_set_rx_mode(struct net_device
*dev
)
1792 struct mlx5e_priv
*priv
= netdev_priv(dev
);
1794 schedule_work(&priv
->set_rx_mode_work
);
1797 static int mlx5e_set_mac(struct net_device
*netdev
, void *addr
)
1799 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1800 struct sockaddr
*saddr
= addr
;
1802 if (!is_valid_ether_addr(saddr
->sa_data
))
1803 return -EADDRNOTAVAIL
;
1805 netif_addr_lock_bh(netdev
);
1806 ether_addr_copy(netdev
->dev_addr
, saddr
->sa_data
);
1807 netif_addr_unlock_bh(netdev
);
1809 schedule_work(&priv
->set_rx_mode_work
);
1814 static int mlx5e_set_features(struct net_device
*netdev
,
1815 netdev_features_t features
)
1817 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1819 netdev_features_t changes
= features
^ netdev
->features
;
1821 mutex_lock(&priv
->state_lock
);
1823 if (changes
& NETIF_F_LRO
) {
1824 bool was_opened
= test_bit(MLX5E_STATE_OPENED
, &priv
->state
);
1827 mlx5e_close_locked(priv
->netdev
);
1829 priv
->params
.lro_en
= !!(features
& NETIF_F_LRO
);
1830 mlx5e_modify_tir_lro(priv
, MLX5E_TT_IPV4_TCP
);
1831 mlx5e_modify_tir_lro(priv
, MLX5E_TT_IPV6_TCP
);
1834 err
= mlx5e_open_locked(priv
->netdev
);
1837 mutex_unlock(&priv
->state_lock
);
1839 if (changes
& NETIF_F_HW_VLAN_CTAG_FILTER
) {
1840 if (features
& NETIF_F_HW_VLAN_CTAG_FILTER
)
1841 mlx5e_enable_vlan_filter(priv
);
1843 mlx5e_disable_vlan_filter(priv
);
1849 static int mlx5e_change_mtu(struct net_device
*netdev
, int new_mtu
)
1851 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1852 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1857 mlx5_query_port_max_mtu(mdev
, &max_mtu
, 1);
1859 if (new_mtu
> max_mtu
) {
1861 "%s: Bad MTU (%d) > (%d) Max\n",
1862 __func__
, new_mtu
, max_mtu
);
1866 mutex_lock(&priv
->state_lock
);
1868 was_opened
= test_bit(MLX5E_STATE_OPENED
, &priv
->state
);
1870 mlx5e_close_locked(netdev
);
1872 netdev
->mtu
= new_mtu
;
1875 err
= mlx5e_open_locked(netdev
);
1877 mutex_unlock(&priv
->state_lock
);
1882 static struct net_device_ops mlx5e_netdev_ops
= {
1883 .ndo_open
= mlx5e_open
,
1884 .ndo_stop
= mlx5e_close
,
1885 .ndo_start_xmit
= mlx5e_xmit
,
1886 .ndo_get_stats64
= mlx5e_get_stats
,
1887 .ndo_set_rx_mode
= mlx5e_set_rx_mode
,
1888 .ndo_set_mac_address
= mlx5e_set_mac
,
1889 .ndo_vlan_rx_add_vid
= mlx5e_vlan_rx_add_vid
,
1890 .ndo_vlan_rx_kill_vid
= mlx5e_vlan_rx_kill_vid
,
1891 .ndo_set_features
= mlx5e_set_features
,
1892 .ndo_change_mtu
= mlx5e_change_mtu
,
1895 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev
*mdev
)
1897 if (MLX5_CAP_GEN(mdev
, port_type
) != MLX5_CAP_PORT_TYPE_ETH
)
1899 if (!MLX5_CAP_GEN(mdev
, eth_net_offloads
) ||
1900 !MLX5_CAP_GEN(mdev
, nic_flow_table
) ||
1901 !MLX5_CAP_ETH(mdev
, csum_cap
) ||
1902 !MLX5_CAP_ETH(mdev
, max_lso_cap
) ||
1903 !MLX5_CAP_ETH(mdev
, vlan_cap
) ||
1904 !MLX5_CAP_ETH(mdev
, rss_ind_tbl_cap
) ||
1905 MLX5_CAP_FLOWTABLE(mdev
,
1906 flow_table_properties_nic_receive
.max_ft_level
)
1908 mlx5_core_warn(mdev
,
1909 "Not creating net device, some required device capabilities are missing\n");
1915 u16
mlx5e_get_max_inline_cap(struct mlx5_core_dev
*mdev
)
1917 int bf_buf_size
= (1 << MLX5_CAP_GEN(mdev
, log_bf_reg_size
)) / 2;
1919 return bf_buf_size
-
1920 sizeof(struct mlx5e_tx_wqe
) +
1921 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
1924 static void mlx5e_build_netdev_priv(struct mlx5_core_dev
*mdev
,
1925 struct net_device
*netdev
,
1928 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1931 priv
->params
.log_sq_size
=
1932 MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE
;
1933 priv
->params
.log_rq_size
=
1934 MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE
;
1935 priv
->params
.rx_cq_moderation_usec
=
1936 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC
;
1937 priv
->params
.rx_cq_moderation_pkts
=
1938 MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS
;
1939 priv
->params
.tx_cq_moderation_usec
=
1940 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC
;
1941 priv
->params
.tx_cq_moderation_pkts
=
1942 MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS
;
1943 priv
->params
.tx_max_inline
= mlx5e_get_max_inline_cap(mdev
);
1944 priv
->params
.min_rx_wqes
=
1945 MLX5E_PARAMS_DEFAULT_MIN_RX_WQES
;
1946 priv
->params
.num_tc
= 1;
1947 priv
->params
.default_vlan_prio
= 0;
1948 priv
->params
.rss_hfunc
= ETH_RSS_HASH_XOR
;
1950 netdev_rss_key_fill(priv
->params
.toeplitz_hash_key
,
1951 sizeof(priv
->params
.toeplitz_hash_key
));
1953 for (i
= 0; i
< MLX5E_INDIR_RQT_SIZE
; i
++)
1954 priv
->params
.indirection_rqt
[i
] = i
% num_channels
;
1956 priv
->params
.lro_wqe_sz
=
1957 MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ
;
1960 priv
->netdev
= netdev
;
1961 priv
->params
.num_channels
= num_channels
;
1962 priv
->default_vlan_prio
= priv
->params
.default_vlan_prio
;
1964 spin_lock_init(&priv
->async_events_spinlock
);
1965 mutex_init(&priv
->state_lock
);
1967 INIT_WORK(&priv
->update_carrier_work
, mlx5e_update_carrier_work
);
1968 INIT_WORK(&priv
->set_rx_mode_work
, mlx5e_set_rx_mode_work
);
1969 INIT_DELAYED_WORK(&priv
->update_stats_work
, mlx5e_update_stats_work
);
1972 static void mlx5e_set_netdev_dev_addr(struct net_device
*netdev
)
1974 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1976 mlx5_query_nic_vport_mac_address(priv
->mdev
, netdev
->dev_addr
);
1979 static void mlx5e_build_netdev(struct net_device
*netdev
)
1981 struct mlx5e_priv
*priv
= netdev_priv(netdev
);
1982 struct mlx5_core_dev
*mdev
= priv
->mdev
;
1984 SET_NETDEV_DEV(netdev
, &mdev
->pdev
->dev
);
1986 if (priv
->params
.num_tc
> 1)
1987 mlx5e_netdev_ops
.ndo_select_queue
= mlx5e_select_queue
;
1989 netdev
->netdev_ops
= &mlx5e_netdev_ops
;
1990 netdev
->watchdog_timeo
= 15 * HZ
;
1992 netdev
->ethtool_ops
= &mlx5e_ethtool_ops
;
1994 netdev
->vlan_features
|= NETIF_F_SG
;
1995 netdev
->vlan_features
|= NETIF_F_IP_CSUM
;
1996 netdev
->vlan_features
|= NETIF_F_IPV6_CSUM
;
1997 netdev
->vlan_features
|= NETIF_F_GRO
;
1998 netdev
->vlan_features
|= NETIF_F_TSO
;
1999 netdev
->vlan_features
|= NETIF_F_TSO6
;
2000 netdev
->vlan_features
|= NETIF_F_RXCSUM
;
2001 netdev
->vlan_features
|= NETIF_F_RXHASH
;
2003 if (!!MLX5_CAP_ETH(mdev
, lro_cap
))
2004 netdev
->vlan_features
|= NETIF_F_LRO
;
2006 netdev
->hw_features
= netdev
->vlan_features
;
2007 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_TX
;
2008 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_RX
;
2009 netdev
->hw_features
|= NETIF_F_HW_VLAN_CTAG_FILTER
;
2011 netdev
->features
= netdev
->hw_features
;
2012 if (!priv
->params
.lro_en
)
2013 netdev
->features
&= ~NETIF_F_LRO
;
2015 netdev
->features
|= NETIF_F_HIGHDMA
;
2017 netdev
->priv_flags
|= IFF_UNICAST_FLT
;
2019 mlx5e_set_netdev_dev_addr(netdev
);
2022 static int mlx5e_create_mkey(struct mlx5e_priv
*priv
, u32 pdn
,
2023 struct mlx5_core_mr
*mr
)
2025 struct mlx5_core_dev
*mdev
= priv
->mdev
;
2026 struct mlx5_create_mkey_mbox_in
*in
;
2029 in
= mlx5_vzalloc(sizeof(*in
));
2033 in
->seg
.flags
= MLX5_PERM_LOCAL_WRITE
|
2034 MLX5_PERM_LOCAL_READ
|
2035 MLX5_ACCESS_MODE_PA
;
2036 in
->seg
.flags_pd
= cpu_to_be32(pdn
| MLX5_MKEY_LEN64
);
2037 in
->seg
.qpn_mkey7_0
= cpu_to_be32(0xffffff << 8);
2039 err
= mlx5_core_create_mkey(mdev
, mr
, in
, sizeof(*in
), NULL
, NULL
,
2047 static void *mlx5e_create_netdev(struct mlx5_core_dev
*mdev
)
2049 struct net_device
*netdev
;
2050 struct mlx5e_priv
*priv
;
2051 int nch
= mlx5e_get_max_num_channels(mdev
);
2054 if (mlx5e_check_required_hca_cap(mdev
))
2057 netdev
= alloc_etherdev_mqs(sizeof(struct mlx5e_priv
), nch
, nch
);
2059 mlx5_core_err(mdev
, "alloc_etherdev_mqs() failed\n");
2063 mlx5e_build_netdev_priv(mdev
, netdev
, nch
);
2064 mlx5e_build_netdev(netdev
);
2066 netif_carrier_off(netdev
);
2068 priv
= netdev_priv(netdev
);
2070 err
= mlx5_alloc_map_uar(mdev
, &priv
->cq_uar
);
2072 mlx5_core_err(mdev
, "alloc_map uar failed, %d\n", err
);
2073 goto err_free_netdev
;
2076 err
= mlx5_core_alloc_pd(mdev
, &priv
->pdn
);
2078 mlx5_core_err(mdev
, "alloc pd failed, %d\n", err
);
2079 goto err_unmap_free_uar
;
2082 err
= mlx5_alloc_transport_domain(mdev
, &priv
->tdn
);
2084 mlx5_core_err(mdev
, "alloc td failed, %d\n", err
);
2085 goto err_dealloc_pd
;
2088 err
= mlx5e_create_mkey(priv
, priv
->pdn
, &priv
->mr
);
2090 mlx5_core_err(mdev
, "create mkey failed, %d\n", err
);
2091 goto err_dealloc_transport_domain
;
2094 err
= mlx5e_create_tises(priv
);
2096 mlx5_core_warn(mdev
, "create tises failed, %d\n", err
);
2097 goto err_destroy_mkey
;
2100 err
= mlx5e_open_drop_rq(priv
);
2102 mlx5_core_err(mdev
, "open drop rq failed, %d\n", err
);
2103 goto err_destroy_tises
;
2106 err
= mlx5e_create_rqt(priv
, MLX5E_INDIRECTION_RQT
);
2108 mlx5_core_warn(mdev
, "create rqt(INDIR) failed, %d\n", err
);
2109 goto err_close_drop_rq
;
2112 err
= mlx5e_create_rqt(priv
, MLX5E_SINGLE_RQ_RQT
);
2114 mlx5_core_warn(mdev
, "create rqt(SINGLE) failed, %d\n", err
);
2115 goto err_destroy_rqt_indir
;
2118 err
= mlx5e_create_tirs(priv
);
2120 mlx5_core_warn(mdev
, "create tirs failed, %d\n", err
);
2121 goto err_destroy_rqt_single
;
2124 err
= mlx5e_create_flow_tables(priv
);
2126 mlx5_core_warn(mdev
, "create flow tables failed, %d\n", err
);
2127 goto err_destroy_tirs
;
2130 mlx5e_init_eth_addr(priv
);
2132 err
= register_netdev(netdev
);
2134 mlx5_core_err(mdev
, "register_netdev failed, %d\n", err
);
2135 goto err_destroy_flow_tables
;
2138 mlx5e_enable_async_events(priv
);
2139 schedule_work(&priv
->set_rx_mode_work
);
2143 err_destroy_flow_tables
:
2144 mlx5e_destroy_flow_tables(priv
);
2147 mlx5e_destroy_tirs(priv
);
2149 err_destroy_rqt_single
:
2150 mlx5e_destroy_rqt(priv
, MLX5E_SINGLE_RQ_RQT
);
2152 err_destroy_rqt_indir
:
2153 mlx5e_destroy_rqt(priv
, MLX5E_INDIRECTION_RQT
);
2156 mlx5e_close_drop_rq(priv
);
2159 mlx5e_destroy_tises(priv
);
2162 mlx5_core_destroy_mkey(mdev
, &priv
->mr
);
2164 err_dealloc_transport_domain
:
2165 mlx5_dealloc_transport_domain(mdev
, priv
->tdn
);
2168 mlx5_core_dealloc_pd(mdev
, priv
->pdn
);
2171 mlx5_unmap_free_uar(mdev
, &priv
->cq_uar
);
2174 free_netdev(netdev
);
2179 static void mlx5e_destroy_netdev(struct mlx5_core_dev
*mdev
, void *vpriv
)
2181 struct mlx5e_priv
*priv
= vpriv
;
2182 struct net_device
*netdev
= priv
->netdev
;
2184 set_bit(MLX5E_STATE_DESTROYING
, &priv
->state
);
2186 schedule_work(&priv
->set_rx_mode_work
);
2187 mlx5e_disable_async_events(priv
);
2188 flush_scheduled_work();
2189 unregister_netdev(netdev
);
2190 mlx5e_destroy_flow_tables(priv
);
2191 mlx5e_destroy_tirs(priv
);
2192 mlx5e_destroy_rqt(priv
, MLX5E_SINGLE_RQ_RQT
);
2193 mlx5e_destroy_rqt(priv
, MLX5E_INDIRECTION_RQT
);
2194 mlx5e_close_drop_rq(priv
);
2195 mlx5e_destroy_tises(priv
);
2196 mlx5_core_destroy_mkey(priv
->mdev
, &priv
->mr
);
2197 mlx5_dealloc_transport_domain(priv
->mdev
, priv
->tdn
);
2198 mlx5_core_dealloc_pd(priv
->mdev
, priv
->pdn
);
2199 mlx5_unmap_free_uar(priv
->mdev
, &priv
->cq_uar
);
2200 free_netdev(netdev
);
2203 static void *mlx5e_get_netdev(void *vpriv
)
2205 struct mlx5e_priv
*priv
= vpriv
;
2207 return priv
->netdev
;
2210 static struct mlx5_interface mlx5e_interface
= {
2211 .add
= mlx5e_create_netdev
,
2212 .remove
= mlx5e_destroy_netdev
,
2213 .event
= mlx5e_async_event
,
2214 .protocol
= MLX5_INTERFACE_PROTOCOL_ETH
,
2215 .get_dev
= mlx5e_get_netdev
,
2218 void mlx5e_init(void)
2220 mlx5_register_interface(&mlx5e_interface
);
2223 void mlx5e_cleanup(void)
2225 mlx5_unregister_interface(&mlx5e_interface
);