/*
 * Copyright (c) 2016 QLogic Corporation.
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_ethdev.h"
12 qed_start_vport(struct ecore_dev
*edev
, struct qed_start_vport_params
*p_params
)
16 for_each_hwfn(edev
, i
) {
17 struct ecore_hwfn
*p_hwfn
= &edev
->hwfns
[i
];
19 struct ecore_sp_vport_start_params start
= { 0 };
21 start
.tpa_mode
= p_params
->gro_enable
? ECORE_TPA_MODE_GRO
:
23 start
.remove_inner_vlan
= p_params
->remove_inner_vlan
;
24 start
.tx_switching
= tx_switching
;
25 start
.only_untagged
= false; /* untagged only */
26 start
.drop_ttl0
= p_params
->drop_ttl0
;
27 start
.concrete_fid
= p_hwfn
->hw_info
.concrete_fid
;
28 start
.opaque_fid
= p_hwfn
->hw_info
.opaque_fid
;
29 start
.concrete_fid
= p_hwfn
->hw_info
.concrete_fid
;
30 start
.handle_ptp_pkts
= p_params
->handle_ptp_pkts
;
31 start
.vport_id
= p_params
->vport_id
;
32 start
.max_buffers_per_cqe
= 16; /* TODO-is this right */
33 start
.mtu
= p_params
->mtu
;
34 /* @DPDK - Disable FW placement */
35 start
.zero_placement_offset
= 1;
37 rc
= ecore_sp_vport_start(p_hwfn
, &start
);
39 DP_ERR(edev
, "Failed to start VPORT\n");
43 DP_VERBOSE(edev
, ECORE_MSG_SPQ
,
44 "Started V-PORT %d with MTU %d\n",
45 p_params
->vport_id
, p_params
->mtu
);
48 ecore_reset_vport_stats(edev
);
53 static int qed_stop_vport(struct ecore_dev
*edev
, uint8_t vport_id
)
57 for_each_hwfn(edev
, i
) {
58 struct ecore_hwfn
*p_hwfn
= &edev
->hwfns
[i
];
59 rc
= ecore_sp_vport_stop(p_hwfn
,
60 p_hwfn
->hw_info
.opaque_fid
, vport_id
);
63 DP_ERR(edev
, "Failed to stop VPORT\n");
72 qed_update_vport(struct ecore_dev
*edev
, struct qed_update_vport_params
*params
)
74 struct ecore_sp_vport_update_params sp_params
;
75 struct ecore_rss_params sp_rss_params
;
78 memset(&sp_params
, 0, sizeof(sp_params
));
79 memset(&sp_rss_params
, 0, sizeof(sp_rss_params
));
81 /* Translate protocol params into sp params */
82 sp_params
.vport_id
= params
->vport_id
;
83 sp_params
.update_vport_active_rx_flg
= params
->update_vport_active_flg
;
84 sp_params
.update_vport_active_tx_flg
= params
->update_vport_active_flg
;
85 sp_params
.vport_active_rx_flg
= params
->vport_active_flg
;
86 sp_params
.vport_active_tx_flg
= params
->vport_active_flg
;
87 sp_params
.update_inner_vlan_removal_flg
=
88 params
->update_inner_vlan_removal_flg
;
89 sp_params
.inner_vlan_removal_flg
= params
->inner_vlan_removal_flg
;
90 sp_params
.update_tx_switching_flg
= params
->update_tx_switching_flg
;
91 sp_params
.tx_switching_flg
= params
->tx_switching_flg
;
92 sp_params
.accept_any_vlan
= params
->accept_any_vlan
;
93 sp_params
.update_accept_any_vlan_flg
=
94 params
->update_accept_any_vlan_flg
;
95 sp_params
.mtu
= params
->mtu
;
97 /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns.
98 * We need to re-fix the rss values per engine for CMT.
101 if (edev
->num_hwfns
> 1 && params
->update_rss_flg
) {
102 struct qed_update_vport_rss_params
*rss
= ¶ms
->rss_params
;
105 /* Find largest entry, since it's possible RSS needs to
106 * be disabled [in case only 1 queue per-hwfn]
108 for (k
= 0; k
< ECORE_RSS_IND_TABLE_SIZE
; k
++)
109 max
= (max
> rss
->rss_ind_table
[k
]) ?
110 max
: rss
->rss_ind_table
[k
];
112 /* Either fix RSS values or disable RSS */
113 if (edev
->num_hwfns
< max
+ 1) {
114 int divisor
= (max
+ edev
->num_hwfns
- 1) /
117 DP_VERBOSE(edev
, ECORE_MSG_SPQ
,
118 "CMT - fixing RSS values (modulo %02x)\n",
121 for (k
= 0; k
< ECORE_RSS_IND_TABLE_SIZE
; k
++)
122 rss
->rss_ind_table
[k
] =
123 rss
->rss_ind_table
[k
] % divisor
;
125 DP_VERBOSE(edev
, ECORE_MSG_SPQ
,
126 "CMT - 1 queue per-hwfn; Disabling RSS\n");
127 params
->update_rss_flg
= 0;
131 /* Now, update the RSS configuration for actual configuration */
132 if (params
->update_rss_flg
) {
133 sp_rss_params
.update_rss_config
= 1;
134 sp_rss_params
.rss_enable
= 1;
135 sp_rss_params
.update_rss_capabilities
= 1;
136 sp_rss_params
.update_rss_ind_table
= 1;
137 sp_rss_params
.update_rss_key
= 1;
138 sp_rss_params
.rss_caps
= ECORE_RSS_IPV4
| ECORE_RSS_IPV6
|
139 ECORE_RSS_IPV4_TCP
| ECORE_RSS_IPV6_TCP
;
140 sp_rss_params
.rss_table_size_log
= 7; /* 2^7 = 128 */
141 rte_memcpy(sp_rss_params
.rss_ind_table
,
142 params
->rss_params
.rss_ind_table
,
143 ECORE_RSS_IND_TABLE_SIZE
* sizeof(uint16_t));
144 rte_memcpy(sp_rss_params
.rss_key
, params
->rss_params
.rss_key
,
145 ECORE_RSS_KEY_SIZE
* sizeof(uint32_t));
146 sp_params
.rss_params
= &sp_rss_params
;
149 for_each_hwfn(edev
, i
) {
150 struct ecore_hwfn
*p_hwfn
= &edev
->hwfns
[i
];
152 sp_params
.opaque_fid
= p_hwfn
->hw_info
.opaque_fid
;
153 rc
= ecore_sp_vport_update(p_hwfn
, &sp_params
,
154 ECORE_SPQ_MODE_EBLOCK
, NULL
);
156 DP_ERR(edev
, "Failed to update VPORT\n");
160 DP_VERBOSE(edev
, ECORE_MSG_SPQ
,
161 "Updated V-PORT %d: active_flag %d [update %d]\n",
162 params
->vport_id
, params
->vport_active_flg
,
163 params
->update_vport_active_flg
);
170 qed_start_rxq(struct ecore_dev
*edev
,
172 struct ecore_queue_start_common_params
*p_params
,
173 uint16_t bd_max_bytes
,
174 dma_addr_t bd_chain_phys_addr
,
175 dma_addr_t cqe_pbl_addr
,
176 uint16_t cqe_pbl_size
, void OSAL_IOMEM
* *pp_prod
)
178 struct ecore_hwfn
*p_hwfn
;
181 hwfn_index
= rss_num
% edev
->num_hwfns
;
182 p_hwfn
= &edev
->hwfns
[hwfn_index
];
184 p_params
->queue_id
= p_params
->queue_id
/ edev
->num_hwfns
;
185 p_params
->stats_id
= p_params
->vport_id
;
187 rc
= ecore_sp_eth_rx_queue_start(p_hwfn
,
188 p_hwfn
->hw_info
.opaque_fid
,
192 cqe_pbl_addr
, cqe_pbl_size
, pp_prod
);
195 DP_ERR(edev
, "Failed to start RXQ#%d\n", p_params
->queue_id
);
199 DP_VERBOSE(edev
, ECORE_MSG_SPQ
,
200 "Started RX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
201 p_params
->queue_id
, rss_num
, p_params
->vport_id
,
208 qed_stop_rxq(struct ecore_dev
*edev
, struct qed_stop_rxq_params
*params
)
211 struct ecore_hwfn
*p_hwfn
;
213 hwfn_index
= params
->rss_id
% edev
->num_hwfns
;
214 p_hwfn
= &edev
->hwfns
[hwfn_index
];
216 rc
= ecore_sp_eth_rx_queue_stop(p_hwfn
,
217 params
->rx_queue_id
/ edev
->num_hwfns
,
218 params
->eq_completion_only
, false);
220 DP_ERR(edev
, "Failed to stop RXQ#%d\n", params
->rx_queue_id
);
228 qed_start_txq(struct ecore_dev
*edev
,
230 struct ecore_queue_start_common_params
*p_params
,
232 uint16_t pbl_size
, void OSAL_IOMEM
* *pp_doorbell
)
234 struct ecore_hwfn
*p_hwfn
;
237 hwfn_index
= rss_num
% edev
->num_hwfns
;
238 p_hwfn
= &edev
->hwfns
[hwfn_index
];
240 p_params
->queue_id
= p_params
->queue_id
/ edev
->num_hwfns
;
241 p_params
->stats_id
= p_params
->vport_id
;
243 rc
= ecore_sp_eth_tx_queue_start(p_hwfn
,
244 p_hwfn
->hw_info
.opaque_fid
,
247 pbl_addr
, pbl_size
, pp_doorbell
);
250 DP_ERR(edev
, "Failed to start TXQ#%d\n", p_params
->queue_id
);
254 DP_VERBOSE(edev
, ECORE_MSG_SPQ
,
255 "Started TX-Q %d [rss_num %d] on V-PORT %d and SB %d\n",
256 p_params
->queue_id
, rss_num
, p_params
->vport_id
,
263 qed_stop_txq(struct ecore_dev
*edev
, struct qed_stop_txq_params
*params
)
265 struct ecore_hwfn
*p_hwfn
;
268 hwfn_index
= params
->rss_id
% edev
->num_hwfns
;
269 p_hwfn
= &edev
->hwfns
[hwfn_index
];
271 rc
= ecore_sp_eth_tx_queue_stop(p_hwfn
,
272 params
->tx_queue_id
/ edev
->num_hwfns
);
274 DP_ERR(edev
, "Failed to stop TXQ#%d\n", params
->tx_queue_id
);
282 qed_fp_cqe_completion(struct ecore_dev
*edev
,
283 uint8_t rss_id
, struct eth_slow_path_rx_cqe
*cqe
)
285 return ecore_eth_cqe_completion(&edev
->hwfns
[rss_id
% edev
->num_hwfns
],
/* Quiesce the device fast-path; thin wrapper over ecore. Always succeeds. */
static int qed_fastpath_stop(struct ecore_dev *edev)
{
	ecore_hw_stop_fastpath(edev);

	return 0;
}
296 static void qed_fastpath_start(struct ecore_dev
*edev
)
298 struct ecore_hwfn
*p_hwfn
;
301 for_each_hwfn(edev
, i
) {
302 p_hwfn
= &edev
->hwfns
[i
];
303 ecore_hw_start_fastpath(p_hwfn
);
/* Retrieve aggregated vport statistics into @stats; wrapper over ecore. */
static void
qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats)
{
	ecore_get_vport_stats(edev, stats);
}
314 qed_configure_filter_ucast(struct ecore_dev
*edev
,
315 struct qed_filter_ucast_params
*params
)
317 struct ecore_filter_ucast ucast
;
319 if (!params
->vlan_valid
&& !params
->mac_valid
) {
320 DP_NOTICE(edev
, true,
321 "Tried configuring a unicast filter,"
322 "but both MAC and VLAN are not set\n");
326 memset(&ucast
, 0, sizeof(ucast
));
327 switch (params
->type
) {
328 case QED_FILTER_XCAST_TYPE_ADD
:
329 ucast
.opcode
= ECORE_FILTER_ADD
;
331 case QED_FILTER_XCAST_TYPE_DEL
:
332 ucast
.opcode
= ECORE_FILTER_REMOVE
;
334 case QED_FILTER_XCAST_TYPE_REPLACE
:
335 ucast
.opcode
= ECORE_FILTER_REPLACE
;
338 DP_NOTICE(edev
, true, "Unknown unicast filter type %d\n",
342 if (params
->vlan_valid
&& params
->mac_valid
) {
343 ucast
.type
= ECORE_FILTER_MAC_VLAN
;
344 ether_addr_copy((struct ether_addr
*)¶ms
->mac
,
345 (struct ether_addr
*)&ucast
.mac
);
346 ucast
.vlan
= params
->vlan
;
347 } else if (params
->mac_valid
) {
348 ucast
.type
= ECORE_FILTER_MAC
;
349 ether_addr_copy((struct ether_addr
*)¶ms
->mac
,
350 (struct ether_addr
*)&ucast
.mac
);
352 ucast
.type
= ECORE_FILTER_VLAN
;
353 ucast
.vlan
= params
->vlan
;
356 ucast
.is_rx_filter
= true;
357 ucast
.is_tx_filter
= true;
359 return ecore_filter_ucast_cmd(edev
, &ucast
, ECORE_SPQ_MODE_CB
, NULL
);
363 qed_configure_filter_mcast(struct ecore_dev
*edev
,
364 struct qed_filter_mcast_params
*params
)
366 struct ecore_filter_mcast mcast
;
369 memset(&mcast
, 0, sizeof(mcast
));
370 switch (params
->type
) {
371 case QED_FILTER_XCAST_TYPE_ADD
:
372 mcast
.opcode
= ECORE_FILTER_ADD
;
374 case QED_FILTER_XCAST_TYPE_DEL
:
375 mcast
.opcode
= ECORE_FILTER_REMOVE
;
378 DP_NOTICE(edev
, true, "Unknown multicast filter type %d\n",
382 mcast
.num_mc_addrs
= params
->num
;
383 for (i
= 0; i
< mcast
.num_mc_addrs
; i
++)
384 ether_addr_copy((struct ether_addr
*)¶ms
->mac
[i
],
385 (struct ether_addr
*)&mcast
.mac
[i
]);
387 return ecore_filter_mcast_cmd(edev
, &mcast
, ECORE_SPQ_MODE_CB
, NULL
);
390 int qed_configure_filter_rx_mode(struct ecore_dev
*edev
,
391 enum qed_filter_rx_mode_type type
)
393 struct ecore_filter_accept_flags flags
;
395 memset(&flags
, 0, sizeof(flags
));
397 flags
.update_rx_mode_config
= 1;
398 flags
.update_tx_mode_config
= 1;
399 flags
.rx_accept_filter
= ECORE_ACCEPT_UCAST_MATCHED
|
400 ECORE_ACCEPT_MCAST_MATCHED
|
403 flags
.tx_accept_filter
= ECORE_ACCEPT_UCAST_MATCHED
|
404 ECORE_ACCEPT_MCAST_MATCHED
|
407 if (type
== QED_FILTER_RX_MODE_TYPE_PROMISC
) {
408 flags
.rx_accept_filter
|= ECORE_ACCEPT_UCAST_UNMATCHED
;
410 flags
.tx_accept_filter
|= ECORE_ACCEPT_UCAST_UNMATCHED
;
411 DP_INFO(edev
, "Enabling Tx unmatched flag for VF\n");
413 } else if (type
== QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC
) {
414 flags
.rx_accept_filter
|= ECORE_ACCEPT_MCAST_UNMATCHED
;
415 } else if (type
== (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC
|
416 QED_FILTER_RX_MODE_TYPE_PROMISC
)) {
417 flags
.rx_accept_filter
|= ECORE_ACCEPT_UCAST_UNMATCHED
|
418 ECORE_ACCEPT_MCAST_UNMATCHED
;
421 return ecore_filter_accept_cmd(edev
, 0, flags
, false, false,
422 ECORE_SPQ_MODE_CB
, NULL
);
426 qed_configure_filter(struct ecore_dev
*edev
, struct qed_filter_params
*params
)
428 switch (params
->type
) {
429 case QED_FILTER_TYPE_UCAST
:
430 return qed_configure_filter_ucast(edev
, ¶ms
->filter
.ucast
);
431 case QED_FILTER_TYPE_MCAST
:
432 return qed_configure_filter_mcast(edev
, ¶ms
->filter
.mcast
);
433 case QED_FILTER_TYPE_RX_MODE
:
434 return qed_configure_filter_rx_mode(edev
,
438 DP_NOTICE(edev
, true, "Unknown filter type %d\n",
444 static const struct qed_eth_ops qed_eth_ops_pass
= {
445 INIT_STRUCT_FIELD(common
, &qed_common_ops_pass
),
446 INIT_STRUCT_FIELD(fill_dev_info
, &qed_fill_eth_dev_info
),
447 INIT_STRUCT_FIELD(vport_start
, &qed_start_vport
),
448 INIT_STRUCT_FIELD(vport_stop
, &qed_stop_vport
),
449 INIT_STRUCT_FIELD(vport_update
, &qed_update_vport
),
450 INIT_STRUCT_FIELD(q_rx_start
, &qed_start_rxq
),
451 INIT_STRUCT_FIELD(q_tx_start
, &qed_start_txq
),
452 INIT_STRUCT_FIELD(q_rx_stop
, &qed_stop_rxq
),
453 INIT_STRUCT_FIELD(q_tx_stop
, &qed_stop_txq
),
454 INIT_STRUCT_FIELD(eth_cqe_completion
, &qed_fp_cqe_completion
),
455 INIT_STRUCT_FIELD(fastpath_stop
, &qed_fastpath_stop
),
456 INIT_STRUCT_FIELD(fastpath_start
, &qed_fastpath_start
),
457 INIT_STRUCT_FIELD(get_vport_stats
, &qed_get_vport_stats
),
458 INIT_STRUCT_FIELD(filter_config
, &qed_configure_filter
),
461 const struct qed_eth_ops
*qed_get_eth_ops(void)
463 return &qed_eth_ops_pass
;