/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"

#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41
struct ecore_l2_info {
	u32 queues;
	unsigned long **pp_qid_usage;

	/* The lock is meant to synchronize access to the qid usage */
	osal_mutex_t lock;
};
enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_l2_info *p_l2_info;
	unsigned long **pp_qids;
	u32 i;

	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
		return ECORE_SUCCESS;

	p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
	if (!p_l2_info)
		return ECORE_NOMEM;
	p_hwfn->p_l2_info = p_l2_info;

	if (IS_PF(p_hwfn->p_dev)) {
		p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
	} else {
		u8 rx = 0, tx = 0;

		ecore_vf_get_num_rxqs(p_hwfn, &rx);
		ecore_vf_get_num_txqs(p_hwfn, &tx);

		p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
	}

	pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
			       sizeof(unsigned long *) *
			       p_l2_info->queues);
	if (pp_qids == OSAL_NULL)
		return ECORE_NOMEM;
	p_l2_info->pp_qid_usage = pp_qids;

	for (i = 0; i < p_l2_info->queues; i++) {
		pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
					  MAX_QUEUES_PER_QZONE / 8);
		if (pp_qids[i] == OSAL_NULL)
			return ECORE_NOMEM;
	}

#ifdef CONFIG_ECORE_LOCK_ALLOC
	if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock))
		return ECORE_NOMEM;
#endif

	return ECORE_SUCCESS;
}
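
/* Sizing note (editorial sketch, not driver logic): each per-qzone map above
 * is MAX_QUEUES_PER_QZONE / 8 bytes, i.e. one bit per queue-cid that may
 * share a queue-zone. For illustration, if MAX_QUEUES_PER_QZONE were 16:
 *
 *	unsigned long *p_map = OSAL_VZALLOC(p_dev, 16 / 8);	// 16 bits
 *
 * so OSAL_FIND_FIRST_ZERO_BIT()/OSAL_SET_BIT() below can hand out and
 * reclaim per-zone indices without extra bookkeeping structures.
 */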
void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
{
	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
		return;

	OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
}
void ecore_l2_free(struct ecore_hwfn *p_hwfn)
{
	u32 i;

	if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
		return;

	if (p_hwfn->p_l2_info == OSAL_NULL)
		return;

	if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
		goto out_l2_info;

	/* Free until hit first uninitialized entry */
	for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
		if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
			break;
		OSAL_VFREE(p_hwfn->p_dev,
			   p_hwfn->p_l2_info->pp_qid_usage[i]);
		p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL;
	}

#ifdef CONFIG_ECORE_LOCK_ALLOC
	/* Lock is last to initialize, if everything else was */
	if (i == p_hwfn->p_l2_info->queues)
		OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
#endif

	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
	p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL;

out_l2_info:
	OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
	p_hwfn->p_l2_info = OSAL_NULL;
}
/* TODO - we'll need locking around these... */
static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
					  struct ecore_queue_cid *p_cid)
{
	struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
	u16 queue_id = p_cid->rel.queue_id;
	bool b_rc = true;
	u8 first;

	OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);

	if (queue_id > p_l2_info->queues) {
		DP_NOTICE(p_hwfn, true,
			  "Requested to increase usage for qzone %04x out of %08x\n",
			  queue_id, p_l2_info->queues);
		b_rc = false;
		goto out;
	}

	first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
					     MAX_QUEUES_PER_QZONE);
	if (first >= MAX_QUEUES_PER_QZONE) {
		b_rc = false;
		goto out;
	}

	OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
	p_cid->qid_usage_idx = first;

out:
	OSAL_MUTEX_RELEASE(&p_l2_info->lock);
	return b_rc;
}
static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
					  struct ecore_queue_cid *p_cid)
{
	OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);

	OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
		       p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

	OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
}
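
/* Illustrative sketch (not driver code): the add/del pair above implements a
 * minimal bitmap allocator over one qzone map. For a single map the pattern
 * in plain C is:
 *
 *	first = OSAL_FIND_FIRST_ZERO_BIT(map, MAX_QUEUES_PER_QZONE);
 *	if (first < MAX_QUEUES_PER_QZONE)
 *		OSAL_SET_BIT(first, map);	// allocate lowest free index
 *	...
 *	OSAL_CLEAR_BIT(first, map);		// release it on teardown
 */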
void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
				 struct ecore_queue_cid *p_cid)
{
	bool b_legacy_vf = !!(p_cid->vf_legacy &
			      ECORE_QCID_LEGACY_VF_CID);

	/* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
	 * For legacy vf-queues, the CID doesn't go through here.
	 */
	if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
		_ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

	/* VFs maintain the index inside queue-zone on their own */
	if (p_cid->vfid == ECORE_QUEUE_CID_PF)
		ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);

	OSAL_VFREE(p_hwfn->p_dev, p_cid);
}
/* The internal is only meant to be directly called by PFs initializing CIDs
 * for their VFs.
 */
static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
			u16 opaque_fid, u32 cid,
			struct ecore_queue_start_common_params *p_params,
			bool b_is_rx,
			struct ecore_queue_cid_vf_params *p_vf_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
	if (p_cid == OSAL_NULL)
		return OSAL_NULL;

	p_cid->opaque_fid = opaque_fid;
	p_cid->cid = cid;
	p_cid->p_owner = p_hwfn;

	/* Fill in parameters */
	p_cid->rel.vport_id = p_params->vport_id;
	p_cid->rel.queue_id = p_params->queue_id;
	p_cid->rel.stats_id = p_params->stats_id;
	p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
	p_cid->b_is_rx = b_is_rx;
	p_cid->sb_idx = p_params->sb_idx;

	/* Fill-in bits related to VFs' queues if information was provided */
	if (p_vf_params != OSAL_NULL) {
		p_cid->vfid = p_vf_params->vfid;
		p_cid->vf_qid = p_vf_params->vf_qid;
		p_cid->vf_legacy = p_vf_params->vf_legacy;
	} else {
		p_cid->vfid = ECORE_QUEUE_CID_PF;
	}

	/* Don't try calculating the absolute indices for VFs */
	if (IS_VF(p_hwfn->p_dev)) {
		p_cid->abs = p_cid->rel;
		goto out;
	}

	/* Calculate the engine-absolute indices of the resources.
	 * This would guarantee they're valid later on.
	 * In some cases [SBs] we already have the right values.
	 */
	rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
	if (rc != ECORE_SUCCESS)
		goto fail;

	rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
			       &p_cid->abs.queue_id);
	if (rc != ECORE_SUCCESS)
		goto fail;

	/* In case of a PF configuring its VF's queues, the stats-id is already
	 * absolute [since there's a single index that's suitable per-VF].
	 */
	if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
		rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
				    &p_cid->abs.stats_id);
		if (rc != ECORE_SUCCESS)
			goto fail;
	} else {
		p_cid->abs.stats_id = p_cid->rel.stats_id;
	}

out:
	/* VF-images have provided the qid_usage_idx on their own.
	 * Otherwise, we need to allocate a unique one.
	 */
	if (!p_vf_params) {
		if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
			goto fail;
	} else {
		p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
		   p_cid->opaque_fid, p_cid->cid,
		   p_cid->rel.vport_id, p_cid->abs.vport_id,
		   p_cid->rel.queue_id, p_cid->qid_usage_idx,
		   p_cid->abs.queue_id,
		   p_cid->rel.stats_id, p_cid->abs.stats_id,
		   p_cid->sb_igu_id, p_cid->sb_idx);

	return p_cid;

fail:
	OSAL_VFREE(p_hwfn->p_dev, p_cid);
	return OSAL_NULL;
}
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
		       struct ecore_queue_start_common_params *p_params,
		       bool b_is_rx,
		       struct ecore_queue_cid_vf_params *p_vf_params)
{
	struct ecore_queue_cid *p_cid;
	u8 vfid = ECORE_CXT_PF_CID;
	bool b_legacy_vf = false;
	u32 cid = 0;

	/* In case of legacy VFs, the CID can be derived from the additional
	 * VF parameters - the VF assumes queue X uses CID X, so we can simply
	 * use the vf_qid for this purpose as well.
	 */
	if (p_vf_params) {
		vfid = p_vf_params->vfid;

		if (p_vf_params->vf_legacy &
		    ECORE_QCID_LEGACY_VF_CID) {
			b_legacy_vf = true;
			cid = p_vf_params->vf_qid;
		}
	}

	/* Get a unique firmware CID for this queue, in case it's a PF.
	 * VFs don't need a CID as the queue configuration will be done
	 * by the PF.
	 */
	if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
		if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
					   &cid, vfid) != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
			return OSAL_NULL;
		}
	}

	p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
					p_params, b_is_rx, p_vf_params);
	if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
		_ecore_cxt_release_cid(p_hwfn, cid, vfid);

	return p_cid;
}
static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
			  bool b_is_rx,
			  struct ecore_queue_start_common_params *p_params)
{
	return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
				      OSAL_NULL);
}
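
/* Usage sketch (illustrative): the PF-side queue-start paths below obtain
 * their CID through this wrapper, e.g. for an Rx queue:
 *
 *	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid,
 *					  true, p_params);	// b_is_rx
 *
 * With the VF parameters NULLed out, the CID is acquired from the PF
 * context manager and a qid-usage index is allocated internally.
 */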
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
			 struct ecore_sp_vport_start_params *p_params)
{
	struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct eth_vport_tpa_param *p_tpa;
	u16 rx_mode = 0, tx_err = 0;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_start;
	p_ramrod->vport_id = abs_vport_id;

	p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
	p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
	p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
	p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
	p_ramrod->untagged = p_params->only_untagged;
	p_ramrod->zero_placement_offset = p_params->zero_placement_offset;

	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
	SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

	p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);

	/* Handle requests for strict behavior on transmission errors */
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
		  p_params->b_err_illegal_vlan_mode ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
		  p_params->b_err_small_pkt ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
		  p_params->b_err_anti_spoof ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
		  p_params->b_err_illegal_inband_mode ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
		  p_params->b_err_vlan_insert_with_inband ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
		  p_params->b_err_big_pkt ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
		  p_params->b_err_ctrl_frame ?
		  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
	p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);

	/* TPA related fields */
	p_tpa = &p_ramrod->tpa_param;
	OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param));
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;

	switch (p_params->tpa_mode) {
	case ECORE_TPA_MODE_GRO:
		p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
		p_tpa->tpa_max_size = (u16)-1;
		p_tpa->tpa_min_size_to_cont = p_params->mtu / 2;
		p_tpa->tpa_min_size_to_start = p_params->mtu / 2;
		p_tpa->tpa_ipv4_en_flg = 1;
		p_tpa->tpa_ipv6_en_flg = 1;
		p_tpa->tpa_ipv4_tunn_en_flg = 1;
		p_tpa->tpa_ipv6_tunn_en_flg = 1;
		p_tpa->tpa_pkt_split_flg = 1;
		p_tpa->tpa_gro_consistent_flg = 1;
		break;
	default:
		break;
	}

	p_ramrod->tx_switching_en = p_params->tx_switching;
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		p_ramrod->tx_switching_en = 0;
#endif

	p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
	p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

	/* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
	p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
		     struct ecore_sp_vport_start_params *p_params)
{
	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
					       p_params->mtu,
					       p_params->remove_inner_vlan,
					       p_params->tpa_mode,
					       p_params->max_buffers_per_cqe,
					       p_params->only_untagged);

	return ecore_sp_eth_vport_start(p_hwfn, p_params);
}
static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
			  struct vport_update_ramrod_data *p_ramrod,
			  struct ecore_rss_params *p_rss)
{
	struct eth_vport_rss_config *p_config;
	u16 capabilities = 0;
	int i, table_size;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	if (!p_rss) {
		p_ramrod->common.update_rss_flg = 0;
		return rc;
	}
	p_config = &p_ramrod->rss_config;

	OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
			  ETH_RSS_IND_TABLE_ENTRIES_NUM);

	rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
	p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
	p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
	p_config->update_rss_key = p_rss->update_rss_key;

	p_config->rss_mode = p_rss->rss_enable ?
	    ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;

	p_config->capabilities = 0;

	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
	SET_FIELD(capabilities,
		  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
		  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
	p_config->tbl_size = p_rss->rss_table_size_log;
	p_config->capabilities = OSAL_CPU_TO_LE16(capabilities);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
		   p_ramrod->common.update_rss_flg,
		   p_config->rss_mode,
		   p_config->update_rss_capabilities,
		   p_config->capabilities,
		   p_config->update_rss_ind_table, p_config->update_rss_key);

	table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
				1 << p_config->tbl_size);
	for (i = 0; i < table_size; i++) {
		struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];

		if (!p_queue)
			return ECORE_INVAL;

		p_config->indirection_table[i] =
		    OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
		   "Configured RSS indirection table [%d entries]:\n",
		   table_size);
	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
			   "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
			   OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
	}

	for (i = 0; i < 10; i++)
		p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

	return rc;
}
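
/* Illustrative sketch (names are assumptions): a caller populating
 * ecore_rss_params before a vport update typically spreads its Rx queue
 * handles across the indirection table, e.g.:
 *
 *	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
 *		rss->rss_ind_table[i] = rxq_handles[i % num_rxqs];
 *	rss->rss_table_size_log = 7;	// 2^7 = 128 entries
 *	rss->update_rss_ind_table = 1;
 *
 * ecore_sp_vport_update_rss() then translates each handle into its absolute
 * queue-id for the firmware.
 */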
static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
			    struct vport_update_ramrod_data *p_ramrod,
			    struct ecore_filter_accept_flags accept_flags)
{
	p_ramrod->common.update_rx_mode_flg =
	    accept_flags.update_rx_mode_config;
	p_ramrod->common.update_tx_mode_flg =
	    accept_flags.update_tx_mode_config;

#ifndef ASIC_ONLY
	/* On B0 emulation we cannot enable Tx, since this would cause writes
	 * to PVFC HW block which isn't implemented in emulation.
	 */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Non-Asic - prevent Tx mode in vport update\n");
		p_ramrod->common.update_tx_mode_flg = 0;
	}
#endif

	/* Set Rx mode accept flags */
	if (p_ramrod->common.update_rx_mode_flg) {
		u8 accept_filter = accept_flags.rx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
			  !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
			    !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
			  !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
			  !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
			    !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & ECORE_ACCEPT_BCAST));

		SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
			  !!(accept_filter & ECORE_ACCEPT_ANY_VNI));

		p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
			   p_ramrod->common.vport_id, state);
	}

	/* Set Tx mode accept flags */
	if (p_ramrod->common.update_tx_mode_flg) {
		u8 accept_filter = accept_flags.tx_accept_filter;
		u16 state = 0;

		SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
			  !!(accept_filter & ECORE_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
			  !!(accept_filter & ECORE_ACCEPT_NONE));

		SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
			  (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
			   !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

		SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
			  !!(accept_filter & ECORE_ACCEPT_BCAST));

		p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
			   p_ramrod->common.vport_id, state);
	}
}
static void
ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
			      struct ecore_sge_tpa_params *p_params)
{
	struct eth_vport_tpa_param *p_tpa;
	u16 val;

	if (!p_params) {
		p_ramrod->common.update_tpa_param_flg = 0;
		p_ramrod->common.update_tpa_en_flg = 0;
		return;
	}

	p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
	p_tpa = &p_ramrod->tpa_param;
	p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
	p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
	p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
	p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

	p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
	p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
	p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
	p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
	p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
	p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
	val = p_params->tpa_max_size;
	p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val);
	val = p_params->tpa_min_size_to_start;
	p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val);
	val = p_params->tpa_min_size_to_cont;
	p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val);
}
static void
ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
			  struct ecore_sp_vport_update_params *p_params)
{
	int i;

	OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
		    sizeof(p_ramrod->approx_mcast.bins));

	if (!p_params->update_approx_mcast_flg)
		return;

	p_ramrod->common.update_approx_mcast_flg = 1;
	for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
		u32 *p_bins = p_params->bins;

		p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
	}
}
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
		      struct ecore_sp_vport_update_params *p_params,
		      enum spq_mode comp_mode,
		      struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_rss_params *p_rss_params = p_params->rss_params;
	struct vport_update_ramrod_data_cmn *p_cmn;
	struct ecore_sp_init_data init_data;
	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	u8 abs_vport_id = 0, val;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_VF(p_hwfn->p_dev)) {
		rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
		return rc;
	}

	rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_params->opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Copy input params to ramrod according to FW struct */
	p_ramrod = &p_ent->ramrod.vport_update;
	p_cmn = &p_ramrod->common;

	p_cmn->vport_id = abs_vport_id;

	p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
	p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
	p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
	p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

	p_cmn->accept_any_vlan = p_params->accept_any_vlan;
	val = p_params->update_accept_any_vlan_flg;
	p_cmn->update_accept_any_vlan_flg = val;

	p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
	val = p_params->update_inner_vlan_removal_flg;
	p_cmn->update_inner_vlan_removal_en_flg = val;

	p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
	val = p_params->update_default_vlan_enable_flg;
	p_cmn->update_default_vlan_en_flg = val;

	p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
	p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

	p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

	p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
		if (p_ramrod->common.tx_switching_en ||
		    p_ramrod->common.update_tx_switching_en_flg) {
			DP_NOTICE(p_hwfn, false,
				  "FPGA - why are we seeing tx-switching? Overriding it\n");
			p_ramrod->common.tx_switching_en = 0;
			p_ramrod->common.update_tx_switching_en_flg = 1;
		}
#endif
	p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

	p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
	val = p_params->update_anti_spoofing_en_flg;
	p_ramrod->common.update_anti_spoofing_en_flg = val;

	rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
	if (rc != ECORE_SUCCESS) {
		/* Return spq entry which is taken in ecore_sp_init_request()*/
		ecore_spq_return_entry(p_hwfn, p_ent);
		return rc;
	}

	if (p_params->update_ctl_frame_check) {
		p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
		p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
	}

	/* Update mcast bins for VFs, PF doesn't use this functionality */
	ecore_sp_update_mcast_bin(p_ramrod, p_params);

	ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
	ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
	if (p_params->mtu) {
		p_ramrod->common.update_mtu_flg = 1;
		p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
	}

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
					 u16 opaque_fid, u8 vport_id)
{
	struct vport_stop_ramrod_data *p_ramrod;
	struct ecore_sp_init_data init_data;
	struct ecore_spq_entry *p_ent;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_vport_stop(p_hwfn);

	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vport_stop;
	p_ramrod->vport_id = abs_vport_id;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
static enum _ecore_status_t
ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
			 struct ecore_filter_accept_flags *p_accept_flags)
{
	struct ecore_sp_vport_update_params s_params;

	OSAL_MEMSET(&s_params, 0, sizeof(s_params));
	OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
		    sizeof(struct ecore_filter_accept_flags));

	return ecore_vf_pf_vport_update(p_hwfn, &s_params);
}
enum _ecore_status_t
ecore_filter_accept_cmd(struct ecore_dev *p_dev,
			u8 vport,
			struct ecore_filter_accept_flags accept_flags,
			u8 update_accept_any_vlan,
			u8 accept_any_vlan,
			enum spq_mode comp_mode,
			struct ecore_spq_comp_cb *p_comp_data)
{
	struct ecore_sp_vport_update_params vport_update_params;
	enum _ecore_status_t rc;
	int i;

	/* Prepare and send the vport rx_mode change */
	OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = vport;
	vport_update_params.accept_flags = accept_flags;
	vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
	vport_update_params.accept_any_vlan = accept_any_vlan;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

		if (IS_VF(p_dev)) {
			rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
			if (rc != ECORE_SUCCESS)
				return rc;
			continue;
		}

		rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
					   comp_mode, p_comp_data);
		if (rc != ECORE_SUCCESS) {
			DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
			return rc;
		}

		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
			   accept_flags.rx_accept_filter,
			   accept_flags.tx_accept_filter);

		if (update_accept_any_vlan)
			DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
				   "accept_any_vlan=%d configured\n",
				   accept_any_vlan);
	}

	return ECORE_SUCCESS;
}
static enum _ecore_status_t
ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   u16 bd_max_bytes,
			   dma_addr_t bd_chain_phys_addr,
			   dma_addr_t cqe_pbl_addr,
			   u16 cqe_pbl_size)
{
	struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
		   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
		   p_cid->abs.vport_id, p_cid->sb_igu_id);

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_start;

	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
	p_ramrod->complete_cqe_flg = 0;
	p_ramrod->complete_event_flg = 1;

	p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
	DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

	p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
	DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

	if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
		bool b_legacy_vf = !!(p_cid->vf_legacy &
				      ECORE_QCID_LEGACY_VF_RX_PROD);

		p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Queue%s is meant for VF rxq[%02x]\n",
			   b_legacy_vf ? " [legacy]" : "",
			   p_cid->vf_qid);
		p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
	}

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
static enum _ecore_status_t
ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
			    struct ecore_queue_cid *p_cid,
			    u16 bd_max_bytes,
			    dma_addr_t bd_chain_phys_addr,
			    dma_addr_t cqe_pbl_addr,
			    u16 cqe_pbl_size,
			    void OSAL_IOMEM * *pp_prod)
{
	u32 init_prod_val = 0;

	*pp_prod = (u8 OSAL_IOMEM *)
		   p_hwfn->regview +
		   GTT_BAR0_MAP_REG_MSDM_RAM +
		   MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

	/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
	__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
			  (u32 *)(&init_prod_val));

	return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
					  bd_max_bytes,
					  bd_chain_phys_addr,
					  cqe_pbl_addr, cqe_pbl_size);
}
enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
			 u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u16 bd_max_bytes,
			 dma_addr_t bd_chain_phys_addr,
			 dma_addr_t cqe_pbl_addr,
			 u16 cqe_pbl_size,
			 struct ecore_rxq_start_ret_params *p_ret_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	/* Allocate a CID for the queue */
	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
	if (p_cid == OSAL_NULL)
		return ECORE_NOMEM;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
						 bd_max_bytes,
						 bd_chain_phys_addr,
						 cqe_pbl_addr, cqe_pbl_size,
						 &p_ret_params->p_prod);
	else
		rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
					   bd_max_bytes,
					   bd_chain_phys_addr,
					   cqe_pbl_addr,
					   cqe_pbl_size,
					   &p_ret_params->p_prod);

	/* Provide the caller with a reference to the queue handle */
	if (rc != ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}
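
/* Usage sketch (illustrative; the ring variable names are assumptions):
 *
 *	struct ecore_rxq_start_ret_params ret;
 *
 *	rc = ecore_eth_rx_queue_start(p_hwfn, opaque_fid, &q_params,
 *				      rx_buf_size, rx_bd_ring_phys,
 *				      cqe_pbl_phys, cqe_pbl_num_pages, &ret);
 *	if (rc == ECORE_SUCCESS) {
 *		// ret.p_prod is the producer register the caller updates as
 *		// it posts Rx buffers; ret.p_handle identifies the queue in
 *		// later update/stop calls.
 *	}
 */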
enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
			      void **pp_rxq_handles,
			      u8 num_rxqs,
			      u8 complete_cqe_flg,
			      u8 complete_event_flg,
			      enum spq_mode comp_mode,
			      struct ecore_spq_comp_cb *p_comp_data)
{
	struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 i;

	if (IS_VF(p_hwfn->p_dev))
		return ecore_vf_pf_rxqs_update(p_hwfn,
					       (struct ecore_queue_cid **)
					       pp_rxq_handles,
					       num_rxqs,
					       complete_cqe_flg,
					       complete_event_flg);

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	for (i = 0; i < num_rxqs; i++) {
		p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];

		/* Get SPQ entry */
		init_data.cid = p_cid->cid;
		init_data.opaque_fid = p_cid->opaque_fid;

		rc = ecore_sp_init_request(p_hwfn, &p_ent,
					   ETH_RAMROD_RX_QUEUE_UPDATE,
					   PROTOCOLID_ETH, &init_data);
		if (rc != ECORE_SUCCESS)
			return rc;

		p_ramrod = &p_ent->ramrod.rx_queue_update;
		p_ramrod->vport_id = p_cid->abs.vport_id;

		p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
		p_ramrod->complete_cqe_flg = complete_cqe_flg;
		p_ramrod->complete_event_flg = complete_event_flg;

		rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
		if (rc != ECORE_SUCCESS)
			return rc;
	}

	return rc;
}
static enum _ecore_status_t
ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   bool b_eq_completion_only,
			   bool b_cqe_completion)
{
	struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_RX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_queue_stop;
	p_ramrod->vport_id = p_cid->abs.vport_id;
	p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

	/* Cleaning the queue requires the completion to arrive there.
	 * In addition, VFs require the answer to come as eqe to PF.
	 */
	p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
				      !b_eq_completion_only) ||
				     b_cqe_completion;
	p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
				       b_eq_completion_only;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_rxq,
					     bool eq_completion_only,
					     bool cqe_completion)
{
	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
						eq_completion_only,
						cqe_completion);
	else
		rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

	if (rc == ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}
enum _ecore_status_t
ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid,
			   dma_addr_t pbl_addr, u16 pbl_size,
			   u16 pq_id)
{
	struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_TX_QUEUE_START,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.tx_queue_start;
	p_ramrod->vport_id = p_cid->abs.vport_id;

	p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
	p_ramrod->sb_index = p_cid->sb_idx;
	p_ramrod->stats_counter_id = p_cid->abs.stats_id;

	p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
	p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

	p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
	DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

	p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
static enum _ecore_status_t
ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
			    struct ecore_queue_cid *p_cid,
			    u8 tc,
			    dma_addr_t pbl_addr, u16 pbl_size,
			    void OSAL_IOMEM * *pp_doorbell)
{
	enum _ecore_status_t rc;
	u16 pq_id;

	/* TODO - set tc in the pq_params for multi-cos.
	 * If pacing is enabled then select queue according to
	 * rate limiter availability otherwise select queue based
	 * on multi cos.
	 */
	if (IS_ECORE_PACING(p_hwfn))
		pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id);
	else
		pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc);

	rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr,
					pbl_size, pq_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Provide the caller with the necessary return values */
	*pp_doorbell = (u8 OSAL_IOMEM *)
		       p_hwfn->doorbells +
		       DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);

	return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
			 struct ecore_queue_start_common_params *p_params,
			 u8 tc,
			 dma_addr_t pbl_addr, u16 pbl_size,
			 struct ecore_txq_start_ret_params *p_ret_params)
{
	struct ecore_queue_cid *p_cid;
	enum _ecore_status_t rc;

	p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
	if (p_cid == OSAL_NULL)
		return ECORE_INVAL;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
						 pbl_addr, pbl_size,
						 &p_ret_params->p_doorbell);
	else
		rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
					   pbl_addr, pbl_size,
					   &p_ret_params->p_doorbell);

	if (rc != ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	else
		p_ret_params->p_handle = (void *)p_cid;

	return rc;
}
static enum _ecore_status_t
ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
			   struct ecore_queue_cid *p_cid)
{
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc;

	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = p_cid->cid;
	init_data.opaque_fid = p_cid->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_TX_QUEUE_STOP,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
					     void *p_handle)
{
	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
	enum _ecore_status_t rc;

	if (IS_PF(p_hwfn->p_dev))
		rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
	else
		rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);

	if (rc == ECORE_SUCCESS)
		ecore_eth_queue_cid_release(p_hwfn, p_cid);
	return rc;
}
static enum eth_filter_action
ecore_filter_action(enum ecore_filter_opcode opcode)
{
	enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

	switch (opcode) {
	case ECORE_FILTER_ADD:
		action = ETH_FILTER_ACTION_ADD;
		break;
	case ECORE_FILTER_REMOVE:
		action = ETH_FILTER_ACTION_REMOVE;
		break;
	case ECORE_FILTER_FLUSH:
		action = ETH_FILTER_ACTION_REMOVE_ALL;
		break;
	default:
		action = MAX_ETH_FILTER_ACTION;
	}

	return action;
}
static enum _ecore_status_t
ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  struct vport_filter_update_ramrod_data **pp_ramrod,
			  struct ecore_spq_entry **pp_ent,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data)
{
	u8 vport_to_add_to = 0, vport_to_remove_from = 0;
	struct vport_filter_update_ramrod_data *p_ramrod;
	struct eth_filter_cmd *p_first_filter;
	struct eth_filter_cmd *p_second_filter;
	struct ecore_sp_init_data init_data;
	enum eth_filter_action action;
	enum _ecore_status_t rc;

	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
			    &vport_to_remove_from);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
			    &vport_to_add_to);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, pp_ent,
				   ETH_RAMROD_FILTERS_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	*pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
	p_ramrod = *pp_ramrod;
	p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
	p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "Non-Asic - prevent Tx filters\n");
		p_ramrod->filter_cmd_hdr.tx = 0;
	}
#endif

	switch (p_filter_cmd->opcode) {
	case ECORE_FILTER_REPLACE:
	case ECORE_FILTER_MOVE:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
		break;
	default:
		p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
		break;
	}

	p_first_filter = &p_ramrod->filter_cmds[0];
	p_second_filter = &p_ramrod->filter_cmds[1];

	switch (p_filter_cmd->type) {
	case ECORE_FILTER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_MAC;
		break;
	case ECORE_FILTER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_VLAN;
		break;
	case ECORE_FILTER_MAC_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_PAIR;
		break;
	case ECORE_FILTER_INNER_MAC:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
		break;
	case ECORE_FILTER_INNER_VLAN:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
		break;
	case ECORE_FILTER_INNER_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
		break;
	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
		break;
	case ECORE_FILTER_MAC_VNI_PAIR:
		p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
		break;
	case ECORE_FILTER_VNI:
		p_first_filter->type = ETH_FILTER_TYPE_VNI;
		break;
	case ECORE_FILTER_UNUSED: /* @DPDK */
		p_first_filter->type = MAX_ETH_FILTER_TYPE;
		break;
	}

	if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
		ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
				      &p_first_filter->mac_mid,
				      &p_first_filter->mac_lsb,
				      (u8 *)p_filter_cmd->mac);

	if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
		p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);

	if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
	    (p_first_filter->type == ETH_FILTER_TYPE_VNI))
		p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);

	if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
		p_second_filter->type = p_first_filter->type;
		p_second_filter->mac_msb = p_first_filter->mac_msb;
		p_second_filter->mac_mid = p_first_filter->mac_mid;
		p_second_filter->mac_lsb = p_first_filter->mac_lsb;
		p_second_filter->vlan_id = p_first_filter->vlan_id;
		p_second_filter->vni = p_first_filter->vni;

		p_first_filter->action = ETH_FILTER_ACTION_REMOVE;

		p_first_filter->vport_id = vport_to_remove_from;

		p_second_filter->action = ETH_FILTER_ACTION_ADD;
		p_second_filter->vport_id = vport_to_add_to;
	} else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
		p_first_filter->vport_id = vport_to_add_to;
		OSAL_MEMCPY(p_second_filter, p_first_filter,
			    sizeof(*p_second_filter));
		p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
		p_second_filter->action = ETH_FILTER_ACTION_ADD;
	} else {
		action = ecore_filter_action(p_filter_cmd->opcode);

		if (action == MAX_ETH_FILTER_ACTION) {
			DP_NOTICE(p_hwfn, true,
				  "%d is not supported yet\n",
				  p_filter_cmd->opcode);
			return ECORE_NOTIMPL;
		}

		p_first_filter->action = action;
		p_first_filter->vport_id =
		    (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
		    vport_to_remove_from : vport_to_add_to;
	}

	return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
			  u16 opaque_fid,
			  struct ecore_filter_ucast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data)
{
	struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct eth_filter_cmd_header *p_header;
	enum _ecore_status_t rc;

	rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
				       &p_ramrod, &p_ent,
				       comp_mode, p_comp_data);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc);
		return rc;
	}

	p_header = &p_ramrod->filter_cmd_hdr;
	p_header->assert_on_error = p_filter_cmd->assert_on_error;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
		   (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
		   ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
		    "REMOVE" :
		    ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
		     "MOVE" : "REPLACE")),
		   (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
		   ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
		    "VLAN" : "MAC & VLAN"),
		   p_ramrod->filter_cmd_hdr.cmd_cnt,
		   p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n",
		   p_filter_cmd->vport_to_add_to,
		   p_filter_cmd->vport_to_remove_from,
		   p_filter_cmd->mac[0], p_filter_cmd->mac[1],
		   p_filter_cmd->mac[2], p_filter_cmd->mac[3],
		   p_filter_cmd->mac[4], p_filter_cmd->mac[5],
		   p_filter_cmd->vlan);

	return ECORE_SUCCESS;
}
/*******************************************************************************
 * Description:
 *	Calculates crc 32 on a buffer
 *	Note: crc32_length MUST be aligned to 8
 * Return:
 ******************************************************************************/
static u32 ecore_calc_crc32c(u8 *crc32_packet,
			     u32 crc32_length, u32 crc32_seed)
{
	u32 byte = 0, bit = 0, crc32_result = crc32_seed;
	u8 msb = 0, current_byte = 0;

	if ((crc32_packet == OSAL_NULL) ||
	    (crc32_length == 0) || ((crc32_length % 8) != 0)) {
		return crc32_result;
	}

	for (byte = 0; byte < crc32_length; byte++) {
		current_byte = crc32_packet[byte];
		for (bit = 0; bit < 8; bit++) {
			msb = (u8)(crc32_result >> 31);
			crc32_result = crc32_result << 1;
			if (msb != (0x1 & (current_byte >> bit))) {
				crc32_result = crc32_result ^ CRC32_POLY;
				crc32_result |= 1;
			}
		}
	}

	return crc32_result;
}
static u32 ecore_crc32c_le(u32 seed, u8 *mac)
{
	u32 packet_buf[2] = { 0 };

	OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
	return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
}
u8 ecore_mcast_bin_from_mac(u8 *mac)
{
	u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);

	return crc & 0xff;
}
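
/* Worked example (editorial): the multicast approximation maps a MAC address
 * to one of 256 bins via CRC32C:
 *
 *	crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);
 *	bin = crc & 0xff;			// 0..255
 *
 * The caller below then sets bins[bin / 32] |= 1 << (bin % 32), i.e. bit
 * 'bin' inside ETH_MULTICAST_MAC_BINS_IN_REGS 32-bit registers, so the
 * firmware can cheaply test-group membership with false positives but no
 * false negatives.
 */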
static enum _ecore_status_t
ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
			  struct ecore_filter_mcast *p_filter_cmd,
			  enum spq_mode comp_mode,
			  struct ecore_spq_comp_cb *p_comp_data)
{
	struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
	u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc;
	int i;

	if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
		rc = ecore_fw_vport(p_hwfn,
				    p_filter_cmd->vport_to_add_to,
				    &abs_vport_id);
	else
		rc = ecore_fw_vport(p_hwfn,
				    p_filter_cmd->vport_to_remove_from,
				    &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
	init_data.comp_mode = comp_mode;
	init_data.p_comp_data = p_comp_data;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_VPORT_UPDATE,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc);
		return rc;
	}

	p_ramrod = &p_ent->ramrod.vport_update;
	p_ramrod->common.update_approx_mcast_flg = 1;

	/* explicitly clear out the entire vector */
	OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
		    0, sizeof(p_ramrod->approx_mcast.bins));
	OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
	/* filter ADD op is explicit set op and it removes
	 * any existing filters for the vport.
	 */
	if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
		for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
			u32 bit;

			bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
			bins[bit / 32] |= 1 << (bit % 32);
		}

		/* Convert to correct endianity */
		for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
			struct vport_update_ramrod_mcast *p_ramrod_bins;

			p_ramrod_bins = &p_ramrod->approx_mcast;
			p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]);
		}
	}

	p_ramrod->common.vport_id = abs_vport_id;

	rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);

	return rc;
}
enum _ecore_status_t
ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_mcast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	/* only ADD and REMOVE operations are supported for multi-cast */
	if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
	     (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) ||
	    (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
		return ECORE_INVAL;
	}

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		if (IS_VF(p_dev)) {
			ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
			continue;
		}

		rc = ecore_sp_eth_filter_mcast(p_hwfn,
					       p_filter_cmd,
					       comp_mode, p_comp_data);
		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}
enum _ecore_status_t
ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
		       struct ecore_filter_ucast *p_filter_cmd,
		       enum spq_mode comp_mode,
		       struct ecore_spq_comp_cb *p_comp_data)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		u16 opaque_fid;

		if (IS_VF(p_dev)) {
			rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
			continue;
		}

		opaque_fid = p_hwfn->hw_info.opaque_fid;
		rc = ecore_sp_eth_filter_ucast(p_hwfn,
					       opaque_fid,
					       p_filter_cmd,
					       comp_mode, p_comp_data);
		if (rc != ECORE_SUCCESS)
			break;
	}

	return rc;
}
/* Statistics related code */
static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
					     u32 *p_addr, u32 *p_len,
					     u16 statistics_bin)
{
	if (IS_PF(p_hwfn->p_dev)) {
		*p_addr = BAR0_MAP_REG_PSDM_RAM +
		    PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_pstorm_per_queue_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.pstats.address;
		*p_len = p_resp->pfdev_info.stats_info.pstats.len;
	}
}
static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct eth_pstorm_per_queue_stat pstats;
	u32 pstats_addr = 0, pstats_len = 0;

	__ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
					 statistics_bin);

	OSAL_MEMSET(&pstats, 0, sizeof(pstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);

	p_stats->common.tx_ucast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_ucast_bytes);
	p_stats->common.tx_mcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_mcast_bytes);
	p_stats->common.tx_bcast_bytes +=
	    HILO_64_REGPAIR(pstats.sent_bcast_bytes);
	p_stats->common.tx_ucast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_ucast_pkts);
	p_stats->common.tx_mcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_mcast_pkts);
	p_stats->common.tx_bcast_pkts +=
	    HILO_64_REGPAIR(pstats.sent_bcast_pkts);
	p_stats->common.tx_err_drop_pkts +=
	    HILO_64_REGPAIR(pstats.error_drop_pkts);
}
static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats)
{
	struct tstorm_per_port_stat tstats;
	u32 tstats_addr, tstats_len;

	if (IS_PF(p_hwfn->p_dev)) {
		tstats_addr = BAR0_MAP_REG_TSDM_RAM +
		    TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
		tstats_len = sizeof(struct tstorm_per_port_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
		tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
	}

	OSAL_MEMSET(&tstats, 0, sizeof(tstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);

	p_stats->common.mftag_filter_discards +=
	    HILO_64_REGPAIR(tstats.mftag_filter_discard);
	p_stats->common.mac_filter_discards +=
	    HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
}
static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
					     u32 *p_addr, u32 *p_len,
					     u16 statistics_bin)
{
	if (IS_PF(p_hwfn->p_dev)) {
		*p_addr = BAR0_MAP_REG_USDM_RAM +
		    USTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_ustorm_per_queue_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.ustats.address;
		*p_len = p_resp->pfdev_info.stats_info.ustats.len;
	}
}
static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct eth_ustorm_per_queue_stat ustats;
	u32 ustats_addr = 0, ustats_len = 0;

	__ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
					 statistics_bin);

	OSAL_MEMSET(&ustats, 0, sizeof(ustats));
	ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);

	p_stats->common.rx_ucast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
	p_stats->common.rx_mcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
	p_stats->common.rx_bcast_bytes +=
	    HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
	p_stats->common.rx_ucast_pkts +=
	    HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
	p_stats->common.rx_mcast_pkts +=
	    HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
	p_stats->common.rx_bcast_pkts +=
	    HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
}
static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
					     u32 *p_addr, u32 *p_len,
					     u16 statistics_bin)
{
	if (IS_PF(p_hwfn->p_dev)) {
		*p_addr = BAR0_MAP_REG_MSDM_RAM +
		    MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
		*p_len = sizeof(struct eth_mstorm_per_queue_stat);
	} else {
		struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
		struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;

		*p_addr = p_resp->pfdev_info.stats_info.mstats.address;
		*p_len = p_resp->pfdev_info.stats_info.mstats.len;
	}
}
static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_eth_stats *p_stats,
				     u16 statistics_bin)
{
	struct eth_mstorm_per_queue_stat mstats;
	u32 mstats_addr = 0, mstats_len = 0;

	__ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
					 statistics_bin);

	OSAL_MEMSET(&mstats, 0, sizeof(mstats));
	ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);

	p_stats->common.no_buff_discards +=
	    HILO_64_REGPAIR(mstats.no_buff_discard);
	p_stats->common.packet_too_big_discard +=
	    HILO_64_REGPAIR(mstats.packet_too_big_discard);
	p_stats->common.ttl0_discard +=
	    HILO_64_REGPAIR(mstats.ttl0_discard);
	p_stats->common.tpa_coalesced_pkts +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
	p_stats->common.tpa_coalesced_events +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_events);
	p_stats->common.tpa_aborts_num +=
	    HILO_64_REGPAIR(mstats.tpa_aborts_num);
	p_stats->common.tpa_coalesced_bytes +=
	    HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
}
static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_eth_stats *p_stats)
{
	struct ecore_eth_stats_common *p_common = &p_stats->common;
	struct port_stats port_stats;
	int j;

	OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));

	ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
			  p_hwfn->mcp_info->port_addr +
			  OFFSETOF(struct public_port, stats),
			  sizeof(port_stats));

	p_common->rx_64_byte_packets += port_stats.eth.r64;
	p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
	p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
	p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
	p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
	p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
	p_common->rx_crc_errors += port_stats.eth.rfcs;
	p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
	p_common->rx_pause_frames += port_stats.eth.rxpf;
	p_common->rx_pfc_frames += port_stats.eth.rxpp;
	p_common->rx_align_errors += port_stats.eth.raln;
	p_common->rx_carrier_errors += port_stats.eth.rfcr;
	p_common->rx_oversize_packets += port_stats.eth.rovr;
	p_common->rx_jabbers += port_stats.eth.rjbr;
	p_common->rx_undersize_packets += port_stats.eth.rund;
	p_common->rx_fragments += port_stats.eth.rfrg;
	p_common->tx_64_byte_packets += port_stats.eth.t64;
	p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
	p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
	p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
	p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
	p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
	p_common->tx_pause_frames += port_stats.eth.txpf;
	p_common->tx_pfc_frames += port_stats.eth.txpp;
	p_common->rx_mac_bytes += port_stats.eth.rbyte;
	p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
	p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
	p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
	p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
	p_common->tx_mac_bytes += port_stats.eth.tbyte;
	p_common->tx_mac_uc_packets += port_stats.eth.txuca;
	p_common->tx_mac_mc_packets += port_stats.eth.txmca;
	p_common->tx_mac_bc_packets += port_stats.eth.txbca;
	p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
	for (j = 0; j < 8; j++) {
		p_common->brb_truncates += port_stats.brb.brb_truncate[j];
		p_common->brb_discards += port_stats.brb.brb_discard[j];
	}

	if (ECORE_IS_BB(p_hwfn->p_dev)) {
		struct ecore_eth_stats_bb *p_bb = &p_stats->bb;

		p_bb->rx_1519_to_1522_byte_packets +=
			port_stats.eth.u0.bb0.r1522;
		p_bb->rx_1519_to_2047_byte_packets +=
			port_stats.eth.u0.bb0.r2047;
		p_bb->rx_2048_to_4095_byte_packets +=
			port_stats.eth.u0.bb0.r4095;
		p_bb->rx_4096_to_9216_byte_packets +=
			port_stats.eth.u0.bb0.r9216;
		p_bb->rx_9217_to_16383_byte_packets +=
			port_stats.eth.u0.bb0.r16383;
		p_bb->tx_1519_to_2047_byte_packets +=
			port_stats.eth.u1.bb1.t2047;
		p_bb->tx_2048_to_4095_byte_packets +=
			port_stats.eth.u1.bb1.t4095;
		p_bb->tx_4096_to_9216_byte_packets +=
			port_stats.eth.u1.bb1.t9216;
		p_bb->tx_9217_to_16383_byte_packets +=
			port_stats.eth.u1.bb1.t16383;
		p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
		p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
	} else {
		struct ecore_eth_stats_ah *p_ah = &p_stats->ah;

		p_ah->rx_1519_to_max_byte_packets +=
			port_stats.eth.u0.ah0.r1519_to_max;
		p_ah->tx_1519_to_max_byte_packets =
			port_stats.eth.u1.ah1.t1519_to_max;
	}

	p_common->link_change_count = ecore_rd(p_hwfn, p_ptt,
					       p_hwfn->mcp_info->port_addr +
					       OFFSETOF(struct public_port,
							link_change_count));
}
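
/* Collect the m/u/t/p-storm statistics for the given bin and, when
 * requested (and an MCP is present), the MFW port statistics as well.
 */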
void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
			     struct ecore_ptt *p_ptt,
			     struct ecore_eth_stats *stats,
			     u16 statistics_bin, bool b_get_port_stats)
{
	__ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
	__ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
	__ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
	__ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);

	/* Avoid getting PORT stats for emulation. */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		return;

	if (b_get_port_stats && p_hwfn->mcp_info)
		__ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
}
static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
				   struct ecore_eth_stats *stats)
{
	u8 fw_vport = 0;
	int i;

	OSAL_MEMSET(stats, 0, sizeof(*stats));

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
			ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
		bool b_get_port_stats;

		if (IS_PF(p_dev)) {
			/* The main vport index is relative first */
			if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
				DP_ERR(p_hwfn, "No vport available!\n");
				goto out;
			}
		}

		if (IS_PF(p_dev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn);
		__ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
					b_get_port_stats);

out:
		if (IS_PF(p_dev) && p_ptt)
			ecore_ptt_release(p_hwfn, p_ptt);
	}
}
void ecore_get_vport_stats(struct ecore_dev *p_dev,
			   struct ecore_eth_stats *stats)
{
	u32 i;

	if (!p_dev) {
		OSAL_MEMSET(stats, 0, sizeof(*stats));
		return;
	}

	_ecore_get_vport_stats(p_dev, stats);

	if (!p_dev->reset_stats)
		return;

	/* Reduce the statistics baseline */
	for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
		((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
}
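
/* Reads above are reported relative to p_dev->reset_stats: both
 * structures are walked as flat u64 arrays, so e.g. a HW counter of
 * 1000 with a baseline of 400 is reported as 600. The baseline itself
 * is captured by ecore_reset_vport_stats() below.
 */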
/* zeroes V-PORT specific portion of stats (Port stats remain untouched) */
void ecore_reset_vport_stats(struct ecore_dev *p_dev)
{
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
		struct eth_mstorm_per_queue_stat mstats;
		struct eth_ustorm_per_queue_stat ustats;
		struct eth_pstorm_per_queue_stat pstats;
		struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
			ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
		u32 addr = 0, len = 0;

		if (IS_PF(p_dev) && !p_ptt) {
			DP_ERR(p_hwfn, "Failed to acquire ptt\n");
			continue;
		}

		OSAL_MEMSET(&mstats, 0, sizeof(mstats));
		__ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);

		OSAL_MEMSET(&ustats, 0, sizeof(ustats));
		__ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);

		OSAL_MEMSET(&pstats, 0, sizeof(pstats));
		__ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
		ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);

		if (IS_PF(p_dev))
			ecore_ptt_release(p_hwfn, p_ptt);
	}

	/* PORT statistics are not necessarily reset, so we need to
	 * read and create a baseline for future statistics.
	 * Link change stat is maintained by MFW, return its value as is.
	 */
	if (!p_dev->reset_stats) {
		DP_INFO(p_dev, "Reset stats not allocated\n");
	} else {
		_ecore_get_vport_stats(p_dev, p_dev->reset_stats);
		p_dev->reset_stats->common.link_change_count = 0;
	}
}
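
/* ARFS (accelerated RFS) n-tuple filtering is implemented on top of the
 * firmware's GFT classification; the helper below translates the
 * driver-level configuration mode into the corresponding HSI profile.
 */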
static enum gft_profile_type
ecore_arfs_mode_to_hsi(enum ecore_filter_config_mode mode)
{
	if (mode == ECORE_FILTER_CONFIG_MODE_5_TUPLE)
		return GFT_PROFILE_TYPE_4_TUPLE;

	if (mode == ECORE_FILTER_CONFIG_MODE_IP_DEST)
		return GFT_PROFILE_TYPE_IP_DST_ADDR;

	if (mode == ECORE_FILTER_CONFIG_MODE_TUNN_TYPE)
		return GFT_PROFILE_TYPE_TUNNEL_TYPE;

	if (mode == ECORE_FILTER_CONFIG_MODE_IP_SRC)
		return GFT_PROFILE_TYPE_IP_SRC_ADDR;

	return GFT_PROFILE_TYPE_L4_DST_PORT;
}
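
/* Note the naming asymmetry above: the driver's 5_TUPLE mode maps to the
 * HSI's 4_TUPLE profile, and any unknown mode falls back to matching on
 * the L4 destination port.
 */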
void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt,
			       struct ecore_arfs_config_params *p_cfg_params)
{
	if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
		return;

	if (p_cfg_params->mode != ECORE_FILTER_CONFIG_MODE_DISABLE) {
		ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
				 p_cfg_params->tcp,
				 p_cfg_params->udp,
				 p_cfg_params->ipv4,
				 p_cfg_params->ipv6,
				 ecore_arfs_mode_to_hsi(p_cfg_params->mode));
		DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
			   "tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s\n",
			   p_cfg_params->tcp ? "Enable" : "Disable",
			   p_cfg_params->udp ? "Enable" : "Disable",
			   p_cfg_params->ipv4 ? "Enable" : "Disable",
			   p_cfg_params->ipv6 ? "Enable" : "Disable");
	} else {
		ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
	}

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode : %d\n",
		   (int)p_cfg_params->mode);
}
enum _ecore_status_t
ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
				  struct ecore_spq_comp_cb *p_cb,
				  dma_addr_t p_addr, u16 length,
				  u16 qid, u8 vport_id,
				  bool b_is_add)
{
	struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	u16 abs_rx_q_id = 0;
	u8 abs_vport_id = 0;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);

	init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;

	if (p_cb) {
		init_data.comp_mode = ECORE_SPQ_MODE_CB;
		init_data.p_comp_data = p_cb;
	} else {
		init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
	}

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   ETH_RAMROD_GFT_UPDATE_FILTER,
				   PROTOCOLID_ETH, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.rx_update_gft;

	DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
	p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);

	p_ramrod->action_icid_valid = 0;
	p_ramrod->action_icid = 0;

	p_ramrod->rx_qid_valid = 1;
	p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id);

	p_ramrod->flow_id_valid = 0;
	p_ramrod->flow_id = 0;

	p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id);
	p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
					   : GFT_DELETE_FILTER;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
		   "V[%0x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n",
		   abs_vport_id, abs_rx_q_id,
		   b_is_add ? "Adding" : "Removing",
		   (unsigned long)p_addr, length);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
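
/* Usage sketch (hypothetical caller, names assumed): steer flows whose
 * header matches the buffer at hdr_dma to Rx queue rxq_id of vport 0,
 * blocking until the ramrod completes by passing no completion callback:
 *
 *	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, OSAL_NULL,
 *					       hdr_dma, hdr_len,
 *					       rxq_id, 0, true);
 *
 * With a non-NULL p_cb the request instead completes asynchronously in
 * ECORE_SPQ_MODE_CB mode.
 */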
enum _ecore_status_t ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    struct ecore_queue_cid *p_cid,
					    u16 *p_rx_coal)
{
	u32 coalesce, address, is_valid;
	struct cau_sb_entry sb_entry;
	u8 timer_res;
	enum _ecore_status_t rc;

	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
				 p_cid->sb_igu_id * sizeof(u64),
				 (u64)(osal_uintptr_t)&sb_entry, 2,
				 OSAL_NULL /* default parameters */);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);

	address = BAR0_MAP_REG_USDM_RAM +
		  USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
	coalesce = ecore_rd(p_hwfn, p_ptt, address);

	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
	if (!is_valid)
		return ECORE_INVAL;

	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
	*p_rx_coal = (u16)(coalesce << timer_res);

	return ECORE_SUCCESS;
}
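
/* The Rx getter above and the Tx getter below decode the same encoding:
 * the CAU SB entry holds a timer resolution (TIMER_RES0 for Rx,
 * TIMER_RES1 for Tx) and the queue zone holds a timeset, so the
 * configured value is reconstructed as (timeset << timer_res); e.g. a
 * timeset of 0x18 at timer_res 1 reads back as 0x30.
 */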
enum _ecore_status_t ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    struct ecore_queue_cid *p_cid,
					    u16 *p_tx_coal)
{
	u32 coalesce, address, is_valid;
	struct cau_sb_entry sb_entry;
	u8 timer_res;
	enum _ecore_status_t rc;

	rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
				 p_cid->sb_igu_id * sizeof(u64),
				 (u64)(osal_uintptr_t)&sb_entry, 2,
				 OSAL_NULL /* default parameters */);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
		return rc;
	}

	timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);

	address = BAR0_MAP_REG_XSDM_RAM +
		  XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
	coalesce = ecore_rd(p_hwfn, p_ptt, address);

	is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
	if (!is_valid)
		return ECORE_INVAL;

	coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
	*p_tx_coal = (u16)(coalesce << timer_res);

	return ECORE_SUCCESS;
}
enum _ecore_status_t
ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
			 void *handle)
{
	struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_ptt *p_ptt;

	if (IS_VF(p_hwfn->p_dev)) {
		rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
		if (rc != ECORE_SUCCESS)
			DP_NOTICE(p_hwfn, false,
				  "Unable to read queue coalescing\n");

		return rc;
	}

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_AGAIN;

	if (p_cid->b_is_rx) {
		rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
		if (rc != ECORE_SUCCESS)
			goto out;
	} else {
		rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
		if (rc != ECORE_SUCCESS)
			goto out;
	}

out:
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
enum _ecore_status_t
ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   struct ecore_queue_cid *p_cid, u32 rate)
{
	struct ecore_mcp_link_state *p_link;
	u8 vport;

	vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id);
	p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;

	DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
		   "About to rate limit qm vport %d for queue %d with rate %d\n",
		   vport, p_cid->rel.queue_id, rate);

	return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
				   p_link->speed);
}
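
/* RSS indirection-table entries are updated through a per-PF slot in
 * TSTORM RAM: the host writes the new entry with the valid bit set and
 * firmware clears the bit once it consumes the update. The constants
 * below bound how long we poll for a previous update to drain.
 */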
#define RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT	100
#define RSS_TSTORM_UPDATE_STATUS_POLL_PERIOD_US	1

enum _ecore_status_t
ecore_update_eth_rss_ind_table_entry(struct ecore_hwfn *p_hwfn,
				     u8 vport_id,
				     u8 ind_table_index,
				     u16 ind_table_value)
{
	struct eth_tstorm_rss_update_data update_data = { 0 };
	void OSAL_IOMEM *addr = OSAL_NULL;
	enum _ecore_status_t rc;
	u8 abs_vport_id;
	u32 cnt = 0;

	OSAL_BUILD_BUG_ON(sizeof(update_data) != sizeof(u64));

	rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
	if (rc != ECORE_SUCCESS)
		return rc;

	addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
	       GTT_BAR0_MAP_REG_TSDM_RAM +
	       TSTORM_ETH_RSS_UPDATE_OFFSET(p_hwfn->rel_pf_id);

	*(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr);

	for (cnt = 0; update_data.valid &&
	     cnt < RSS_TSTORM_UPDATE_STATUS_MAX_POLL_COUNT; cnt++) {
		OSAL_UDELAY(RSS_TSTORM_UPDATE_STATUS_POLL_PERIOD_US);
		*(u64 *)(&update_data) = DIRECT_REG_RD64(p_hwfn, addr);
	}

	if (update_data.valid) {
		DP_NOTICE(p_hwfn, true,
			  "rss update valid status is not clear! valid=0x%x vport id=%d ind_table_idx=%d ind_table_value=%d.\n",
			  update_data.valid, vport_id, ind_table_index,
			  ind_table_value);

		return ECORE_AGAIN;
	}

	update_data.valid = 1;
	update_data.ind_table_index = ind_table_index;
	update_data.ind_table_value = ind_table_value;
	update_data.vport_id = abs_vport_id;

	DIRECT_REG_WR64(p_hwfn, addr, *(u64 *)(&update_data));

	return ECORE_SUCCESS;
}
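
/* Usage sketch (hypothetical values, table size assumed): spread an
 * indirection table over four Rx queues one entry at a time;
 * ECORE_AGAIN means a previous update was never consumed and the entry
 * should be retried:
 *
 *	for (idx = 0; idx < 128; idx++)
 *		rc = ecore_update_eth_rss_ind_table_entry(p_hwfn, vport_id,
 *							  (u8)idx,
 *							  rxq_id[idx % 4]);
 */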