]> git.proxmox.com Git - ceph.git/blame - ceph/src/spdk/dpdk/drivers/net/ice/ice_ethdev.h
update source to Ceph Pacific 16.2.2
[ceph.git] / ceph / src / spdk / dpdk / drivers / net / ice / ice_ethdev.h
CommitLineData
9f95a23c
TL
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
4
5#ifndef _ICE_ETHDEV_H_
6#define _ICE_ETHDEV_H_
7
8#include <rte_kvargs.h>
9
10#include <rte_ethdev_driver.h>
11
12#include "base/ice_common.h"
13#include "base/ice_adminq_cmd.h"
14
15#define ICE_VLAN_TAG_SIZE 4
16
17#define ICE_ADMINQ_LEN 32
18#define ICE_SBIOQ_LEN 32
19#define ICE_MAILBOXQ_LEN 32
20#define ICE_ADMINQ_BUF_SZ 4096
21#define ICE_SBIOQ_BUF_SZ 4096
22#define ICE_MAILBOXQ_BUF_SZ 4096
23/* Number of queues per TC should be one of 1, 2, 4, 8, 16, 32, 64 */
24#define ICE_MAX_Q_PER_TC 64
25#define ICE_NUM_DESC_DEFAULT 512
26#define ICE_BUF_SIZE_MIN 1024
27#define ICE_FRAME_SIZE_MAX 9728
28#define ICE_QUEUE_BASE_ADDR_UNIT 128
29/* number of VSIs and queue default setting */
30#define ICE_MAX_QP_NUM_PER_VF 16
31#define ICE_DEFAULT_QP_NUM_FDIR 1
32#define ICE_UINT32_BIT_SIZE (CHAR_BIT * sizeof(uint32_t))
33#define ICE_VFTA_SIZE (4096 / ICE_UINT32_BIT_SIZE)
34/* Maximun number of MAC addresses */
35#define ICE_NUM_MACADDR_MAX 64
36/* Maximum number of VFs */
37#define ICE_MAX_VF 128
38#define ICE_MAX_INTR_QUEUE_NUM 256
39
40#define ICE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
41#define ICE_RX_VEC_ID RTE_INTR_VEC_RXTX_OFFSET
42
43#define ICE_MAX_PKT_TYPE 1024
44
/* DDP package search path */
#define ICE_PKG_FILE_DEFAULT "/lib/firmware/intel/ice/ddp/ice.pkg"
#define ICE_PKG_FILE_UPDATES "/lib/firmware/updates/intel/ice/ddp/ice.pkg"
#define ICE_PKG_FILE_SEARCH_PATH_DEFAULT "/lib/firmware/intel/ice/ddp/"
#define ICE_PKG_FILE_SEARCH_PATH_UPDATES "/lib/firmware/updates/intel/ice/ddp/"
#define ICE_MAX_PKG_FILENAME_SIZE   256

/**
 * vlan_id is a 12 bit number.
 * The VFTA array is actually a 4096 bit array, 128 of 32bit elements.
 * 2^5 = 32. The value of the lower 5 bits specifies the bit in the 32bit
 * element; the higher 7 bits specify the VFTA array index.
 */
#define ICE_VFTA_BIT(vlan_id)    (1 << ((vlan_id) & 0x1F))
#define ICE_VFTA_IDX(vlan_id)    ((vlan_id) >> 5)

/* Default TC traffic in case DCB is not enabled */
#define ICE_DEFAULT_TCMAP        0x1
#define ICE_FDIR_QUEUE_ID        0

/* Always assign pool 0 to main VSI, VMDQ will start from 1 */
#define ICE_VMDQ_POOL_BASE       1

/* Default Rx descriptor thresholds. */
#define ICE_DEFAULT_RX_FREE_THRESH  32
#define ICE_DEFAULT_RX_PTHRESH      8
#define ICE_DEFAULT_RX_HTHRESH      8
#define ICE_DEFAULT_RX_WTHRESH      0

/* Default Tx descriptor thresholds. */
#define ICE_DEFAULT_TX_FREE_THRESH  32
#define ICE_DEFAULT_TX_PTHRESH      32
#define ICE_DEFAULT_TX_HTHRESH      0
#define ICE_DEFAULT_TX_WTHRESH      0
#define ICE_DEFAULT_TX_RSBIT_THRESH 32

/* Bit shift and mask */
#define ICE_4_BIT_WIDTH  (CHAR_BIT / 2)
#define ICE_4_BIT_MASK   RTE_LEN2MASK(ICE_4_BIT_WIDTH, uint8_t)
#define ICE_8_BIT_WIDTH  CHAR_BIT
#define ICE_8_BIT_MASK   UINT8_MAX
#define ICE_16_BIT_WIDTH (CHAR_BIT * 2)
#define ICE_16_BIT_MASK  UINT16_MAX
#define ICE_32_BIT_WIDTH (CHAR_BIT * 4)
#define ICE_32_BIT_MASK  UINT32_MAX
#define ICE_40_BIT_WIDTH (CHAR_BIT * 5)
#define ICE_40_BIT_MASK  RTE_LEN2MASK(ICE_40_BIT_WIDTH, uint64_t)
#define ICE_48_BIT_WIDTH (CHAR_BIT * 6)
#define ICE_48_BIT_MASK  RTE_LEN2MASK(ICE_48_BIT_WIDTH, uint64_t)
/* PF feature flags kept in struct ice_pf::flags. */
#define ICE_FLAG_RSS                   BIT_ULL(0)
#define ICE_FLAG_DCB                   BIT_ULL(1)
#define ICE_FLAG_VMDQ                  BIT_ULL(2)
#define ICE_FLAG_SRIOV                 BIT_ULL(3)
#define ICE_FLAG_HEADER_SPLIT_DISABLED BIT_ULL(4)
#define ICE_FLAG_HEADER_SPLIT_ENABLED  BIT_ULL(5)
#define ICE_FLAG_FDIR                  BIT_ULL(6)
#define ICE_FLAG_VXLAN                 BIT_ULL(7)
#define ICE_FLAG_RSS_AQ_CAPABLE        BIT_ULL(8)
#define ICE_FLAG_VF_MAC_BY_PF          BIT_ULL(9)
/* Union of every feature flag above. */
#define ICE_FLAG_ALL (ICE_FLAG_RSS | \
		      ICE_FLAG_DCB | \
		      ICE_FLAG_VMDQ | \
		      ICE_FLAG_SRIOV | \
		      ICE_FLAG_HEADER_SPLIT_DISABLED | \
		      ICE_FLAG_HEADER_SPLIT_ENABLED | \
		      ICE_FLAG_FDIR | \
		      ICE_FLAG_VXLAN | \
		      ICE_FLAG_RSS_AQ_CAPABLE | \
		      ICE_FLAG_VF_MAC_BY_PF)

/* All RSS hash types the driver advertises. */
#define ICE_RSS_OFFLOAD_ALL ( \
	ETH_RSS_FRAG_IPV4 | \
	ETH_RSS_NONFRAG_IPV4_TCP | \
	ETH_RSS_NONFRAG_IPV4_UDP | \
	ETH_RSS_NONFRAG_IPV4_SCTP | \
	ETH_RSS_NONFRAG_IPV4_OTHER | \
	ETH_RSS_FRAG_IPV6 | \
	ETH_RSS_NONFRAG_IPV6_TCP | \
	ETH_RSS_NONFRAG_IPV6_UDP | \
	ETH_RSS_NONFRAG_IPV6_SCTP | \
	ETH_RSS_NONFRAG_IPV6_OTHER | \
	ETH_RSS_L2_PAYLOAD)

/**
 * The overhead from MTU to max frame size.
 * Considering QinQ packet, the VLAN tag needs to be counted twice.
 */
#define ICE_ETH_OVERHEAD \
	(RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN + ICE_VLAN_TAG_SIZE * 2)
/* DDP (Dynamic Device Personalization) package type. */
enum ice_pkg_type {
	ICE_PKG_TYPE_UNKNOWN,    /* package not recognized */
	ICE_PKG_TYPE_OS_DEFAULT, /* OS-default package */
	ICE_PKG_TYPE_COMMS,      /* comms package */
};

struct ice_adapter;
143/**
144 * MAC filter structure
145 */
146struct ice_mac_filter_info {
f67539c2 147 struct rte_ether_addr mac_addr;
9f95a23c
TL
148};
149
150TAILQ_HEAD(ice_mac_filter_list, ice_mac_filter);
151
152/* MAC filter list structure */
153struct ice_mac_filter {
154 TAILQ_ENTRY(ice_mac_filter) next;
155 struct ice_mac_filter_info mac_info;
156};
157
/**
 * VLAN filter structure
 */
struct ice_vlan_filter_info {
	uint16_t vlan_id; /* 12-bit VLAN identifier */
};

TAILQ_HEAD(ice_vlan_filter_list, ice_vlan_filter);

/* Node of a per-VSI VLAN filter list. */
struct ice_vlan_filter {
	TAILQ_ENTRY(ice_vlan_filter) next;
	struct ice_vlan_filter_info vlan_info;
};
172
/* One contiguous range of resource indexes tracked in a pool. */
struct pool_entry {
	LIST_ENTRY(pool_entry) next;
	uint16_t base; /* first index of the range */
	uint16_t len;  /* number of indexes in the range */
};

LIST_HEAD(res_list, pool_entry);

/* Book-keeping for a pool of hardware resources. */
struct ice_res_pool_info {
	uint32_t base;              /* Resource start index */
	uint32_t num_alloc;         /* Allocated resource number */
	uint32_t num_free;          /* Total available resource number */
	struct res_list alloc_list; /* Allocated resource list */
	struct res_list free_list;  /* Available resource list */
};
188
TAILQ_HEAD(ice_vsi_list_head, ice_vsi_list);

struct ice_vsi;

/* Node that links one VSI into a sibling-VSI list. */
struct ice_vsi_list {
	TAILQ_ENTRY(ice_vsi_list) list;
	struct ice_vsi *vsi; /* the VSI this node refers to */
};

struct ice_rx_queue;
struct ice_tx_queue;
201
202/**
203 * Structure that defines a VSI, associated with a adapter.
204 */
205struct ice_vsi {
206 struct ice_adapter *adapter; /* Backreference to associated adapter */
207 struct ice_aqc_vsi_props info; /* VSI properties */
208 /**
209 * When drivers loaded, only a default main VSI exists. In case new VSI
210 * needs to add, HW needs to know the layout that VSIs are organized.
211 * Besides that, VSI isan element and can't switch packets, which needs
212 * to add new component VEB to perform switching. So, a new VSI needs
213 * to specify the the uplink VSI (Parent VSI) before created. The
214 * uplink VSI will check whether it had a VEB to switch packets. If no,
215 * it will try to create one. Then, uplink VSI will move the new VSI
216 * into its' sib_vsi_list to manage all the downlink VSI.
217 * sib_vsi_list: the VSI list that shared the same uplink VSI.
218 * parent_vsi : the uplink VSI. It's NULL for main VSI.
219 * veb : the VEB associates with the VSI.
220 */
221 struct ice_vsi_list sib_vsi_list; /* sibling vsi list */
222 struct ice_vsi *parent_vsi;
223 enum ice_vsi_type type; /* VSI types */
224 uint16_t vlan_num; /* Total VLAN number */
225 uint16_t mac_num; /* Total mac number */
226 struct ice_mac_filter_list mac_list; /* macvlan filter list */
227 struct ice_vlan_filter_list vlan_list; /* vlan filter list */
228 uint16_t nb_qps; /* Number of queue pairs VSI can occupy */
229 uint16_t nb_used_qps; /* Number of queue pairs VSI uses */
230 uint16_t max_macaddrs; /* Maximum number of MAC addresses */
231 uint16_t base_queue; /* The first queue index of this VSI */
232 uint16_t vsi_id; /* Hardware Id */
233 uint16_t idx; /* vsi_handle: SW index in hw->vsi_ctx */
234 /* VF number to which the VSI connects, valid when VSI is VF type */
235 uint8_t vf_num;
236 uint16_t msix_intr; /* The MSIX interrupt binds to VSI */
237 uint16_t nb_msix; /* The max number of msix vector */
238 uint8_t enabled_tc; /* The traffic class enabled */
239 uint8_t vlan_anti_spoof_on; /* The VLAN anti-spoofing enabled */
240 uint8_t vlan_filter_on; /* The VLAN filter enabled */
241 /* information about rss configuration */
242 u32 rss_key_size;
243 u32 rss_lut_size;
244 uint8_t *rss_lut;
245 uint8_t *rss_key;
246 struct ice_eth_stats eth_stats_offset;
247 struct ice_eth_stats eth_stats;
248 bool offset_loaded;
249};
250
/* Protocol extraction type reported through the Rx flexible descriptor. */
enum proto_xtr_type {
	PROTO_XTR_NONE,
	PROTO_XTR_VLAN,
	PROTO_XTR_IPV4,
	PROTO_XTR_IPV6,
	PROTO_XTR_IPV6_FLOW,
	PROTO_XTR_TCP,
};

/* Tunnel type associated with a flow director rule. */
enum ice_fdir_tunnel_type {
	ICE_FDIR_TUNNEL_TYPE_NONE = 0,
	ICE_FDIR_TUNNEL_TYPE_VXLAN,
	ICE_FDIR_TUNNEL_TYPE_GTPU,
	ICE_FDIR_TUNNEL_TYPE_GTPU_EH,
};
266
267struct rte_flow;
268TAILQ_HEAD(ice_flow_list, rte_flow);
269
270struct ice_flow_parser_node;
271TAILQ_HEAD(ice_parser_list, ice_flow_parser_node);
272
273struct ice_fdir_filter_conf {
274 struct ice_fdir_fltr input;
275 enum ice_fdir_tunnel_type tunnel_type;
276
277 struct ice_fdir_counter *counter; /* flow specific counter context */
278 struct rte_flow_action_count act_count;
279
280 uint64_t input_set;
281};
282
283#define ICE_MAX_FDIR_FILTER_NUM (1024 * 16)
284
285struct ice_fdir_fltr_pattern {
286 enum ice_fltr_ptype flow_type;
287
288 union {
289 struct ice_fdir_v4 v4;
290 struct ice_fdir_v6 v6;
291 } ip, mask;
292
293 struct ice_fdir_udp_gtp gtpu_data;
294 struct ice_fdir_udp_gtp gtpu_mask;
295
296 struct ice_fdir_extra ext_data;
297 struct ice_fdir_extra ext_mask;
298
299 enum ice_fdir_tunnel_type tunnel_type;
300};
301
/* Flow director counter pool sizing. */
#define ICE_FDIR_COUNTER_DEFAULT_POOL_SIZE	1
#define ICE_FDIR_COUNTER_MAX_POOL_SIZE		32
#define ICE_FDIR_COUNTERS_PER_BLOCK		256
/* First absolute counter index of hardware counter block 'base_idx'. */
#define ICE_FDIR_COUNTER_INDEX(base_idx) \
				((base_idx) * ICE_FDIR_COUNTERS_PER_BLOCK)
struct ice_fdir_counter_pool;

/* One hardware flow director counter. */
struct ice_fdir_counter {
	TAILQ_ENTRY(ice_fdir_counter) next;
	struct ice_fdir_counter_pool *pool; /* pool this counter belongs to */
	uint8_t shared;    /* non-zero when shared between rules */
	uint32_t ref_cnt;  /* number of rules referencing this counter */
	uint32_t id;       /* counter identifier */
	uint64_t hits;     /* accumulated packet count */
	uint64_t bytes;    /* accumulated byte count */
	uint32_t hw_index; /* index of the counter in hardware */
};

TAILQ_HEAD(ice_fdir_counter_list, ice_fdir_counter);

struct ice_fdir_counter_pool {
	TAILQ_ENTRY(ice_fdir_counter_pool) next;
	struct ice_fdir_counter_list counter_list;
	/* C99 flexible array member (was a GNU zero-length array [0]);
	 * same layout, but standard-conforming.
	 */
	struct ice_fdir_counter counters[];
};

TAILQ_HEAD(ice_fdir_counter_pool_list, ice_fdir_counter_pool);

/* Container tracking every allocated counter pool. */
struct ice_fdir_counter_pool_container {
	struct ice_fdir_counter_pool_list pool_list;
	struct ice_fdir_counter_pool *pools[ICE_FDIR_COUNTER_MAX_POOL_SIZE];
	uint8_t index_free; /* index of the first free slot in pools[] */
};
335
336/**
337 * A structure used to define fields of a FDIR related info.
338 */
339struct ice_fdir_info {
340 struct ice_vsi *fdir_vsi; /* pointer to fdir VSI structure */
341 struct ice_tx_queue *txq;
342 struct ice_rx_queue *rxq;
343 void *prg_pkt; /* memory for fdir program packet */
344 uint64_t dma_addr; /* physic address of packet memory*/
345 const struct rte_memzone *mz;
346 struct ice_fdir_filter_conf conf;
347
348 struct ice_fdir_filter_conf **hash_map;
349 struct rte_hash *hash_table;
350
351 struct ice_fdir_counter_pool_container counter;
352};
353
9f95a23c
TL
354struct ice_pf {
355 struct ice_adapter *adapter; /* The adapter this PF associate to */
356 struct ice_vsi *main_vsi; /* pointer to main VSI structure */
357 /* Used for next free software vsi idx.
358 * To save the effort, we don't recycle the index.
359 * Suppose the indexes are more than enough.
360 */
361 uint16_t next_vsi_idx;
362 uint16_t vsis_allocated;
363 uint16_t vsis_unallocated;
364 struct ice_res_pool_info qp_pool; /*Queue pair pool */
365 struct ice_res_pool_info msix_pool; /* MSIX interrupt pool */
366 struct rte_eth_dev_data *dev_data; /* Pointer to the device data */
f67539c2 367 struct rte_ether_addr dev_addr; /* PF device mac address */
9f95a23c
TL
368 uint64_t flags; /* PF feature flags */
369 uint16_t hash_lut_size; /* The size of hash lookup table */
370 uint16_t lan_nb_qp_max;
371 uint16_t lan_nb_qps; /* The number of queue pairs of LAN */
f67539c2
TL
372 uint16_t base_queue; /* The base queue pairs index in the device */
373 uint8_t *proto_xtr; /* Protocol extraction type for all queues */
374 uint16_t fdir_nb_qps; /* The number of queue pairs of Flow Director */
375 uint16_t fdir_qp_offset;
376 struct ice_fdir_info fdir; /* flow director info */
377 uint16_t hw_prof_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
378 uint16_t fdir_fltr_cnt[ICE_FLTR_PTYPE_MAX][ICE_FD_HW_SEG_MAX];
9f95a23c
TL
379 struct ice_hw_port_stats stats_offset;
380 struct ice_hw_port_stats stats;
381 /* internal packet statistics, it should be excluded from the total */
382 struct ice_eth_stats internal_stats_offset;
383 struct ice_eth_stats internal_stats;
384 bool offset_loaded;
385 bool adapter_stopped;
f67539c2
TL
386 struct ice_flow_list flow_list;
387 rte_spinlock_t flow_ops_lock;
388 struct ice_parser_list rss_parser_list;
389 struct ice_parser_list perm_parser_list;
390 struct ice_parser_list dist_parser_list;
391 bool init_link_up;
392};
393
#define ICE_MAX_QUEUE_NUM  2048

/**
 * Cache devargs parse result.
 */
struct ice_devargs {
	int safe_mode_support;  /* devarg: safe mode enabled */
	uint8_t proto_xtr_dflt; /* default protocol extraction type */
	int pipe_mode_support;  /* devarg: pipeline mode enabled */
	int flow_mark_support;  /* devarg: flow mark enabled */
	uint8_t proto_xtr[ICE_MAX_QUEUE_NUM]; /* per-queue extraction type */
};
406
407/**
408 * Structure to store private data for each PF/VF instance.
409 */
410struct ice_adapter {
411 /* Common for both PF and VF */
412 struct ice_hw hw;
413 struct rte_eth_dev *eth_dev;
414 struct ice_pf pf;
415 bool rx_bulk_alloc_allowed;
f67539c2
TL
416 bool rx_vec_allowed;
417 bool tx_vec_allowed;
9f95a23c
TL
418 bool tx_simple_allowed;
419 /* ptype mapping table */
420 uint32_t ptype_tbl[ICE_MAX_PKT_TYPE] __rte_cache_min_aligned;
421 bool is_safe_mode;
f67539c2
TL
422 struct ice_devargs devargs;
423 enum ice_pkg_type active_pkg_type; /* loaded ddp package type */
9f95a23c
TL
424};
425
/* Port-VLAN (PVID) configuration applied to a VSI. */
struct ice_vsi_vlan_pvid_info {
	uint16_t on; /* Enable or disable pvid */
	union {
		uint16_t pvid; /* Valid in case 'on' is set to set pvid */
		struct {
			/* Valid in case 'on' is cleared. 'tagged' will reject
			 * tagged packets, while 'untagged' will reject
			 * untagged packets.
			 */
			uint8_t tagged;
			uint8_t untagged;
		} reject;
	} config;
};
440
/* Map an ethdev to its underlying PCI device. */
#define ICE_DEV_TO_PCI(eth_dev) \
	RTE_DEV_TO_PCI((eth_dev)->device)

/* Accessors from the ethdev private data (struct ice_adapter). */
#define ICE_DEV_PRIVATE_TO_PF(adapter) \
	(&((struct ice_adapter *)adapter)->pf)
#define ICE_DEV_PRIVATE_TO_HW(adapter) \
	(&((struct ice_adapter *)adapter)->hw)
#define ICE_DEV_PRIVATE_TO_ADAPTER(adapter) \
	((struct ice_adapter *)adapter)

/* Accessors from a VSI. */
#define ICE_VSI_TO_HW(vsi) \
	(&(((struct ice_vsi *)vsi)->adapter->hw))
#define ICE_VSI_TO_PF(vsi) \
	(&(((struct ice_vsi *)vsi)->adapter->pf))
#define ICE_VSI_TO_ETH_DEV(vsi) \
	(((struct ice_vsi *)vsi)->adapter->eth_dev)

/* Accessors from a PF. */
#define ICE_PF_TO_HW(pf) \
	(&(((struct ice_pf *)pf)->adapter->hw))
#define ICE_PF_TO_ADAPTER(pf) \
	((struct ice_adapter *)(pf)->adapter)
#define ICE_PF_TO_ETH_DEV(pf) \
	(((struct ice_pf *)pf)->adapter->eth_dev)

/* Driver-internal function prototypes. */
enum ice_pkg_type ice_load_pkg_type(struct ice_hw *hw);
struct ice_vsi *
ice_setup_vsi(struct ice_pf *pf, enum ice_vsi_type type);
int
ice_release_vsi(struct ice_vsi *vsi);
void ice_vsi_enable_queues_intr(struct ice_vsi *vsi);
void ice_vsi_disable_queues_intr(struct ice_vsi *vsi);
void ice_vsi_queues_bind_intr(struct ice_vsi *vsi);
/**
 * Round n down to the nearest power of two.
 *
 * @param n value to round down
 * @return the largest power of two <= n, or 0 when n <= 0
 *
 * The original only special-cased n == 0; a negative n would reach
 * __builtin_clz() and then shift into the sign bit, which is undefined
 * behaviour, so n <= 0 is now rejected up front.  Behaviour for every
 * previously-defined input (n >= 0) is unchanged.
 */
static inline int
ice_align_floor(int n)
{
	if (n <= 0)
		return 0;
	return 1 << (sizeof(n) * CHAR_BIT - 1 - __builtin_clz(n));
}
484
485#define ICE_PHY_TYPE_SUPPORT_50G(phy_type) \
486 (((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CR2) || \
487 ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR2) || \
488 ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR2) || \
489 ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR2) || \
490 ((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC) || \
491 ((phy_type) & ICE_PHY_TYPE_LOW_50G_LAUI2) || \
492 ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC) || \
493 ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI2) || \
494 ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_CP) || \
495 ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_SR) || \
496 ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_FR) || \
497 ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_LR) || \
498 ((phy_type) & ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4) || \
499 ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC) || \
500 ((phy_type) & ICE_PHY_TYPE_LOW_50G_AUI1))
501
502#define ICE_PHY_TYPE_SUPPORT_100G_LOW(phy_type) \
503 (((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR4) || \
504 ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR4) || \
505 ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_LR4) || \
506 ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR4) || \
507 ((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC) || \
508 ((phy_type) & ICE_PHY_TYPE_LOW_100G_CAUI4) || \
509 ((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC) || \
510 ((phy_type) & ICE_PHY_TYPE_LOW_100G_AUI4) || \
511 ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4) || \
512 ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4) || \
513 ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_CP2) || \
514 ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_SR2) || \
515 ((phy_type) & ICE_PHY_TYPE_LOW_100GBASE_DR))
516
517#define ICE_PHY_TYPE_SUPPORT_100G_HIGH(phy_type) \
518 (((phy_type) & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) || \
519 ((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC) || \
520 ((phy_type) & ICE_PHY_TYPE_HIGH_100G_CAUI2) || \
521 ((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC) || \
522 ((phy_type) & ICE_PHY_TYPE_HIGH_100G_AUI2))
523
524#endif /* _ICE_ETHDEV_H_ */