1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2009-2018 Microsoft Corp.
3 * Copyright (c) 2016 Brocade Communications Systems, Inc.
4 * Copyright (c) 2012 NetApp Inc.
5 * Copyright (c) 2012 Citrix Inc.
/*
 * Tunable ethdev params
 */
#define HN_MIN_RX_BUF_SIZE	1024
#define HN_MAX_XFER_LEN		2048
#define HN_MAX_MAC_ADDRS	1
#define HN_MAX_CHANNELS		64

/* Claimed to be 12232B */
#define HN_MTU_MAX		(9 * 1024)

/* Polling interval when waiting on a channel, in microseconds */
#define HN_CHAN_INTERVAL_US	100

/* Host monitor interval */
#define HN_CHAN_LATENCY_NS	50000

/* Buffers need to be aligned */
/* Guard the definition: some toolchains/system headers already define PAGE_SIZE */
#ifndef PAGE_SIZE
#define PAGE_SIZE	4096
#endif

#define PAGE_MASK	(PAGE_SIZE - 1)
45 /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
46 uint64_t size_bins
[8];
51 struct vmbus_channel
*chan
;
55 struct rte_mempool
*txdesc_pool
;
58 /* Applied packet transmission aggregation limits. */
63 /* Packet transmission aggregation states */
64 struct hn_txdesc
*agg_txd
;
67 struct rndis_packet_msg
*agg_prevpkt
;
69 struct hn_stats stats
;
74 struct vmbus_channel
*chan
;
75 struct rte_mempool
*mb_pool
;
76 struct rte_ring
*rx_ring
;
78 rte_spinlock_t ring_lock
;
82 struct hn_stats stats
;
/* multi-packet data from host */
struct hn_rx_bufinfo {
	struct vmbus_channel *chan;		/* channel the host buffer arrived on */
	struct rte_mbuf_ext_shared_info shinfo;	/* shared info for externally attached mbuf data */
} __rte_cache_aligned;
/* Sentinel port id meaning no VF port is associated (see hn_vf_attached()) */
#define HN_INVALID_PORT	UINT16_MAX
99 struct rte_vmbus_device
*vmbus
;
100 struct hn_rx_queue
*primary
;
101 rte_rwlock_t vf_lock
;
109 uint32_t link_status
;
112 struct rte_mem_resource
*rxbuf_res
; /* UIO resource for Rx */
113 struct hn_rx_bufinfo
*rxbuf_info
;
114 uint32_t rxbuf_section_cnt
; /* # of Rx sections */
115 volatile uint32_t rxbuf_outstanding
;
116 uint16_t max_queues
; /* Max available queues */
118 uint64_t rss_offloads
;
120 rte_spinlock_t chim_lock
;
121 struct rte_mem_resource
*chim_res
; /* UIO resource for Tx */
122 struct rte_bitmap
*chim_bmap
; /* Send buffer map */
124 uint32_t chim_szmax
; /* Max size per buffer */
125 uint32_t chim_cnt
; /* Max packets per buffer */
130 uint32_t rndis_agg_size
;
131 uint32_t rndis_agg_pkts
;
132 uint32_t rndis_agg_align
;
134 volatile uint32_t rndis_pending
;
135 rte_atomic32_t rndis_req_id
;
136 uint8_t rndis_resp
[256];
140 uint16_t rss_ind
[128];
142 struct rte_eth_dev_owner owner
;
143 struct rte_intr_handle vf_intr
;
145 struct vmbus_channel
*channels
[HN_MAX_CHANNELS
];
148 static inline struct vmbus_channel
*
149 hn_primary_chan(const struct hn_data
*hv
)
151 return hv
->channels
[0];
/*
 * Process pending completions/events on a channel; tx_limit bounds the
 * amount of transmit completion work done per call.
 */
uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
			   uint32_t tx_limit);

/* Burst transmit/receive entry points (rte_eth_tx_burst/rx_burst callbacks) */
uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		      uint16_t nb_pkts);
uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		      uint16_t nb_pkts);
/* Tx send-buffer ("chimney") setup/teardown (see chim_* resources) */
int hn_chim_init(struct rte_eth_dev *dev);
void hn_chim_uninit(struct rte_eth_dev *dev);

/* Refresh the device link status; 'wait' selects wait-to-complete behavior */
int hn_dev_link_update(struct rte_eth_dev *dev, int wait);
/* Transmit queue ethdev callbacks */
int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			  uint16_t nb_desc, unsigned int socket_id,
			  const struct rte_eth_txconf *tx_conf);
void hn_dev_tx_queue_release(void *arg);
void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
			  struct rte_eth_txq_info *qinfo);
int hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);
/* Allocate a receive queue structure on the given NUMA socket. */
struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
				      uint16_t queue_id,
				      unsigned int socket_id);
/* Receive queue ethdev callbacks */
int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
			  uint16_t queue_idx, uint16_t nb_desc,
			  unsigned int socket_id,
			  const struct rte_eth_rxconf *rx_conf,
			  struct rte_mempool *mp);
void hn_dev_rx_queue_release(void *arg);
/* Release all Rx and Tx queues of the device */
void hn_dev_free_queues(struct rte_eth_dev *dev);
184 /* Check if VF is attached */
186 hn_vf_attached(const struct hn_data
*hv
)
188 return hv
->vf_port
!= HN_INVALID_PORT
;
192 * Get VF device for existing netvsc device
193 * Assumes vf_lock is held.
195 static inline struct rte_eth_dev
*
196 hn_get_vf_dev(const struct hn_data
*hv
)
198 uint16_t vf_port
= hv
->vf_port
;
200 if (vf_port
== HN_INVALID_PORT
)
203 return &rte_eth_devices
[vf_port
];
/*
 * VF (SR-IOV companion device) proxy operations: these mirror the ethdev
 * control ops onto the attached VF device — presumably implemented in the
 * corresponding hn_vf source file; confirm there.
 */
int hn_vf_info_get(struct hn_data *hv,
		   struct rte_eth_dev_info *info);
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf);
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev);
int hn_vf_start(struct rte_eth_dev *dev);
void hn_vf_reset(struct rte_eth_dev *dev);
void hn_vf_stop(struct rte_eth_dev *dev);
void hn_vf_close(struct rte_eth_dev *dev);

/* Multicast/promiscuous mode forwarding to the VF */
int hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
int hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
int hn_vf_promiscuous_enable(struct rte_eth_dev *dev);
int hn_vf_promiscuous_disable(struct rte_eth_dev *dev);
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
		       struct rte_ether_addr *mc_addr_set,
		       uint32_t nb_mc_addr);
/* VF link status and queue setup/teardown proxies */
int hn_vf_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete);
int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf);
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp);
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id);

/* VF basic statistics proxies */
int hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
int hn_vf_stats_reset(struct rte_eth_dev *dev);
/* Fetch extended-statistic names from the VF; 'size' is the array capacity. */
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
/* VF extended statistics and RSS configuration proxies */
int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int offset, unsigned int n);
int hn_vf_xstats_reset(struct rte_eth_dev *dev);
int hn_vf_rss_hash_update(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf);
250 int hn_vf_reta_hash_update(struct rte_eth_dev
*dev
,
251 struct rte_eth_rss_reta_entry64
*reta_conf
,