/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2009-2018 Microsoft Corp.
 * Copyright (c) 2016 Brocade Communications Systems, Inc.
 * Copyright (c) 2012 NetApp Inc.
 * Copyright (c) 2012 Citrix Inc.
 * All rights reserved.
 */

/*
 * Tunable ethdev params
 */
#define HN_MIN_RX_BUF_SIZE	1024
#define HN_MAX_XFER_LEN		2048
#define HN_MAX_MAC_ADDRS	1
#define HN_MAX_CHANNELS		64

/* Claimed to be 12232B */
#define HN_MTU_MAX		(9 * 1024)

/* Retry interval */
#define HN_CHAN_INTERVAL_US	100

/* Host monitor interval */
#define HN_CHAN_LATENCY_NS	50000

/* Buffers need to be aligned */
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

#ifndef PAGE_MASK
#define PAGE_MASK (PAGE_SIZE - 1)
#endif
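
/*
 * Illustrative note (not part of the driver): PAGE_MASK is the usual
 * low-bits mask, so for example a length can be rounded up to a whole
 * number of pages with:
 *
 *	aligned_len = (len + PAGE_MASK) & ~PAGE_MASK;
 */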

struct hn_data;
struct hn_txdesc;

struct hn_stats {
	uint64_t	packets;
	uint64_t	bytes;
	uint64_t	errors;
	uint64_t	ring_full;
	uint64_t	multicast;
	uint64_t	broadcast;
	/* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */
	uint64_t	size_bins[8];
};
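
/*
 * Illustrative sketch (an assumption, not the driver's actual accounting):
 * one plausible way a packet length could be mapped onto the RFC 2819
 * style size_bins[] layout documented above. The helper name and the exact
 * thresholds are hypothetical; the real classification is done in the
 * Rx/Tx data path.
 */
static inline unsigned int
hn_example_size_bin(uint32_t len)
{
	if (len < 64)
		return 0;	/* undersized */
	else if (len == 64)
		return 1;	/* exactly 64 bytes */
	else if (len <= 127)
		return 2;
	else if (len <= 255)
		return 3;
	else if (len <= 511)
		return 4;
	else if (len <= 1023)
		return 5;
	else if (len <= 1518)
		return 6;	/* up to full-size Ethernet frame */
	else
		return 7;	/* oversized */
}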

struct hn_tx_queue {
	struct hn_data	*hv;
	struct vmbus_channel *chan;
	uint16_t	port_id;
	uint16_t	queue_id;
	uint32_t	free_thresh;

	/* Applied packet transmission aggregation limits. */
	uint32_t	agg_szmax;
	uint32_t	agg_pktmax;
	uint32_t	agg_align;

	/* Packet transmission aggregation states */
	struct hn_txdesc *agg_txd;
	uint32_t	agg_pktleft;
	uint32_t	agg_szleft;
	struct rndis_packet_msg *agg_prevpkt;

	struct hn_stats	stats;
};
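
/*
 * Illustrative sketch (an assumption, not the driver's transmit path):
 * the aggregation limits and state above are typically consumed along
 * these lines when deciding whether another packet of `pktsize` bytes
 * still fits into the currently open aggregation buffer.
 */
static inline int
hn_example_agg_fits(const struct hn_tx_queue *txq, uint32_t pktsize)
{
	/* No aggregation request open yet; caller must start a new one. */
	if (txq->agg_txd == NULL)
		return 0;

	/* Per-buffer packet budget or byte budget exhausted. */
	if (txq->agg_pktleft == 0 || pktsize > txq->agg_szleft)
		return 0;

	return 1;
}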

struct hn_rx_queue {
	struct hn_data	*hv;
	struct vmbus_channel *chan;
	struct rte_mempool *mb_pool;
	struct rte_ring *rx_ring;

	rte_spinlock_t	ring_lock;
	uint32_t	event_sz;
	uint16_t	port_id;
	uint16_t	queue_id;
	struct hn_stats	stats;

	void *event_buf;
};

/* multi-packet data from host */
struct hn_rx_bufinfo {
	struct vmbus_channel *chan;
	struct hn_data *hv;
	uint64_t	xactid;
	struct rte_mbuf_ext_shared_info shinfo;
} __rte_cache_aligned;
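
/*
 * Illustrative note (an assumption about usage, not defined by this header):
 * the embedded rte_mbuf_ext_shared_info is the refcount/free-callback block
 * expected by DPDK's external-buffer mbuf API, so the Rx path can hand host
 * receive-buffer memory to an mbuf without copying, roughly:
 *
 *	rte_pktmbuf_attach_extbuf(m, data_ptr, data_iova, data_len,
 *				  &rxb->shinfo);
 *
 * with the VMBus buffer returned to the host from shinfo's free callback
 * once all mbuf references are gone.
 */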

#define HN_INVALID_PORT	UINT16_MAX

struct hn_data {
	struct rte_vmbus_device *vmbus;
	struct hn_rx_queue *primary;
	rte_spinlock_t	vf_lock;
	uint16_t	port_id;
	uint16_t	vf_port;

	uint8_t		vf_present;
	uint8_t		closed;
	uint8_t		vlan_strip;

	uint32_t	link_status;
	uint32_t	link_speed;

	struct rte_mem_resource *rxbuf_res;	/* UIO resource for Rx */
	struct hn_rx_bufinfo *rxbuf_info;
	uint32_t	rxbuf_section_cnt;	/* # of Rx sections */
	volatile uint32_t rxbuf_outstanding;
	uint16_t	max_queues;		/* Max available queues */
	uint16_t	num_queues;
	uint64_t	rss_offloads;

	struct rte_mem_resource *chim_res;	/* UIO resource for Tx */
	struct rte_mempool *tx_pool;		/* Tx descriptors */
	uint32_t	chim_szmax;		/* Max size per buffer */
	uint32_t	chim_cnt;		/* Max packets per buffer */

	uint32_t	latency;
	uint32_t	nvs_ver;
	uint32_t	ndis_ver;
	uint32_t	rndis_agg_size;
	uint32_t	rndis_agg_pkts;
	uint32_t	rndis_agg_align;

	volatile uint32_t rndis_pending;
	rte_atomic32_t	rndis_req_id;
	uint8_t		rndis_resp[256];

	struct ether_addr mac_addr;

	struct rte_eth_dev_owner owner;
	struct rte_intr_handle	vf_intr;

	struct vmbus_channel *channels[HN_MAX_CHANNELS];
};

static inline struct vmbus_channel *
hn_primary_chan(const struct hn_data *hv)
{
	return hv->channels[0];
}

uint32_t hn_process_events(struct hn_data *hv, uint16_t queue_id,
			   uint32_t tx_limit);

uint16_t hn_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		      uint16_t nb_pkts);
uint16_t hn_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		      uint16_t nb_pkts);
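
/*
 * Illustrative note (an assumption about how these are wired up, presumably
 * in the ethdev init path): hn_xmit_pkts/hn_recv_pkts follow the standard
 * DPDK burst-callback signatures, e.g.
 *
 *	eth_dev->tx_pkt_burst = hn_xmit_pkts;
 *	eth_dev->rx_pkt_burst = hn_recv_pkts;
 *
 * so applications reach them through rte_eth_tx_burst()/rte_eth_rx_burst().
 */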

int hn_tx_pool_init(struct rte_eth_dev *dev);
void hn_tx_pool_uninit(struct rte_eth_dev *dev);
int hn_dev_link_update(struct rte_eth_dev *dev, int wait);
int hn_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			  uint16_t nb_desc, unsigned int socket_id,
			  const struct rte_eth_txconf *tx_conf);
void hn_dev_tx_queue_release(void *arg);
void hn_dev_tx_queue_info(struct rte_eth_dev *dev, uint16_t queue_idx,
			  struct rte_eth_txq_info *qinfo);
int hn_dev_tx_done_cleanup(void *arg, uint32_t free_cnt);

struct hn_rx_queue *hn_rx_queue_alloc(struct hn_data *hv,
				      uint16_t queue_id,
				      unsigned int socket_id);
int hn_dev_rx_queue_setup(struct rte_eth_dev *dev,
			  uint16_t queue_idx, uint16_t nb_desc,
			  unsigned int socket_id,
			  const struct rte_eth_rxconf *rx_conf,
			  struct rte_mempool *mp);
void hn_dev_rx_queue_release(void *arg);
void hn_dev_free_queues(struct rte_eth_dev *dev);

/* Check if VF is attached */
static inline bool
hn_vf_attached(const struct hn_data *hv)
{
	return hv->vf_port != HN_INVALID_PORT;
}

/* Get VF device for existing netvsc device */
static inline struct rte_eth_dev *
hn_get_vf_dev(const struct hn_data *hv)
{
	uint16_t vf_port = hv->vf_port;

	/* make sure vf_port is loaded */
	rte_smp_rmb();

	if (vf_port == HN_INVALID_PORT)
		return NULL;
	else
		return &rte_eth_devices[vf_port];
}
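
/*
 * Illustrative note (an assumption about the typical caller, not defined by
 * this header): code that can be accelerated by the VF usually fetches it
 * once and falls back to the synthetic path when no VF is attached, roughly:
 *
 *	struct rte_eth_dev *vf_dev = hn_get_vf_dev(hv);
 *
 *	if (vf_dev)
 *		... delegate the operation to vf_dev ...
 *	else
 *		... handle it on the netvsc (synthetic) device ...
 */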

void hn_vf_info_get(struct hn_data *hv,
		    struct rte_eth_dev_info *info);
int hn_vf_add(struct rte_eth_dev *dev, struct hn_data *hv);
int hn_vf_configure(struct rte_eth_dev *dev,
		    const struct rte_eth_conf *dev_conf);
const uint32_t *hn_vf_supported_ptypes(struct rte_eth_dev *dev);
int hn_vf_start(struct rte_eth_dev *dev);
void hn_vf_reset(struct rte_eth_dev *dev);
void hn_vf_stop(struct rte_eth_dev *dev);
void hn_vf_close(struct rte_eth_dev *dev);

void hn_vf_allmulticast_enable(struct rte_eth_dev *dev);
void hn_vf_allmulticast_disable(struct rte_eth_dev *dev);
void hn_vf_promiscuous_enable(struct rte_eth_dev *dev);
void hn_vf_promiscuous_disable(struct rte_eth_dev *dev);
int hn_vf_mc_addr_list(struct rte_eth_dev *dev,
		       struct ether_addr *mc_addr_set,
		       uint32_t nb_mc_addr);

int hn_vf_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete);
int hn_vf_tx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_txconf *tx_conf);
void hn_vf_tx_queue_release(struct hn_data *hv, uint16_t queue_id);
int hn_vf_rx_queue_setup(struct rte_eth_dev *dev,
			 uint16_t queue_idx, uint16_t nb_desc,
			 unsigned int socket_id,
			 const struct rte_eth_rxconf *rx_conf,
			 struct rte_mempool *mp);
void hn_vf_rx_queue_release(struct hn_data *hv, uint16_t queue_id);

int hn_vf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
void hn_vf_stats_reset(struct rte_eth_dev *dev);
int hn_vf_xstats_get_names(struct rte_eth_dev *dev,
			   struct rte_eth_xstat_name *xstats_names,
			   unsigned int size);
int hn_vf_xstats_get(struct rte_eth_dev *dev,
		     struct rte_eth_xstat *xstats,
		     unsigned int n);
void hn_vf_xstats_reset(struct rte_eth_dev *dev);