]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/net/vmxnet3/vmxnet3_int.h
Merge tag 'v5.10-rc1' into asoc-5.10
[mirror_ubuntu-hirsute-kernel.git] / drivers / net / vmxnet3 / vmxnet3_int.h
CommitLineData
d1a890fa
SB
1/*
2 * Linux driver for VMware's vmxnet3 ethernet NIC.
3 *
123db31d 4 * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
d1a890fa
SB
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; version 2 of the License and no later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
13 * NON INFRINGEMENT. See the GNU General Public License for more
14 * details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * The full GNU General Public License is included in this distribution in
21 * the file called "COPYING".
22 *
190af10f 23 * Maintained by: pv-drivers@vmware.com
d1a890fa
SB
24 *
25 */
26
27#ifndef _VMXNET3_INT_H
28#define _VMXNET3_INT_H
29
72e85c45 30#include <linux/bitops.h>
d1a890fa
SB
31#include <linux/ethtool.h>
32#include <linux/delay.h>
33#include <linux/netdevice.h>
34#include <linux/pci.h>
d1a890fa 35#include <linux/compiler.h>
d1a890fa
SB
36#include <linux/slab.h>
37#include <linux/spinlock.h>
38#include <linux/ioport.h>
39#include <linux/highmem.h>
d1a890fa
SB
40#include <linux/timer.h>
41#include <linux/skbuff.h>
42#include <linux/interrupt.h>
43#include <linux/workqueue.h>
44#include <linux/uaccess.h>
45#include <asm/dma.h>
46#include <asm/page.h>
47
48#include <linux/tcp.h>
49#include <linux/udp.h>
50#include <linux/ip.h>
51#include <linux/ipv6.h>
52#include <linux/in.h>
53#include <linux/etherdevice.h>
54#include <asm/checksum.h>
55#include <linux/if_vlan.h>
56#include <linux/if_arp.h>
57#include <linux/inetdevice.h>
eebb02b1 58#include <linux/log2.h>
d1a890fa
SB
59
60#include "vmxnet3_defs.h"
61
#ifdef DEBUG
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI(debug)"
#else
# define VMXNET3_DRIVER_VERSION_REPORT VMXNET3_DRIVER_VERSION_STRING"-NAPI"
#endif


/*
 * Version numbers
 */
#define VMXNET3_DRIVER_VERSION_STRING   "1.5.0.0-k"

/* Each byte of this 32-bit integer encodes a version number in
 * VMXNET3_DRIVER_VERSION_STRING.
 */
#define VMXNET3_DRIVER_VERSION_NUM      0x01050000

#if defined(CONFIG_PCI_MSI)
	/* RSS only makes sense if MSI-X is supported. */
	#define VMXNET3_RSS
#endif

/* 0-based hardware revision codes; vmxnet3_adapter.version holds
 * rev + 1 (see the VMXNET3_VERSION_GE_* macros).
 */
#define VMXNET3_REV_4		3	/* Vmxnet3 Rev. 4 */
#define VMXNET3_REV_3		2	/* Vmxnet3 Rev. 3 */
#define VMXNET3_REV_2		1	/* Vmxnet3 Rev. 2 */
#define VMXNET3_REV_1		0	/* Vmxnet3 Rev. 1 */
d1a890fa
SB
/*
 * Capabilities (legacy vmxnet capability bit-mask values).
 */

enum {
	VMNET_CAP_SG	        = 0x0001, /* Can do scatter-gather transmits. */
	VMNET_CAP_IP4_CSUM      = 0x0002, /* Can checksum only TCP/UDP over
					   * IPv4 */
	VMNET_CAP_HW_CSUM       = 0x0004, /* Can checksum all packets. */
	VMNET_CAP_HIGH_DMA      = 0x0008, /* Can DMA to high memory. */
	VMNET_CAP_TOE	        = 0x0010, /* Supports TCP/IP offload. */
	VMNET_CAP_TSO	        = 0x0020, /* Supports TCP Segmentation
					   * offload */
	VMNET_CAP_SW_TSO        = 0x0040, /* Supports SW TCP Segmentation */
	VMNET_CAP_VMXNET_APROM  = 0x0080, /* Vmxnet APROM support */
	VMNET_CAP_HW_TX_VLAN    = 0x0100, /* Can we do VLAN tagging in HW */
	VMNET_CAP_HW_RX_VLAN    = 0x0200, /* Can we do VLAN untagging in HW */
	VMNET_CAP_SW_VLAN       = 0x0400, /* VLAN tagging/untagging in SW */
	VMNET_CAP_WAKE_PCKT_RCV = 0x0800, /* Can wake on network packet recv? */
	VMNET_CAP_ENABLE_INT_INLINE = 0x1000,  /* Enable Interrupt Inline */
	VMNET_CAP_ENABLE_HEADER_COPY = 0x2000,  /* copy header for vmkernel */
	VMNET_CAP_TX_CHAIN      = 0x4000, /* Guest can use multiple tx entries
					   * for a pkt */
	VMNET_CAP_RX_CHAIN      = 0x8000, /* pkt can span multiple rx entries */
	VMNET_CAP_LPD           = 0x10000, /* large pkt delivery */
	VMNET_CAP_BPF           = 0x20000, /* BPF Support in VMXNET Virtual HW*/
	VMNET_CAP_SG_SPAN_PAGES = 0x40000, /* Scatter-gather can span multiple*/
					   /* pages transmits */
	VMNET_CAP_IP6_CSUM      = 0x80000, /* Can do IPv6 csum offload. */
	VMNET_CAP_TSO6          = 0x100000, /* TSO seg. offload for IPv6 pkts. */
	VMNET_CAP_TSO256k       = 0x200000, /* Can do TSO seg offload for */
					    /* pkts up to 256kB. */
	VMNET_CAP_UPT           = 0x400000 /* Support UPT */
};
123
/*
 * Maximum devices supported.
 */
#define MAX_ETHERNET_CARDS		10
#define MAX_PCI_PASSTHRU_DEVICE		6
129
/* A command (tx or rx) descriptor ring shared with the device. */
struct vmxnet3_cmd_ring {
	union Vmxnet3_GenericDesc *base;	/* virtual address of ring base */
	u32		size;			/* number of descriptors */
	u32		next2fill;		/* next desc the driver will fill */
	u32		next2comp;		/* next desc expected to complete */
	u8		gen;			/* generation bit; flipped each
						 * time next2fill wraps */
	dma_addr_t	basePA;			/* DMA address of ring base */
};
138
139static inline void
140vmxnet3_cmd_ring_adv_next2fill(struct vmxnet3_cmd_ring *ring)
141{
142 ring->next2fill++;
143 if (unlikely(ring->next2fill == ring->size)) {
144 ring->next2fill = 0;
145 VMXNET3_FLIP_RING_GEN(ring->gen);
146 }
147}
148
/* Advance next2comp by one descriptor, wrapping at ring->size. */
static inline void
vmxnet3_cmd_ring_adv_next2comp(struct vmxnet3_cmd_ring *ring)
{
	VMXNET3_INC_RING_IDX_ONLY(ring->next2comp, ring->size);
}
154
155static inline int
156vmxnet3_cmd_ring_desc_avail(struct vmxnet3_cmd_ring *ring)
157{
158 return (ring->next2comp > ring->next2fill ? 0 : ring->size) +
159 ring->next2comp - ring->next2fill - 1;
160}
161
/* A completion ring; the driver advances next2proc as it consumes
 * completed entries.
 */
struct vmxnet3_comp_ring {
	union Vmxnet3_GenericDesc *base;	/* virtual address of ring base */
	u32		size;			/* number of descriptors */
	u32		next2proc;		/* next entry to process */
	u8		gen;			/* generation bit the driver expects */
	u8		intr_idx;		/* intr vector used for this ring */
	dma_addr_t	basePA;			/* DMA address of ring base */
};
170
171static inline void
172vmxnet3_comp_ring_adv_next2proc(struct vmxnet3_comp_ring *ring)
173{
174 ring->next2proc++;
175 if (unlikely(ring->next2proc == ring->size)) {
176 ring->next2proc = 0;
177 VMXNET3_FLIP_RING_GEN(ring->gen);
178 }
179}
180
/* Tx data ring: buffers that packet headers/data are copied into
 * (see vmxnet3_tx_ctx.copy_size).
 */
struct vmxnet3_tx_data_ring {
	struct Vmxnet3_TxDataDesc *base;	/* virtual address of ring base */
	u32		size;			/* number of data descriptors */
	dma_addr_t	basePA;			/* DMA address of ring base */
};
186
/* How a tx buffer was DMA-mapped, so it can be unmapped the same way. */
enum vmxnet3_buf_map_type {
	VMXNET3_MAP_INVALID = 0,	/* slot not in use */
	VMXNET3_MAP_NONE,		/* no DMA mapping for this slot */
	VMXNET3_MAP_SINGLE,		/* mapped as a single buffer */
	VMXNET3_MAP_PAGE,		/* mapped as a page (fragment) */
};
193
/* Driver-side bookkeeping for one tx descriptor. */
struct vmxnet3_tx_buf_info {
	u32		map_type;	/* enum vmxnet3_buf_map_type */
	u16		len;		/* length of the mapped buffer */
	u16		sop_idx;	/* ring index of the pkt's first desc */
	dma_addr_t	dma_addr;	/* DMA address of the mapped buffer */
	struct sk_buff	*skb;		/* skb this buffer belongs to */
};
201
/* Software (driver-side) per-tx-queue statistics. */
struct vmxnet3_tq_driver_stats {
	u64 drop_total;     /* # of pkts dropped by the driver, the
			     * counters below track droppings due to
			     * different reasons
			     */
	u64 drop_too_many_frags;
	u64 drop_oversized_hdr;
	u64 drop_hdr_inspect_err;
	u64 drop_tso;

	u64 tx_ring_full;
	u64 linearized;         /* # of pkts linearized */
	u64 copy_skb_header;    /* # of times we have to copy skb header */
	u64 oversized_hdr;
};
217
/* Per-packet parse/offload context built while filling tx descriptors
 * for one skb.
 */
struct vmxnet3_tx_ctx {
	bool	ipv4;		/* pkt is IPv4 */
	bool	ipv6;		/* pkt is IPv6 */
	u16	mss;		/* TSO segment size; 0 if not TSO */
	u32	l4_offset;	/* only valid for pkts requesting tso or csum
				 * offloading. For encap offload, it refers to
				 * inner L4 offset i.e. it includes outer header
				 * encap header and inner eth and ip header size
				 */

	u32	l4_hdr_size;	/* only valid if mss != 0
				 * Refers to inner L4 hdr size for encap
				 * offload
				 */
	u32	copy_size;	/* # of bytes copied into the data ring */
	union Vmxnet3_GenericDesc *sop_txd;	/* start-of-packet tx desc */
	union Vmxnet3_GenericDesc *eop_txd;	/* end-of-packet tx desc */
};
236
/* One transmit queue: command ring + data ring + completion ring plus
 * driver bookkeeping.  Cache-line aligned to avoid false sharing
 * between queues.
 */
struct vmxnet3_tx_queue {
	char			name[IFNAMSIZ+8]; /* To identify interrupt */
	struct vmxnet3_adapter	*adapter;	/* back-pointer to the device */
	spinlock_t		tx_lock;
	struct vmxnet3_cmd_ring	tx_ring;
	struct vmxnet3_tx_buf_info	*buf_info; /* one entry per tx desc */
	dma_addr_t		buf_info_pa;	/* DMA address of buf_info */
	struct vmxnet3_tx_data_ring	data_ring;
	struct vmxnet3_comp_ring	comp_ring;
	struct Vmxnet3_TxQueueCtrl	*shared;  /* control block shared with device */
	struct vmxnet3_tq_driver_stats	stats;
	bool			stopped;	/* queue currently stopped? */
	int			num_stop;	/* # of times the queue is
						 * stopped */
	int			qid;
	u16			txdata_desc_size; /* size of a data-ring buffer */
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
254
/* Kind of buffer posted at an rx descriptor. */
enum vmxnet3_rx_buf_type {
	VMXNET3_RX_BUF_NONE = 0,
	VMXNET3_RX_BUF_SKB  = 1,
	VMXNET3_RX_BUF_PAGE = 2
};
260
/* Driver-side bookkeeping for one rx descriptor. */
struct vmxnet3_rx_buf_info {
	enum vmxnet3_rx_buf_type buf_type;
	u16		len;		/* length of the posted buffer */
	union {				/* backing store; valid member is
					 * selected by buf_type */
		struct sk_buff *skb;
		struct page    *page;
	};
	dma_addr_t	dma_addr;	/* DMA address of the buffer */
};
270
/* State carried across descriptors of one in-flight (multi-desc) rx pkt. */
struct vmxnet3_rx_ctx {
	struct sk_buff *skb;	/* skb being assembled for the current pkt */
	u32 sop_idx;		/* ring index of the pkt's first descriptor */
};
275
/* Software (driver-side) per-rx-queue statistics. */
struct vmxnet3_rq_driver_stats {
	u64 drop_total;			/* total pkts dropped by the driver */
	u64 drop_err;
	u64 drop_fcs;
	u64 rx_buf_alloc_failure;	/* failed rx buffer allocations */
};
282
50a5ce3e
SK
/* Optional rx data ring (see vmxnet3_adapter.rxdataring_enabled). */
struct vmxnet3_rx_data_ring {
	Vmxnet3_RxDataDesc *base;	/* virtual address of ring base */
	dma_addr_t basePA;		/* DMA address of ring base */
	u16 desc_size;			/* size of each data-ring buffer */
};
288
/* One receive queue: two command rings, an optional data ring, one
 * completion ring, plus NAPI context.  Cache-line aligned.
 */
struct vmxnet3_rx_queue {
	char			name[IFNAMSIZ + 8]; /* To identify interrupt */
	struct vmxnet3_adapter	  *adapter;	/* back-pointer to the device */
	struct napi_struct        napi;
	struct vmxnet3_cmd_ring   rx_ring[2];
	struct vmxnet3_rx_data_ring data_ring;
	struct vmxnet3_comp_ring  comp_ring;
	struct vmxnet3_rx_ctx     rx_ctx;
	u32 qid;            /* rqID in RCD for buffer from 1st ring */
	u32 qid2;           /* rqID in RCD for buffer from 2nd ring */
	u32 dataRingQid;    /* rqID in RCD for buffer from data ring */
	struct vmxnet3_rx_buf_info     *buf_info[2]; /* one array per rx ring */
	dma_addr_t                      buf_info_pa; /* DMA address of buf_info */
	struct Vmxnet3_RxQueueCtrl            *shared; /* shared with device */
	struct vmxnet3_rq_driver_stats  stats;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
305
09c5088e
SB
#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
#define VMXNET3_DEVICE_MAX_RX_QUEUES 8   /* Keep this value as a power of 2 */

/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
#define VMXNET3_RSS_IND_TABLE_SIZE  (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)

/* One vector per tx queue + one per rx queue + one for events. */
#define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
					 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
#define VMXNET3_LINUX_MIN_MSIX_VECT     2 /* 1 for tx-rx pair and 1 for event */
09c5088e 315
d1a890fa
SB
316
/* Interrupt state: vector bookkeeping, moderation levels and mask mode. */
struct vmxnet3_intr {
	enum vmxnet3_intr_mask_mode  mask_mode;
	enum vmxnet3_intr_type       type;	/* MSI-X, MSI, or INTx? */
	u8  num_intrs;			/* # of intr vectors */
	u8  event_intr_idx;		/* idx of the intr vector for event */
	u8  mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
	char	event_msi_vector_name[IFNAMSIZ+17];
#ifdef CONFIG_PCI_MSI
	struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
#endif
};
328
09c5088e
SB
/* Interrupt sharing schemes, share_intr */
#define VMXNET3_INTR_BUDDYSHARE    0    /* Corresponding tx,rx queues share irq */
#define VMXNET3_INTR_TXSHARE       1    /* All tx queues share one irq */
#define VMXNET3_INTR_DONTSHARE     2    /* each queue has its own irq */


/* Bit numbers used in vmxnet3_adapter.state */
#define VMXNET3_STATE_BIT_RESETTING   0
#define VMXNET3_STATE_BIT_QUIESCED    1
/* Per-device private state, hung off the net_device. */
struct vmxnet3_adapter {
	struct vmxnet3_tx_queue		tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
	struct vmxnet3_rx_queue		rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
	unsigned long			active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct vmxnet3_intr		intr;
	spinlock_t			cmd_lock;
	struct Vmxnet3_DriverShared	*shared;
	struct Vmxnet3_PMConf		*pm_conf;
	struct Vmxnet3_TxQueueDesc	*tqd_start;	/* all tx queue desc */
	struct Vmxnet3_RxQueueDesc	*rqd_start;	/* all rx queue desc */
	struct net_device		*netdev;
	struct pci_dev			*pdev;

	u8			__iomem *hw_addr0; /* for BAR 0 */
	u8			__iomem *hw_addr1; /* for BAR 1 */
	u8				version; /* device rev + 1; see
						  * VMXNET3_VERSION_GE_* */

#ifdef VMXNET3_RSS
	struct UPT1_RSSConf		*rss_conf;
	bool				rss;
#endif
	u32				num_rx_queues;
	u32				num_tx_queues;

	/* rx buffer related */
	unsigned			skb_buf_size;
	int		rx_buf_per_pkt;  /* only apply to the 1st ring */
	dma_addr_t			shared_pa;
	dma_addr_t			queue_desc_pa;
	dma_addr_t			coal_conf_pa;

	/* Wake-on-LAN */
	u32				wol;

	/* Link speed */
	u32				link_speed; /* in mbps */

	u64				tx_timeout_count;

	/* Ring sizes */
	u32				tx_ring_size;
	u32				rx_ring_size;
	u32				rx_ring2_size;

	/* Size of buffer in the data ring */
	u16				txdata_desc_size;
	u16				rxdata_desc_size;

	bool				rxdataring_enabled;
	bool				default_rss_fields;
	enum Vmxnet3_RSSField		rss_fields;

	struct work_struct		work;

	unsigned long			state;    /* VMXNET3_STATE_BIT_xxx */

	int				share_intr; /* VMXNET3_INTR_xxx scheme */

	struct Vmxnet3_CoalesceScheme	*coal_conf;
	bool				default_coal_mode;

	dma_addr_t			adapter_pa;
	dma_addr_t			pm_conf_pa;
	dma_addr_t			rss_conf_pa;
};
402
/* MMIO accessors for the two device BARs (hw_addr0 / hw_addr1). */
#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr0 + (reg))
#define VMXNET3_READ_BAR0_REG(adapter, reg)        \
	readl((adapter)->hw_addr0 + (reg))

#define VMXNET3_WRITE_BAR1_REG(adapter, reg, val)  \
	writel((val), (adapter)->hw_addr1 + (reg))
#define VMXNET3_READ_BAR1_REG(adapter, reg)        \
	readl((adapter)->hw_addr1 + (reg))

/* Threshold (in free descriptors) used when waking a stopped tx queue. */
#define VMXNET3_WAKE_QUEUE_THRESHOLD(tq)  (5)
/* Refill threshold: 1/8 of the rx ring size. */
#define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \
	((rq)->rx_ring[ring_idx].size >> 3)

/* Split a DMA address into its low/high 32-bit halves. */
#define VMXNET3_GET_ADDR_LO(dma)   ((u32)(dma))
#define VMXNET3_GET_ADDR_HI(dma)   ((u32)(((u64)(dma)) >> 32))
419
190af10f
SK
/* Device revision checks: adapter->version is the 1-based revision
 * (VMXNET3_REV_x + 1).
 */
#define VMXNET3_VERSION_GE_2(adapter) \
	(adapter->version >= VMXNET3_REV_2 + 1)
#define VMXNET3_VERSION_GE_3(adapter) \
	(adapter->version >= VMXNET3_REV_3 + 1)
#define VMXNET3_VERSION_GE_4(adapter) \
	(adapter->version >= VMXNET3_REV_4 + 1)

/* must be a multiple of VMXNET3_RING_SIZE_ALIGN */
#define VMXNET3_DEF_TX_RING_SIZE    512
#define VMXNET3_DEF_RX_RING_SIZE    1024
#define VMXNET3_DEF_RX_RING2_SIZE   256

#define VMXNET3_DEF_RXDATA_DESC_SIZE 128

#define VMXNET3_MAX_ETH_HDR_SIZE    22
#define VMXNET3_MAX_SKB_BUF_SIZE    (3*1024)

/* Map a completion rqID back to the ring it refers to:
 * [0, num_rx_queues)                  -> 1st rx ring (index 0)
 * [num_rx_queues, 2*num_rx_queues)    -> 2nd rx ring (index 1)
 * [2*num_rx_queues, 3*num_rx_queues)  -> data ring
 * Arguments are parenthesized so expression arguments expand correctly.
 */
#define VMXNET3_GET_RING_IDX(adapter, rqID) \
	(((rqID) >= adapter->num_rx_queues && \
	  (rqID) < 2 * adapter->num_rx_queues) ? 1 : 0)

#define VMXNET3_RX_DATA_RING(adapter, rqID) \
	((rqID) >= 2 * adapter->num_rx_queues && \
	 (rqID) < 3 * adapter->num_rx_queues)

#define VMXNET3_COAL_STATIC_DEFAULT_DEPTH	64

/* Convert between a rate-based-coalescing rate (events/sec) and usecs.
 * The argument must be non-zero; it is parenthesized so expressions
 * like (a + b) divide correctly (the original expansion was unhygienic).
 */
#define VMXNET3_COAL_RBC_RATE(usecs)	 (1000000 / (usecs))
#define VMXNET3_COAL_RBC_USECS(rbc_rate) (1000000 / (rbc_rate))

#define VMXNET3_RSS_FIELDS_DEFAULT (VMXNET3_RSS_FIELDS_TCPIP4 | \
				    VMXNET3_RSS_FIELDS_TCPIP6)
4edef40e 451
d1a890fa
SB
/* Shared entry points implemented elsewhere in the driver. */

int
vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter);

int
vmxnet3_activate_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_force_close(struct vmxnet3_adapter *adapter);

void
vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);

void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);

void
vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);

netdev_features_t
vmxnet3_fix_features(struct net_device *netdev, netdev_features_t features);

netdev_features_t
vmxnet3_features_check(struct sk_buff *skb,
		       struct net_device *netdev, netdev_features_t features);

int
vmxnet3_set_features(struct net_device *netdev, netdev_features_t features);

int
vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
		      u32 tx_ring_size, u32 rx_ring_size, u32 rx_ring2_size,
		      u16 txdata_desc_size, u16 rxdata_desc_size);

void vmxnet3_set_ethtool_ops(struct net_device *netdev);

void vmxnet3_get_stats64(struct net_device *dev,
			 struct rtnl_link_stats64 *stats);

extern char vmxnet3_driver_name[];
#endif