/* SPDX-License-Identifier: GPL-2.0 */
/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IXGBE_H_
#define _IXGBE_H_

#ifndef IXGBE_NO_LRO
#include <net/tcp.h>
#endif

#include <linux/pci.h>
#include <linux/netdevice.h>
#ifdef HAVE_IRQ_AFFINITY_HINT
#include <linux/cpumask.h>
#endif /* HAVE_IRQ_AFFINITY_HINT */
#include <linux/vmalloc.h>

#ifdef SIOCETHTOOL
#include <linux/ethtool.h>
#endif
#ifdef NETIF_F_HW_VLAN_TX
#include <linux/if_vlan.h>
#endif
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
#define IXGBE_DCA
#include <linux/dca.h>
#endif
#include "ixgbe_dcb.h"

#include "kcompat.h"

#ifdef HAVE_SCTP
#include <linux/sctp.h>
#endif

#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */

#if defined(CONFIG_PTP_1588_CLOCK) || defined(CONFIG_PTP_1588_CLOCK_MODULE)
#define HAVE_IXGBE_PTP
#endif

#include "ixgbe_api.h"

#define PFX "ixgbe: "
#define DPRINTK(nlevel, klevel, fmt, args...) \
        ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
        printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
        __func__, ## args)))
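/*
 * Usage sketch (illustrative, not part of the original driver): DPRINTK
 * expects an 'adapter' pointer in scope and gates output on the netif
 * message-enable bitmask, e.g. from a link-change path:
 *
 *      DPRINTK(LINK, INFO, "link is up, speed %u\n", link_speed);
 *
 * This expands to a printk(KERN_INFO ...) only when NETIF_MSG_LINK is set
 * in adapter->msg_enable; 'link_speed' here is a hypothetical local.
 */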

/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD 512
#define IXGBE_DEFAULT_TX_WORK 256
#define IXGBE_MAX_TXD 4096
#define IXGBE_MIN_TXD 64

#define IXGBE_DEFAULT_RXD 512
#define IXGBE_DEFAULT_RX_WORK 256
#define IXGBE_MAX_RXD 4096
#define IXGBE_MIN_RXD 64


/* flow control */
#define IXGBE_MIN_FCRTL 0x40
#define IXGBE_MAX_FCRTL 0x7FF80
#define IXGBE_MIN_FCRTH 0x600
#define IXGBE_MAX_FCRTH 0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
#define IXGBE_MIN_FCPAUSE 0
#define IXGBE_MAX_FCPAUSE 0xFFFF

/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_512 512 /* Used for packet split */
#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
#define IXGBE_RXBUFFER_1536 1536
#define IXGBE_RXBUFFER_2K 2048
#define IXGBE_RXBUFFER_3K 3072
#define IXGBE_RXBUFFER_4K 4096
#define IXGBE_RXBUFFER_7K 7168
#define IXGBE_RXBUFFER_8K 8192
#define IXGBE_RXBUFFER_15K 15360
#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */
#define IXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */
/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
 * this adds up to 512 bytes of extra data, meaning the smallest
 * allocation we could have is 1K.
 * i.e. RXBUFFER_512 --> size-1024 slab
 */
#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_512

#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define IXGBE_TX_FLAGS_CSUM (u32)(1)
#define IXGBE_TX_FLAGS_HW_VLAN (u32)(1 << 1)
#define IXGBE_TX_FLAGS_SW_VLAN (u32)(1 << 2)
#define IXGBE_TX_FLAGS_TSO (u32)(1 << 3)
#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 4)
#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 5)
#define IXGBE_TX_FLAGS_FSO (u32)(1 << 6)
#define IXGBE_TX_FLAGS_TXSW (u32)(1 << 7)
#define IXGBE_TX_FLAGS_TSTAMP (u32)(1 << 8)
#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16

#define IXGBE_MAX_RX_DESC_POLL 10

#define IXGBE_MAX_VF_MC_ENTRIES 30
#define IXGBE_MAX_VF_FUNCTIONS 64
#define IXGBE_MAX_VFTA_ENTRIES 128
#define MAX_EMULATION_MAC_ADDRS 16
#define IXGBE_MAX_PF_MACVLANS 15
#define IXGBE_82599_VF_DEVICE_ID 0x10ED
#define IXGBE_X540_VF_DEVICE_ID 0x1515

#ifdef CONFIG_PCI_IOV
#define VMDQ_P(p) ((p) + adapter->num_vfs)
#else
#define VMDQ_P(p) (p)
#endif
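/*
 * Sketch of what VMDQ_P does (illustrative): with SR-IOV enabled the first
 * 'num_vfs' pools belong to the VFs, so the PF's pool indices are shifted
 * up past them. E.g. with adapter->num_vfs == 4, VMDQ_P(0) == 4: the PF's
 * default pool is the fifth hardware pool. Without CONFIG_PCI_IOV the
 * mapping is the identity.
 */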

#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
{ \
        u32 current_counter = IXGBE_READ_REG(hw, reg); \
        if (current_counter < last_counter) \
                counter += 0x100000000LL; \
        last_counter = current_counter; \
        counter &= 0xFFFFFFFF00000000LL; \
        counter |= current_counter; \
}

#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
{ \
        u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
        u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
        u64 current_counter = (current_counter_msb << 32) | \
                current_counter_lsb; \
        if (current_counter < last_counter) \
                counter += 0x1000000000LL; \
        last_counter = current_counter; \
        counter &= 0xFFFFFFF000000000LL; \
        counter |= current_counter; \
}
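/*
 * Worked example (illustrative): the hardware keeps only a 32-bit (or
 * 36-bit) rolling count, so these macros extend it to 64 bits in software.
 * If last_counter == 0xFFFFFFF0 and the register now reads 0x00000010,
 * the new value is smaller, so a wrap is assumed and 0x100000000 is added
 * to the software counter before the low 32 bits are replaced. A caller
 * might look like this, assuming 'hw' is in scope, a per-VF register
 * index macro such as IXGBE_PVFGPRC(vf), and a struct vf_data_storage
 * *vfinfo for the VF:
 *
 *      UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(vf),
 *                              vfinfo->last_vfstats.gprc,
 *                              vfinfo->vfstats.gprc);
 */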

struct vf_stats {
        u64 gprc;
        u64 gorc;
        u64 gptc;
        u64 gotc;
        u64 mprc;
};

struct vf_data_storage {
        unsigned char vf_mac_addresses[ETH_ALEN];
        u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
        u16 num_vf_mc_hashes;
        u16 default_vf_vlan_id;
        u16 vlans_enabled;
        bool clear_to_send;
        struct vf_stats vfstats;
        struct vf_stats last_vfstats;
        struct vf_stats saved_rst_vfstats;
        bool pf_set_mac;
        u16 pf_vlan; /* When set, guest VLAN config not allowed. */
        u16 pf_qos;
        u16 tx_rate;
        u16 vlan_count;
        u8 spoofchk_enabled;
        struct pci_dev *vfdev;
};

struct vf_macvlans {
        struct list_head l;
        int vf;
        bool free;
        bool is_macvlan;
        u8 vf_macvlan[ETH_ALEN];
};

#ifndef IXGBE_NO_LRO
#define IXGBE_LRO_MAX 32 /* Maximum number of LRO descriptors */
#define IXGBE_LRO_GLOBAL 10

struct ixgbe_lro_stats {
        u32 flushed;
        u32 coal;
};

/*
 * ixgbe_lrohdr - header format to be aggregated by LRO
 * @iph: IP header without options
 * @th: TCP header
 * @ts: Optional TCP timestamp data in TCP options
 *
 * This structure relies on the caller having already verified that the
 * header is IPv4 and does not contain any options.
 */
struct ixgbe_lrohdr {
        struct iphdr iph;
        struct tcphdr th;
        __be32 ts[0];
};

struct ixgbe_lro_list {
        struct sk_buff_head active;
        struct ixgbe_lro_stats stats;
};

#endif /* IXGBE_NO_LRO */
#define IXGBE_MAX_TXD_PWR 14
#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#ifdef MAX_SKB_FRAGS
#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
#else
#define DESC_NEEDED 4
#endif
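/*
 * Worked example (illustrative): IXGBE_MAX_DATA_PER_TXD is 1 << 14 ==
 * 16384 bytes, so TXD_USE_COUNT(4096) == 1 and TXD_USE_COUNT(65536) == 4.
 * On a 4K-page system where MAX_SKB_FRAGS is commonly 17, DESC_NEEDED
 * works out to (17 * 1) + 4 = 21 descriptors reserved per worst-case
 * frame; the extra 4 leave headroom for things like the skb's linear
 * data and a context descriptor (an assumption about intent, not spelled
 * out in the original).
 */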

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
        union ixgbe_adv_tx_desc *next_to_watch;
        unsigned long time_stamp;
        struct sk_buff *skb;
        unsigned int bytecount;
        unsigned short gso_segs;
        __be16 protocol;
        DEFINE_DMA_UNMAP_ADDR(dma);
        DEFINE_DMA_UNMAP_LEN(len);
        u32 tx_flags;
};

struct ixgbe_rx_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
        struct page *page;
        unsigned int page_offset;
#endif
};

struct ixgbe_queue_stats {
        u64 packets;
        u64 bytes;
};

struct ixgbe_tx_queue_stats {
        u64 restart_queue;
        u64 tx_busy;
        u64 tx_done_old;
};

struct ixgbe_rx_queue_stats {
        u64 rsc_count;
        u64 rsc_flush;
        u64 non_eop_descs;
        u64 alloc_rx_page_failed;
        u64 alloc_rx_buff_failed;
        u64 csum_err;
};

enum ixgbe_ring_state_t {
        __IXGBE_TX_FDIR_INIT_DONE,
        __IXGBE_TX_DETECT_HANG,
        __IXGBE_HANG_CHECK_ARMED,
        __IXGBE_RX_RSC_ENABLED,
#ifndef HAVE_NDO_SET_FEATURES
        __IXGBE_RX_CSUM_ENABLED,
#endif
        __IXGBE_RX_CSUM_UDP_ZERO_ERR,
#ifdef IXGBE_FCOE
        __IXGBE_RX_FCOE_BUFSZ,
#endif
};

#define check_for_tx_hang(ring) \
        test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
        set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
        clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#ifndef IXGBE_NO_HW_RSC
#define ring_is_rsc_enabled(ring) \
        test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#else
#define ring_is_rsc_enabled(ring) false
#endif
#define set_ring_rsc_enabled(ring) \
        set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
        clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define netdev_ring(ring) (ring->netdev)
#define ring_queue_index(ring) (ring->queue_index)


struct ixgbe_ring {
        struct ixgbe_ring *next; /* pointer to next ring in q_vector */
        struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
        struct net_device *netdev; /* netdev ring belongs to */
        struct device *dev; /* device for DMA mapping */
        void *desc; /* descriptor ring memory */
        union {
                struct ixgbe_tx_buffer *tx_buffer_info;
                struct ixgbe_rx_buffer *rx_buffer_info;
        };
        unsigned long state;
        u8 __iomem *tail;
        dma_addr_t dma; /* phys. address of descriptor ring */
        unsigned int size; /* length in bytes */

        u16 count; /* number of descriptors */

        u8 queue_index; /* needed for multiqueue queue management */
        u8 reg_idx; /* holds the special value that gets
                     * the hardware register offset
                     * associated with this ring, which is
                     * different for DCB and RSS modes
                     */
        u16 next_to_use;
        u16 next_to_clean;

        union {
#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
                u16 rx_buf_len;
#else
                u16 next_to_alloc;
#endif
                struct {
                        u8 atr_sample_rate;
                        u8 atr_count;
                };
        };

        u8 dcb_tc;
        struct ixgbe_queue_stats stats;
        union {
                struct ixgbe_tx_queue_stats tx_stats;
                struct ixgbe_rx_queue_stats rx_stats;
        };
} ____cacheline_internodealigned_in_smp;

enum ixgbe_ring_f_enum {
        RING_F_NONE = 0,
        RING_F_VMDQ, /* SR-IOV uses the same ring feature */
        RING_F_RSS,
        RING_F_FDIR,
#ifdef IXGBE_FCOE
        RING_F_FCOE,
#endif /* IXGBE_FCOE */
        RING_F_ARRAY_SIZE /* must be last in enum set */
};

#define IXGBE_MAX_DCB_INDICES 8
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 64
#ifdef IXGBE_FCOE
#define IXGBE_MAX_FCOE_INDICES 8
#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
#else
#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature {
        int indices;
        int mask;
};

#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
/*
 * FCoE requires that all Rx buffers be over 2200 bytes in length. Since
 * that exceeds half of a 4K page, we double the page order for
 * FCoE-enabled Rx queues so each half-page buffer becomes a full 4K.
 */
#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
        return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
}
#else
#define ixgbe_rx_pg_order(_ring) 0
#endif
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))

#endif
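/*
 * Arithmetic sketch (illustrative), assuming PAGE_SIZE == 4096: a normal
 * ring has pg_order 0, so pg_size == 4096 and bufsz == 2048 (two buffers
 * per page). An FCoE ring with __IXGBE_RX_FCOE_BUFSZ set has pg_order 1,
 * so pg_size == 8192 and bufsz == 4096, satisfying the > 2200 byte
 * requirement above.
 */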
struct ixgbe_ring_container {
        struct ixgbe_ring *ring; /* pointer to linked list of rings */
        unsigned int total_bytes; /* total bytes processed this int */
        unsigned int total_packets; /* total packets processed this int */
        u16 work_limit; /* total work allowed per interrupt */
        u8 count; /* total number of rings in vector */
        u8 itr; /* current ITR setting for ring */
};

/* iterator for handling rings in ring container */
#define ixgbe_for_each_ring(pos, head) \
        for (pos = (head).ring; pos != NULL; pos = pos->next)
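/*
 * Usage sketch (illustrative): walking every Tx ring attached to a queue
 * vector, assuming 'q_vector' and a local 'struct ixgbe_ring *ring' are
 * in scope:
 *
 *      ixgbe_for_each_ring(ring, q_vector->tx)
 *              do_per_ring_work(q_vector, ring);
 *
 * where do_per_ring_work() is a hypothetical stand-in for whatever the
 * caller does per ring (e.g. Tx completion cleanup in the poll routine).
 */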

#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
                               ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS

/* MAX_MSIX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbe_q_vector {
        struct ixgbe_adapter *adapter;
        int cpu; /* CPU for DCA */
        u16 v_idx; /* index of q_vector within array, also used for
                    * finding the bit in EICR and friends that
                    * represents the vector for this ring */
        u16 itr; /* Interrupt throttle rate written to EITR */
        struct ixgbe_ring_container rx, tx;

#ifdef CONFIG_IXGBE_NAPI
        struct napi_struct napi;
#endif
#ifndef HAVE_NETDEV_NAPI_LIST
        struct net_device poll_dev;
#endif
#ifdef HAVE_IRQ_AFFINITY_HINT
        cpumask_t affinity_mask;
#endif
#ifndef IXGBE_NO_LRO
        struct ixgbe_lro_list lrolist; /* LRO list for queue vector */
#endif
        int numa_node;
        char name[IFNAMSIZ + 9];

        /* for dynamic allocation of rings associated with this q_vector */
        struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};

/*
 * Microsecond values for various ITR rates, shifted left by 2 so they
 * fit the EITR register, whose low 3 bits are reserved as 0.
 */
#define IXGBE_MIN_RSC_ITR 24
#define IXGBE_100K_ITR 40
#define IXGBE_20K_ITR 200
#define IXGBE_16K_ITR 248
#define IXGBE_10K_ITR 400
#define IXGBE_8K_ITR 500
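/*
 * Worked example (illustrative): a 20K interrupts/sec target means one
 * interrupt every 50 us; 50 << 2 == 200, hence IXGBE_20K_ITR. Likewise
 * 100K/sec -> 10 us -> 40, and 8K/sec -> 125 us -> 500.
 */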

/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
                                        const u32 stat_err_bits)
{
        return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

/* ixgbe_desc_unused - calculate if we have unused descriptors */
static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
        u16 ntc = ring->next_to_clean;
        u16 ntu = ring->next_to_use;

        return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
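/*
 * Worked example (illustrative): on a 512-entry ring with
 * next_to_clean == 10 and next_to_use == 500, ntc <= ntu, so the result
 * is 512 + 10 - 500 - 1 == 21 free descriptors. With ntc == 500 and
 * ntu == 10 (ntu has wrapped), the result is 0 + 500 - 10 - 1 == 489.
 * The "- 1" keeps one slot unused so a full ring can be distinguished
 * from an empty one.
 */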

#define IXGBE_RX_DESC(R, i) \
        (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i) \
        (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC(R, i) \
        (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))

#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
#ifdef IXGBE_FCOE
/* use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072
#endif /* IXGBE_FCOE */

#define TCP_TIMER_VECTOR 0
#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR)

#define IXGBE_MAX_MSIX_Q_VECTORS_82599 64
#define IXGBE_MAX_MSIX_Q_VECTORS_82598 16

struct ixgbe_mac_addr {
        u8 addr[ETH_ALEN];
        u16 queue;
        u16 state; /* bitmask */
};
#define IXGBE_MAC_STATE_DEFAULT 0x1
#define IXGBE_MAC_STATE_MODIFIED 0x2
#define IXGBE_MAC_STATE_IN_USE 0x4

#ifdef IXGBE_PROCFS
struct ixgbe_therm_proc_data {
        struct ixgbe_hw *hw;
        struct ixgbe_thermal_diode_data *sensor_data;
};

#endif /* IXGBE_PROCFS */

/*
 * Only for array allocations in our adapter struct. On 82598, there will
 * be unused entries in the array, but that's not a big deal. On 82599 we
 * can actually assign 64 queue vectors via its extended interrupt
 * registers, whereas 82598 is limited to 16.
 */
#define MAX_MSIX_Q_VECTORS IXGBE_MAX_MSIX_Q_VECTORS_82599
#define MAX_MSIX_COUNT IXGBE_MAX_MSIX_VECTORS_82599

#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ)

/* board specific private data structure */
struct ixgbe_adapter {
#ifdef NETIF_F_HW_VLAN_TX
#ifdef HAVE_VLAN_RX_REGISTER
        struct vlan_group *vlgrp; /* must be first, see ixgbe_receive_skb */
#else
        unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#endif
#endif /* NETIF_F_HW_VLAN_TX */
        /* OS defined structs */
        struct net_device *netdev;
        struct pci_dev *pdev;

        unsigned long state;

        /* Some features need tri-state capability,
         * thus the additional *_CAPABLE flags.
         */
        u32 flags;
#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0)
#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2)
#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3)
#ifndef IXGBE_NO_LLI
#define IXGBE_FLAG_LLI_PUSH (u32)(1 << 4)
#endif
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 8)
#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 9)
#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 10)
#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)(1 << 11)
#else
#define IXGBE_FLAG_DCA_ENABLED (u32)0
#define IXGBE_FLAG_DCA_CAPABLE (u32)0
#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)0
#endif
#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 12)
#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 13)
#define IXGBE_FLAG_DCB_CAPABLE (u32)(1 << 14)
#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 15)
#define IXGBE_FLAG_RSS_CAPABLE (u32)(1 << 16)
#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 18)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 19)
#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 20)
#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 21)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 22)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 23)
#ifdef IXGBE_FCOE
#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 24)
#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 25)
#endif /* IXGBE_FCOE */
#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 26)
#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 27)
#define IXGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 28)
#define IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 29)
#define IXGBE_FLAG_SRIOV_L2LOOPBACK_ENABLE (u32)(1 << 30)
#define IXGBE_FLAG_RX_BB_CAPABLE (u32)(1 << 31)

        u32 flags2;
#ifndef IXGBE_NO_HW_RSC
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1)
#else
#define IXGBE_FLAG2_RSC_CAPABLE 0
#define IXGBE_FLAG2_RSC_ENABLED 0
#endif
#define IXGBE_FLAG2_VMDQ_DEFAULT_OVERRIDE (u32)(1 << 2)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 4)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 5)
#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 6)
#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 7)
#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 8)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 9)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 10)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 11)
#define IXGBE_FLAG2_OVERFLOW_CHECK_ENABLED (u32)(1 << 12)

        /* Tx fast path data */
        int num_tx_queues;
        u16 tx_itr_setting;
        u16 tx_work_limit;

        /* Rx fast path data */
        int num_rx_queues;
        u16 rx_itr_setting;
        u16 rx_work_limit;

        /* TX */
        struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;

        u64 restart_queue;
        u64 lsc_int;
        u32 tx_timeout_count;

        /* RX */
        struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
        int num_rx_pools; /* == num_rx_queues in 82598 */
        int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
        u64 hw_csum_rx_error;
        u64 hw_rx_no_dma_resources;
        u64 rsc_total_count;
        u64 rsc_total_flush;
        u64 non_eop_descs;
#ifndef CONFIG_IXGBE_NAPI
        u64 rx_dropped_backlog; /* count drops from rx intr handler */
#endif
        u32 alloc_rx_page_failed;
        u32 alloc_rx_buff_failed;

        struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];

#ifdef HAVE_DCBNL_IEEE
        struct ieee_pfc *ixgbe_ieee_pfc;
        struct ieee_ets *ixgbe_ieee_ets;
#endif
        struct ixgbe_dcb_config dcb_cfg;
        struct ixgbe_dcb_config temp_dcb_cfg;
        u8 dcb_set_bitmap;
        u8 dcbx_cap;
#ifndef HAVE_MQPRIO
        u8 tc;
#endif
        enum ixgbe_fc_mode last_lfc_mode;

        int num_msix_vectors;
        int max_msix_q_vectors; /* true count of q_vectors for device */
        struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
        struct msix_entry *msix_entries;

#ifndef HAVE_NETDEV_STATS_IN_NETDEV
        struct net_device_stats net_stats;
#endif
#ifndef IXGBE_NO_LRO
        struct ixgbe_lro_stats lro_stats;
#endif

#ifdef ETHTOOL_TEST
        u32 test_icr;
        struct ixgbe_ring test_tx_ring;
        struct ixgbe_ring test_rx_ring;
#endif

        /* structs defined in ixgbe_hw.h */
        struct ixgbe_hw hw;
        u16 msg_enable;
        struct ixgbe_hw_stats stats;
#ifndef IXGBE_NO_LLI
        u32 lli_port;
        u32 lli_size;
        u32 lli_etype;
        u32 lli_vlan_pri;
#endif /* IXGBE_NO_LLI */

        u32 *config_space;
        u64 tx_busy;
        unsigned int tx_ring_count;
        unsigned int rx_ring_count;

        u32 link_speed;
        bool link_up;
        unsigned long link_check_timeout;

        struct timer_list service_timer;
        struct work_struct service_task;

        struct hlist_head fdir_filter_list;
        unsigned long fdir_overflow; /* number of times ATR was backed off */
        union ixgbe_atr_input fdir_mask;
        int fdir_filter_count;
        u32 fdir_pballoc;
        u32 atr_sample_rate;
        spinlock_t fdir_perfect_lock;

#ifdef IXGBE_FCOE
        struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
        u32 wol;

        u16 bd_number;

        char eeprom_id[32];
        u16 eeprom_cap;
        bool netdev_registered;
        u32 interrupt_event;
#ifdef HAVE_ETHTOOL_SET_PHYS_ID
        u32 led_reg;
#endif

        DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
        unsigned int num_vfs;
        struct vf_data_storage *vfinfo;
        int vf_rate_link_speed;
        struct vf_macvlans vf_mvs;
        struct vf_macvlans *mv_list;
#ifdef CONFIG_PCI_IOV
        u32 timer_event_accumulator;
        u32 vferr_refcount;
#endif
        struct ixgbe_mac_addr *mac_table;
#ifdef IXGBE_SYSFS
        struct kobject *info_kobj;
        struct kobject *therm_kobj[IXGBE_MAX_SENSORS];
#else /* IXGBE_SYSFS */
#ifdef IXGBE_PROCFS
        struct proc_dir_entry *eth_dir;
        struct proc_dir_entry *info_dir;
        struct proc_dir_entry *therm_dir[IXGBE_MAX_SENSORS];
        struct ixgbe_therm_proc_data therm_data[IXGBE_MAX_SENSORS];
#endif /* IXGBE_PROCFS */
#endif /* IXGBE_SYSFS */
};

struct ixgbe_fdir_filter {
        struct hlist_node fdir_node;
        union ixgbe_atr_input filter;
        u16 sw_idx;
        u16 action;
};

enum ixgbe_state_t {
        __IXGBE_TESTING,
        __IXGBE_RESETTING,
        __IXGBE_DOWN,
        __IXGBE_SERVICE_SCHED,
        __IXGBE_IN_SFP_INIT,
};

struct ixgbe_cb {
#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
        union { /* Union defining head/tail partner */
                struct sk_buff *head;
                struct sk_buff *tail;
        };
#endif
        dma_addr_t dma;
#ifndef IXGBE_NO_LRO
        __be32 tsecr; /* timestamp echo response */
        u32 tsval; /* timestamp value in host order */
        u32 next_seq; /* next expected sequence number */
        u16 free; /* 65521 minus total size */
        u16 mss; /* size of data portion of packet */
#endif /* IXGBE_NO_LRO */
#ifdef HAVE_VLAN_RX_REGISTER
        u16 vid; /* VLAN tag */
#endif
        u16 append_cnt; /* number of skb's appended */
#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT
        bool page_released;
#endif
};
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
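/*
 * Usage sketch (illustrative): IXGBE_CB() overlays struct ixgbe_cb on the
 * skb->cb scratch area (48 bytes on typical kernels), so per-packet
 * driver state travels with the skb, e.g.:
 *
 *      IXGBE_CB(skb)->dma = dma_addr;
 *      if (IXGBE_CB(skb)->page_released)
 *              ... release the page ...
 *
 * struct ixgbe_cb must therefore stay within sizeof(skb->cb).
 */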

#ifdef IXGBE_SYSFS
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* IXGBE_SYSFS */
#ifdef IXGBE_PROCFS
void ixgbe_procfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_procfs_init(struct ixgbe_adapter *adapter);
int ixgbe_procfs_topdir_init(void);
void ixgbe_procfs_topdir_exit(void);
#endif /* IXGBE_PROCFS */

extern struct dcbnl_rtnl_ops dcbnl_ops;
extern int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max);

extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);

/* needed by ixgbe_main.c */
extern int ixgbe_validate_mac_addr(u8 *mc_addr);
extern void ixgbe_check_options(struct ixgbe_adapter *adapter);
extern void ixgbe_assign_netdev_ops(struct net_device *netdev);

/* needed by ixgbe_ethtool.c */
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];

extern void ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
extern void ixgbe_reset(struct ixgbe_adapter *adapter);
extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,
                                    struct ixgbe_ring *);
extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,
                                    struct ixgbe_ring *);
extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
extern bool ixgbe_is_ixgbe(struct pci_dev *pcidev);
extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
                                         struct ixgbe_adapter *,
                                         struct ixgbe_ring *);
extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
                                             struct ixgbe_tx_buffer *);
extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *);
extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
                               struct ixgbe_ring *);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
extern int ixgbe_write_mc_addr_list(struct net_device *netdev);
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
#ifdef IXGBE_FCOE
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
#endif /* IXGBE_FCOE */
extern void ixgbe_do_reset(struct net_device *netdev);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector);
extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *);
extern void ixgbe_vlan_stripping_enable(struct ixgbe_adapter *adapter);
extern void ixgbe_vlan_stripping_disable(struct ixgbe_adapter *adapter);
#ifdef ETHTOOL_OPS_COMPAT
extern int ethtool_ioctl(struct ifreq *ifr);
#endif

#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
                     struct ixgbe_tx_buffer *first,
                     u8 *hdr_len);
extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                          union ixgbe_adv_rx_desc *rx_desc,
                          struct sk_buff *skb);
extern int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
                              struct scatterlist *sgl, unsigned int sgc);
#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET
extern int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
                                 struct scatterlist *sgl, unsigned int sgc);
#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */
extern int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE
extern int ixgbe_fcoe_enable(struct net_device *netdev);
extern int ixgbe_fcoe_disable(struct net_device *netdev);
#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */
#ifdef CONFIG_DCB
#ifdef HAVE_DCBNL_OPS_GETAPP
extern u8 ixgbe_fcoe_getapp(struct net_device *netdev);
#endif /* HAVE_DCBNL_OPS_GETAPP */
extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_DCB */
#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN
extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
#endif
#endif /* IXGBE_FCOE */

#ifdef CONFIG_DCB
#ifdef HAVE_DCBNL_IEEE
s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame);
#endif /* HAVE_DCBNL_IEEE */
#endif /* CONFIG_DCB */

extern void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring);
extern int ixgbe_get_settings(struct net_device *netdev,
                              struct ethtool_cmd *ecmd);
extern int ixgbe_write_uc_addr_list(struct ixgbe_adapter *adapter,
                                    struct net_device *netdev,
                                    unsigned int vfn);
extern void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
extern int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
                                u8 *addr, u16 queue);
extern int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
                                u8 *addr, u16 queue);
extern int ixgbe_available_rars(struct ixgbe_adapter *adapter);
#ifndef HAVE_VLAN_RX_REGISTER
extern void ixgbe_vlan_mode(struct net_device *, u32);
#endif
#ifndef ixgbe_get_netdev_tc_txq
#define ixgbe_get_netdev_tc_txq(dev, tc) (&dev->tc_to_txq[tc])
#endif
extern void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif /* _IXGBE_H_ */