]> git.proxmox.com Git - ceph.git/blame - ceph/src/spdk/dpdk/kernel/linux/kni/ethtool/igb/igb.h
bump version to 15.2.11-pve1
[ceph.git] / ceph / src / spdk / dpdk / kernel / linux / kni / ethtool / igb / igb.h
CommitLineData
11fdf7f2 1/* SPDX-License-Identifier: GPL-2.0 */
7c673cae
FG
2/*******************************************************************************
3
4 Intel(R) Gigabit Ethernet Linux driver
5 Copyright(c) 2007-2013 Intel Corporation.
6
7c673cae
FG
7 Contact Information:
8 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
9 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
10
11*******************************************************************************/
12
13/* Linux PRO/1000 Ethernet Driver main header file */
14
15#ifndef _IGB_H_
16#define _IGB_H_
17
18#include <linux/kobject.h>
19
20#ifndef IGB_NO_LRO
21#include <net/tcp.h>
22#endif
23
24#undef HAVE_HW_TIME_STAMP
25#ifdef HAVE_HW_TIME_STAMP
26#include <linux/pci.h>
27#include <linux/netdevice.h>
28#include <linux/vmalloc.h>
29
30#endif
31#ifdef SIOCETHTOOL
32#include <linux/ethtool.h>
33#endif
34
35struct igb_adapter;
36
37#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
38//#define IGB_DCA
39#endif
40#ifdef IGB_DCA
41#include <linux/dca.h>
42#endif
43
44#include "kcompat.h"
45
46#ifdef HAVE_SCTP
47#include <linux/sctp.h>
48#endif
49
50#include "e1000_api.h"
51#include "e1000_82575.h"
52#include "e1000_manage.h"
53#include "e1000_mbx.h"
54
/* Log an error message with the driver prefix. */
#define IGB_ERR(args...) printk(KERN_ERR "igb: " args)

#define PFX "igb: "
/* Conditional debug print: emits only when the NETIF_MSG_##nlevel bit is set
 * in adapter->msg_enable; expects a local `adapter` pointer in scope at the
 * call site.  Uses standard C99 __func__ rather than the GCC-specific
 * __FUNCTION__ alias.
 */
#define DPRINTK(nlevel, klevel, fmt, args...) \
	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
		__func__ , ## args))
62
63#ifdef HAVE_PTP_1588_CLOCK
64#include <linux/clocksource.h>
65#include <linux/net_tstamp.h>
66#include <linux/ptp_clock_kernel.h>
67#endif /* HAVE_PTP_1588_CLOCK */
68
69#ifdef HAVE_I2C_SUPPORT
70#include <linux/i2c.h>
71#include <linux/i2c-algo-bit.h>
72#endif /* HAVE_I2C_SUPPORT */
73
74/* Interrupt defines */
75#define IGB_START_ITR 648 /* ~6000 ints/sec */
76#define IGB_4K_ITR 980
77#define IGB_20K_ITR 196
78#define IGB_70K_ITR 56
79
80/* Interrupt modes, as used by the IntMode parameter */
81#define IGB_INT_MODE_LEGACY 0
82#define IGB_INT_MODE_MSI 1
83#define IGB_INT_MODE_MSIX 2
84
85/* TX/RX descriptor defines */
86#define IGB_DEFAULT_TXD 256
87#define IGB_DEFAULT_TX_WORK 128
88#define IGB_MIN_TXD 80
89#define IGB_MAX_TXD 4096
90
91#define IGB_DEFAULT_RXD 256
92#define IGB_MIN_RXD 80
93#define IGB_MAX_RXD 4096
94
95#define IGB_MIN_ITR_USECS 10 /* 100k irq/sec */
96#define IGB_MAX_ITR_USECS 8191 /* 120 irq/sec */
97
98#define NON_Q_VECTORS 1
99#define MAX_Q_VECTORS 10
100
101/* Transmit and receive queues */
102#define IGB_MAX_RX_QUEUES 16
103#define IGB_MAX_TX_QUEUES 16
104
105#define IGB_MAX_VF_MC_ENTRIES 30
106#define IGB_MAX_VF_FUNCTIONS 8
107#define IGB_82576_VF_DEV_ID 0x10CA
108#define IGB_I350_VF_DEV_ID 0x1520
109#define IGB_MAX_UTA_ENTRIES 128
110#define MAX_EMULATION_MAC_ADDRS 16
111#define OUI_LEN 3
112#define IGB_MAX_VMDQ_QUEUES 8
113
114
/* Per-VF state the PF keeps for each SR-IOV virtual function. */
struct vf_data_storage {
	unsigned char vf_mac_addresses[ETH_ALEN];	/* MAC assigned to the VF */
	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];	/* VF multicast hash filters */
	u16 num_vf_mc_hashes;		/* valid entries in vf_mc_hashes */
	u16 default_vf_vlan_id;
	u16 vlans_enabled;
	unsigned char em_mac_addresses[MAX_EMULATION_MAC_ADDRS * ETH_ALEN];
	u32 uta_table_copy[IGB_MAX_UTA_ENTRIES];	/* unicast table copy */
	u32 flags;			/* IGB_VF_FLAG_* bits */
	unsigned long last_nack;	/* presumably jiffies of last NACK sent to VF — confirm against mailbox code */
#ifdef IFLA_VF_MAX
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	u16 pf_qos;
	u16 tx_rate;
#ifdef HAVE_VF_SPOOFCHK_CONFIGURE
	bool spoofchk_enabled;
#endif
#endif
};
134
135#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
136#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
137#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
138#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
139
140/* RX descriptor control thresholds.
141 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
142 * descriptors available in its onboard memory.
143 * Setting this to 0 disables RX descriptor prefetch.
144 * HTHRESH - MAC will only prefetch if there are at least this many descriptors
145 * available in host memory.
146 * If PTHRESH is 0, this should also be 0.
147 * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
148 * descriptors until either it has this many to write back, or the
149 * ITR timer expires.
150 */
151#define IGB_RX_PTHRESH ((hw->mac.type == e1000_i354) ? 12 : 8)
152#define IGB_RX_HTHRESH 8
153#define IGB_TX_PTHRESH ((hw->mac.type == e1000_i354) ? 20 : 8)
154#define IGB_TX_HTHRESH 1
155#define IGB_RX_WTHRESH ((hw->mac.type == e1000_82576 && \
156 adapter->msix_entries) ? 1 : 4)
157
158/* this is the size past which hardware will drop packets when setting LPE=0 */
159#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
160
161/* NOTE: netdev_alloc_skb reserves 16 bytes, NET_IP_ALIGN means we
162 * reserve 2 more, and skb_shared_info adds an additional 384 more,
163 * this adds roughly 448 bytes of extra data meaning the smallest
164 * allocation we could have is 1K.
165 * i.e. RXBUFFER_512 --> size-1024 slab
166 */
167/* Supported Rx Buffer Sizes */
168#define IGB_RXBUFFER_256 256
169#define IGB_RXBUFFER_2048 2048
170#define IGB_RXBUFFER_16384 16384
171#define IGB_RX_HDR_LEN IGB_RXBUFFER_256
172#if MAX_SKB_FRAGS < 8
173#define IGB_RX_BUFSZ ALIGN(MAX_JUMBO_FRAME_SIZE / MAX_SKB_FRAGS, 1024)
174#else
175#define IGB_RX_BUFSZ IGB_RXBUFFER_2048
176#endif
177
178
179/* Packet Buffer allocations */
180#define IGB_PBA_BYTES_SHIFT 0xA
181#define IGB_TX_HEAD_ADDR_SHIFT 7
182#define IGB_PBA_TX_MASK 0xFFFF0000
183
184#define IGB_FC_PAUSE_TIME 0x0680 /* 858 usec */
185
186/* How many Rx Buffers do we bundle into one write to the hardware ? */
187#define IGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
188
189#define IGB_EEPROM_APME 0x0400
190#define AUTO_ALL_MODES 0
191
192#ifndef IGB_MASTER_SLAVE
193/* Switch to override PHY master/slave setting */
194#define IGB_MASTER_SLAVE e1000_ms_hw_default
195#endif
196
197#define IGB_MNG_VLAN_NONE -1
198
199#ifndef IGB_NO_LRO
200#define IGB_LRO_MAX 32 /*Maximum number of LRO descriptors*/
/* Counters for the software LRO (large receive offload) engine. */
struct igb_lro_stats {
	u32 flushed;	/* aggregated skbs handed up the stack */
	u32 coal;	/* packets coalesced into an aggregate */
};
205
206/*
207 * igb_lro_header - header format to be aggregated by LRO
208 * @iph: IP header without options
209 * @tcp: TCP header
210 * @ts: Optional TCP timestamp data in TCP options
211 *
212 * This structure relies on the check above that verifies that the header
213 * is IPv4 and does not contain any options.
214 */
215struct igb_lrohdr {
216 struct iphdr iph;
217 struct tcphdr th;
218 __be32 ts[0];
219};
220
/* Per-queue-vector list of in-progress LRO aggregates plus its stats. */
struct igb_lro_list {
	struct sk_buff_head active;	/* skbs currently being aggregated */
	struct igb_lro_stats stats;
};
225
226#endif /* IGB_NO_LRO */
/* Driver-private per-skb state, stored in the skb control buffer
 * (see the IGB_CB() accessor below).
 */
struct igb_cb {
#ifndef IGB_NO_LRO
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
	union {			/* Union defining head/tail partner */
		struct sk_buff *head;
		struct sk_buff *tail;
	};
#endif
	__be32 tsecr;		/* timestamp echo response */
	u32 tsval;		/* timestamp value in host order */
	u32 next_seq;		/* next expected sequence number */
	u16 free;		/* 65521 minus total size */
	u16 mss;		/* size of data portion of packet */
	u16 append_cnt;		/* number of skb's appended */
#endif /* IGB_NO_LRO */
#ifdef HAVE_VLAN_RX_REGISTER
	u16 vid;		/* VLAN tag */
#endif
};
/* Access the igb-private area of an skb's control buffer. */
#define IGB_CB(skb) ((struct igb_cb *)(skb)->cb)
247
/* Per-packet transmit flags; split between bits that feed the descriptor
 * cmd_type field and bits that feed the olinfo field.
 */
enum igb_tx_flags {
	/* cmd_type flags */
	IGB_TX_FLAGS_VLAN	= 0x01,	/* insert VLAN tag */
	IGB_TX_FLAGS_TSO	= 0x02,	/* TCP segmentation offload */
	IGB_TX_FLAGS_TSTAMP	= 0x04,	/* hardware timestamp requested */

	/* olinfo flags */
	IGB_TX_FLAGS_IPV4	= 0x10,	/* packet is IPv4 */
	IGB_TX_FLAGS_CSUM	= 0x20,	/* offload checksum insertion */
};
258
259/* VLAN info */
260#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
261#define IGB_TX_FLAGS_VLAN_SHIFT 16
262
263/*
264 * The largest size we can write to the descriptor is 65535. In order to
265 * maintain a power of two alignment we have to limit ourselves to 32K.
266 */
267#define IGB_MAX_TXD_PWR 15
268#define IGB_MAX_DATA_PER_TXD (1 << IGB_MAX_TXD_PWR)
269
270/* Tx Descriptors needed, worst case */
271#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
272#ifndef MAX_SKB_FRAGS
273#define DESC_NEEDED 4
274#elif (MAX_SKB_FRAGS < 16)
275#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
276#else
277#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
278#endif
279
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct igb_tx_buffer {
	union e1000_adv_tx_desc *next_to_watch;	/* descriptor to check for completion */
	unsigned long time_stamp;	/* set at transmit time (hang detection) */
	struct sk_buff *skb;
	unsigned int bytecount;		/* bytes counted for BQL/stats */
	u16 gso_segs;			/* number of segments for GSO */
	__be16 protocol;		/* L3 protocol (network byte order) */
	DEFINE_DMA_UNMAP_ADDR(dma);	/* DMA address for unmap */
	DEFINE_DMA_UNMAP_LEN(len);	/* mapped length for unmap */
	u32 tx_flags;			/* enum igb_tx_flags bits */
};
293
/* Receive buffer bookkeeping: either a whole skb (no packet split) or a
 * half-page fragment identified by page + offset.
 */
struct igb_rx_buffer {
	dma_addr_t dma;			/* DMA address handed to hardware */
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
	struct sk_buff *skb;
#else
	struct page *page;
	u32 page_offset;		/* offset of this buffer within the page */
#endif
};
303
/* Per-TX-queue statistics. */
struct igb_tx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 restart_queue;	/* times the queue was stopped then restarted */
};
309
/* Per-RX-queue statistics. */
struct igb_rx_queue_stats {
	u64 packets;
	u64 bytes;
	u64 drops;
	u64 csum_err;		/* hardware checksum errors */
	u64 alloc_failed;	/* rx buffer allocation failures */
	u64 ipv4_packets;	/* IPv4 headers processed */
	u64 ipv4e_packets;	/* IPv4E headers with extensions processed */
	u64 ipv6_packets;	/* IPv6 headers processed */
	u64 ipv6e_packets;	/* IPv6E headers with extensions processed */
	u64 tcp_packets;	/* TCP headers processed */
	u64 udp_packets;	/* UDP headers processed */
	u64 sctp_packets;	/* SCTP headers processed */
	u64 nfs_packets;	/* NFS headers processed */
};
325
/* Groups the rings serviced in one direction (rx or tx) by a q_vector,
 * together with the accounting used for interrupt throttling.
 */
struct igb_ring_container {
	struct igb_ring *ring;		/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};
334
/* Descriptor ring state shared by the TX and RX paths; the trailing union
 * selects the direction-specific portion.  Cacheline-aligned: the fields
 * before next_to_clean are mostly read-only after init, the rest are hot.
 */
struct igb_ring {
	struct igb_q_vector *q_vector;	/* backlink to q_vector */
	struct net_device *netdev;	/* back pointer to net_device */
	struct device *dev;		/* device for dma mapping */
	union {				/* array of buffer info structs */
		struct igb_tx_buffer *tx_buffer_info;
		struct igb_rx_buffer *rx_buffer_info;
	};
#ifdef HAVE_PTP_1588_CLOCK
	unsigned long last_rx_timestamp;
#endif /* HAVE_PTP_1588_CLOCK */
	void *desc;			/* descriptor ring memory */
	unsigned long flags;		/* ring specific flags */
	void __iomem *tail;		/* pointer to ring tail register */
	dma_addr_t dma;			/* phys address of the ring */
	unsigned int size;		/* length of desc. ring in bytes */

	u16 count;			/* number of desc. in the ring */
	u8 queue_index;			/* logical index of the ring*/
	u8 reg_idx;			/* physical index of the ring */

	/* everything past this point are written often */
	u16 next_to_clean;
	u16 next_to_use;
	u16 next_to_alloc;

	union {
		/* TX */
		struct {
			struct igb_tx_queue_stats tx_stats;
		};
		/* RX */
		struct {
			struct igb_rx_queue_stats rx_stats;
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
			u16 rx_buffer_len;
#else
			struct sk_buff *skb;	/* partially-assembled rx packet */
#endif
		};
	};
#ifdef CONFIG_IGB_VMDQ_NETDEV
	struct net_device *vmdq_netdev;
	int vqueue_index;		/* queue index for virtual netdev */
#endif
} ____cacheline_internodealigned_in_smp;
381
/* One interrupt vector's worth of state: NAPI context, ITR bookkeeping,
 * and the rx/tx ring containers it services.  The rings themselves are
 * allocated in the trailing variable-length array.
 */
struct igb_q_vector {
	struct igb_adapter *adapter;	/* backlink */
	int cpu;			/* CPU for DCA */
	u32 eims_value;			/* EIMS mask value */

	u16 itr_val;			/* interrupt throttle rate value */
	u8 set_itr;			/* nonzero when itr_register needs writing — TODO confirm */
	void __iomem *itr_register;

	struct igb_ring_container rx, tx;

	struct napi_struct napi;
#ifndef IGB_NO_LRO
	struct igb_lro_list lrolist;	/* LRO list for queue vector*/
#endif
	char name[IFNAMSIZ + 9];	/* irq name, e.g. "<ifname>-TxRx-<n>" — verify against request_irq caller */
#ifndef HAVE_NETDEV_NAPI_LIST
	struct net_device poll_dev;
#endif

	/* for dynamic allocation of rings associated with this q_vector */
	struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
};
405
/* Bit numbers for igb_ring.flags. */
enum e1000_ring_flags_t {
#ifndef HAVE_NDO_SET_FEATURES
	IGB_RING_FLAG_RX_CSUM,		/* rx checksum offload enabled */
#endif
	IGB_RING_FLAG_RX_SCTP_CSUM,	/* SCTP checksum offload supported */
	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
	IGB_RING_FLAG_TX_CTX_IDX,	/* use per-ring context index */
	IGB_RING_FLAG_TX_DETECT_HANG,	/* tx hang detection armed */
};
415
/* One entry of the driver's receive-address (RAR) filter table. */
struct igb_mac_addr {
	u8 addr[ETH_ALEN];
	u16 queue;	/* queue/pool the filter steers to */
	u16 state;	/* bitmask of IGB_MAC_STATE_* below */
};
421#define IGB_MAC_STATE_DEFAULT 0x1
422#define IGB_MAC_STATE_MODIFIED 0x2
423#define IGB_MAC_STATE_IN_USE 0x4
424
425#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
426
427#define IGB_RX_DESC(R, i) \
428 (&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
429#define IGB_TX_DESC(R, i) \
430 (&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
431#define IGB_TX_CTXTDESC(R, i) \
432 (&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
433
434#ifdef CONFIG_IGB_VMDQ_NETDEV
435#define netdev_ring(ring) \
436 ((ring->vmdq_netdev ? ring->vmdq_netdev : ring->netdev))
437#define ring_queue_index(ring) \
438 ((ring->vmdq_netdev ? ring->vqueue_index : ring->queue_index))
439#else
440#define netdev_ring(ring) (ring->netdev)
441#define ring_queue_index(ring) (ring->queue_index)
442#endif /* CONFIG_IGB_VMDQ_NETDEV */
443
444/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
445static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
446 const u32 stat_err_bits)
447{
448 return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
449}
450
451/* igb_desc_unused - calculate if we have unused descriptors */
452static inline u16 igb_desc_unused(const struct igb_ring *ring)
453{
454 u16 ntc = ring->next_to_clean;
455 u16 ntu = ring->next_to_use;
456
457 return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
458}
459
#ifdef CONFIG_BQL
/* Map an igb tx ring to its backing netdev queue (for byte queue limits). */
static inline struct netdev_queue *txring_txq(const struct igb_ring *tx_ring)
{
	struct net_device *dev = tx_ring->netdev;

	return netdev_get_tx_queue(dev, tx_ring->queue_index);
}
#endif /* CONFIG_BQL */
466
// #ifdef EXT_THERMAL_SENSOR_SUPPORT
// #ifdef IGB_PROCFS
/* Context handed to each /proc thermal-sensor entry: the hw struct plus
 * the specific diode's data.
 */
struct igb_therm_proc_data
{
	struct e1000_hw *hw;
	struct e1000_thermal_diode_data *sensor_data;
};

// #endif /* IGB_PROCFS */
// #endif /* EXT_THERMAL_SENSOR_SUPPORT */
477
#ifdef IGB_HWMON
/* hwmon attribute categories, one sysfs attribute per sensor per type. */
#define IGB_HWMON_TYPE_LOC	0	/* sensor location string */
#define IGB_HWMON_TYPE_TEMP	1	/* current temperature */
#define IGB_HWMON_TYPE_CAUTION	2	/* caution threshold */
#define IGB_HWMON_TYPE_MAX	3	/* max-operating threshold */

/* One sysfs attribute plus the hardware sensor it reads from. */
struct hwmon_attr {
	struct device_attribute dev_attr;
	struct e1000_hw *hw;
	struct e1000_thermal_diode_data *sensor;
	char name[12];
};

/* The hwmon device and its dynamically allocated attribute list. */
struct hwmon_buff {
	struct device *device;
	struct hwmon_attr *hwmon_list;
	unsigned int n_hwmon;	/* entries in hwmon_list */
};
#endif /* IGB_HWMON */
497
/* board specific private data structure */
struct igb_adapter {
#ifdef HAVE_VLAN_RX_REGISTER
	/* vlgrp must be first member of structure */
	struct vlan_group *vlgrp;
#else
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#endif
	struct net_device *netdev;

	unsigned long state;		/* __IGB_* state bits (enum e1000_state_t) */
	unsigned int flags;		/* IGB_FLAG_* bits */

	unsigned int num_q_vectors;
	struct msix_entry *msix_entries;


	/* TX */
	u16 tx_work_limit;		/* per-interrupt tx clean budget */
	u32 tx_timeout_count;
	int num_tx_queues;
	struct igb_ring *tx_ring[IGB_MAX_TX_QUEUES];

	/* RX */
	int num_rx_queues;
	struct igb_ring *rx_ring[IGB_MAX_RX_QUEUES];

	struct timer_list watchdog_timer;
	struct timer_list dma_err_timer;
	struct timer_list phy_info_timer;
	u16 mng_vlan_id;
	u32 bd_number;			/* board number */
	u32 wol;			/* wake-on-lan settings */
	u32 en_mng_pt;
	u16 link_speed;
	u16 link_duplex;
	u8 port_num;

	/* Interrupt Throttle Rate */
	u32 rx_itr_setting;
	u32 tx_itr_setting;

	struct work_struct reset_task;
	struct work_struct watchdog_task;
	struct work_struct dma_err_task;
	bool fc_autoneg;		/* flow control autonegotiation */
	u8 tx_timeout_factor;

#ifdef DEBUG
	bool tx_hang_detected;
	bool disable_hw_reset;
#endif
	u32 max_frame_size;

	/* OS defined structs */
	struct pci_dev *pdev;
#ifndef HAVE_NETDEV_STATS_IN_NETDEV
	struct net_device_stats net_stats;
#endif
#ifndef IGB_NO_LRO
	struct igb_lro_stats lro_stats;
#endif

	/* structs defined in e1000_hw.h */
	struct e1000_hw hw;
	struct e1000_hw_stats stats;
	struct e1000_phy_info phy_info;
	struct e1000_phy_stats phy_stats;

#ifdef ETHTOOL_TEST
	u32 test_icr;
	struct igb_ring test_tx_ring;
	struct igb_ring test_rx_ring;
#endif

	int msg_enable;			/* NETIF_MSG_* verbosity mask */

	struct igb_q_vector *q_vector[MAX_Q_VECTORS];
	u32 eims_enable_mask;
	u32 eims_other;

	/* to not mess up cache alignment, always add to the bottom */
	u32 *config_space;		/* saved PCI config space */
	u16 tx_ring_count;
	u16 rx_ring_count;
	struct vf_data_storage *vf_data;
#ifdef IFLA_VF_MAX
	int vf_rate_link_speed;
#endif
	u32 lli_port;
	u32 lli_size;
	unsigned int vfs_allocated_count;
	/* Malicious Driver Detection flag. Valid only when SR-IOV is enabled */
	bool mdd;
	int int_mode;			/* IGB_INT_MODE_* */
	u32 rss_queues;
	u32 vmdq_pools;
	char fw_version[43];
	u32 wvbr;
	struct igb_mac_addr *mac_table;
#ifdef CONFIG_IGB_VMDQ_NETDEV
	struct net_device *vmdq_netdev[IGB_MAX_VMDQ_QUEUES];
#endif
	int vferr_refcount;
	int dmac;			/* DMA coalescing setting, IGB_DMAC_* */
	u32 *shadow_vfta;		/* software copy of the VLAN filter table */

	/* External Thermal Sensor support flag */
	bool ets;
#ifdef IGB_HWMON
	struct hwmon_buff igb_hwmon_buff;
#else /* IGB_HWMON */
#ifdef IGB_PROCFS
	struct proc_dir_entry *eth_dir;
	struct proc_dir_entry *info_dir;
	struct proc_dir_entry *therm_dir[E1000_MAX_SENSORS];
	struct igb_therm_proc_data therm_data[E1000_MAX_SENSORS];
	bool old_lsc;
#endif /* IGB_PROCFS */
#endif /* IGB_HWMON */
	u32 etrack_id;

#ifdef HAVE_PTP_1588_CLOCK
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct delayed_work ptp_overflow_work;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;	/* skb awaiting a tx hw timestamp */
	unsigned long ptp_tx_start;
	unsigned long last_rx_ptp_check;
	spinlock_t tmreg_lock;		/* protects the timecounter/cyclecounter — TODO confirm scope */
	struct cyclecounter cc;
	struct timecounter tc;
	u32 tx_hwtstamp_timeouts;
	u32 rx_hwtstamp_cleared;
#endif /* HAVE_PTP_1588_CLOCK */

#ifdef HAVE_I2C_SUPPORT
	struct i2c_algo_bit_data i2c_algo;
	struct i2c_adapter i2c_adap;
	struct i2c_client *i2c_client;
#endif /* HAVE_I2C_SUPPORT */
	unsigned long link_check_timeout;


	int devrc;

	int copper_tries;
	u16 eee_advert;			/* advertised EEE capabilities */
};
648
#ifdef CONFIG_IGB_VMDQ_NETDEV
/* Private data for a virtual netdev backed by one VMDq pool; thin view
 * onto the real adapter with its own rings and stats.
 */
struct igb_vmdq_adapter {
#ifdef HAVE_VLAN_RX_REGISTER
	/* vlgrp must be first member of structure */
	struct vlan_group *vlgrp;
#else
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
#endif
	struct igb_adapter *real_adapter;	/* owning physical adapter */
	struct net_device *vnetdev;		/* this pool's virtual netdev */
	struct net_device_stats net_stats;
	struct igb_ring *tx_ring;
	struct igb_ring *rx_ring;
};
#endif
664
665#define IGB_FLAG_HAS_MSI (1 << 0)
666#define IGB_FLAG_DCA_ENABLED (1 << 1)
667#define IGB_FLAG_LLI_PUSH (1 << 2)
668#define IGB_FLAG_QUAD_PORT_A (1 << 3)
669#define IGB_FLAG_QUEUE_PAIRS (1 << 4)
670#define IGB_FLAG_EEE (1 << 5)
671#define IGB_FLAG_DMAC (1 << 6)
672#define IGB_FLAG_DETECT_BAD_DMA (1 << 7)
673#define IGB_FLAG_PTP (1 << 8)
674#define IGB_FLAG_RSS_FIELD_IPV4_UDP (1 << 9)
675#define IGB_FLAG_RSS_FIELD_IPV6_UDP (1 << 10)
676#define IGB_FLAG_WOL_SUPPORTED (1 << 11)
677#define IGB_FLAG_NEED_LINK_UPDATE (1 << 12)
678#define IGB_FLAG_LOOPBACK_ENABLE (1 << 13)
679#define IGB_FLAG_MEDIA_RESET (1 << 14)
680#define IGB_FLAG_MAS_ENABLE (1 << 15)
681
682/* Media Auto Sense */
683#define IGB_MAS_ENABLE_0 0X0001
684#define IGB_MAS_ENABLE_1 0X0002
685#define IGB_MAS_ENABLE_2 0X0004
686#define IGB_MAS_ENABLE_3 0X0008
687
688#define IGB_MIN_TXPBSIZE 20408
689#define IGB_TX_BUF_4096 4096
690
691#define IGB_DMCTLX_DCFLUSH_DIS 0x80000000 /* Disable DMA Coal Flush */
692
693/* DMA Coalescing defines */
694#define IGB_DMAC_DISABLE 0
695#define IGB_DMAC_MIN 250
696#define IGB_DMAC_500 500
697#define IGB_DMAC_EN_DEFAULT 1000
698#define IGB_DMAC_2000 2000
699#define IGB_DMAC_3000 3000
700#define IGB_DMAC_4000 4000
701#define IGB_DMAC_5000 5000
702#define IGB_DMAC_6000 6000
703#define IGB_DMAC_7000 7000
704#define IGB_DMAC_8000 8000
705#define IGB_DMAC_9000 9000
706#define IGB_DMAC_MAX 10000
707
708#define IGB_82576_TSYNC_SHIFT 19
709#define IGB_82580_TSYNC_SHIFT 24
710#define IGB_TS_HDR_LEN 16
711
712/* CEM Support */
713#define FW_HDR_LEN 0x4
714#define FW_CMD_DRV_INFO 0xDD
715#define FW_CMD_DRV_INFO_LEN 0x5
716#define FW_CMD_RESERVED 0X0
717#define FW_RESP_SUCCESS 0x1
718#define FW_UNUSED_VER 0x0
719#define FW_MAX_RETRIES 3
720#define FW_STATUS_SUCCESS 0x1
721#define FW_FAMILY_DRV_VER 0Xffffffff
722
723#define IGB_MAX_LINK_TRIES 20
724
/* Header of a firmware (CEM) command/response buffer. */
struct e1000_fw_hdr {
	u8 cmd;			/* FW_CMD_* opcode */
	u8 buf_len;		/* payload length following the header */
	union
	{
		u8 cmd_resv;	/* reserved byte in a command */
		u8 ret_status;	/* FW_STATUS_* in a response */
	} cmd_or_resp;
	u8 checksum;
};
735
/* Driver-info payload sent to firmware; packed to match the wire layout. */
#pragma pack(push,1)
struct e1000_fw_drv_info {
	struct e1000_fw_hdr hdr;
	u8 port_num;
	u32 drv_version;
	u16 pad; /* end spacing to ensure length is mult. of dword */
	u8 pad2; /* end spacing to ensure length is mult. of dword2 */
};
#pragma pack(pop)
745
/* Bit numbers for adapter->state. */
enum e1000_state_t {
	__IGB_TESTING,		/* ethtool self-test in progress */
	__IGB_RESETTING,	/* reset in progress */
	__IGB_DOWN		/* interface is down */
};
751
752extern char igb_driver_name[];
753extern char igb_driver_version[];
754
755extern int igb_up(struct igb_adapter *);
756extern void igb_down(struct igb_adapter *);
757extern void igb_reinit_locked(struct igb_adapter *);
758extern void igb_reset(struct igb_adapter *);
759extern int igb_set_spd_dplx(struct igb_adapter *, u16);
760extern int igb_setup_tx_resources(struct igb_ring *);
761extern int igb_setup_rx_resources(struct igb_ring *);
762extern void igb_free_tx_resources(struct igb_ring *);
763extern void igb_free_rx_resources(struct igb_ring *);
764extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
765extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
766extern void igb_setup_tctl(struct igb_adapter *);
767extern void igb_setup_rctl(struct igb_adapter *);
768extern netdev_tx_t igb_xmit_frame_ring(struct sk_buff *, struct igb_ring *);
769extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
770 struct igb_tx_buffer *);
771extern void igb_alloc_rx_buffers(struct igb_ring *, u16);
772extern void igb_clean_rx_ring(struct igb_ring *);
773extern void igb_update_stats(struct igb_adapter *);
774extern bool igb_has_link(struct igb_adapter *adapter);
775extern void igb_set_ethtool_ops(struct net_device *);
776extern void igb_check_options(struct igb_adapter *);
777extern void igb_power_up_link(struct igb_adapter *);
778#ifdef HAVE_PTP_1588_CLOCK
779extern void igb_ptp_init(struct igb_adapter *adapter);
780extern void igb_ptp_stop(struct igb_adapter *adapter);
781extern void igb_ptp_reset(struct igb_adapter *adapter);
782extern void igb_ptp_tx_work(struct work_struct *work);
783extern void igb_ptp_rx_hang(struct igb_adapter *adapter);
784extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
785extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
786 struct sk_buff *skb);
787extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
788 unsigned char *va,
789 struct sk_buff *skb);
/* Retrieve the hardware rx timestamp for an skb, if the descriptor says one
 * was latched.  Timestamps arrive either in the packet buffer itself
 * (TSIP, only handled in the no-packet-split build) or in the timestamp
 * registers (TS).
 */
static inline void igb_ptp_rx_hwtstamp(struct igb_ring *rx_ring,
				       union e1000_adv_rx_desc *rx_desc,
				       struct sk_buff *skb)
{
	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
#ifdef CONFIG_IGB_DISABLE_PACKET_SPLIT
		/* timestamp is prepended to the packet data; strip it off */
		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
		skb_pull(skb, IGB_TS_HDR_LEN);
#endif
		return;
	}

	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS))
		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	/* Update the last_rx_timestamp timer in order to enable watchdog check
	 * for error case of latched timestamp on a dropped packet.
	 */
	rx_ring->last_rx_timestamp = jiffies;
}
810
811extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
812 struct ifreq *ifr, int cmd);
813#endif /* HAVE_PTP_1588_CLOCK */
814#ifdef ETHTOOL_OPS_COMPAT
815extern int ethtool_ioctl(struct ifreq *);
816#endif
817extern int igb_write_mc_addr_list(struct net_device *netdev);
818extern int igb_add_mac_filter(struct igb_adapter *adapter, u8 *addr, u16 queue);
819extern int igb_del_mac_filter(struct igb_adapter *adapter, u8* addr, u16 queue);
820extern int igb_available_rars(struct igb_adapter *adapter);
821extern s32 igb_vlvf_set(struct igb_adapter *, u32, bool, u32);
822extern void igb_configure_vt_default_pool(struct igb_adapter *adapter);
823extern void igb_enable_vlan_tags(struct igb_adapter *adapter);
824#ifndef HAVE_VLAN_RX_REGISTER
825extern void igb_vlan_mode(struct net_device *, u32);
826#endif
827
828#define E1000_PCS_CFG_IGN_SD 1
829
830#ifdef IGB_HWMON
831void igb_sysfs_exit(struct igb_adapter *adapter);
832int igb_sysfs_init(struct igb_adapter *adapter);
833#else
834#ifdef IGB_PROCFS
835int igb_procfs_init(struct igb_adapter* adapter);
836void igb_procfs_exit(struct igb_adapter* adapter);
837int igb_procfs_topdir_init(void);
838void igb_procfs_topdir_exit(void);
839#endif /* IGB_PROCFS */
840#endif /* IGB_HWMON */
841
842
843
844#endif /* _IGB_H_ */