/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/u64_stats_sync.h>

#include "vf.h"

#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#define BP_EXTENDED_STATS
#endif

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	BIT(IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED	(MAX_SKB_FRAGS + 4)
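
/* Worked example (illustrative): IXGBE_MAX_DATA_PER_TXD is BIT(14) = 16384
 * bytes, so a 60000-byte TSO payload needs TXD_USE_COUNT(60000) =
 * DIV_ROUND_UP(60000, 16384) = 4 descriptors, while any fragment up to 16384
 * bytes needs exactly one. DESC_NEEDED budgets one descriptor per possible
 * fragment plus four extra, presumably for the linear head data, an optional
 * context descriptor and some slack.
 */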

/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer
 */
struct ixgbevf_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct ixgbevf_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

struct ixgbevf_stats {
	u64 packets;
	u64 bytes;
#ifdef BP_EXTENDED_STATS
	u64 yields;
	u64 misses;
	u64 cleaned;
#endif
};

struct ixgbevf_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
};

struct ixgbevf_rx_queue_stats {
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 csum_err;
};

enum ixgbevf_ring_state_t {
	__IXGBEVF_TX_DETECT_HANG,
	__IXGBEVF_HANG_CHECK_ARMED,
};

#define check_for_tx_hang(ring) \
	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)

struct ixgbevf_ring {
	struct ixgbevf_ring *next;
	struct net_device *netdev;
	struct device *dev;
	void *desc;			/* descriptor ring memory */
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */
	u16 count;			/* amount of descriptors */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	union {
		struct ixgbevf_tx_buffer *tx_buffer_info;
		struct ixgbevf_rx_buffer *rx_buffer_info;
	};
	unsigned long state;
	struct ixgbevf_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ixgbevf_tx_queue_stats tx_stats;
		struct ixgbevf_rx_queue_stats rx_stats;
	};

	u64 hw_csum_rx_error;
	u8 __iomem *tail;
	struct sk_buff *skb;

	/* holds the special value that gets the hardware register offset
	 * associated with this ring, which is different for DCB and RSS modes
	 */
	u16 reg_idx;
	int queue_index; /* needed for multiqueue queue management */
};

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */
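
/* Note (describing expected use, not taken from this header): the Rx refill
 * path is meant to batch allocations and only bump the ring tail register
 * once at least IXGBEVF_RX_BUFFER_WRITE descriptors have been consumed,
 * keeping MMIO writes off the per-packet hot path.
 */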

#define MAX_RX_QUEUES IXGBE_VF_MAX_RX_QUEUES
#define MAX_TX_QUEUES IXGBE_VF_MAX_TX_QUEUES
#define IXGBEVF_MAX_RSS_QUEUES		2
#define IXGBEVF_82599_RETA_SIZE		128	/* 128 entries */
#define IXGBEVF_X550_VFRETA_SIZE	64	/* 64 entries */
#define IXGBEVF_RSS_HASH_KEY_SIZE	40
#define IXGBEVF_VFRSSRK_REGS		10	/* 10 registers for RSS key */

#define IXGBEVF_DEFAULT_TXD	1024
#define IXGBEVF_DEFAULT_RXD	512
#define IXGBEVF_MAX_TXD		4096
#define IXGBEVF_MIN_TXD		64
#define IXGBEVF_MAX_RXD		4096
#define IXGBEVF_MIN_RXD		64

/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256	256	/* Used for packet split */
#define IXGBEVF_RXBUFFER_2048	2048

#define IXGBEVF_RX_HDR_SIZE	IXGBEVF_RXBUFFER_256
#define IXGBEVF_RX_BUFSZ	IXGBEVF_RXBUFFER_2048

#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)

#define IXGBE_TX_FLAGS_CSUM		BIT(0)
#define IXGBE_TX_FLAGS_VLAN		BIT(1)
#define IXGBE_TX_FLAGS_TSO		BIT(2)
#define IXGBE_TX_FLAGS_IPV4		BIT(3)
#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16

struct ixgbevf_ring_container {
	struct ixgbevf_ring *ring;	/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

/* iterator for handling rings in ring container */
#define ixgbevf_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
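
/* Usage sketch (illustrative; this helper is not part of the driver): walk
 * every ring linked into a container and total its packet counter.
 */
static inline u64 ixgbevf_example_count_packets(struct ixgbevf_ring_container *head)
{
	struct ixgbevf_ring *ring;
	u64 packets = 0;

	ixgbevf_for_each_ring(ring, *head)
		packets += ring->stats.packets;

	return packets;
}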

/* MAX_MSIX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbevf_q_vector {
	struct ixgbevf_adapter *adapter;
	/* index of q_vector within array, also used for finding the bit in
	 * EICR and friends that represents the vector for this ring
	 */
	u16 v_idx;
	u16 itr; /* Interrupt throttle rate written to EITR */
	struct napi_struct napi;
	struct ixgbevf_ring_container rx, tx;
	char name[IFNAMSIZ + 9];
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int state;
#define IXGBEVF_QV_STATE_IDLE		0
#define IXGBEVF_QV_STATE_NAPI		1 /* NAPI owns this QV */
#define IXGBEVF_QV_STATE_POLL		2 /* poll owns this QV */
#define IXGBEVF_QV_STATE_DISABLED	4 /* QV is disabled */
#define IXGBEVF_QV_OWNED (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
#define IXGBEVF_QV_STATE_NAPI_YIELD	8 /* NAPI yielded this QV */
#define IXGBEVF_QV_STATE_POLL_YIELD	16 /* poll yielded this QV */
#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \
			  IXGBEVF_QV_STATE_POLL_YIELD)
#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \
			      IXGBEVF_QV_STATE_POLL_YIELD)
	spinlock_t lock;
#endif /* CONFIG_NET_RX_BUSY_POLL */
};

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
{
	spin_lock_init(&q_vector->lock);
	q_vector->state = IXGBEVF_QV_STATE_IDLE;
}

/* called from the device poll routine to get ownership of a q_vector */
static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
{
	int rc = true;

	spin_lock_bh(&q_vector->lock);
	if (q_vector->state & IXGBEVF_QV_LOCKED) {
		WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
		q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
		rc = false;
#ifdef BP_EXTENDED_STATS
		q_vector->tx.ring->stats.yields++;
#endif
	} else {
		/* we don't care if someone yielded */
		q_vector->state = IXGBEVF_QV_STATE_NAPI;
	}
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

/* returns true if someone tried to get the qv while napi had it */
static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
{
	int rc = false;

	spin_lock_bh(&q_vector->lock);
	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
				   IXGBEVF_QV_STATE_NAPI_YIELD));

	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
		rc = true;
	/* reset state to idle, unless QV is disabled */
	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

/* called from ixgbevf_low_latency_poll() */
static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
{
	int rc = true;

	spin_lock_bh(&q_vector->lock);
	if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
		q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
		rc = false;
#ifdef BP_EXTENDED_STATS
		q_vector->rx.ring->stats.yields++;
#endif
	} else {
		/* preserve yield marks */
		q_vector->state |= IXGBEVF_QV_STATE_POLL;
	}
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

/* returns true if someone tried to get the qv while it was locked */
static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
{
	int rc = false;

	spin_lock_bh(&q_vector->lock);
	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));

	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
		rc = true;
	/* reset state to idle, unless QV is disabled */
	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

/* true if a socket is polling, even if it did not get the lock */
static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
{
	WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
	return q_vector->state & IXGBEVF_QV_USER_PEND;
}

/* false if QV is currently owned */
static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
{
	int rc = true;

	spin_lock_bh(&q_vector->lock);
	if (q_vector->state & IXGBEVF_QV_OWNED)
		rc = false;
	q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
	spin_unlock_bh(&q_vector->lock);
	return rc;
}
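
/* Illustrative sketch (hypothetical helper, not the driver's real poll path):
 * NAPI is expected to take the vector with ixgbevf_qv_lock_napi() before
 * cleaning rings and to back off when a busy-polling socket already owns it.
 */
static inline bool ixgbevf_example_napi_clean(struct ixgbevf_q_vector *q_vector)
{
	if (!ixgbevf_qv_lock_napi(q_vector))
		return false;	/* busy poll owns the vector; retry later */

	/* clean the rings on q_vector->tx and q_vector->rx here */

	ixgbevf_qv_unlock_napi(q_vector);
	return true;
}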

#endif /* CONFIG_NET_RX_BUSY_POLL */

/* microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_12K_ITR		336

/* Helper macros to switch between ints/sec and what the register uses.
 * And yes, it's the same math going both ways. The lowest value
 * supported by all of the ixgbe hardware is 8.
 */
#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
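
/* Worked example (illustrative): EITR_INTS_PER_SEC_TO_REG(10000) =
 * 1000000000 / (10000 * 256) = 390, and EITR_REG_TO_INTS_PER_SEC(390) =
 * 1000000000 / (390 * 256) = 10016, so the same expression approximately
 * inverts itself, which is why one macro serves both directions.
 */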

/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					  const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
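
/* Worked example (illustrative): on a 512-entry ring with next_to_clean = 10
 * and next_to_use = 500, ixgbevf_desc_unused() returns (512 + 10) - 500 - 1 =
 * 21. The "- 1" keeps one slot permanently unused so a completely full ring
 * is never mistaken for an empty one.
 */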

static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
{
	writel(value, ring->tail);
}

#define IXGBEVF_RX_DESC(R, i)	\
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBEVF_TX_DESC(R, i)	\
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBEVF_TX_CTXTDESC(R, i)	\
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))

#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */

#define OTHER_VECTOR	1
#define NON_Q_VECTORS	(OTHER_VECTOR)

#define MAX_MSIX_Q_VECTORS	2

#define MIN_MSIX_Q_VECTORS	1
#define MIN_MSIX_COUNT		(MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

/* board specific private data structure */
struct ixgbevf_adapter {
	/* this field must be first, see ixgbevf_process_skb_fields */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];

	/* Interrupt Throttle Rate */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	/* interrupt masks */
	u32 eims_enable_mask;
	u32 eims_other;

	/* TX */
	int num_tx_queues;
	struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
	u64 restart_queue;
	u32 tx_timeout_count;

	/* RX */
	int num_rx_queues;
	struct ixgbevf_ring *rx_ring[MAX_RX_QUEUES]; /* One per active queue */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	int num_msix_vectors;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	struct msix_entry *msix_entries;

	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;

	/* structs defined in ixgbe_vf.h */
	struct ixgbe_hw hw;
	u16 msg_enable;
	/* Interrupt Throttle Rate */
	u32 eitr_param;

	struct ixgbevf_hw_stats stats;

	unsigned long state;
	u64 tx_busy;
	unsigned int tx_ring_count;
	unsigned int rx_ring_count;

	u8 __iomem *io_addr; /* Mainly for iounmap use */
	u32 link_speed;
	bool link_up;

	struct timer_list service_timer;
	struct work_struct service_task;

	spinlock_t mbx_lock;
	unsigned long last_reset;

	u32 rss_key[IXGBEVF_VFRSSRK_REGS];
	u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
};

enum ixgbevf_state_t {
	__IXGBEVF_TESTING,
	__IXGBEVF_RESETTING,
	__IXGBEVF_DOWN,
	__IXGBEVF_DISABLED,
	__IXGBEVF_REMOVING,
	__IXGBEVF_SERVICE_SCHED,
	__IXGBEVF_SERVICE_INITED,
	__IXGBEVF_RESET_REQUESTED,
	__IXGBEVF_QUEUE_RESET_REQUESTED,
};

enum ixgbevf_boards {
	board_82599_vf,
	board_X540_vf,
	board_X550_vf,
	board_X550EM_x_vf,
};

enum ixgbevf_xcast_modes {
	IXGBEVF_XCAST_MODE_NONE = 0,
	IXGBEVF_XCAST_MODE_MULTI,
	IXGBEVF_XCAST_MODE_ALLMULTI,
};

extern const struct ixgbevf_info ixgbevf_82599_vf_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;

/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];

int ixgbevf_open(struct net_device *netdev);
int ixgbevf_close(struct net_device *netdev);
void ixgbevf_up(struct ixgbevf_adapter *adapter);
void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
void ixgbevf_reset(struct ixgbevf_adapter *adapter);
void ixgbevf_set_ethtool_ops(struct net_device *netdev);
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *);
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
int ethtool_ioctl(struct ifreq *ifr);

extern void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);

void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);

#ifdef DEBUG
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
	printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
#else
#define hw_dbg(hw, format, arg...) do {} while (0)
#endif
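
/* Usage sketch (the message text is hypothetical): callers log against the
 * shared ixgbe_hw handle, e.g. hw_dbg(hw, "mailbox timed out\n"); the call
 * compiles away entirely unless DEBUG is defined.
 */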

#endif /* _IXGBEVF_H_ */