]>
Commit | Line | Data |
---|---|---|
6b7c5b94 SP |
/*
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 */
17 | ||
18 | #ifndef BE_H | |
19 | #define BE_H | |
20 | ||
21 | #include <linux/pci.h> | |
22 | #include <linux/etherdevice.h> | |
23 | #include <linux/version.h> | |
24 | #include <linux/delay.h> | |
25 | #include <net/tcp.h> | |
26 | #include <net/ip.h> | |
27 | #include <net/ipv6.h> | |
28 | #include <linux/if_vlan.h> | |
29 | #include <linux/workqueue.h> | |
30 | #include <linux/interrupt.h> | |
6b7c5b94 SP |
31 | |
32 | #include "be_hw.h" | |
33 | ||
/* Driver and device identification strings */
#define DRV_VER			"2.0.400"
#define DRV_NAME		"be2net"
#define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
#define OC_NAME			"Emulex OneConnect 10Gbps NIC"
/* Fix: leading space added so the concatenation reads "... NIC Driver",
 * not "... NICDriver" (used for MODULE_DESCRIPTION-style text). */
#define DRV_DESC		BE_NAME " Driver"

#define BE_VENDOR_ID		0x19a2
#define BE_DEVICE_ID1		0x211
#define OC_DEVICE_ID1		0x700
#define OC_DEVICE_ID2		0x701
44 | ||
45 | static inline char *nic_name(struct pci_dev *pdev) | |
46 | { | |
47 | if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2) | |
48 | return OC_NAME; | |
49 | else | |
50 | return BE_NAME; | |
51 | } | |
52 | ||
6b7c5b94 SP |
/* Number of bytes of an RX frame that are copied to skb->data */
#define BE_HDR_LEN		64
#define BE_MAX_JUMBO_FRAME_SIZE	9018
#define BE_MIN_MTU		256

#define BE_NUM_VLANS_SUPPORTED	64
#define BE_MAX_EQD		96
#define BE_MAX_TX_FRAG_COUNT	30

/* Ring lengths (number of entries) */
#define EVNT_Q_LEN		1024
#define TX_Q_LEN		2048
#define TX_CQ_LEN		1024
#define RX_Q_LEN		1024	/* Does not support any other value */
#define RX_CQ_LEN		1024
#define MCC_Q_LEN		128	/* total size not to exceed 8 pages */
#define MCC_CQ_LEN		256

#define BE_NAPI_WEIGHT		64
#define MAX_RX_POST		BE_NAPI_WEIGHT /* Frags posted at a time */
#define RX_FRAGS_REFILL_WM	(RX_Q_LEN - MAX_RX_POST)
73 | ||
6b7c5b94 SP |
74 | struct be_dma_mem { |
75 | void *va; | |
76 | dma_addr_t dma; | |
77 | u32 size; | |
78 | }; | |
79 | ||
80 | struct be_queue_info { | |
81 | struct be_dma_mem dma_mem; | |
82 | u16 len; | |
83 | u16 entry_size; /* Size of an element in the queue */ | |
84 | u16 id; | |
85 | u16 tail, head; | |
86 | bool created; | |
87 | atomic_t used; /* Number of valid elements in the queue */ | |
88 | }; | |
89 | ||
5fb379ee SP |
90 | static inline u32 MODULO(u16 val, u16 limit) |
91 | { | |
92 | BUG_ON(limit & (limit - 1)); | |
93 | return val & (limit - 1); | |
94 | } | |
95 | ||
96 | static inline void index_adv(u16 *index, u16 val, u16 limit) | |
97 | { | |
98 | *index = MODULO((*index + val), limit); | |
99 | } | |
100 | ||
101 | static inline void index_inc(u16 *index, u16 limit) | |
102 | { | |
103 | *index = MODULO((*index + 1), limit); | |
104 | } | |
105 | ||
106 | static inline void *queue_head_node(struct be_queue_info *q) | |
107 | { | |
108 | return q->dma_mem.va + q->head * q->entry_size; | |
109 | } | |
110 | ||
111 | static inline void *queue_tail_node(struct be_queue_info *q) | |
112 | { | |
113 | return q->dma_mem.va + q->tail * q->entry_size; | |
114 | } | |
115 | ||
116 | static inline void queue_head_inc(struct be_queue_info *q) | |
117 | { | |
118 | index_inc(&q->head, q->len); | |
119 | } | |
120 | ||
121 | static inline void queue_tail_inc(struct be_queue_info *q) | |
122 | { | |
123 | index_inc(&q->tail, q->len); | |
124 | } | |
125 | ||
126 | ||
127 | struct be_eq_obj { | |
128 | struct be_queue_info q; | |
129 | char desc[32]; | |
130 | ||
131 | /* Adaptive interrupt coalescing (AIC) info */ | |
132 | bool enable_aic; | |
133 | u16 min_eqd; /* in usecs */ | |
134 | u16 max_eqd; /* in usecs */ | |
135 | u16 cur_eqd; /* in usecs */ | |
136 | ||
137 | struct napi_struct napi; | |
138 | }; | |
139 | ||
140 | struct be_mcc_obj { | |
141 | struct be_queue_info q; | |
142 | struct be_queue_info cq; | |
143 | }; | |
144 | ||
6b7c5b94 SP |
145 | struct be_ctrl_info { |
146 | u8 __iomem *csr; | |
147 | u8 __iomem *db; /* Door Bell */ | |
148 | u8 __iomem *pcicfg; /* PCI config space */ | |
149 | int pci_func; | |
150 | ||
151 | /* Mbox used for cmd request/response */ | |
5fb379ee | 152 | spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */ |
6b7c5b94 SP |
153 | struct be_dma_mem mbox_mem; |
154 | /* Mbox mem is adjusted to align to 16 bytes. The allocated addr | |
155 | * is stored for freeing purpose */ | |
156 | struct be_dma_mem mbox_mem_alloced; | |
5fb379ee SP |
157 | |
158 | /* MCC Rings */ | |
159 | struct be_mcc_obj mcc_obj; | |
160 | spinlock_t mcc_lock; /* For serializing mcc cmds to BE card */ | |
161 | spinlock_t mcc_cq_lock; | |
a8f447bd SP |
162 | |
163 | /* MCC Async callback */ | |
164 | void (*async_cb)(void *adapter, bool link_up); | |
165 | void *adapter_ctxt; | |
6b7c5b94 SP |
166 | }; |
167 | ||
168 | #include "be_cmds.h" | |
169 | ||
170 | struct be_drvr_stats { | |
171 | u32 be_tx_reqs; /* number of TX requests initiated */ | |
172 | u32 be_tx_stops; /* number of times TX Q was stopped */ | |
173 | u32 be_fwd_reqs; /* number of send reqs through forwarding i/f */ | |
174 | u32 be_tx_wrbs; /* number of tx WRBs used */ | |
175 | u32 be_tx_events; /* number of tx completion events */ | |
176 | u32 be_tx_compl; /* number of tx completion entries processed */ | |
4097f663 SP |
177 | ulong be_tx_jiffies; |
178 | u64 be_tx_bytes; | |
179 | u64 be_tx_bytes_prev; | |
6b7c5b94 SP |
180 | u32 be_tx_rate; |
181 | ||
182 | u32 cache_barrier[16]; | |
183 | ||
184 | u32 be_ethrx_post_fail;/* number of ethrx buffer alloc failures */ | |
185 | u32 be_polls; /* number of times NAPI called poll function */ | |
186 | u32 be_rx_events; /* number of ucast rx completion events */ | |
187 | u32 be_rx_compl; /* number of rx completion entries processed */ | |
4097f663 SP |
188 | ulong be_rx_jiffies; |
189 | u64 be_rx_bytes; | |
190 | u64 be_rx_bytes_prev; | |
6b7c5b94 SP |
191 | u32 be_rx_rate; |
192 | /* number of non ether type II frames dropped where | |
193 | * frame len > length field of Mac Hdr */ | |
194 | u32 be_802_3_dropped_frames; | |
195 | /* number of non ether type II frames malformed where | |
196 | * in frame len < length field of Mac Hdr */ | |
197 | u32 be_802_3_malformed_frames; | |
198 | u32 be_rxcp_err; /* Num rx completion entries w/ err set. */ | |
199 | ulong rx_fps_jiffies; /* jiffies at last FPS calc */ | |
200 | u32 be_rx_frags; | |
201 | u32 be_prev_rx_frags; | |
202 | u32 be_rx_fps; /* Rx frags per second */ | |
203 | }; | |
204 | ||
205 | struct be_stats_obj { | |
206 | struct be_drvr_stats drvr_stats; | |
207 | struct net_device_stats net_stats; | |
208 | struct be_dma_mem cmd; | |
209 | }; | |
210 | ||
6b7c5b94 SP |
211 | struct be_tx_obj { |
212 | struct be_queue_info q; | |
213 | struct be_queue_info cq; | |
214 | /* Remember the skbs that were transmitted */ | |
215 | struct sk_buff *sent_skb_list[TX_Q_LEN]; | |
216 | }; | |
217 | ||
218 | /* Struct to remember the pages posted for rx frags */ | |
219 | struct be_rx_page_info { | |
220 | struct page *page; | |
221 | dma_addr_t bus; | |
222 | u16 page_offset; | |
223 | bool last_page_user; | |
224 | }; | |
225 | ||
226 | struct be_rx_obj { | |
227 | struct be_queue_info q; | |
228 | struct be_queue_info cq; | |
229 | struct be_rx_page_info page_info_tbl[RX_Q_LEN]; | |
6b7c5b94 SP |
230 | }; |
231 | ||
232 | #define BE_NUM_MSIX_VECTORS 2 /* 1 each for Tx and Rx */ | |
233 | struct be_adapter { | |
234 | struct pci_dev *pdev; | |
235 | struct net_device *netdev; | |
236 | ||
237 | /* Mbox, pci config, csr address information */ | |
238 | struct be_ctrl_info ctrl; | |
239 | ||
240 | struct msix_entry msix_entries[BE_NUM_MSIX_VECTORS]; | |
241 | bool msix_enabled; | |
242 | bool isr_registered; | |
243 | ||
244 | /* TX Rings */ | |
245 | struct be_eq_obj tx_eq; | |
246 | struct be_tx_obj tx_obj; | |
247 | ||
248 | u32 cache_line_break[8]; | |
249 | ||
250 | /* Rx rings */ | |
251 | struct be_eq_obj rx_eq; | |
252 | struct be_rx_obj rx_obj; | |
253 | u32 big_page_size; /* Compounded page size shared by rx wrbs */ | |
ea1dae11 | 254 | bool rx_post_starved; /* Zero rx frags have been posted to BE */ |
6b7c5b94 SP |
255 | |
256 | struct vlan_group *vlan_grp; | |
257 | u16 num_vlans; | |
258 | u8 vlan_tag[VLAN_GROUP_ARRAY_LEN]; | |
259 | ||
260 | struct be_stats_obj stats; | |
261 | /* Work queue used to perform periodic tasks like getting statistics */ | |
262 | struct delayed_work work; | |
263 | ||
264 | /* Ethtool knobs and info */ | |
265 | bool rx_csum; /* BE card must perform rx-checksumming */ | |
6b7c5b94 SP |
266 | char fw_ver[FW_VER_LEN]; |
267 | u32 if_handle; /* Used to configure filtering */ | |
268 | u32 pmac_id; /* MAC addr handle used by BE card */ | |
269 | ||
a8f447bd | 270 | bool link_up; |
6b7c5b94 | 271 | u32 port_num; |
24307eef | 272 | bool promiscuous; |
6b7c5b94 SP |
273 | }; |
274 | ||
extern struct ethtool_ops be_ethtool_ops;

#define drvr_stats(adapter)		(&adapter->stats.drvr_stats)

#define BE_SET_NETDEV_OPS(netdev, ops)	(netdev->netdev_ops = ops)

#define PAGE_SHIFT_4K		12
#define PAGE_SIZE_4K		(1 << PAGE_SHIFT_4K)

/* Returns number of pages spanned by the data starting at the given addr */
#define PAGES_4K_SPANNED(_address, size) \
		((u32)((((size_t)(_address) & (PAGE_SIZE_4K - 1)) + \
			(size) + (PAGE_SIZE_4K - 1)) >> PAGE_SHIFT_4K))

/* Byte offset into the page corresponding to given address */
#define OFFSET_IN_PAGE(addr) \
		((size_t)(addr) & (PAGE_SIZE_4K - 1))

/* Returns bit offset within a DWORD of a bitfield */
#define AMAP_BIT_OFFSET(_struct, field) \
		(((size_t)&(((_struct *)0)->field))%32)
296 | ||
297 | /* Returns the bit mask of the field that is NOT shifted into location. */ | |
298 | static inline u32 amap_mask(u32 bitsize) | |
299 | { | |
300 | return (bitsize == 32 ? 0xFFFFFFFF : (1 << bitsize) - 1); | |
301 | } | |
302 | ||
303 | static inline void | |
304 | amap_set(void *ptr, u32 dw_offset, u32 mask, u32 offset, u32 value) | |
305 | { | |
306 | u32 *dw = (u32 *) ptr + dw_offset; | |
307 | *dw &= ~(mask << offset); | |
308 | *dw |= (mask & value) << offset; | |
309 | } | |
310 | ||
/* Set bitfield `field` of the hw structure `_struct` located at ptr. */
#define AMAP_SET_BITS(_struct, field, ptr, val) \
		amap_set(ptr, offsetof(_struct, field)/32, \
			amap_mask(sizeof(((_struct *)0)->field)), \
			AMAP_BIT_OFFSET(_struct, field), val)
317 | ||
318 | static inline u32 amap_get(void *ptr, u32 dw_offset, u32 mask, u32 offset) | |
319 | { | |
320 | u32 *dw = (u32 *) ptr; | |
321 | return mask & (*(dw + dw_offset) >> offset); | |
322 | } | |
323 | ||
/* Get bitfield `field` of the hw structure `_struct` located at ptr. */
#define AMAP_GET_BITS(_struct, field, ptr) \
		amap_get(ptr, offsetof(_struct, field)/32, \
			amap_mask(sizeof(((_struct *)0)->field)), \
			AMAP_BIT_OFFSET(_struct, field))
329 | ||
#define be_dws_cpu_to_le(wrb, len)	swap_dws(wrb, len)
#define be_dws_le_to_cpu(wrb, len)	swap_dws(wrb, len)

/* Byte-swap a buffer of dwords in place on big-endian hosts (compiles to
 * a no-op on little-endian). len is in bytes and must be a multiple of 4. */
static inline void swap_dws(void *wrb, int len)
{
#ifdef __BIG_ENDIAN
	u32 *dw = wrb;

	BUG_ON(len % 4);
	do {
		*dw = cpu_to_le32(*dw);
		dw++;
		len -= 4;
	} while (len);
#endif /* __BIG_ENDIAN */
}
344 | ||
345 | static inline u8 is_tcp_pkt(struct sk_buff *skb) | |
346 | { | |
347 | u8 val = 0; | |
348 | ||
349 | if (ip_hdr(skb)->version == 4) | |
350 | val = (ip_hdr(skb)->protocol == IPPROTO_TCP); | |
351 | else if (ip_hdr(skb)->version == 6) | |
352 | val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_TCP); | |
353 | ||
354 | return val; | |
355 | } | |
356 | ||
357 | static inline u8 is_udp_pkt(struct sk_buff *skb) | |
358 | { | |
359 | u8 val = 0; | |
360 | ||
361 | if (ip_hdr(skb)->version == 4) | |
362 | val = (ip_hdr(skb)->protocol == IPPROTO_UDP); | |
363 | else if (ip_hdr(skb)->version == 6) | |
364 | val = (ipv6_hdr(skb)->nexthdr == NEXTHDR_UDP); | |
365 | ||
366 | return val; | |
367 | } | |
368 | ||
5fb379ee SP |
369 | extern void be_cq_notify(struct be_ctrl_info *ctrl, u16 qid, bool arm, |
370 | u16 num_popped); | |
6b7c5b94 | 371 | #endif /* BE_H */ |