]>
Commit | Line | Data |
---|---|---|
f21fb3ed RV |
1 | /********************************************************************** |
2 | * Author: Cavium, Inc. | |
3 | * | |
4 | * Contact: support@cavium.com | |
5 | * Please include "LiquidIO" in the subject. | |
6 | * | |
50579d3d | 7 | * Copyright (c) 2003-2016 Cavium, Inc. |
f21fb3ed RV |
8 | * |
9 | * This file is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License, Version 2, as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This file is distributed in the hope that it will be useful, but | |
14 | * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty | |
15 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or | |
16 | * NONINFRINGEMENT. See the GNU General Public License for more | |
17 | * details. | |
f21fb3ed RV |
18 | **********************************************************************/ |
19 | ||
20 | /*! \file octeon_network.h | |
21 | * \brief Host NIC Driver: Structure and Macro definitions used by NIC Module. | |
22 | */ | |
23 | ||
24 | #ifndef __OCTEON_NETWORK_H__ | |
25 | #define __OCTEON_NETWORK_H__ | |
f21fb3ed RV |
#include <linux/ptp_clock_kernel.h>

/* MTU limits: max is the largest frame the NIC handles minus the Octeon
 * frame header; min follows the generic Ethernet minimum.
 */
#define LIO_MAX_MTU_SIZE (OCTNET_MAX_FRM_SIZE - OCTNET_FRM_HEADER_SIZE)
#define LIO_MIN_MTU_SIZE ETH_MIN_MTU
1f164717 RV |
/** Response layout for a NIC link-stats request sent to the core app. */
struct oct_nic_stats_resp {
	u64 rh;			/* response header returned by firmware —
				 * NOTE(review): presumably an octeon_rh word;
				 * confirm against the response-list code.
				 */
	struct oct_link_stats stats;	/* the link statistics proper */
	u64 status;		/* completion status of the request */
};

/** Context for a pending stats request. */
struct oct_nic_stats_ctrl {
	struct completion complete;	/* presumably completed by the stats
					 * response callback — confirm at caller
					 */
	struct net_device *netdev;	/* interface the request was issued for */
};
41 | ||
/** LiquidIO per-interface network private data */
struct lio {
	/** State of the interface. Rx/Tx happens only in the RUNNING state. */
	atomic_t ifstate;

	/** Octeon Interface index number. This device will be represented as
	 * oct<ifidx> in the system.
	 */
	int ifidx;

	/** Octeon Input queue to use to transmit for this network interface. */
	int txq;

	/** Octeon Output queue from which pkts arrive
	 * for this network interface.
	 */
	int rxq;

	/** Guards each glist */
	spinlock_t *glist_lock;

	/** Array of gather component linked lists */
	struct list_head *glist;

	/** Virtual and DMA base addresses of the gather lists' backing
	 * storage, one entry per glist.
	 */
	void **glists_virt_base;
	dma_addr_t *glists_dma_base;

	/** Size of one gather-list entry. */
	u32 glist_entry_size;

	/** Pointer to the NIC properties for the Octeon device this network
	 * interface is associated with.
	 */
	struct octdev_props *octprops;

	/** Pointer to the octeon device structure. */
	struct octeon_device *oct_dev;

	struct net_device *netdev;

	/** Link information sent by the core application for this interface. */
	struct oct_link_info linfo;

	/** counter of link changes */
	u64 link_changes;

	/** Size of Tx queue for this octeon device. */
	u32 tx_qsize;

	/** Size of Rx queue for this octeon device. */
	u32 rx_qsize;

	/** Size of MTU of this octeon device. */
	u32 mtu;

	/** msg level flag per interface. */
	u32 msg_enable;

	/** Copy of Interface capabilities: TSO, TSO6, LRO, Checksums. */
	u64 dev_capability;

	/* Copy of transmit encapsulation capabilities:
	 * TSO, TSO6, Checksums for this device for Kernel
	 * 3.10.0 onwards
	 */
	u64 enc_dev_capability;

	/** Copy of beacon reg in phy */
	u32 phy_beacon_val;

	/** Copy of ctrl reg in phy */
	u32 led_ctrl_val;

	/* PTP clock information */
	struct ptp_clock_info ptp_info;
	struct ptp_clock *ptp_clock;
	s64 ptp_adjust;

	/* for atomic access to Octeon PTP reg and data struct */
	spinlock_t ptp_lock;

	/* Interface info */
	u32 intf_open;

	/* work queue for txq status */
	struct cavium_wq txq_status_wq;

	/* work queue for link status */
	struct cavium_wq link_status_wq;

	/* count of unicast addresses on the netdev — NOTE(review): presumably
	 * mirrors netdev_uc_count(netdev); confirm against the set-rx-mode path
	 */
	int netdev_uc_count;
};
131 | ||
#define LIO_SIZE         (sizeof(struct lio))
/* lio private data lives in netdev_priv() of the associated net_device */
#define GET_LIO(netdev)  ((struct lio *)netdev_priv(netdev))

/* CIU3 watchdog CSR address for core (c); registers are 8 bytes apart. */
#define CIU3_WDOG(c)                 (0x1010000020000ULL + ((c) << 3))
#define CIU3_WDOG_MASK               12ULL
#define LIO_MONITOR_WDOG_EXPIRE      1
#define LIO_MONITOR_CORE_STUCK_MSGD  2
#define LIO_MAX_CORES                12
140 | ||
/**
 * \brief Enable or disable feature
 * @param netdev pointer to network device
 * @param cmd Command that just requires acknowledgment
 * @param param1 Parameter to command
 */
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1);

/**
 * \brief Link control command completion callback
 * @param nctrl_ptr pointer to control packet structure
 *
 * This routine is called by the callback function when a ctrl pkt sent to
 * core app completes. The nctrl_ptr contains a copy of the command type
 * and data sent to the core app. This routine is only called if the ctrl
 * pkt was sent successfully to the core app.
 */
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr);

/**
 * \brief Register ethtool operations
 * @param netdev pointer to network device
 */
void liquidio_set_ethtool_ops(struct net_device *netdev);
165 | ||
/* skb->data is aligned up to a SKB_ADJ (64-byte) boundary on allocation */
#define SKB_ADJ_MASK  0x3F
#define SKB_ADJ       (SKB_ADJ_MASK + 1)

#define MIN_SKB_SIZE       256 /* 8 bytes and more - 8 bytes for PTP */
/* each page is split into two LIO_RXBUFFER_SZ halves for recycling */
#define LIO_RXBUFFER_SZ    2048
171 | ||
172 | static inline void | |
173 | *recv_buffer_alloc(struct octeon_device *oct, | |
174 | struct octeon_skb_page_info *pg_info) | |
175 | { | |
176 | struct page *page; | |
177 | struct sk_buff *skb; | |
178 | struct octeon_skb_page_info *skb_pg_info; | |
179 | ||
180 | page = alloc_page(GFP_ATOMIC | __GFP_COLD); | |
181 | if (unlikely(!page)) | |
182 | return NULL; | |
183 | ||
184 | skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ); | |
185 | if (unlikely(!skb)) { | |
186 | __free_page(page); | |
187 | pg_info->page = NULL; | |
188 | return NULL; | |
189 | } | |
f21fb3ed RV |
190 | |
191 | if ((unsigned long)skb->data & SKB_ADJ_MASK) { | |
192 | u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); | |
193 | ||
194 | skb_reserve(skb, r); | |
195 | } | |
196 | ||
cabeb13b RV |
197 | skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb)); |
198 | /* Get DMA info */ | |
199 | pg_info->dma = dma_map_page(&oct->pci_dev->dev, page, 0, | |
200 | PAGE_SIZE, DMA_FROM_DEVICE); | |
201 | ||
202 | /* Mapping failed!! */ | |
203 | if (dma_mapping_error(&oct->pci_dev->dev, pg_info->dma)) { | |
204 | __free_page(page); | |
205 | dev_kfree_skb_any((struct sk_buff *)skb); | |
206 | pg_info->page = NULL; | |
207 | return NULL; | |
208 | } | |
209 | ||
210 | pg_info->page = page; | |
211 | pg_info->page_offset = 0; | |
212 | skb_pg_info->page = page; | |
213 | skb_pg_info->page_offset = 0; | |
214 | skb_pg_info->dma = pg_info->dma; | |
215 | ||
f21fb3ed RV |
216 | return (void *)skb; |
217 | } | |
218 | ||
cabeb13b RV |
219 | static inline void |
220 | *recv_buffer_fast_alloc(u32 size) | |
221 | { | |
222 | struct sk_buff *skb; | |
223 | struct octeon_skb_page_info *skb_pg_info; | |
224 | ||
225 | skb = dev_alloc_skb(size + SKB_ADJ); | |
226 | if (unlikely(!skb)) | |
227 | return NULL; | |
228 | ||
229 | if ((unsigned long)skb->data & SKB_ADJ_MASK) { | |
230 | u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); | |
231 | ||
232 | skb_reserve(skb, r); | |
233 | } | |
234 | ||
235 | skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb)); | |
236 | skb_pg_info->page = NULL; | |
237 | skb_pg_info->page_offset = 0; | |
238 | skb_pg_info->dma = 0; | |
239 | ||
240 | return skb; | |
241 | } | |
242 | ||
/**
 * \brief Try to recycle a receive buffer's page for another Rx
 * @param oct octeon device the page was DMA-mapped for
 * @param buf pointer to the buffer's octeon_skb_page_info
 *
 * Returns 0 when the page can be reused: its refcount is bumped and
 * page_offset is flipped to the other LIO_RXBUFFER_SZ half of the page.
 * Returns -ENOMEM when recycling is impossible — either the page info
 * is empty, or someone else still holds a reference, or the page is on
 * a remote NUMA node; in the latter cases the page is unmapped and the
 * info cleared so the caller must allocate a fresh buffer.
 */
static inline int
recv_buffer_recycle(struct octeon_device *oct, void *buf)
{
	struct octeon_skb_page_info *pg_info = buf;

	if (!pg_info->page) {
		dev_err(&oct->pci_dev->dev, "%s: pg_info->page NULL\n",
			__func__);
		return -ENOMEM;
	}

	/* Refuse to recycle a page that is shared or not NUMA-local. */
	if (unlikely(page_count(pg_info->page) != 1) ||
	    unlikely(page_to_nid(pg_info->page) != numa_node_id())) {
		dma_unmap_page(&oct->pci_dev->dev,
			       pg_info->dma, (PAGE_SIZE << 0),
			       DMA_FROM_DEVICE);
		pg_info->dma = 0;
		pg_info->page = NULL;
		pg_info->page_offset = 0;
		return -ENOMEM;
	}

	/* Flip to other half of the buffer */
	if (pg_info->page_offset == 0)
		pg_info->page_offset = LIO_RXBUFFER_SZ;
	else
		pg_info->page_offset = 0;
	page_ref_inc(pg_info->page);

	return 0;
}
274 | ||
275 | static inline void | |
276 | *recv_buffer_reuse(struct octeon_device *oct, void *buf) | |
277 | { | |
278 | struct octeon_skb_page_info *pg_info = buf, *skb_pg_info; | |
279 | struct sk_buff *skb; | |
280 | ||
281 | skb = dev_alloc_skb(MIN_SKB_SIZE + SKB_ADJ); | |
282 | if (unlikely(!skb)) { | |
283 | dma_unmap_page(&oct->pci_dev->dev, | |
284 | pg_info->dma, (PAGE_SIZE << 0), | |
285 | DMA_FROM_DEVICE); | |
286 | return NULL; | |
287 | } | |
288 | ||
289 | if ((unsigned long)skb->data & SKB_ADJ_MASK) { | |
290 | u32 r = SKB_ADJ - ((unsigned long)skb->data & SKB_ADJ_MASK); | |
291 | ||
292 | skb_reserve(skb, r); | |
293 | } | |
294 | ||
295 | skb_pg_info = ((struct octeon_skb_page_info *)(skb->cb)); | |
296 | skb_pg_info->page = pg_info->page; | |
297 | skb_pg_info->page_offset = pg_info->page_offset; | |
298 | skb_pg_info->dma = pg_info->dma; | |
299 | ||
300 | return skb; | |
301 | } | |
302 | ||
303 | static inline void | |
304 | recv_buffer_destroy(void *buffer, struct octeon_skb_page_info *pg_info) | |
305 | { | |
306 | struct sk_buff *skb = (struct sk_buff *)buffer; | |
307 | ||
308 | put_page(pg_info->page); | |
309 | pg_info->dma = 0; | |
310 | pg_info->page = NULL; | |
311 | pg_info->page_offset = 0; | |
312 | ||
313 | if (skb) | |
314 | dev_kfree_skb_any(skb); | |
315 | } | |
316 | ||
f21fb3ed | 317 | static inline void recv_buffer_free(void *buffer) |
cabeb13b RV |
318 | { |
319 | struct sk_buff *skb = (struct sk_buff *)buffer; | |
320 | struct octeon_skb_page_info *pg_info; | |
321 | ||
322 | pg_info = ((struct octeon_skb_page_info *)(skb->cb)); | |
323 | ||
324 | if (pg_info->page) { | |
325 | put_page(pg_info->page); | |
326 | pg_info->dma = 0; | |
327 | pg_info->page = NULL; | |
328 | pg_info->page_offset = 0; | |
329 | } | |
330 | ||
331 | dev_kfree_skb_any((struct sk_buff *)buffer); | |
332 | } | |
333 | ||
/** \brief Free an skb obtained from recv_buffer_fast_alloc(). */
static inline void
recv_buffer_fast_free(void *buffer)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;

	dev_kfree_skb_any(skb);
}
339 | ||
/** \brief Free a transmit-side skb; safe from any context. */
static inline void tx_buffer_free(void *buffer)
{
	struct sk_buff *skb = (struct sk_buff *)buffer;

	dev_kfree_skb_any(skb);
}
344 | ||
/* Coherent DMA alloc/free against the octeon device's PCI function. */
#define lio_dma_alloc(oct, size, dma_addr) \
	dma_alloc_coherent(&(oct)->pci_dev->dev, size, dma_addr, GFP_KERNEL)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
	dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)
67e303e0 VB |
350 | static inline void * |
351 | lio_alloc_info_buffer(struct octeon_device *oct, | |
352 | struct octeon_droq *droq) | |
353 | { | |
354 | void *virt_ptr; | |
355 | ||
356 | virt_ptr = lio_dma_alloc(oct, (droq->max_count * OCT_DROQ_INFO_SIZE), | |
357 | &droq->info_list_dma); | |
358 | if (virt_ptr) { | |
359 | droq->info_alloc_size = droq->max_count * OCT_DROQ_INFO_SIZE; | |
360 | droq->info_base_addr = virt_ptr; | |
361 | } | |
362 | ||
363 | return virt_ptr; | |
364 | } | |
365 | ||
/** \brief Free the DROQ info buffer allocated by lio_alloc_info_buffer(). */
static inline void lio_free_info_buffer(struct octeon_device *oct,
					struct octeon_droq *droq)
{
	lio_dma_free(oct, droq->info_alloc_size, droq->info_base_addr,
		     droq->info_list_dma);
}
372 | ||
cabeb13b RV |
373 | static inline |
374 | void *get_rbd(struct sk_buff *skb) | |
375 | { | |
376 | struct octeon_skb_page_info *pg_info; | |
377 | unsigned char *va; | |
378 | ||
379 | pg_info = ((struct octeon_skb_page_info *)(skb->cb)); | |
380 | va = page_address(pg_info->page) + pg_info->page_offset; | |
381 | ||
382 | return va; | |
383 | } | |
f21fb3ed RV |
384 | |
385 | static inline u64 | |
386 | lio_map_ring_info(struct octeon_droq *droq, u32 i) | |
387 | { | |
67e303e0 | 388 | return droq->info_list_dma + (i * sizeof(struct octeon_droq_info)); |
f21fb3ed RV |
389 | } |
390 | ||
/**
 * \brief Get the device-visible DMA address of a receive buffer
 * @param buf skb anchoring the receive buffer
 *
 * Reads the page info stashed in skb->cb and returns the page's DMA
 * handle plus the current page offset.  A missing page or mapping is a
 * driver bug: it is reported and warned about but not treated as fatal
 * here, so the (bogus) address is still returned.
 */
static inline u64
lio_map_ring(void *buf)
{
	dma_addr_t dma_addr;

	struct sk_buff *skb = (struct sk_buff *)buf;
	struct octeon_skb_page_info *pg_info;

	pg_info = ((struct octeon_skb_page_info *)(skb->cb));
	if (!pg_info->page) {
		pr_err("%s: pg_info->page NULL\n", __func__);
		WARN_ON(1);
	}

	/* Get DMA info */
	dma_addr = pg_info->dma;
	if (!pg_info->dma) {
		pr_err("%s: ERROR it should be already available\n",
		       __func__);
		WARN_ON(1);
	}
	dma_addr += pg_info->page_offset;

	return (u64)dma_addr;
}
416 | ||
417 | static inline void | |
418 | lio_unmap_ring(struct pci_dev *pci_dev, | |
cabeb13b RV |
419 | u64 buf_ptr) |
420 | ||
f21fb3ed | 421 | { |
cabeb13b RV |
422 | dma_unmap_page(&pci_dev->dev, |
423 | buf_ptr, (PAGE_SIZE << 0), | |
424 | DMA_FROM_DEVICE); | |
f21fb3ed RV |
425 | } |
426 | ||
/** \brief Allocate a pageless receive skb; thin wrapper for DROQ code. */
static inline void *octeon_fast_packet_alloc(u32 size)
{
	return recv_buffer_fast_alloc(size);
}
431 | ||
432 | static inline void octeon_fast_packet_next(struct octeon_droq *droq, | |
433 | struct sk_buff *nicbuf, | |
434 | int copy_len, | |
435 | int idx) | |
436 | { | |
437 | memcpy(skb_put(nicbuf, copy_len), | |
438 | get_rbd(droq->recv_buf_list[idx].buffer), copy_len); | |
439 | } | |
440 | ||
441 | #endif |