/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "LICENSE.GPL".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

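/*
 * Kernel-compatibility helpers for the ixgbe KNI ethtool code: backported
 * implementations of routines that only exist in newer kernels, each guarded
 * by LINUX_VERSION_CODE so they compile away on kernels that already provide
 * the real thing.
 */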
#include "ixgbe.h"
#include "kcompat.h"

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) )
/* From lib/vsprintf.c */
#include <asm/div64.h>

static int skip_atoi(const char **s)
{
	int i = 0;

	while (isdigit(**s))
		i = i * 10 + *((*s)++) - '0';
	return i;
}

#define _kc_ZEROPAD	1	/* pad with zero */
#define _kc_SIGN	2	/* unsigned/signed long */
#define _kc_PLUS	4	/* show plus */
#define _kc_SPACE	8	/* space if plus */
#define _kc_LEFT	16	/* left justified */
#define _kc_SPECIAL	32	/* 0x */
#define _kc_LARGE	64	/* use 'ABCDEF' instead of 'abcdef' */

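/*
 * Bounded formatter used by _kc_vsnprintf() below: emits one numeric field
 * into [buf, end], writing a byte only while buf <= end but always advancing
 * the cursor, so the caller can still compute the length a larger buffer
 * would have needed.
 */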
static char *number(char *buf, char *end, long long num, int base, int size,
		    int precision, int type)
{
	char c, sign, tmp[66];
	const char *digits;
	const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz";
	const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
	int i;

	digits = (type & _kc_LARGE) ? large_digits : small_digits;
	if (type & _kc_LEFT)
		type &= ~_kc_ZEROPAD;
	if (base < 2 || base > 36)
		return 0;
	c = (type & _kc_ZEROPAD) ? '0' : ' ';
	sign = 0;
	if (type & _kc_SIGN) {
		if (num < 0) {
			sign = '-';
			num = -num;
			size--;
		} else if (type & _kc_PLUS) {
			sign = '+';
			size--;
		} else if (type & _kc_SPACE) {
			sign = ' ';
			size--;
		}
	}
	if (type & _kc_SPECIAL) {
		if (base == 16)
			size -= 2;
		else if (base == 8)
			size--;
	}
	i = 0;
	if (num == 0)
		tmp[i++] = '0';
	else while (num != 0)
		tmp[i++] = digits[do_div(num, base)];
	if (i > precision)
		precision = i;
	size -= precision;
	if (!(type & (_kc_ZEROPAD + _kc_LEFT))) {
		while (size-- > 0) {
			if (buf <= end)
				*buf = ' ';
			++buf;
		}
	}
	if (sign) {
		if (buf <= end)
			*buf = sign;
		++buf;
	}
	if (type & _kc_SPECIAL) {
		if (base == 8) {
			if (buf <= end)
				*buf = '0';
			++buf;
		} else if (base == 16) {
			if (buf <= end)
				*buf = '0';
			++buf;
			if (buf <= end)
				*buf = digits[33];
			++buf;
		}
	}
	if (!(type & _kc_LEFT)) {
		while (size-- > 0) {
			if (buf <= end)
				*buf = c;
			++buf;
		}
	}
	while (i < precision--) {
		if (buf <= end)
			*buf = '0';
		++buf;
	}
	while (i-- > 0) {
		if (buf <= end)
			*buf = tmp[i];
		++buf;
	}
	while (size-- > 0) {
		if (buf <= end)
			*buf = ' ';
		++buf;
	}
	return buf;
}

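/*
 * Backport of vsnprintf() with C99 semantics for kernels older than 2.4.8:
 * at most @size bytes (including the trailing '\0') are written to @buf, and
 * the return value is the number of characters (excluding the trailing '\0')
 * that would have been written had the buffer been large enough.
 */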
int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
{
	int len;
	unsigned long long num;
	int i, base;
	char *str, *end, c;
	const char *s;

	int flags;		/* flags to number() */

	int field_width;	/* width of output field */
	int precision;		/* min. # of digits for integers; max
				   number of chars from string */
	int qualifier;		/* 'h', 'l', or 'L' for integer fields */
				/* 'z' support added 23/7/1999 S.H.    */
				/* 'z' changed to 'Z' --davidm 1/25/99 */

	str = buf;
	end = buf + size - 1;

	if (end < buf - 1) {
		end = ((void *) -1);
		size = end - buf + 1;
	}

	for (; *fmt ; ++fmt) {
		if (*fmt != '%') {
			if (str <= end)
				*str = *fmt;
			++str;
			continue;
		}

		/* process flags */
		flags = 0;
repeat:
		++fmt;		/* this also skips first '%' */
		switch (*fmt) {
		case '-': flags |= _kc_LEFT; goto repeat;
		case '+': flags |= _kc_PLUS; goto repeat;
		case ' ': flags |= _kc_SPACE; goto repeat;
		case '#': flags |= _kc_SPECIAL; goto repeat;
		case '0': flags |= _kc_ZEROPAD; goto repeat;
		}

		/* get field width */
		field_width = -1;
		if (isdigit(*fmt))
			field_width = skip_atoi(&fmt);
		else if (*fmt == '*') {
			++fmt;
			/* it's the next argument */
			field_width = va_arg(args, int);
			if (field_width < 0) {
				field_width = -field_width;
				flags |= _kc_LEFT;
			}
		}

		/* get the precision */
		precision = -1;
		if (*fmt == '.') {
			++fmt;
			if (isdigit(*fmt))
				precision = skip_atoi(&fmt);
			else if (*fmt == '*') {
				++fmt;
				/* it's the next argument */
				precision = va_arg(args, int);
			}
			if (precision < 0)
				precision = 0;
		}

		/* get the conversion qualifier */
		qualifier = -1;
		if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt == 'Z') {
			qualifier = *fmt;
			++fmt;
		}

		/* default base */
		base = 10;

		switch (*fmt) {
		case 'c':
			if (!(flags & _kc_LEFT)) {
				while (--field_width > 0) {
					if (str <= end)
						*str = ' ';
					++str;
				}
			}
			c = (unsigned char) va_arg(args, int);
			if (str <= end)
				*str = c;
			++str;
			while (--field_width > 0) {
				if (str <= end)
					*str = ' ';
				++str;
			}
			continue;

		case 's':
			s = va_arg(args, char *);
			if (!s)
				s = "<NULL>";

			len = strnlen(s, precision);

			if (!(flags & _kc_LEFT)) {
				while (len < field_width--) {
					if (str <= end)
						*str = ' ';
					++str;
				}
			}
			for (i = 0; i < len; ++i) {
				if (str <= end)
					*str = *s;
				++str; ++s;
			}
			while (len < field_width--) {
				if (str <= end)
					*str = ' ';
				++str;
			}
			continue;

		case 'p':
			if (field_width == -1) {
				field_width = 2 * sizeof(void *);
				flags |= _kc_ZEROPAD;
			}
			str = number(str, end,
				     (unsigned long) va_arg(args, void *),
				     16, field_width, precision, flags);
			continue;

		case 'n':
			/* FIXME:
			 * What does C99 say about the overflow case here? */
			if (qualifier == 'l') {
				long *ip = va_arg(args, long *);
				*ip = (str - buf);
			} else if (qualifier == 'Z') {
				size_t *ip = va_arg(args, size_t *);
				*ip = (str - buf);
			} else {
				int *ip = va_arg(args, int *);
				*ip = (str - buf);
			}
			continue;

		case '%':
			if (str <= end)
				*str = '%';
			++str;
			continue;

		/* integer number formats - set up the flags and "break" */
		case 'o':
			base = 8;
			break;

		case 'X':
			flags |= _kc_LARGE;
		case 'x':
			base = 16;
			break;

		case 'd':
		case 'i':
			flags |= _kc_SIGN;
		case 'u':
			break;

		default:
			if (str <= end)
				*str = '%';
			++str;
			if (*fmt) {
				if (str <= end)
					*str = *fmt;
				++str;
			} else {
				--fmt;
			}
			continue;
		}
		if (qualifier == 'L')
			num = va_arg(args, long long);
		else if (qualifier == 'l') {
			num = va_arg(args, unsigned long);
			if (flags & _kc_SIGN)
				num = (signed long) num;
		} else if (qualifier == 'Z') {
			num = va_arg(args, size_t);
		} else if (qualifier == 'h') {
			num = (unsigned short) va_arg(args, int);
			if (flags & _kc_SIGN)
				num = (signed short) num;
		} else {
			num = va_arg(args, unsigned int);
			if (flags & _kc_SIGN)
				num = (signed int) num;
		}
		str = number(str, end, num, base,
			     field_width, precision, flags);
	}
	if (str <= end)
		*str = '\0';
	else if (size > 0)
		/* don't write out a null byte if the buf size is zero */
		*end = '\0';
	/* the trailing null byte doesn't count towards the total
	 * ++str;
	 */
	return str - buf;
}

int _kc_snprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = _kc_vsnprintf(buf, size, fmt, args);
	va_end(args);
	return i;
}
#endif /* < 2.4.8 */


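/*
 * Backport of pci_vfs_assigned() for kernels before 3.10: walk the PF's
 * virtual functions and count those whose PCI_DEV_FLAGS_ASSIGNED flag is
 * set, i.e. VFs currently handed to a guest.
 */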
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) )
#ifdef CONFIG_PCI_IOV
int __kc_pci_vfs_assigned(struct pci_dev *dev)
{
	unsigned int vfs_assigned = 0;
#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED
	int pos;
	struct pci_dev *vfdev;
	unsigned short dev_id;

	/* only search if we are a PF */
	if (!dev->is_physfn)
		return 0;

	/* find SR-IOV capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos)
		return 0;

	/*
	 * determine the device ID for the VFs, the vendor ID will be the
	 * same as the PF so there is no need to check for that one
	 */
	pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id);

	/* loop through all the VFs to see if we own any that are assigned */
	vfdev = pci_get_device(dev->vendor, dev_id, NULL);
	while (vfdev) {
		/*
		 * It is considered assigned if it is a virtual function with
		 * our dev as the physical function and the assigned bit is set
		 */
		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
		    (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
			vfs_assigned++;

		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
	}

#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */
	return vfs_assigned;
}

#endif /* CONFIG_PCI_IOV */
#endif /* 3.10.0 */


/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) )

/**************************************/
/* PCI DMA MAPPING */

#if defined(CONFIG_HIGHMEM)

#ifndef PCI_DRAM_OFFSET
#define PCI_DRAM_OFFSET 0
#endif

u64
_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
		 size_t size, int direction)
{
	return ((u64) (page - mem_map) << PAGE_SHIFT) + offset +
		PCI_DRAM_OFFSET;
}

#else /* CONFIG_HIGHMEM */

u64
_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset,
		 size_t size, int direction)
{
	return pci_map_single(dev, (void *)page_address(page) + offset, size,
			      direction);
}

#endif /* CONFIG_HIGHMEM */

void
_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size,
		   int direction)
{
	return pci_unmap_single(dev, dma_addr, size, direction);
}

#endif /* 2.4.13 => 2.4.3 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) )

/**************************************/
/* PCI DRIVER API */

int
_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask)
{
	if (!pci_dma_supported(dev, mask))
		return -EIO;
	dev->dma_mask = mask;
	return 0;
}

int
_kc_pci_request_regions(struct pci_dev *dev, char *res_name)
{
	int i;

	for (i = 0; i < 6; i++) {
		if (pci_resource_len(dev, i) == 0)
			continue;

		if (pci_resource_flags(dev, i) & IORESOURCE_IO) {
			if (!request_region(pci_resource_start(dev, i),
					    pci_resource_len(dev, i),
					    res_name)) {
				pci_release_regions(dev);
				return -EBUSY;
			}
		} else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
			if (!request_mem_region(pci_resource_start(dev, i),
						pci_resource_len(dev, i),
						res_name)) {
				pci_release_regions(dev);
				return -EBUSY;
			}
		}
	}
	return 0;
}

void
_kc_pci_release_regions(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < 6; i++) {
		if (pci_resource_len(dev, i) == 0)
			continue;

		if (pci_resource_flags(dev, i) & IORESOURCE_IO)
			release_region(pci_resource_start(dev, i),
				       pci_resource_len(dev, i));
		else if (pci_resource_flags(dev, i) & IORESOURCE_MEM)
			release_mem_region(pci_resource_start(dev, i),
					   pci_resource_len(dev, i));
	}
}

/**************************************/
/* NETWORK DRIVER API */

struct net_device *
_kc_alloc_etherdev(int sizeof_priv)
{
	struct net_device *dev;
	int alloc_size;

	alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31;
	dev = kzalloc(alloc_size, GFP_KERNEL);
	if (!dev)
		return NULL;

	if (sizeof_priv)
		dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31);
	dev->name[0] = '\0';
	ether_setup(dev);

	return dev;
}

int
_kc_is_valid_ether_addr(u8 *addr)
{
	const char zaddr[6] = { 0, };

	return !(addr[0] & 1) && memcmp(addr, zaddr, 6);
}

#endif /* 2.4.3 => 2.4.0 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) )

int
_kc_pci_set_power_state(struct pci_dev *dev, int state)
{
	return 0;
}

int
_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable)
{
	return 0;
}

#endif /* 2.4.6 => 2.4.3 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) )
void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page,
			    int off, int size)
{
	skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
	frag->page = page;
	frag->page_offset = off;
	frag->size = size;
	skb_shinfo(skb)->nr_frags = i + 1;
}

/*
 * Original Copyright:
 * find_next_bit.c: fallback find next bit implementation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

/**
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
			    unsigned long offset)
{
	const unsigned long *p = addr + BITOP_WORD(offset);
	unsigned long result = offset & ~(BITS_PER_LONG - 1);
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset %= BITS_PER_LONG;
	if (offset) {
		tmp = *(p++);
		tmp &= (~0UL << offset);
		if (size < BITS_PER_LONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= BITS_PER_LONG;
		result += BITS_PER_LONG;
	}
	while (size & ~(BITS_PER_LONG - 1)) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += BITS_PER_LONG;
		size -= BITS_PER_LONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= (~0UL >> (BITS_PER_LONG - size));
	if (tmp == 0UL)		/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffs(tmp);
}

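/*
 * BSD-style strlcpy(): copy at most size - 1 bytes and always NUL-terminate
 * when size is non-zero; the return value is strlen(src), so a result >= size
 * tells the caller the copy was truncated.
 */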
size_t _kc_strlcpy(char *dest, const char *src, size_t size)
{
	size_t ret = strlen(src);

	if (size) {
		size_t len = (ret >= size) ? size - 1 : ret;
		memcpy(dest, src, len);
		dest[len] = '\0';
	}
	return ret;
}

#endif /* 2.6.0 => 2.4.6 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) )
int _kc_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);
	return (i >= size) ? (size - 1) : i;
}
#endif /* < 2.6.4 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) )
DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1};
#endif /* < 2.6.10 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) )
char *_kc_kstrdup(const char *s, unsigned int gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif /* < 2.6.13 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) )
void *_kc_kzalloc(size_t size, int flags)
{
	void *ret = kmalloc(size, flags);
	if (ret)
		memset(ret, 0, size);
	return ret;
}
#endif /* <= 2.6.13 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) )
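/*
 * Backport of skb_pad() for kernels before 2.6.19. Note the pre-2.6.19
 * calling convention: on allocation failure the skb is freed here and
 * -ENOMEM is returned, so the caller must not touch the skb again.
 */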
int _kc_skb_pad(struct sk_buff *skb, int pad)
{
	int ntail;

	/* If the skbuff is non linear tailroom is always zero.. */
	if (!skb_cloned(skb) && skb_tailroom(skb) >= pad) {
		memset(skb->data + skb->len, 0, pad);
		return 0;
	}

	ntail = skb->data_len + pad - (skb->end - skb->tail);
	if (likely(skb_cloned(skb) || ntail > 0)) {
		if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC))
			goto free_skb;
	}

#ifdef MAX_SKB_FRAGS
	if (skb_is_nonlinear(skb) &&
	    !__pskb_pull_tail(skb, skb->data_len))
		goto free_skb;

#endif
	memset(skb->data + skb->len, 0, pad);
	return 0;

free_skb:
	kfree_skb(skb);
	return -ENOMEM;
}

#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))
int _kc_pci_save_state(struct pci_dev *pdev)
{
	struct adapter_struct *adapter = pci_get_drvdata(pdev);
	int size = PCI_CONFIG_SPACE_LEN, i;
	u16 pcie_cap_offset, pcie_link_status;

#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) )
	/* no ->dev for 2.4 kernels */
	WARN_ON(pdev->dev.driver_data == NULL);
#endif
	pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (pcie_cap_offset) {
		if (!pci_read_config_word(pdev,
					  pcie_cap_offset + PCIE_LINK_STATUS,
					  &pcie_link_status))
			size = PCIE_CONFIG_SPACE_LEN;
	}
	pci_config_space_ich8lan();
#ifdef HAVE_PCI_ERS
	if (adapter->config_space == NULL)
#else
	WARN_ON(adapter->config_space != NULL);
#endif
		adapter->config_space = kmalloc(size, GFP_KERNEL);
	if (!adapter->config_space) {
		printk(KERN_ERR "Out of memory in pci_save_state\n");
		return -ENOMEM;
	}
	for (i = 0; i < (size / 4); i++)
		pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]);
	return 0;
}

void _kc_pci_restore_state(struct pci_dev *pdev)
{
	struct adapter_struct *adapter = pci_get_drvdata(pdev);
	int size = PCI_CONFIG_SPACE_LEN, i;
	u16 pcie_cap_offset;
	u16 pcie_link_status;

	if (adapter->config_space != NULL) {
		pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP);
		if (pcie_cap_offset &&
		    !pci_read_config_word(pdev,
					  pcie_cap_offset + PCIE_LINK_STATUS,
					  &pcie_link_status))
			size = PCIE_CONFIG_SPACE_LEN;

		pci_config_space_ich8lan();
		for (i = 0; i < (size / 4); i++)
			pci_write_config_dword(pdev, i * 4,
					       adapter->config_space[i]);
#ifndef HAVE_PCI_ERS
		kfree(adapter->config_space);
		adapter->config_space = NULL;
#endif
	}
}
#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */

#ifdef HAVE_PCI_ERS
void _kc_free_netdev(struct net_device *netdev)
{
	struct adapter_struct *adapter = netdev_priv(netdev);

	if (adapter->config_space != NULL)
		kfree(adapter->config_space);
#ifdef CONFIG_SYSFS
	if (netdev->reg_state == NETREG_UNINITIALIZED) {
		kfree((char *)netdev - netdev->padded);
	} else {
		BUG_ON(netdev->reg_state != NETREG_UNREGISTERED);
		netdev->reg_state = NETREG_RELEASED;
		class_device_put(&netdev->class_dev);
	}
#else
	kfree((char *)netdev - netdev->padded);
#endif
}
#endif

void *_kc_kmemdup(const void *src, size_t len, unsigned gfp)
{
	void *p;

	p = kzalloc(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
#endif /* <= 2.6.19 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) )
/* hexdump code taken from lib/hexdump.c */
static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
				   int groupsize, unsigned char *linebuf,
				   size_t linebuflen, bool ascii)
{
	const u8 *ptr = buf;
	u8 ch;
	int j, lx = 0;
	int ascii_column;

	if (rowsize != 16 && rowsize != 32)
		rowsize = 16;

	if (!len)
		goto nil;
	if (len > rowsize)		/* limit to one line at a time */
		len = rowsize;
	if ((len % groupsize) != 0)	/* no mixed size output */
		groupsize = 1;

	switch (groupsize) {
	case 8: {
		const u64 *ptr8 = buf;
		int ngroups = len / groupsize;

		for (j = 0; j < ngroups; j++)
			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
					"%s%16.16llx", j ? " " : "",
					(unsigned long long)*(ptr8 + j));
		ascii_column = 17 * ngroups + 2;
		break;
	}

	case 4: {
		const u32 *ptr4 = buf;
		int ngroups = len / groupsize;

		for (j = 0; j < ngroups; j++)
			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
					"%s%8.8x", j ? " " : "", *(ptr4 + j));
		ascii_column = 9 * ngroups + 2;
		break;
	}

	case 2: {
		const u16 *ptr2 = buf;
		int ngroups = len / groupsize;

		for (j = 0; j < ngroups; j++)
			lx += scnprintf((char *)(linebuf + lx), linebuflen - lx,
					"%s%4.4x", j ? " " : "", *(ptr2 + j));
		ascii_column = 5 * ngroups + 2;
		break;
	}

	default:
		for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) {
			ch = ptr[j];
			linebuf[lx++] = hex_asc(ch >> 4);
			linebuf[lx++] = hex_asc(ch & 0x0f);
			linebuf[lx++] = ' ';
		}
		if (j)
			lx--;

		ascii_column = 3 * rowsize + 2;
		break;
	}
	if (!ascii)
		goto nil;

	while (lx < (linebuflen - 1) && lx < (ascii_column - 1))
		linebuf[lx++] = ' ';
	for (j = 0; (j < len) && (lx + 2) < linebuflen; j++)
		linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? ptr[j]
								     : '.';
nil:
	linebuf[lx++] = '\0';
}

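/*
 * Backport of print_hex_dump() for kernels before 2.6.22: dumps @len bytes of
 * @buf via printk(), @rowsize (16 or 32) bytes per line, grouped @groupsize
 * bytes at a time, with an optional address/offset prefix and ASCII column.
 */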
void _kc_print_hex_dump(const char *level,
			const char *prefix_str, int prefix_type,
			int rowsize, int groupsize,
			const void *buf, size_t len, bool ascii)
{
	const u8 *ptr = buf;
	int i, linelen, remaining = len;
	unsigned char linebuf[200];

	if (rowsize != 16 && rowsize != 32)
		rowsize = 16;

	for (i = 0; i < len; i += rowsize) {
		linelen = min(remaining, rowsize);
		remaining -= rowsize;
		_kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
				       linebuf, sizeof(linebuf), ascii);

		switch (prefix_type) {
		case DUMP_PREFIX_ADDRESS:
			printk("%s%s%*p: %s\n", level, prefix_str,
			       (int)(2 * sizeof(void *)), ptr + i, linebuf);
			break;
		case DUMP_PREFIX_OFFSET:
			printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
			break;
		default:
			printk("%s%s%s\n", level, prefix_str, linebuf);
			break;
		}
	}
}
#endif /* < 2.6.22 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) )
int ixgbe_dcb_netlink_register(void)
{
	return 0;
}

int ixgbe_dcb_netlink_unregister(void)
{
	return 0;
}

int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max)
{
	return 0;
}
#endif /* < 2.6.23 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) )
#ifdef NAPI
struct net_device *napi_to_poll_dev(struct napi_struct *napi)
{
	struct adapter_q_vector *q_vector = container_of(napi,
							 struct adapter_q_vector,
							 napi);
	return &q_vector->poll_dev;
}

int __kc_adapter_clean(struct net_device *netdev, int *budget)
{
	int work_done;
	int work_to_do = min(*budget, netdev->quota);
	/* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */
	struct napi_struct *napi = netdev->priv;
	work_done = napi->poll(napi, work_to_do);
	*budget -= work_done;
	netdev->quota -= work_done;
	return (work_done >= work_to_do) ? 1 : 0;
}
#endif /* NAPI */
#endif /* <= 2.6.24 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) )
void _kc_pci_disable_link_state(struct pci_dev *pdev, int state)
{
	struct pci_dev *parent = pdev->bus->self;
	u16 link_state;
	int pos;

	if (!parent)
		return;

	pos = pci_find_capability(parent, PCI_CAP_ID_EXP);
	if (pos) {
		pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state);
		link_state &= ~state;
		pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state);
	}
}
#endif /* < 2.6.26 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) )
#ifdef HAVE_TX_MQ
void _kc_netif_tx_stop_all_queues(struct net_device *netdev)
{
	struct adapter_struct *adapter = netdev_priv(netdev);
	int i;

	netif_stop_queue(netdev);
	if (netif_is_multiqueue(netdev))
		for (i = 0; i < adapter->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
}

void _kc_netif_tx_wake_all_queues(struct net_device *netdev)
{
	struct adapter_struct *adapter = netdev_priv(netdev);
	int i;

	netif_wake_queue(netdev);
	if (netif_is_multiqueue(netdev))
		for (i = 0; i < adapter->num_tx_queues; i++)
			netif_wake_subqueue(netdev, i);
}

void _kc_netif_tx_start_all_queues(struct net_device *netdev)
{
	struct adapter_struct *adapter = netdev_priv(netdev);
	int i;

	netif_start_queue(netdev);
	if (netif_is_multiqueue(netdev))
		for (i = 0; i < adapter->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
}
#endif /* HAVE_TX_MQ */

#ifndef __WARN_printf
void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...)
{
	va_list args;

	printk(KERN_WARNING "------------[ cut here ]------------\n");
	printk(KERN_WARNING "WARNING: at %s:%d\n", file, line);
	va_start(args, fmt);
	vprintk(fmt, args);
	va_end(args);

	dump_stack();
}
#endif /* __WARN_printf */
#endif /* < 2.6.27 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) )

int
_kc_pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state;
	int error;

	target_state = pci_choose_state(dev, PMSG_SUSPEND);

	pci_enable_wake(dev, target_state, true);

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

int
_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	int err;

	err = pci_enable_wake(dev, PCI_D3cold, enable);
	if (err)
		goto out;

	err = pci_enable_wake(dev, PCI_D3hot, enable);

out:
	return err;
}
#endif /* < 2.6.28 */

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) )
void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page,
			 int off, int size)
{
	skb_fill_page_desc(skb, i, page, off, size);
	skb->len += size;
	skb->data_len += size;
	skb->truesize += size;
}
#endif /* < 3.4.0 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) )
#ifdef HAVE_NETDEV_SELECT_QUEUE
#include <net/ip.h>
static u32 _kc_simple_tx_hashrnd;
static u32 _kc_simple_tx_hashrnd_initialized;

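/*
 * Backport of skb_tx_hash() for kernels before 2.6.30: hash the IPv4/IPv6
 * addresses and the first 32 bits of the transport header (the source and
 * destination ports for TCP/UDP) with jhash, then scale the result onto
 * [0, real_num_tx_queues).
 */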
u16 _kc_skb_tx_hash(struct net_device *dev, struct sk_buff *skb)
{
	u32 addr1, addr2, ports;
	u32 hash, ihl;
	u8 ip_proto = 0;

	if (unlikely(!_kc_simple_tx_hashrnd_initialized)) {
		get_random_bytes(&_kc_simple_tx_hashrnd, 4);
		_kc_simple_tx_hashrnd_initialized = 1;
	}

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
			ip_proto = ip_hdr(skb)->protocol;
		addr1 = ip_hdr(skb)->saddr;
		addr2 = ip_hdr(skb)->daddr;
		ihl = ip_hdr(skb)->ihl;
		break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case htons(ETH_P_IPV6):
		ip_proto = ipv6_hdr(skb)->nexthdr;
		addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
		addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
		ihl = (40 >> 2);
		break;
#endif
	default:
		return 0;
	}

	switch (ip_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE:
		ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
		break;

	default:
		ports = 0;
		break;
	}

	hash = jhash_3words(addr1, addr2, ports, _kc_simple_tx_hashrnd);

	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
#endif /* HAVE_NETDEV_SELECT_QUEUE */
#endif /* < 2.6.30 */

#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) )
#ifdef HAVE_TX_MQ
#ifndef CONFIG_NETDEVICES_MULTIQUEUE
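/*
 * Backport of netif_set_real_num_tx_queues() for kernels before 2.6.35:
 * requests above num_tx_queues are ignored, and when the count shrinks the
 * qdiscs attached to the now-unused queues are reset under their locks.
 */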
void _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
{
	unsigned int real_num = dev->real_num_tx_queues;
	struct Qdisc *qdisc;
	int i;

	if (unlikely(txq > dev->num_tx_queues))
		;
	else if (txq > real_num)
		dev->real_num_tx_queues = txq;
	else if (txq < real_num) {
		dev->real_num_tx_queues = txq;
		for (i = txq; i < dev->num_tx_queues; i++) {
			qdisc = netdev_get_tx_queue(dev, i)->qdisc;
			if (qdisc) {
				spin_lock_bh(qdisc_lock(qdisc));
				qdisc_reset(qdisc);
				spin_unlock_bh(qdisc_lock(qdisc));
			}
		}
	}
}
#endif /* CONFIG_NETDEVICES_MULTIQUEUE */
#endif /* HAVE_TX_MQ */
#endif /* < 2.6.35 */

/*****************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) )
static const u32 _kc_flags_dup_features =
	(ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);

u32 _kc_ethtool_op_get_flags(struct net_device *dev)
{
	return dev->features & _kc_flags_dup_features;
}

int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported)
{
	if (data & ~supported)
		return -EINVAL;

	dev->features = ((dev->features & ~_kc_flags_dup_features) |
			 (data & _kc_flags_dup_features));
	return 0;
}
#endif /* < 2.6.36 */

/******************************************************************************/
#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) )
#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)))
u8 _kc_netdev_get_num_tc(struct net_device *dev)
{
	struct adapter_struct *kc_adapter = netdev_priv(dev);

	if (kc_adapter->flags & IXGBE_FLAG_DCB_ENABLED)
		return kc_adapter->tc;
	else
		return 0;
}

u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up)
{
	struct adapter_struct *kc_adapter = netdev_priv(dev);
	int tc;
	u8 map;

	for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) {
		map = kc_adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap;

		if (map & (1 << up))
			return tc;
	}

	return 0;
}
#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */
#endif /* < 2.6.39 */