]>
Commit | Line | Data |
---|---|---|
d76c1ae4 IM |
1 | /* |
2 | * Copyright 2002, 2003 Andi Kleen, SuSE Labs. | |
1da177e4 | 3 | * Subject to the GNU Public License v.2 |
0df025b7 | 4 | * |
1da177e4 LT |
5 | * Wrappers of assembly checksum functions for x86-64. |
6 | */ | |
1da177e4 LT |
7 | #include <asm/checksum.h> |
8 | #include <linux/module.h> | |
9 | ||
0df025b7 PC |
10 | /** |
11 | * csum_partial_copy_from_user - Copy and checksum from user space. | |
12 | * @src: source address (user space) | |
1da177e4 LT |
13 | * @dst: destination address |
14 | * @len: number of bytes to be copied. | |
15 | * @isum: initial sum that is added into the result (32bit unfolded) | |
16 | * @errp: set to -EFAULT for an bad source address. | |
0df025b7 | 17 | * |
1da177e4 | 18 | * Returns an 32bit unfolded checksum of the buffer. |
0df025b7 PC |
19 | * src and dst are best aligned to 64bits. |
20 | */ | |
a4f89fb7 AV |
21 | __wsum |
22 | csum_partial_copy_from_user(const void __user *src, void *dst, | |
23 | int len, __wsum isum, int *errp) | |
0df025b7 | 24 | { |
1da177e4 LT |
25 | might_sleep(); |
26 | *errp = 0; | |
d76c1ae4 IM |
27 | |
28 | if (!likely(access_ok(VERIFY_READ, src, len))) | |
29 | goto out_err; | |
30 | ||
31 | /* | |
32 | * Why 6, not 7? To handle odd addresses aligned we | |
33 | * would need to do considerable complications to fix the | |
34 | * checksum which is defined as an 16bit accumulator. The | |
35 | * fix alignment code is primarily for performance | |
36 | * compatibility with 32bit and that will handle odd | |
37 | * addresses slowly too. | |
38 | */ | |
39 | if (unlikely((unsigned long)src & 6)) { | |
40 | while (((unsigned long)src & 6) && len >= 2) { | |
41 | __u16 val16; | |
42 | ||
43 | *errp = __get_user(val16, (const __u16 __user *)src); | |
44 | if (*errp) | |
45 | return isum; | |
46 | ||
47 | *(__u16 *)dst = val16; | |
48 | isum = (__force __wsum)add32_with_carry( | |
49 | (__force unsigned)isum, val16); | |
50 | src += 2; | |
51 | dst += 2; | |
52 | len -= 2; | |
1da177e4 | 53 | } |
0df025b7 | 54 | } |
d76c1ae4 IM |
55 | isum = csum_partial_copy_generic((__force const void *)src, |
56 | dst, len, isum, errp, NULL); | |
57 | if (unlikely(*errp)) | |
58 | goto out_err; | |
59 | ||
60 | return isum; | |
61 | ||
62 | out_err: | |
1da177e4 | 63 | *errp = -EFAULT; |
0df025b7 | 64 | memset(dst, 0, len); |
d76c1ae4 | 65 | |
0df025b7 PC |
66 | return isum; |
67 | } | |
1da177e4 LT |
68 | EXPORT_SYMBOL(csum_partial_copy_from_user); |
69 | ||
0df025b7 PC |
70 | /** |
71 | * csum_partial_copy_to_user - Copy and checksum to user space. | |
1da177e4 LT |
72 | * @src: source address |
73 | * @dst: destination address (user space) | |
74 | * @len: number of bytes to be copied. | |
75 | * @isum: initial sum that is added into the result (32bit unfolded) | |
76 | * @errp: set to -EFAULT for an bad destination address. | |
0df025b7 | 77 | * |
1da177e4 LT |
78 | * Returns an 32bit unfolded checksum of the buffer. |
79 | * src and dst are best aligned to 64bits. | |
0df025b7 | 80 | */ |
a4f89fb7 AV |
81 | __wsum |
82 | csum_partial_copy_to_user(const void *src, void __user *dst, | |
83 | int len, __wsum isum, int *errp) | |
0df025b7 | 84 | { |
1da177e4 | 85 | might_sleep(); |
d76c1ae4 | 86 | |
1da177e4 LT |
87 | if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) { |
88 | *errp = -EFAULT; | |
0df025b7 | 89 | return 0; |
1da177e4 LT |
90 | } |
91 | ||
92 | if (unlikely((unsigned long)dst & 6)) { | |
0df025b7 | 93 | while (((unsigned long)dst & 6) && len >= 2) { |
1da177e4 | 94 | __u16 val16 = *(__u16 *)src; |
d76c1ae4 | 95 | |
a4f89fb7 AV |
96 | isum = (__force __wsum)add32_with_carry( |
97 | (__force unsigned)isum, val16); | |
1da177e4 LT |
98 | *errp = __put_user(val16, (__u16 __user *)dst); |
99 | if (*errp) | |
100 | return isum; | |
0df025b7 PC |
101 | src += 2; |
102 | dst += 2; | |
1da177e4 LT |
103 | len -= 2; |
104 | } | |
105 | } | |
106 | ||
107 | *errp = 0; | |
d76c1ae4 IM |
108 | return csum_partial_copy_generic(src, (void __force *)dst, |
109 | len, isum, NULL, errp); | |
0df025b7 | 110 | } |
1da177e4 LT |
111 | EXPORT_SYMBOL(csum_partial_copy_to_user); |
112 | ||
0df025b7 | 113 | /** |
1da177e4 LT |
114 | * csum_partial_copy_nocheck - Copy and checksum. |
115 | * @src: source address | |
116 | * @dst: destination address | |
117 | * @len: number of bytes to be copied. | |
118 | * @isum: initial sum that is added into the result (32bit unfolded) | |
0df025b7 | 119 | * |
1da177e4 | 120 | * Returns an 32bit unfolded checksum of the buffer. |
0df025b7 | 121 | */ |
a4f89fb7 AV |
122 | __wsum |
123 | csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) | |
0df025b7 PC |
124 | { |
125 | return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); | |
126 | } | |
2ee60e17 | 127 | EXPORT_SYMBOL(csum_partial_copy_nocheck); |
1da177e4 | 128 | |
/*
 * Compute the folded 16-bit checksum of an IPv6 pseudo-header
 * (source address, destination address, payload length, next-header
 * protocol) plus an initial unfolded sum.
 */
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, unsigned short proto, __wsum sum)
{
	__u64 rest, sum64;

	/*
	 * Seed the 64-bit accumulator with len, proto and the caller's
	 * sum; each is at most 32 bits wide, so this addition cannot
	 * overflow 64 bits.
	 */
	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
		(__force __u64)sum;

	/*
	 * Add both 128-bit addresses as four 64-bit words with carry
	 * propagation; the final adcq folds the last carry back in.
	 *
	 * NOTE(review): the asm dereferences *saddr/*daddr but the
	 * constraints only pass the pointers as register inputs — no "m"
	 * operand or "memory" clobber tells the compiler those 16-byte
	 * buffers are read.  Confirm no caller stores to them immediately
	 * before this call, or consider adding memory input operands.
	 */
	asm("  addq (%[saddr]),%[sum]\n"
	    "  adcq 8(%[saddr]),%[sum]\n"
	    "  adcq (%[daddr]),%[sum]\n"
	    "  adcq 8(%[daddr]),%[sum]\n"
	    "  adcq $0,%[sum]\n"

	    : [sum] "=r" (sum64)
	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));

	/* Fold 64 -> 32 bits (with carry), then 32 -> 16 bits. */
	return csum_fold(
	       (__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}
EXPORT_SYMBOL(csum_ipv6_magic);