/*
 * Copyright 2002, 2003 Andi Kleen, SuSE Labs.
 * Subject to the GNU Public License v.2
 *
 * Wrappers of assembly checksum functions for x86-64.
 */
#include <asm/checksum.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/smap.h>

/**
 * csum_partial_copy_from_user - Copy and checksum from user space.
 * @src: source address (user space)
 * @dst: destination address
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad source address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
			    int len, __wsum isum, int *errp)
{
	might_sleep();
	*errp = 0;

	if (!likely(access_ok(VERIFY_READ, src, len)))
		goto out_err;

	/*
	 * Why 6, not 7? To handle odd addresses aligned we
	 * would need to do considerable complications to fix the
	 * checksum which is defined as an 16bit accumulator. The
	 * fix alignment code is primarily for performance
	 * compatibility with 32bit and that will handle odd
	 * addresses slowly too.
	 */
	if (unlikely((unsigned long)src & 6)) {
		/*
		 * Copy leading 16-bit chunks one at a time with
		 * __get_user() until src is at least 8-byte aligned
		 * (or we run out of data), folding each chunk into
		 * the running sum with end-around carry.
		 */
		while (((unsigned long)src & 6) && len >= 2) {
			__u16 val16;

			if (__get_user(val16, (const __u16 __user *)src))
				goto out_err;

			*(__u16 *)dst = val16;
			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			src += 2;
			dst += 2;
			len -= 2;
		}
	}
	/* Open the user-access window (SMAP) around the asm copy routine. */
	stac();
	isum = csum_partial_copy_generic((__force const void *)src,
					 dst, len, isum, errp, NULL);
	clac();
	if (unlikely(*errp))
		goto out_err;

	return isum;

out_err:
	/*
	 * On fault: report -EFAULT and zero the (remaining) destination
	 * bytes so no uninitialized kernel data is left in dst.
	 */
	*errp = -EFAULT;
	memset(dst, 0, len);

	return isum;
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
72 | ||
/**
 * csum_partial_copy_to_user - Copy and checksum to user space.
 * @src: source address
 * @dst: destination address (user space)
 * @len: number of bytes to be copied.
 * @isum: initial sum that is added into the result (32bit unfolded)
 * @errp: set to -EFAULT for a bad destination address.
 *
 * Returns a 32bit unfolded checksum of the buffer.
 * src and dst are best aligned to 64bits.
 */
__wsum
csum_partial_copy_to_user(const void *src, void __user *dst,
			  int len, __wsum isum, int *errp)
{
	__wsum ret;

	might_sleep();

	if (unlikely(!access_ok(VERIFY_WRITE, dst, len))) {
		*errp = -EFAULT;
		return 0;
	}

	/*
	 * Same alignment trick as the from_user variant: copy leading
	 * 16-bit chunks with __put_user() until dst is 8-byte aligned,
	 * folding each chunk into the sum with end-around carry.
	 * (See the from_user variant for why the mask is 6, not 7.)
	 */
	if (unlikely((unsigned long)dst & 6)) {
		while (((unsigned long)dst & 6) && len >= 2) {
			__u16 val16 = *(__u16 *)src;

			isum = (__force __wsum)add32_with_carry(
					(__force unsigned)isum, val16);
			*errp = __put_user(val16, (__u16 __user *)dst);
			if (*errp)
				return isum;
			src += 2;
			dst += 2;
			len -= 2;
		}
	}

	*errp = 0;
	/* Open the user-access window (SMAP) around the asm copy routine. */
	stac();
	ret = csum_partial_copy_generic(src, (void __force *)dst,
					len, isum, NULL, errp);
	clac();
	return ret;
}
EXPORT_SYMBOL(csum_partial_copy_to_user);
120 | ||
0df025b7 | 121 | /** |
1da177e4 LT |
122 | * csum_partial_copy_nocheck - Copy and checksum. |
123 | * @src: source address | |
124 | * @dst: destination address | |
125 | * @len: number of bytes to be copied. | |
c15acff3 | 126 | * @sum: initial sum that is added into the result (32bit unfolded) |
0df025b7 | 127 | * |
1da177e4 | 128 | * Returns an 32bit unfolded checksum of the buffer. |
0df025b7 | 129 | */ |
a4f89fb7 AV |
130 | __wsum |
131 | csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum) | |
0df025b7 PC |
132 | { |
133 | return csum_partial_copy_generic(src, dst, len, sum, NULL, NULL); | |
134 | } | |
2ee60e17 | 135 | EXPORT_SYMBOL(csum_partial_copy_nocheck); |
1da177e4 | 136 | |
/*
 * csum_ipv6_magic - Compute the folded IPv6 pseudo-header checksum.
 * @saddr: IPv6 source address (16 bytes, read as two 64-bit words)
 * @daddr: IPv6 destination address (16 bytes, read as two 64-bit words)
 * @len:   payload length (host order; converted to network order here)
 * @proto: next-header protocol number
 * @sum:   initial unfolded 32-bit sum to include
 *
 * Returns the 16-bit folded checksum, ready to be stored in a header.
 */
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum)
{
	__u64 rest, sum64;

	/* Pre-add the scalar pseudo-header fields in a 64-bit accumulator. */
	rest = (__force __u64)htonl(len) + (__force __u64)htons(proto) +
		(__force __u64)sum;

	/*
	 * Sum the four 64-bit halves of the two addresses with a carry
	 * chain (adcq), then fold the final carry back in (adcq $0).
	 */
	asm(" addq (%[saddr]),%[sum]\n"
	    " adcq 8(%[saddr]),%[sum]\n"
	    " adcq (%[daddr]),%[sum]\n"
	    " adcq 8(%[daddr]),%[sum]\n"
	    " adcq $0,%[sum]\n"

	    : [sum] "=r" (sum64)
	    : "[sum]" (rest), [saddr] "r" (saddr), [daddr] "r" (daddr));

	/* Fold 64 -> 32 bits (with carry), then 32 -> 16 via csum_fold(). */
	return csum_fold(
		(__force __wsum)add32_with_carry(sum64 & 0xffffffff, sum64>>32));
}
EXPORT_SYMBOL(csum_ipv6_magic);