#ifndef _ASM_POWERPC_CHECKSUM_H
#define _ASM_POWERPC_CHECKSUM_H
#ifdef __KERNEL__

/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/bitops.h>
#include <linux/in6.h>

/*
 * Computes the checksum of a memory block at src, length len,
 * and adds in "sum" (32-bit), while copying the block to dst.
 * If an access exception occurs on src or dst, it stores -EFAULT
 * to *src_err or *dst_err respectively (if that pointer is not
 * NULL), and, for an error on src, zeroes the rest of dst.
 *
 * Like csum_partial, this must be called with even lengths,
 * except for the last fragment.
 */
extern __wsum csum_partial_copy_generic(const void *src, void *dst,
					int len, __wsum sum,
					int *src_err, int *dst_err);

#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
extern __wsum csum_and_copy_from_user(const void __user *src, void *dst,
				      int len, __wsum sum, int *err_ptr);
#define HAVE_CSUM_COPY_USER
extern __wsum csum_and_copy_to_user(const void *src, void __user *dst,
				    int len, __wsum sum, int *err_ptr);

#define csum_partial_copy_nocheck(src, dst, len, sum) \
	csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL)
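
/*
 * Typical use (illustrative sketch; "from", "to" and "copy" are
 * placeholder names): pull a fragment in from user space while
 * accumulating its checksum, turning a fault into -EFAULT:
 *
 *	int err = 0;
 *
 *	csum = csum_and_copy_from_user(from, to, copy, csum, &err);
 *	if (err)
 *		return -EFAULT;
 */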

/*
 * turns a 32-bit partial checksum (e.g. from csum_partial) into a
 * 1's complement 16-bit checksum.
 */
static inline __sum16 csum_fold(__wsum sum)
{
	unsigned int tmp;

	/* swap the two 16-bit halves of sum */
	__asm__("rlwinm %0,%1,16,0,31" : "=r" (tmp) : "r" (sum));
	/* if there is a carry from adding the two 16-bit halves,
	   it will carry from the lower half into the upper half,
	   giving us the correct sum in the upper half. */
	return (__force __sum16)(~((__force u32)sum + tmp) >> 16);
}
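
/*
 * For reference, a portable sketch of what the asm above computes
 * (ror32() comes from <linux/bitops.h>, already included): rotating
 * by 16 and adding sums the two halves, with any carry landing in
 * the top half:
 *
 *	u32 s = (__force u32)sum;
 *
 *	s += ror32(s, 16);
 *	return (__force __sum16)(~s >> 16);
 *
 * E.g. sum = 0x12345678: 0x12345678 + 0x56781234 = 0x68ac68ac, and
 * ~0x68ac68ac >> 16 = 0x9753, the folded 1's complement result.
 */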

static inline u32 from64to32(u64 x)
{
	return (x + ror64(x, 32)) >> 32;
}
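
/*
 * How from64to32() works: x + ror64(x, 32) adds the low and high
 * words so that the upper half holds lo + hi plus the end-around
 * carry, and the >> 32 extracts it.  Worked example:
 * x = 0xffffffff00000001 sums with 0x00000001ffffffff to a wrapped
 * 0x0000000100000000, and >> 32 yields 1.
 */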

static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
#ifdef __powerpc64__
	u64 s = (__force u32)sum;

	s += (__force u32)saddr;
	s += (__force u32)daddr;
#ifdef __BIG_ENDIAN__
	s += proto + len;
#else
	s += (proto + len) << 8;
#endif
	return (__force __wsum) from64to32(s);
#else
	__asm__("\n\
	addc %0,%0,%1 \n\
	adde %0,%0,%2 \n\
	adde %0,%0,%3 \n\
	addze %0,%0 \n\
	"
	: "=r" (sum)
	: "r" (daddr), "r"(saddr), "r"(proto + len), "0"(sum));
	return sum;
#endif
}
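
/*
 * Note on the little-endian branch above: on LE the running sum is
 * built from byte-swapped 16-bit words, and (proto + len) << 8 is
 * exactly the value that folds down to the byte-swapped image of
 * the big-endian pseudo-header words (for a 16-bit v, the two
 * halves of v << 8 sum to swab16(v)).
 */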

/*
 * computes the checksum of the TCP/UDP pseudo-header
 * returns a 16-bit checksum, already complemented
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
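
/*
 * Typical use (illustrative sketch; "uh" and "udp_len" are
 * placeholder names): finish a UDP checksum by folding the payload
 * sum into the pseudo-header, remembering that 0 means "no
 * checksum" for UDP:
 *
 *	__wsum csum = csum_partial(uh, udp_len, 0);
 *
 *	uh->check = csum_tcpudp_magic(saddr, daddr, udp_len,
 *				      IPPROTO_UDP, csum);
 *	if (uh->check == 0)
 *		uh->check = CSUM_MANGLED_0;
 */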

#define HAVE_ARCH_CSUM_ADD
static inline __wsum csum_add(__wsum csum, __wsum addend)
{
#ifdef __powerpc64__
	u64 res = (__force u64)csum;
#endif
	if (__builtin_constant_p(csum) && csum == 0)
		return addend;
	if (__builtin_constant_p(addend) && addend == 0)
		return csum;

#ifdef __powerpc64__
	res += (__force u64)addend;
	return (__force __wsum)((u32)res + (res >> 32));
#else
	asm("addc %0,%0,%1;"
	    "addze %0,%0;"
	    : "+r" (csum) : "r" (addend) : "xer");
	return csum;
#endif
}
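
/*
 * What csum_add() computes, as a portable sketch: a 32-bit 1's
 * complement add, i.e. the carry out of bit 31 is added back in
 * (the end-around carry):
 *
 *	u32 res = (__force u32)csum + (__force u32)addend;
 *
 *	if (res < (__force u32)csum)
 *		res++;
 *	return (__force __wsum)res;
 */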

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which are always checksummed on 4-octet boundaries.  ihl is the
 * number of 32-bit words and is always >= 5.
 */
static inline __wsum ip_fast_csum_nofold(const void *iph, unsigned int ihl)
{
	const u32 *ptr = (const u32 *)iph + 1;
#ifdef __powerpc64__
	unsigned int i;
	u64 s = *(const u32 *)iph;

	for (i = 0; i < ihl - 1; i++, ptr++)
		s += *ptr;
	return (__force __wsum)from64to32(s);
#else
	__wsum sum, tmp;

	asm("mtctr %3;"
	    "addc %0,%4,%5;"
	    "1: lwzu %1, 4(%2);"
	    "adde %0,%0,%1;"
	    "bdnz 1b;"
	    "addze %0,%0;"
	    : "=r" (sum), "=r" (tmp), "+b" (ptr)
	    : "r" (ihl - 2), "r" (*(const u32 *)iph), "r" (*ptr)
	    : "ctr", "xer", "memory");

	return sum;
#endif
}
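
/*
 * In the 32-bit asm above: addc sums the first two header words
 * (operands %4 and %5), mtctr loads the count of remaining words
 * (ihl - 2), the lwzu/adde loop accumulates them with carry, and
 * the final addze folds the last carry back into the sum.
 */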

static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	return csum_fold(ip_fast_csum_nofold(iph, ihl));
}
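
/*
 * Example (sketch): validate a received IPv4 header.  A header whose
 * checksum field is correct sums to 0xffff across all of its words,
 * so ip_fast_csum() over the whole header returns 0:
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *
 *	if (ip_fast_csum(iph, iph->ihl))
 *		goto drop;
 */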

/*
 * computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit)
 *
 * returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic
 *
 * this function must be called with even lengths, except
 * for the last fragment, which may be odd
 *
 * it's best to have buff aligned on a 32-bit boundary
 */
__wsum __csum_partial(const void *buff, int len, __wsum sum);

static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
	if (__builtin_constant_p(len) && len <= 16 && (len & 1) == 0) {
		if (len == 2)
			sum = csum_add(sum, (__force __wsum)*(const u16 *)buff);
		if (len >= 4)
			sum = csum_add(sum, (__force __wsum)*(const u32 *)buff);
		if (len == 6)
			sum = csum_add(sum, (__force __wsum)
					   *(const u16 *)(buff + 4));
		if (len >= 8)
			sum = csum_add(sum, (__force __wsum)
					   *(const u32 *)(buff + 4));
		if (len == 10)
			sum = csum_add(sum, (__force __wsum)
					   *(const u16 *)(buff + 8));
		if (len >= 12)
			sum = csum_add(sum, (__force __wsum)
					   *(const u32 *)(buff + 8));
		if (len == 14)
			sum = csum_add(sum, (__force __wsum)
					   *(const u16 *)(buff + 12));
		if (len >= 16)
			sum = csum_add(sum, (__force __wsum)
					   *(const u32 *)(buff + 12));
	} else if (__builtin_constant_p(len) && (len & 3) == 0) {
		sum = csum_add(sum, ip_fast_csum_nofold(buff, len >> 2));
	} else {
		sum = __csum_partial(buff, len, sum);
	}
	return sum;
}
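
/*
 * With a compile-time constant length the whole thing inlines; a
 * sketch of what, say, an 8-byte checksum reduces to (no call to
 * __csum_partial() is emitted):
 *
 *	__wsum s = csum_partial(buff, 8, 0);
 *
 * is equivalent to
 *
 *	__wsum s = csum_add(csum_add(0, (__force __wsum)*(const u32 *)buff),
 *			    (__force __wsum)*(const u32 *)(buff + 4));
 */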

/*
 * this routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
	return csum_fold(csum_partial(buff, len, 0));
}
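
/*
 * Typical use (sketch, icmp.c style; "icmph" is a placeholder):
 * zero the checksum field, then checksum the whole message:
 *
 *	icmph->checksum = 0;
 *	icmph->checksum = ip_compute_csum(icmph, len);
 */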

#define _HAVE_ARCH_IPV6_CSUM
__sum16 csum_ipv6_magic(const struct in6_addr *saddr,
			const struct in6_addr *daddr,
			__u32 len, __u8 proto, __wsum sum);
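
/*
 * Typical use (sketch; names illustrative): finalize an IPv6 TCP
 * checksum from a partial sum over the TCP header and payload:
 *
 *	th->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, len,
 *				    IPPROTO_TCP, csum_partial(th, len, 0));
 */
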
#endif /* __KERNEL__ */
#endif