#ifndef _S390_CHECKSUM_H
#define _S390_CHECKSUM_H

/*
 *  include/asm-s390/checksum.h
 *    S390 fast network checksum routines
 *    see also arch/S390/lib/checksum.c
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Ulrich Hild        (first version)
 *               Martin Schwidefsky (heavily optimized CKSM version)
 *               D.J. Barrow        (third attempt)
 */

#include <asm/uaccess.h>

/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit).
 *
 * Returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic.
 *
 * This function must be called with even lengths, except
 * for the last fragment, which may be odd.
 *
 * It's best to have buff aligned on a 32-bit boundary.
 */
/*
 * csum_partial - checksum "len" bytes at "buff", folding in "sum".
 *
 * The CKSM instruction takes the operand address in an even/odd
 * register pair (address in reg2, remaining length in reg3) and may
 * stop before consuming the whole buffer, signalling this via the
 * condition code; the "jo 0b" branch re-issues CKSM until it is done.
 * The "memory" clobber tells gcc the buffer contents are read here.
 */
static inline __wsum
csum_partial(const void *buff, int len, __wsum sum)
{
	register unsigned long reg2 asm("2") = (unsigned long) buff;
	register unsigned long reg3 asm("3") = (unsigned long) len;

	asm volatile(
		"0: cksm %0,%1\n"	/* do checksum on longs */
		" jo 0b\n"
		: "+d" (sum), "+d" (reg2), "+d" (reg3) : : "cc", "memory");
	return sum;
}

/*
 * Same as csum_partial_copy, but copies from user space.
 *
 * Here it is even more important to align src and dst on a 32-bit
 * (or, better, a 64-bit) boundary.
 *
 * Copy from user space and compute the checksum.  If we catch an
 * exception, zero the rest of the buffer.
 */
f994aae1 AV |
52 | static inline __wsum |
53 | csum_partial_copy_from_user(const void __user *src, void *dst, | |
54 | int len, __wsum sum, | |
1da177e4 LT |
55 | int *err_ptr) |
56 | { | |
57 | int missing; | |
58 | ||
59 | missing = copy_from_user(dst, src, len); | |
60 | if (missing) { | |
61 | memset(dst + len - missing, 0, missing); | |
62 | *err_ptr = -EFAULT; | |
63 | } | |
64 | ||
65 | return csum_partial(dst, len, sum); | |
66 | } | |


f994aae1 AV |
69 | static inline __wsum |
70 | csum_partial_copy_nocheck (const void *src, void *dst, int len, __wsum sum) | |
1da177e4 LT |
71 | { |
72 | memcpy(dst,src,len); | |
94c12cc7 | 73 | return csum_partial(dst, len, sum); |
1da177e4 LT |
74 | } |

/*
 * Fold a partial checksum without adding pseudo headers.
 */
/*
 * csum_fold - fold a 32-bit partial checksum down to 16 bits.
 *
 * Both variants add the high and low halfwords of "sum" together
 * (propagating the carry back in, as required for ones-complement
 * arithmetic) and return the complemented 16-bit result.  The 31-bit
 * variant uses an even/odd register_pair as scratch; the 64-bit
 * variant clobbers registers 2 and 3 directly.
 */
static inline __sum16 csum_fold(__wsum sum)
{
#ifndef __s390x__
	register_pair rp;

	asm volatile(
		" slr %N1,%N1\n"	/* %0 = H L */
		" lr %1,%0\n"	/* %0 = H L, %1 = H L 0 0 */
		" srdl %1,16\n"	/* %0 = H L, %1 = 0 H L 0 */
		" alr %1,%N1\n"	/* %0 = H L, %1 = L H L 0 */
		" alr %0,%1\n"	/* %0 = H+L+C L+H */
		" srl %0,16\n"	/* %0 = H+L+C */
		: "+&d" (sum), "=d" (rp) : : "cc");
#else /* __s390x__ */
	asm volatile(
		" sr 3,3\n"	/* %0 = H*65536 + L */
		" lr 2,%0\n"	/* %0 = H L, 2/3 = H L / 0 0 */
		" srdl 2,16\n"	/* %0 = H L, 2/3 = 0 H / L 0 */
		" alr 2,3\n"	/* %0 = H L, 2/3 = L H / L 0 */
		" alr %0,2\n"	/* %0 = H+L+C L+H */
		" srl %0,16\n"	/* %0 = H+L+C */
		: "+&d" (sum) : : "cc", "2", "3");
#endif /* __s390x__ */
	return (__force __sum16) ~sum;
}

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4-octet boundaries.
 */
f994aae1 | 110 | static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) |
1da177e4 | 111 | { |
94c12cc7 | 112 | return csum_fold(csum_partial(iph, ihl*4, 0)); |
1da177e4 LT |
113 | } |

/*
 * Computes the checksum of the TCP/UDP pseudo-header.
 * Returns a 32-bit checksum.
 */
f994aae1 AV |
119 | static inline __wsum |
120 | csum_tcpudp_nofold(__be32 saddr, __be32 daddr, | |
1da177e4 | 121 | unsigned short len, unsigned short proto, |
f994aae1 | 122 | __wsum sum) |
1da177e4 | 123 | { |
afbc1e99 HC |
124 | __u32 csum = (__force __u32)sum; |
125 | ||
126 | csum += (__force __u32)saddr; | |
127 | if (csum < (__force __u32)saddr) | |
128 | csum++; | |
129 | ||
130 | csum += (__force __u32)daddr; | |
131 | if (csum < (__force __u32)daddr) | |
132 | csum++; | |
133 | ||
134 | csum += len + proto; | |
135 | if (csum < len + proto) | |
136 | csum++; | |
137 | ||
138 | return (__force __wsum)csum; | |
1da177e4 LT |
139 | } |

/*
 * Computes the checksum of the TCP/UDP pseudo-header.
 * Returns a 16-bit checksum, already complemented.
 */

f994aae1 AV |
146 | static inline __sum16 |
147 | csum_tcpudp_magic(__be32 saddr, __be32 daddr, | |
1da177e4 | 148 | unsigned short len, unsigned short proto, |
f994aae1 | 149 | __wsum sum) |
1da177e4 LT |
150 | { |
151 | return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum)); | |
152 | } | |

/*
 * This routine is used for miscellaneous IP-like checksums, mainly
 * in icmp.c.
 */

f994aae1 | 159 | static inline __sum16 ip_compute_csum(const void *buff, int len) |
1da177e4 LT |
160 | { |
161 | return csum_fold(csum_partial(buff, len, 0)); | |
162 | } | |

#endif /* _S390_CHECKSUM_H */