// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		MIPS specific IP/TCP/UDP checksumming routines
 *
 * Authors:	Ralf Baechle, <ralf@waldorf-gmbh.de>
 *		Lots of code moved from tcp.c and ip.c; see those files
 *		for more names.
 */
#include <linux/module.h>
#include <linux/types.h>

#include <net/checksum.h>
#include <asm/byteorder.h>
#include <asm/string.h>
#include <linux/uaccess.h>

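/*
 * Accumulate one word into a running sum with end-around carry:
 * "add" sets the carry bit and the following "addc" adds %r0 (zero)
 * plus that carry back into the low bits, as required for the
 * Internet (one's complement) sum.
 */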
#define addc(_t,_r)				\
	__asm__ __volatile__ (			\
"	add		%0, %1, %0\n"		\
"	addc		%0, %%r0, %0\n"		\
	: "=r"(_t)				\
	: "r"(_r), "0"(_t));

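/*
 * Example: from32to16(0x1ffff) gives 0xffff + 0x1 = 0x10000 on the
 * first fold, then 0x0000 + 0x1 = 0x0001 on the second, so a carry
 * produced by the first fold is never lost.
 */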
static inline unsigned short from32to16(unsigned int x)
{
	/* 32 bits --> 16 bits + carry */
	x = (x & 0xffff) + (x >> 16);
	/* 16 bits + carry --> 16 bits including carry */
	x = (x & 0xffff) + (x >> 16);
	return (unsigned short)x;
}

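/*
 * Compute the one's complement sum of a byte buffer: handle a leading
 * byte at an odd address, align to a 32-bit boundary, run the unrolled
 * word loop, then pick up a trailing halfword and byte.  If the buffer
 * started on an odd address the result is byte-swapped at the end.
 */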
static inline unsigned int do_csum(const unsigned char *buff, int len)
{
	int odd, count;
	unsigned int result = 0;

	if (len <= 0)
		goto out;
	odd = 1 & (unsigned long) buff;
	if (odd) {
		result = be16_to_cpu(*buff);
		len--;
		buff++;
	}
	count = len >> 1;		/* nr of 16-bit words.. */
	if (count) {
		if (2 & (unsigned long) buff) {
			result += *(unsigned short *) buff;
			count--;
			len -= 2;
			buff += 2;
		}
		count >>= 1;		/* nr of 32-bit words.. */
		if (count) {
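			/*
			 * Main loop: consume four 32-bit words (16 bytes)
			 * per iteration; addc() folds each carry straight
			 * back into the running sum.
			 */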
			while (count >= 4) {
				unsigned int r1, r2, r3, r4;
				r1 = *(unsigned int *)(buff + 0);
				r2 = *(unsigned int *)(buff + 4);
				r3 = *(unsigned int *)(buff + 8);
				r4 = *(unsigned int *)(buff + 12);
				addc(result, r1);
				addc(result, r2);
				addc(result, r3);
				addc(result, r4);
				count -= 4;
				buff += 16;
			}
			while (count) {
				unsigned int w = *(unsigned int *) buff;
				count--;
				buff += 4;
				addc(result, w);
			}
			result = (result & 0xffff) + (result >> 16);
		}
		if (len & 2) {
			result += *(unsigned short *) buff;
			buff += 2;
		}
	}
	if (len & 1)
		result += le16_to_cpu(*buff);
	result = from32to16(result);
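	/*
	 * A sum taken from an odd starting address comes out byte-swapped
	 * relative to the aligned sum, so swap the folded 16-bit result
	 * back (cf. RFC 1071).
	 */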
	if (odd)
		result = swab16(result);
out:
	return result;
}

/*
 * computes a partial checksum, e.g. for TCP/UDP fragments
 */
/*
 * why bother folding?
 */
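/*
 * Callers usually accumulate csum_partial() results across several
 * buffers and only fold down to the final 16-bit Internet checksum
 * with csum_fold() at the end.
 */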
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
	unsigned int result = do_csum(buff, len);
	addc(result, sum);
	return (__force __wsum)from32to16(result);
}

EXPORT_SYMBOL(csum_partial);

/*
 * copy while checksumming, otherwise like csum_partial
 */
__wsum csum_partial_copy_nocheck(const void *src, void *dst,
				 int len, __wsum sum)
{
	/*
	 * It's 2:30 am and I don't feel like doing it real ...
	 * This is lots slower than the real thing (tm)
	 */
	sum = csum_partial(src, len, sum);
	memcpy(dst, src, len);

	return sum;
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);