/*
 *  linux/arch/arm26/lib/csumpartial.S
 *
 *  Copyright (C) 1995-1998 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>

		.text

/*
 * Function: __u32 csum_partial(const char *src, int len, __u32 sum)
 * Params  : r0 = buffer, r1 = len, r2 = checksum
 * Returns : r0 = new checksum
 */
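
/*
 * The partial checksum is the 32-bit accumulator of the IP
 * one's-complement sum: each word is added with adcs so that the
 * carry out of one addition is folded into the next, and the final
 * adc at .done collects the last carry.  The caller is responsible
 * for folding the 32-bit result down to 16 bits.
 */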

buf	.req	r0
len	.req	r1
sum	.req	r2
td0	.req	r3
td1	.req	r4	@ save before use
td2	.req	r5	@ save before use
td3	.req	lr

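/*
 * Zero bytes to sum: the checksum is unchanged.  The entry code
 * pushed {buf, lr}, so drop the saved buf and return through the
 * saved lr.
 */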
.zero:		mov	r0, sum
		add	sp, sp, #4
		ldr	pc, [sp], #4

/*
 * Handle 0 to 7 bytes, with any alignment of the buffer
 * pointer.  Note that when we get here, C = 0.
 */
.less8:		teq	len, #0			@ check for zero count
		beq	.zero

/* we must have at least one byte. */
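/*
 * An odd start address puts this first byte into the second byte
 * lane of its 16-bit word (the byte() macro selects the lane); the
 * rotate at .done restores buffer byte order afterwards.
 */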
		tst	buf, #1			@ odd address?
		ldrneb	td0, [buf], #1
		subne	len, len, #1
		adcnes	sum, sum, td0, lsl #byte(1)

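/* Bits 1-2 of len count the remaining halfwords; bit 0 a trailing byte. */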
.less4:		tst	len, #6
		beq	.less8_byte

/* we are now half-word aligned */

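/*
 * ldrh is only available from ARMv4; on earlier architectures each
 * halfword is assembled from two byte loads.
 */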
.less8_wordlp:
#if __LINUX_ARM_ARCH__ >= 4
		ldrh	td0, [buf], #2
		sub	len, len, #2
#else
		ldrb	td0, [buf], #1
		ldrb	td3, [buf], #1
		sub	len, len, #2
		orr	td0, td0, td3, lsl #8
#endif
		adcs	sum, sum, td0
		tst	len, #6
		bne	.less8_wordlp

.less8_byte:	tst	len, #1			@ odd number of bytes
		ldrneb	td0, [buf], #1		@ include last byte
		adcnes	sum, sum, td0, lsl #byte(0)	@ update checksum

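/*
 * The buf pointer saved on entry records the original alignment:
 * if the buffer started on an odd address, the sum was accumulated
 * one byte lane out, so rotating the 32-bit result by 8 bits puts
 * it back into buffer byte order.
 */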
.done:		adc	r0, sum, #0		@ collect up the last carry
		ldr	td0, [sp], #4
		tst	td0, #1			@ check buffer alignment
		movne	td0, r0, lsl #8		@ rotate checksum by 8 bits
		orrne	r0, td0, r0, lsr #24
		ldr	pc, [sp], #4		@ return

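/*
 * Advance buf to a 32-bit boundary, folding the skipped leading
 * byte and/or halfword into the sum.  Entered with C = 0 via blne;
 * returns through lr with the carry chain intact.
 */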
.not_aligned:	tst	buf, #1			@ odd address
		ldrneb	td0, [buf], #1		@ make even
		subne	len, len, #1
		adcnes	sum, sum, td0, lsl #byte(1)	@ update checksum

		tst	buf, #2			@ 32-bit aligned?
#if __LINUX_ARM_ARCH__ >= 4
		ldrneh	td0, [buf], #2		@ make 32-bit aligned
		subne	len, len, #2
#else
		ldrneb	td0, [buf], #1
		ldrneb	ip, [buf], #1
		subne	len, len, #2
		orrne	td0, td0, ip, lsl #8
#endif
		adcnes	sum, sum, td0		@ update checksum
		mov	pc, lr

ENTRY(csum_partial)
		stmfd	sp!, {buf, lr}
		cmp	len, #8			@ Ensure that we have at least
		blo	.less8			@ 8 bytes to sum.

		adds	sum, sum, #0		@ C = 0
		tst	buf, #3			@ Test buffer alignment
		blne	.not_aligned		@ align buffer, return here

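/*
 * Main loop: sum 32 bytes per iteration.  td1/td2 (r4/r5) are
 * callee-saved, so they are only stacked while the unrolled loop
 * runs.  sub/teq is used rather than subs so that C, which threads
 * through every adcs, is left untouched.
 */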
1:		bics	ip, len, #31
		beq	3f

		stmfd	sp!, {r4 - r5}
2:		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		sub	ip, ip, #32
		teq	ip, #0
		bne	2b
		ldmfd	sp!, {r4 - r5}

3:		tst	len, #0x1c		@ should not change C
		beq	.less4

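/* 4 to 28 bytes remain: sum the whole words one at a time. */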
4:		ldr	td0, [buf], #4
		sub	len, len, #4
		adcs	sum, sum, td0
		tst	len, #0x1c
		bne	4b
		b	.less4