arch/microblaze/lib/memmove.c

/*
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2007 John Williams
 *
 * Reasonably optimised generic C-code for memmove on Microblaze
 * This is generic C code to do efficient, alignment-aware memmove.
 *
 * It is based on demo code originally Copyright 2001 by Intel Corp, taken from
 * http://www.embedded.com/showArticle.jhtml?articleID=19205567
 *
 * Attempts were made, unsuccessfully, to contact the original
 * author of this code (Michael Morrow, Intel). Below is the original
 * copyright notice.
 *
 * This software has been developed by Intel Corporation.
 * Intel specifically disclaims all warranties, express or
 * implied, and all liability, including consequential and
 * other indirect damages, for the use of this program, including
 * liability for infringement of any proprietary rights,
 * and including the warranties of merchantability and fitness
 * for a particular purpose. Intel does not assume any
 * responsibility for and errors which may appear in this program
 * not any responsibility to update it.
 */

#include <linux/export.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/compiler.h>
#include <linux/string.h>

#ifdef __HAVE_ARCH_MEMMOVE
#ifndef CONFIG_OPT_LIB_FUNCTION
void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
{
	const char *src = v_src;
	char *dst = v_dst;

	if (!c)
		return v_dst;

	/* Use memcpy when source is higher than dest */
	if (v_dst <= v_src)
		return memcpy(v_dst, v_src, c);

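	/*
	 * For overlapping regions the copy direction matters: e.g. with
	 * v_src = p, v_dst = p + 2 and c = 8, a forward copy would
	 * overwrite p[2]..p[5] before they are read.  Since v_dst is
	 * above v_src here, copy from the end of the region back to the
	 * beginning so every byte is read before its slot is clobbered.
	 */
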
	/* copy backwards, from end to beginning */
	src += c;
	dst += c;

	/* Simple, byte oriented memmove. */
	while (c--)
		*--dst = *--src;

	return v_dst;
}
#else /* CONFIG_OPT_LIB_FUNCTION */
void *memmove(void *v_dst, const void *v_src, __kernel_size_t c)
{
	const char *src = v_src;
	char *dst = v_dst;
	const uint32_t *i_src;
	uint32_t *i_dst;

	if (!c)
		return v_dst;

	/* Use memcpy when source is higher than dest */
	if (v_dst <= v_src)
		return memcpy(v_dst, v_src, c);

	/* The following code tries to optimize the copy by using unsigned
	 * alignment. This will work fine if both source and destination are
	 * aligned on the same boundary. However, if they are aligned on
	 * different boundaries shifts will be necessary. This might result in
	 * bad performance on MicroBlaze systems without a barrel shifter.
	 */
	/* FIXME this part needs more test */
	/* Do a descending copy - this is a bit trickier! */
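	/*
	 * Sketch of the scheme below: once dst is word aligned, each
	 * 32-bit destination word is assembled from two neighbouring
	 * source words.  buf_hold carries the byte(s) still needed from
	 * the previously read (higher-addressed) source word and is
	 * merged, via shifts, with bytes of the word read next, e.g. in
	 * the little-endian off-by-1 case:
	 *
	 *	*--i_dst = buf_hold | ((value & 0xFFFFFF00) >> 8);
	 *
	 * so the bulk of the copy proceeds as aligned word loads and
	 * stores instead of individual byte accesses.
	 */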
	dst += c;
	src += c;

	if (c >= 4) {
		unsigned value, buf_hold;

		/* Align the destination to a word boundary. */
		/* This is done in an endian independent manner. */

		switch ((unsigned long)dst & 3) {
		case 3:
			*--dst = *--src;
			--c;
			/* fall through */
		case 2:
			*--dst = *--src;
			--c;
			/* fall through */
		case 1:
			*--dst = *--src;
			--c;
		}

		i_dst = (void *)dst;
		/* Choose a copy scheme based on the source */
		/* alignment relative to destination. */
		switch ((unsigned long)src & 3) {
		case 0x0:	/* Both byte offsets are aligned */

			i_src = (const void *)src;

			for (; c >= 4; c -= 4)
				*--i_dst = *--i_src;

			src = (const void *)i_src;
			break;
		case 0x1:	/* Unaligned - Off by 1 */
			/* Word align the source */
			i_src = (const void *) (((unsigned)src + 4) & ~3);
#ifndef __MICROBLAZEEL__
			/* Load the holding buffer */
			buf_hold = *--i_src >> 24;

			for (; c >= 4; c -= 4) {
				value = *--i_src;
				*--i_dst = buf_hold << 8 | value;
				buf_hold = value >> 24;
			}
#else
			/* Load the holding buffer */
			buf_hold = (*--i_src & 0xFF) << 24;

			for (; c >= 4; c -= 4) {
				value = *--i_src;
				*--i_dst = buf_hold |
						((value & 0xFFFFFF00) >> 8);
				buf_hold = (value & 0xFF) << 24;
			}
#endif
			/* Realign the source */
			src = (const void *)i_src;
			src += 1;
			break;
		case 0x2:	/* Unaligned - Off by 2 */
			/* Word align the source */
			i_src = (const void *) (((unsigned)src + 4) & ~3);
#ifndef __MICROBLAZEEL__
			/* Load the holding buffer */
			buf_hold = *--i_src >> 16;

			for (; c >= 4; c -= 4) {
				value = *--i_src;
				*--i_dst = buf_hold << 16 | value;
				buf_hold = value >> 16;
			}
#else
			/* Load the holding buffer */
			buf_hold = (*--i_src & 0xFFFF) << 16;

			for (; c >= 4; c -= 4) {
				value = *--i_src;
				*--i_dst = buf_hold |
						((value & 0xFFFF0000) >> 16);
				buf_hold = (value & 0xFFFF) << 16;
			}
#endif
			/* Realign the source */
			src = (const void *)i_src;
			src += 2;
			break;
		case 0x3:	/* Unaligned - Off by 3 */
			/* Word align the source */
			i_src = (const void *) (((unsigned)src + 4) & ~3);
#ifndef __MICROBLAZEEL__
			/* Load the holding buffer */
			buf_hold = *--i_src >> 8;

			for (; c >= 4; c -= 4) {
				value = *--i_src;
				*--i_dst = buf_hold << 24 | value;
				buf_hold = value >> 8;
			}
#else
			/* Load the holding buffer */
			buf_hold = (*--i_src & 0xFFFFFF) << 8;

			for (; c >= 4; c -= 4) {
				value = *--i_src;
				*--i_dst = buf_hold |
						((value & 0xFF000000) >> 24);
				buf_hold = (value & 0xFFFFFF) << 8;
			}
#endif
			/* Realign the source */
			src = (const void *)i_src;
			src += 3;
			break;
		}
		dst = (void *)i_dst;
	}

	/* simple fast copy, ... unless a cache boundary is crossed */
	/* Finish off any remaining bytes */
	switch (c) {
	case 4:
		*--dst = *--src;
		/* fall through */
	case 3:
		*--dst = *--src;
		/* fall through */
	case 2:
		*--dst = *--src;
		/* fall through */
	case 1:
		*--dst = *--src;
	}
	return v_dst;
}
#endif /* CONFIG_OPT_LIB_FUNCTION */
EXPORT_SYMBOL(memmove);
#endif /* __HAVE_ARCH_MEMMOVE */