Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[PATCH] m68k: cleanup inline mem functions

Use the builtin functions for memset/memclr/memcpy, special optimizations for
page operations have dedicated functions now. Uninline memmove/memchr and
move all functions into a single file and clean it up a little.

Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Roman Zippel; committed by Linus Torvalds.
072dffda 2855b970

+245 -555
-4
arch/m68k/kernel/m68k_ksyms.c
··· 74 74 EXPORT_SYMBOL(__ashldi3); 75 75 EXPORT_SYMBOL(__ashrdi3); 76 76 EXPORT_SYMBOL(__lshrdi3); 77 - EXPORT_SYMBOL(memcpy); 78 - EXPORT_SYMBOL(memset); 79 - EXPORT_SYMBOL(memcmp); 80 - EXPORT_SYMBOL(memscan); 81 77 EXPORT_SYMBOL(__muldi3); 82 78 83 79 EXPORT_SYMBOL(__down_failed);
+1 -1
arch/m68k/lib/Makefile
··· 5 5 EXTRA_AFLAGS := -traditional 6 6 7 7 lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \ 8 - checksum.o memcmp.o memcpy.o memset.o semaphore.o 8 + checksum.o string.o semaphore.o
-11
arch/m68k/lib/memcmp.c
··· 1 - #include <linux/types.h> 2 - 3 - int memcmp(const void * cs,const void * ct,size_t count) 4 - { 5 - const unsigned char *su1, *su2; 6 - 7 - for( su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) 8 - if (*su1 != *su2) 9 - return((*su1 < *su2) ? -1 : +1); 10 - return(0); 11 - }
-75
arch/m68k/lib/memcpy.c
··· 1 - #include <linux/types.h> 2 - 3 - void * memcpy(void * to, const void * from, size_t n) 4 - { 5 - void *xto = to; 6 - size_t temp, temp1; 7 - 8 - if (!n) 9 - return xto; 10 - if ((long) to & 1) 11 - { 12 - char *cto = to; 13 - const char *cfrom = from; 14 - *cto++ = *cfrom++; 15 - to = cto; 16 - from = cfrom; 17 - n--; 18 - } 19 - if (n > 2 && (long) to & 2) 20 - { 21 - short *sto = to; 22 - const short *sfrom = from; 23 - *sto++ = *sfrom++; 24 - to = sto; 25 - from = sfrom; 26 - n -= 2; 27 - } 28 - temp = n >> 2; 29 - if (temp) 30 - { 31 - long *lto = to; 32 - const long *lfrom = from; 33 - 34 - __asm__ __volatile__("movel %2,%3\n\t" 35 - "andw #7,%3\n\t" 36 - "lsrl #3,%2\n\t" 37 - "negw %3\n\t" 38 - "jmp %%pc@(1f,%3:w:2)\n\t" 39 - "4:\t" 40 - "movel %0@+,%1@+\n\t" 41 - "movel %0@+,%1@+\n\t" 42 - "movel %0@+,%1@+\n\t" 43 - "movel %0@+,%1@+\n\t" 44 - "movel %0@+,%1@+\n\t" 45 - "movel %0@+,%1@+\n\t" 46 - "movel %0@+,%1@+\n\t" 47 - "movel %0@+,%1@+\n\t" 48 - "1:\t" 49 - "dbra %2,4b\n\t" 50 - "clrw %2\n\t" 51 - "subql #1,%2\n\t" 52 - "jpl 4b\n\t" 53 - : "=a" (lfrom), "=a" (lto), "=d" (temp), 54 - "=&d" (temp1) 55 - : "0" (lfrom), "1" (lto), "2" (temp) 56 - ); 57 - to = lto; 58 - from = lfrom; 59 - } 60 - if (n & 2) 61 - { 62 - short *sto = to; 63 - const short *sfrom = from; 64 - *sto++ = *sfrom++; 65 - to = sto; 66 - from = sfrom; 67 - } 68 - if (n & 1) 69 - { 70 - char *cto = to; 71 - const char *cfrom = from; 72 - *cto = *cfrom; 73 - } 74 - return xto; 75 - }
-68
arch/m68k/lib/memset.c
··· 1 - #include <linux/types.h> 2 - 3 - void * memset(void * s, int c, size_t count) 4 - { 5 - void *xs = s; 6 - size_t temp, temp1; 7 - 8 - if (!count) 9 - return xs; 10 - c &= 0xff; 11 - c |= c << 8; 12 - c |= c << 16; 13 - if ((long) s & 1) 14 - { 15 - char *cs = s; 16 - *cs++ = c; 17 - s = cs; 18 - count--; 19 - } 20 - if (count > 2 && (long) s & 2) 21 - { 22 - short *ss = s; 23 - *ss++ = c; 24 - s = ss; 25 - count -= 2; 26 - } 27 - temp = count >> 2; 28 - if (temp) 29 - { 30 - long *ls = s; 31 - 32 - __asm__ __volatile__("movel %1,%2\n\t" 33 - "andw #7,%2\n\t" 34 - "lsrl #3,%1\n\t" 35 - "negw %2\n\t" 36 - "jmp %%pc@(2f,%2:w:2)\n\t" 37 - "1:\t" 38 - "movel %3,%0@+\n\t" 39 - "movel %3,%0@+\n\t" 40 - "movel %3,%0@+\n\t" 41 - "movel %3,%0@+\n\t" 42 - "movel %3,%0@+\n\t" 43 - "movel %3,%0@+\n\t" 44 - "movel %3,%0@+\n\t" 45 - "movel %3,%0@+\n\t" 46 - "2:\t" 47 - "dbra %1,1b\n\t" 48 - "clrw %1\n\t" 49 - "subql #1,%1\n\t" 50 - "jpl 1b\n\t" 51 - : "=a" (ls), "=d" (temp), "=&d" (temp1) 52 - : "d" (c), "0" (ls), "1" (temp) 53 - ); 54 - s = ls; 55 - } 56 - if (count & 2) 57 - { 58 - short *ss = s; 59 - *ss++ = c; 60 - s = ss; 61 - } 62 - if (count & 1) 63 - { 64 - char *cs = s; 65 - *cs = c; 66 - } 67 - return xs; 68 - }
+237
arch/m68k/lib/string.c
··· 1 + 2 + #include <linux/types.h> 3 + #include <linux/module.h> 4 + 5 + void *memset(void *s, int c, size_t count) 6 + { 7 + void *xs = s; 8 + size_t temp, temp1; 9 + 10 + if (!count) 11 + return xs; 12 + c &= 0xff; 13 + c |= c << 8; 14 + c |= c << 16; 15 + if ((long)s & 1) { 16 + char *cs = s; 17 + *cs++ = c; 18 + s = cs; 19 + count--; 20 + } 21 + if (count > 2 && (long)s & 2) { 22 + short *ss = s; 23 + *ss++ = c; 24 + s = ss; 25 + count -= 2; 26 + } 27 + temp = count >> 2; 28 + if (temp) { 29 + long *ls = s; 30 + 31 + asm volatile ( 32 + " movel %1,%2\n" 33 + " andw #7,%2\n" 34 + " lsrl #3,%1\n" 35 + " negw %2\n" 36 + " jmp %%pc@(2f,%2:w:2)\n" 37 + "1: movel %3,%0@+\n" 38 + " movel %3,%0@+\n" 39 + " movel %3,%0@+\n" 40 + " movel %3,%0@+\n" 41 + " movel %3,%0@+\n" 42 + " movel %3,%0@+\n" 43 + " movel %3,%0@+\n" 44 + " movel %3,%0@+\n" 45 + "2: dbra %1,1b\n" 46 + " clrw %1\n" 47 + " subql #1,%1\n" 48 + " jpl 1b" 49 + : "=a" (ls), "=d" (temp), "=&d" (temp1) 50 + : "d" (c), "0" (ls), "1" (temp)); 51 + s = ls; 52 + } 53 + if (count & 2) { 54 + short *ss = s; 55 + *ss++ = c; 56 + s = ss; 57 + } 58 + if (count & 1) { 59 + char *cs = s; 60 + *cs = c; 61 + } 62 + return xs; 63 + } 64 + EXPORT_SYMBOL(memset); 65 + 66 + void *memcpy(void *to, const void *from, size_t n) 67 + { 68 + void *xto = to; 69 + size_t temp, temp1; 70 + 71 + if (!n) 72 + return xto; 73 + if ((long)to & 1) { 74 + char *cto = to; 75 + const char *cfrom = from; 76 + *cto++ = *cfrom++; 77 + to = cto; 78 + from = cfrom; 79 + n--; 80 + } 81 + if (n > 2 && (long)to & 2) { 82 + short *sto = to; 83 + const short *sfrom = from; 84 + *sto++ = *sfrom++; 85 + to = sto; 86 + from = sfrom; 87 + n -= 2; 88 + } 89 + temp = n >> 2; 90 + if (temp) { 91 + long *lto = to; 92 + const long *lfrom = from; 93 + 94 + asm volatile ( 95 + " movel %2,%3\n" 96 + " andw #7,%3\n" 97 + " lsrl #3,%2\n" 98 + " negw %3\n" 99 + " jmp %%pc@(1f,%3:w:2)\n" 100 + "4: movel %0@+,%1@+\n" 101 + " movel %0@+,%1@+\n" 102 + " movel 
%0@+,%1@+\n" 103 + " movel %0@+,%1@+\n" 104 + " movel %0@+,%1@+\n" 105 + " movel %0@+,%1@+\n" 106 + " movel %0@+,%1@+\n" 107 + " movel %0@+,%1@+\n" 108 + "1: dbra %2,4b\n" 109 + " clrw %2\n" 110 + " subql #1,%2\n" 111 + " jpl 4b" 112 + : "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1) 113 + : "0" (lfrom), "1" (lto), "2" (temp)); 114 + to = lto; 115 + from = lfrom; 116 + } 117 + if (n & 2) { 118 + short *sto = to; 119 + const short *sfrom = from; 120 + *sto++ = *sfrom++; 121 + to = sto; 122 + from = sfrom; 123 + } 124 + if (n & 1) { 125 + char *cto = to; 126 + const char *cfrom = from; 127 + *cto = *cfrom; 128 + } 129 + return xto; 130 + } 131 + EXPORT_SYMBOL(memcpy); 132 + 133 + void *memmove(void *dest, const void *src, size_t n) 134 + { 135 + void *xdest = dest; 136 + size_t temp; 137 + 138 + if (!n) 139 + return xdest; 140 + 141 + if (dest < src) { 142 + if ((long)dest & 1) { 143 + char *cdest = dest; 144 + const char *csrc = src; 145 + *cdest++ = *csrc++; 146 + dest = cdest; 147 + src = csrc; 148 + n--; 149 + } 150 + if (n > 2 && (long)dest & 2) { 151 + short *sdest = dest; 152 + const short *ssrc = src; 153 + *sdest++ = *ssrc++; 154 + dest = sdest; 155 + src = ssrc; 156 + n -= 2; 157 + } 158 + temp = n >> 2; 159 + if (temp) { 160 + long *ldest = dest; 161 + const long *lsrc = src; 162 + temp--; 163 + do 164 + *ldest++ = *lsrc++; 165 + while (temp--); 166 + dest = ldest; 167 + src = lsrc; 168 + } 169 + if (n & 2) { 170 + short *sdest = dest; 171 + const short *ssrc = src; 172 + *sdest++ = *ssrc++; 173 + dest = sdest; 174 + src = ssrc; 175 + } 176 + if (n & 1) { 177 + char *cdest = dest; 178 + const char *csrc = src; 179 + *cdest = *csrc; 180 + } 181 + } else { 182 + dest = (char *)dest + n; 183 + src = (const char *)src + n; 184 + if ((long)dest & 1) { 185 + char *cdest = dest; 186 + const char *csrc = src; 187 + *--cdest = *--csrc; 188 + dest = cdest; 189 + src = csrc; 190 + n--; 191 + } 192 + if (n > 2 && (long)dest & 2) { 193 + short *sdest = dest; 194 
+ const short *ssrc = src; 195 + *--sdest = *--ssrc; 196 + dest = sdest; 197 + src = ssrc; 198 + n -= 2; 199 + } 200 + temp = n >> 2; 201 + if (temp) { 202 + long *ldest = dest; 203 + const long *lsrc = src; 204 + temp--; 205 + do 206 + *--ldest = *--lsrc; 207 + while (temp--); 208 + dest = ldest; 209 + src = lsrc; 210 + } 211 + if (n & 2) { 212 + short *sdest = dest; 213 + const short *ssrc = src; 214 + *--sdest = *--ssrc; 215 + dest = sdest; 216 + src = ssrc; 217 + } 218 + if (n & 1) { 219 + char *cdest = dest; 220 + const char *csrc = src; 221 + *--cdest = *--csrc; 222 + } 223 + } 224 + return xdest; 225 + } 226 + EXPORT_SYMBOL(memmove); 227 + 228 + int memcmp(const void *cs, const void *ct, size_t count) 229 + { 230 + const unsigned char *su1, *su2; 231 + 232 + for (su1 = cs, su2 = ct; count > 0; ++su1, ++su2, count--) 233 + if (*su1 != *su2) 234 + return *su1 < *su2 ? -1 : +1; 235 + return 0; 236 + } 237 + EXPORT_SYMBOL(memcmp);
+7 -396
include/asm-m68k/string.h
··· 80 80 return( (char *) s); 81 81 } 82 82 83 - #if 0 84 - #define __HAVE_ARCH_STRPBRK 85 - static inline char *strpbrk(const char *cs,const char *ct) 86 - { 87 - const char *sc1,*sc2; 88 - 89 - for( sc1 = cs; *sc1 != '\0'; ++sc1) 90 - for( sc2 = ct; *sc2 != '\0'; ++sc2) 91 - if (*sc1 == *sc2) 92 - return((char *) sc1); 93 - return( NULL ); 94 - } 95 - #endif 96 - 97 - #if 0 98 - #define __HAVE_ARCH_STRSPN 99 - static inline size_t strspn(const char *s, const char *accept) 100 - { 101 - const char *p; 102 - const char *a; 103 - size_t count = 0; 104 - 105 - for (p = s; *p != '\0'; ++p) 106 - { 107 - for (a = accept; *a != '\0'; ++a) 108 - if (*p == *a) 109 - break; 110 - if (*a == '\0') 111 - return count; 112 - else 113 - ++count; 114 - } 115 - 116 - return count; 117 - } 118 - #endif 119 - 120 83 /* strstr !! */ 121 84 122 85 #define __HAVE_ARCH_STRLEN ··· 136 173 } 137 174 138 175 #define __HAVE_ARCH_MEMSET 139 - /* 140 - * This is really ugly, but its highly optimizatiable by the 141 - * compiler and is meant as compensation for gcc's missing 142 - * __builtin_memset(). For the 680[23]0 it might be worth considering 143 - * the optimal number of misaligned writes compared to the number of 144 - * tests'n'branches needed to align the destination address. The 145 - * 680[46]0 doesn't really care due to their copy-back caches. 
146 - * 10/09/96 - Jes Sorensen 147 - */ 148 - static inline void * __memset_g(void * s, int c, size_t count) 149 - { 150 - void *xs = s; 151 - size_t temp; 152 - 153 - if (!count) 154 - return xs; 155 - 156 - c &= 0xff; 157 - c |= c << 8; 158 - c |= c << 16; 159 - 160 - if (count < 36){ 161 - long *ls = s; 162 - 163 - switch(count){ 164 - case 32: case 33: case 34: case 35: 165 - *ls++ = c; 166 - case 28: case 29: case 30: case 31: 167 - *ls++ = c; 168 - case 24: case 25: case 26: case 27: 169 - *ls++ = c; 170 - case 20: case 21: case 22: case 23: 171 - *ls++ = c; 172 - case 16: case 17: case 18: case 19: 173 - *ls++ = c; 174 - case 12: case 13: case 14: case 15: 175 - *ls++ = c; 176 - case 8: case 9: case 10: case 11: 177 - *ls++ = c; 178 - case 4: case 5: case 6: case 7: 179 - *ls++ = c; 180 - break; 181 - default: 182 - break; 183 - } 184 - s = ls; 185 - if (count & 0x02){ 186 - short *ss = s; 187 - *ss++ = c; 188 - s = ss; 189 - } 190 - if (count & 0x01){ 191 - char *cs = s; 192 - *cs++ = c; 193 - s = cs; 194 - } 195 - return xs; 196 - } 197 - 198 - if ((long) s & 1) 199 - { 200 - char *cs = s; 201 - *cs++ = c; 202 - s = cs; 203 - count--; 204 - } 205 - if (count > 2 && (long) s & 2) 206 - { 207 - short *ss = s; 208 - *ss++ = c; 209 - s = ss; 210 - count -= 2; 211 - } 212 - temp = count >> 2; 213 - if (temp) 214 - { 215 - long *ls = s; 216 - temp--; 217 - do 218 - *ls++ = c; 219 - while (temp--); 220 - s = ls; 221 - } 222 - if (count & 2) 223 - { 224 - short *ss = s; 225 - *ss++ = c; 226 - s = ss; 227 - } 228 - if (count & 1) 229 - { 230 - char *cs = s; 231 - *cs = c; 232 - } 233 - return xs; 234 - } 235 - 236 - /* 237 - * __memset_page assumes that data is longword aligned. Most, if not 238 - * all, of these page sized memsets are performed on page aligned 239 - * areas, thus we do not need to check if the destination is longword 240 - * aligned. 
Of course we suffer a serious performance loss if this is 241 - * not the case but I think the risk of this ever happening is 242 - * extremely small. We spend a lot of time clearing pages in 243 - * get_empty_page() so I think it is worth it anyway. Besides, the 244 - * 680[46]0 do not really care about misaligned writes due to their 245 - * copy-back cache. 246 - * 247 - * The optimized case for the 680[46]0 is implemented using the move16 248 - * instruction. My tests showed that this implementation is 35-45% 249 - * faster than the original implementation using movel, the only 250 - * caveat is that the destination address must be 16-byte aligned. 251 - * 01/09/96 - Jes Sorensen 252 - */ 253 - static inline void * __memset_page(void * s,int c,size_t count) 254 - { 255 - unsigned long data, tmp; 256 - void *xs = s; 257 - 258 - c = c & 255; 259 - data = c | (c << 8); 260 - data |= data << 16; 261 - 262 - #ifdef CPU_M68040_OR_M68060_ONLY 263 - 264 - if (((unsigned long) s) & 0x0f) 265 - __memset_g(s, c, count); 266 - else{ 267 - unsigned long *sp = s; 268 - *sp++ = data; 269 - *sp++ = data; 270 - *sp++ = data; 271 - *sp++ = data; 272 - 273 - __asm__ __volatile__("1:\t" 274 - ".chip 68040\n\t" 275 - "move16 %2@+,%0@+\n\t" 276 - ".chip 68k\n\t" 277 - "subqw #8,%2\n\t" 278 - "subqw #8,%2\n\t" 279 - "dbra %1,1b\n\t" 280 - : "=a" (sp), "=d" (tmp) 281 - : "a" (s), "0" (sp), "1" ((count - 16) / 16 - 1) 282 - ); 283 - } 284 - 285 - #else 286 - __asm__ __volatile__("1:\t" 287 - "movel %2,%0@+\n\t" 288 - "movel %2,%0@+\n\t" 289 - "movel %2,%0@+\n\t" 290 - "movel %2,%0@+\n\t" 291 - "movel %2,%0@+\n\t" 292 - "movel %2,%0@+\n\t" 293 - "movel %2,%0@+\n\t" 294 - "movel %2,%0@+\n\t" 295 - "dbra %1,1b\n\t" 296 - : "=a" (s), "=d" (tmp) 297 - : "d" (data), "0" (s), "1" (count / 32 - 1) 298 - ); 299 - #endif 300 - 301 - return xs; 302 - } 303 - 304 - extern void *memset(void *,int,__kernel_size_t); 305 - 306 - #define __memset_const(s,c,count) \ 307 - ((count==PAGE_SIZE) ? 
\ 308 - __memset_page((s),(c),(count)) : \ 309 - __memset_g((s),(c),(count))) 310 - 311 - #define memset(s, c, count) \ 312 - (__builtin_constant_p(count) ? \ 313 - __memset_const((s),(c),(count)) : \ 314 - __memset_g((s),(c),(count))) 176 + extern void *memset(void *, int, __kernel_size_t); 177 + #define memset(d, c, n) __builtin_memset(d, c, n) 315 178 316 179 #define __HAVE_ARCH_MEMCPY 317 - extern void * memcpy(void *, const void *, size_t ); 318 - /* 319 - * __builtin_memcpy() does not handle page-sized memcpys very well, 320 - * thus following the same assumptions as for page-sized memsets, this 321 - * function copies page-sized areas using an unrolled loop, without 322 - * considering alignment. 323 - * 324 - * For the 680[46]0 only kernels we use the move16 instruction instead 325 - * as it writes through the data-cache, invalidating the cache-lines 326 - * touched. In this way we do not use up the entire data-cache (well, 327 - * half of it on the 68060) by copying a page. An unrolled loop of two 328 - * move16 instructions seem to the fastest. The only caveat is that 329 - * both source and destination must be 16-byte aligned, if not we fall 330 - * back to the generic memcpy function. 
- Jes 331 - */ 332 - static inline void * __memcpy_page(void * to, const void * from, size_t count) 333 - { 334 - unsigned long tmp; 335 - void *xto = to; 336 - 337 - #ifdef CPU_M68040_OR_M68060_ONLY 338 - 339 - if (((unsigned long) to | (unsigned long) from) & 0x0f) 340 - return memcpy(to, from, count); 341 - 342 - __asm__ __volatile__("1:\t" 343 - ".chip 68040\n\t" 344 - "move16 %1@+,%0@+\n\t" 345 - "move16 %1@+,%0@+\n\t" 346 - ".chip 68k\n\t" 347 - "dbra %2,1b\n\t" 348 - : "=a" (to), "=a" (from), "=d" (tmp) 349 - : "0" (to), "1" (from) , "2" (count / 32 - 1) 350 - ); 351 - #else 352 - __asm__ __volatile__("1:\t" 353 - "movel %1@+,%0@+\n\t" 354 - "movel %1@+,%0@+\n\t" 355 - "movel %1@+,%0@+\n\t" 356 - "movel %1@+,%0@+\n\t" 357 - "movel %1@+,%0@+\n\t" 358 - "movel %1@+,%0@+\n\t" 359 - "movel %1@+,%0@+\n\t" 360 - "movel %1@+,%0@+\n\t" 361 - "dbra %2,1b\n\t" 362 - : "=a" (to), "=a" (from), "=d" (tmp) 363 - : "0" (to), "1" (from) , "2" (count / 32 - 1) 364 - ); 365 - #endif 366 - return xto; 367 - } 368 - 369 - #define __memcpy_const(to, from, n) \ 370 - ((n==PAGE_SIZE) ? \ 371 - __memcpy_page((to),(from),(n)) : \ 372 - __builtin_memcpy((to),(from),(n))) 373 - 374 - #define memcpy(to, from, n) \ 375 - (__builtin_constant_p(n) ? 
\ 376 - __memcpy_const((to),(from),(n)) : \ 377 - memcpy((to),(from),(n))) 180 + extern void *memcpy(void *, const void *, __kernel_size_t); 181 + #define memcpy(d, s, n) __builtin_memcpy(d, s, n) 378 182 379 183 #define __HAVE_ARCH_MEMMOVE 380 - static inline void * memmove(void * dest,const void * src, size_t n) 381 - { 382 - void *xdest = dest; 383 - size_t temp; 384 - 385 - if (!n) 386 - return xdest; 387 - 388 - if (dest < src) 389 - { 390 - if ((long) dest & 1) 391 - { 392 - char *cdest = dest; 393 - const char *csrc = src; 394 - *cdest++ = *csrc++; 395 - dest = cdest; 396 - src = csrc; 397 - n--; 398 - } 399 - if (n > 2 && (long) dest & 2) 400 - { 401 - short *sdest = dest; 402 - const short *ssrc = src; 403 - *sdest++ = *ssrc++; 404 - dest = sdest; 405 - src = ssrc; 406 - n -= 2; 407 - } 408 - temp = n >> 2; 409 - if (temp) 410 - { 411 - long *ldest = dest; 412 - const long *lsrc = src; 413 - temp--; 414 - do 415 - *ldest++ = *lsrc++; 416 - while (temp--); 417 - dest = ldest; 418 - src = lsrc; 419 - } 420 - if (n & 2) 421 - { 422 - short *sdest = dest; 423 - const short *ssrc = src; 424 - *sdest++ = *ssrc++; 425 - dest = sdest; 426 - src = ssrc; 427 - } 428 - if (n & 1) 429 - { 430 - char *cdest = dest; 431 - const char *csrc = src; 432 - *cdest = *csrc; 433 - } 434 - } 435 - else 436 - { 437 - dest = (char *) dest + n; 438 - src = (const char *) src + n; 439 - if ((long) dest & 1) 440 - { 441 - char *cdest = dest; 442 - const char *csrc = src; 443 - *--cdest = *--csrc; 444 - dest = cdest; 445 - src = csrc; 446 - n--; 447 - } 448 - if (n > 2 && (long) dest & 2) 449 - { 450 - short *sdest = dest; 451 - const short *ssrc = src; 452 - *--sdest = *--ssrc; 453 - dest = sdest; 454 - src = ssrc; 455 - n -= 2; 456 - } 457 - temp = n >> 2; 458 - if (temp) 459 - { 460 - long *ldest = dest; 461 - const long *lsrc = src; 462 - temp--; 463 - do 464 - *--ldest = *--lsrc; 465 - while (temp--); 466 - dest = ldest; 467 - src = lsrc; 468 - } 469 - if (n & 2) 470 - { 471 - 
short *sdest = dest; 472 - const short *ssrc = src; 473 - *--sdest = *--ssrc; 474 - dest = sdest; 475 - src = ssrc; 476 - } 477 - if (n & 1) 478 - { 479 - char *cdest = dest; 480 - const char *csrc = src; 481 - *--cdest = *--csrc; 482 - } 483 - } 484 - return xdest; 485 - } 184 + extern void *memmove(void *, const void *, __kernel_size_t); 486 185 487 186 #define __HAVE_ARCH_MEMCMP 488 - extern int memcmp(const void * ,const void * ,size_t ); 489 - #define memcmp(cs, ct, n) \ 490 - (__builtin_constant_p(n) ? \ 491 - __builtin_memcmp((cs),(ct),(n)) : \ 492 - memcmp((cs),(ct),(n))) 493 - 494 - #define __HAVE_ARCH_MEMCHR 495 - static inline void *memchr(const void *cs, int c, size_t count) 496 - { 497 - /* Someone else can optimize this, I don't care - tonym@mac.linux-m68k.org */ 498 - unsigned char *ret = (unsigned char *)cs; 499 - for(;count>0;count--,ret++) 500 - if(*ret == c) return ret; 501 - 502 - return NULL; 503 - } 187 + extern int memcmp(const void *, const void *, __kernel_size_t); 188 + #define memcmp(d, s, n) __builtin_memcmp(d, s, n) 504 189 505 190 #endif /* _M68K_STRING_H_ */