arch/x86/include/asm/string_64.h at v6.2:
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Even with __builtin_ the compiler may decide to use the out-of-line
   function. */

#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#include <linux/kmsan_string.h>
#endif

#define __HAVE_ARCH_MEMCPY 1
#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#undef memcpy
#define memcpy __msan_memcpy
#else
extern void *memcpy(void *to, const void *from, size_t len);
#endif
extern void *__memcpy(void *to, const void *from, size_t len);

#define __HAVE_ARCH_MEMSET
#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
extern void *__msan_memset(void *s, int c, size_t n);
#undef memset
#define memset __msan_memset
#else
void *memset(void *s, int c, size_t n);
#endif
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMMOVE
#if defined(__SANITIZE_MEMORY__) && defined(__NO_FORTIFY)
#undef memmove
void *__msan_memmove(void *dest, const void *src, size_t len);
#define memmove __msan_memmove
#else
void *memmove(void *dest, const void *src, size_t count);
#endif
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if (defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__))
/*
 * For files that are not instrumented (e.g. mm/slub.c) we
 * should use the not-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#undef memmove
#define memmove(dst, src, len) __memmove(dst, src, len)
#undef memset
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
		case 4:
			asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
			return;
		case 8:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			return;
		case 16:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
			return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */
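
As an illustrative aside, and not part of string_64.h itself: a minimal sketch of how the memset16()/memset32() helpers declared above might be called from kernel code, for example to fill small pixel buffers with a constant value. Callers normally include <linux/string.h>, which picks up these arch overrides via __HAVE_ARCH_MEMSET16/__HAVE_ARCH_MEMSET32; the demo_* names below are made up for this sketch.

/* Hypothetical example, not from the kernel tree. */
#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/string.h>	/* memset16()/memset32(), via the arch overrides above */
#include <linux/types.h>

static u32 demo_xrgb[64 * 64];		/* made-up 64x64 XRGB8888 buffer */
static u16 demo_rgb565[64 * 64];	/* made-up 64x64 RGB565 buffer */

static void demo_fill(u32 xrgb, u16 rgb565)
{
	/* On x86-64 each call expands to a single "rep stosl" / "rep stosw". */
	memset32(demo_xrgb, xrgb, ARRAY_SIZE(demo_xrgb));
	memset16(demo_rgb565, rgb565, ARRAY_SIZE(demo_rgb565));
}

Note that, unlike memset(), the count argument of these helpers is in 16-, 32-, or 64-bit elements, not bytes.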