arch/x86/include/asm/string_64.h, from the Linux kernel mirror at git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_STRING_64_H
#define _ASM_X86_STRING_64_H

#ifdef __KERNEL__
#include <linux/jump_label.h>

/* Written 2002 by Andi Kleen */

/* Even with __builtin_ the compiler may decide to use the out of line
   function. */

#define __HAVE_ARCH_MEMCPY 1
extern void *memcpy(void *to, const void *from, size_t len);
extern void *__memcpy(void *to, const void *from, size_t len);

#ifndef CONFIG_FORTIFY_SOURCE
#if (__GNUC__ == 4 && __GNUC_MINOR__ < 3) || __GNUC__ < 4
#define memcpy(dst, src, len)					\
({								\
	size_t __len = (len);					\
	void *__ret;						\
	if (__builtin_constant_p(len) && __len >= 64)		\
		__ret = __memcpy((dst), (src), __len);		\
	else							\
		__ret = __builtin_memcpy((dst), (src), __len);	\
	__ret;							\
})
#endif
#endif /* !CONFIG_FORTIFY_SOURCE */
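
/*
 * Illustrative sketch (not part of the upstream header): with very old GCC
 * (< 4.3) and without CONFIG_FORTIFY_SOURCE, the macro above routes a
 * memcpy() whose length is a compile-time constant of 64 bytes or more to
 * the out-of-line __memcpy(), while smaller or variable lengths go through
 * __builtin_memcpy(). The function and buffer names below are hypothetical.
 */
#if 0	/* example only, not built */
static void example_memcpy_dispatch(void *dst, const void *src, size_t n)
{
	char big_dst[128], big_src[128] = { 0 };

	memcpy(big_dst, big_src, sizeof(big_src));	/* constant >= 64: __memcpy() */
	memcpy(dst, src, n);				/* variable length: __builtin_memcpy() */
}
#endif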

#define __HAVE_ARCH_MEMSET
void *memset(void *s, int c, size_t n);
void *__memset(void *s, int c, size_t n);

#define __HAVE_ARCH_MEMSET16
static inline void *memset16(uint16_t *s, uint16_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosw"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET32
static inline void *memset32(uint32_t *s, uint32_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosl"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}

#define __HAVE_ARCH_MEMSET64
static inline void *memset64(uint64_t *s, uint64_t v, size_t n)
{
	long d0, d1;
	asm volatile("rep\n\t"
		     "stosq"
		     : "=&c" (d0), "=&D" (d1)
		     : "a" (v), "1" (s), "0" (n)
		     : "memory");
	return s;
}
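
/*
 * Illustrative sketch (not part of the upstream header): memset16/32/64
 * fill a buffer with a repeating 2-, 4- or 8-byte pattern using
 * rep stosw/stosl/stosq; note that n counts elements, not bytes. The
 * arrays and values below are hypothetical.
 */
#if 0	/* example only, not built */
static void example_fill_patterns(void)
{
	u16 vga_cells[80 * 25];
	u64 canary[64];

	/* 2000 u16 elements, each set to a blank VGA cell (attr 0x07, char ' ') */
	memset16(vga_cells, 0x0720, ARRAY_SIZE(vga_cells));

	/* 64 u64 elements, each set to the same 8-byte pattern */
	memset64(canary, 0xdeadbeefdeadbeefULL, ARRAY_SIZE(canary));
}
#endif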

#define __HAVE_ARCH_MEMMOVE
void *memmove(void *dest, const void *src, size_t count);
void *__memmove(void *dest, const void *src, size_t count);

int memcmp(const void *cs, const void *ct, size_t count);
size_t strlen(const char *s);
char *strcpy(char *dest, const char *src);
char *strcat(char *dest, const char *src);
int strcmp(const char *cs, const char *ct);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
 * Files that are not instrumented (e.g. mm/slub.c) should use the
 * non-instrumented versions of the mem* functions.
 */

#undef memcpy
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)
#define memset(s, c, n) __memset(s, c, n)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif

#define __HAVE_ARCH_MEMCPY_MCSAFE 1
__must_check unsigned long __memcpy_mcsafe(void *dst, const void *src,
		size_t cnt);
DECLARE_STATIC_KEY_FALSE(mcsafe_key);

/**
 * memcpy_mcsafe - copy memory with indication if a machine check happened
 *
 * @dst: destination address
 * @src: source address
 * @cnt: number of bytes to copy
 *
 * Low-level memory copy function that catches machine checks.
 * We only call into the "safe" function on systems that can
 * actually do machine check recovery. Everyone else can just
 * use memcpy().
 *
 * Return 0 for success, or the number of bytes not copied if there was an
 * exception.
 */
static __always_inline __must_check unsigned long
memcpy_mcsafe(void *dst, const void *src, size_t cnt)
{
#ifdef CONFIG_X86_MCE
	if (static_branch_unlikely(&mcsafe_key))
		return __memcpy_mcsafe(dst, src, cnt);
	else
#endif
		memcpy(dst, src, cnt);
	return 0;
}
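
/*
 * Illustrative sketch (not part of the upstream header): a caller inspects
 * the return value to learn how many trailing bytes were left uncopied when
 * a machine check was taken mid-copy, and might translate an incomplete copy
 * into an I/O error. The function and parameter names are hypothetical.
 */
#if 0	/* example only, not built */
static int example_read_pmem(void *dst, const void *pmem_src, size_t len)
{
	unsigned long rem;

	rem = memcpy_mcsafe(dst, pmem_src, len);
	if (rem)	/* rem bytes at the tail were not copied */
		return -EIO;
	return 0;
}
#endif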

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
#define __HAVE_ARCH_MEMCPY_FLUSHCACHE 1
void __memcpy_flushcache(void *dst, const void *src, size_t cnt);
static __always_inline void memcpy_flushcache(void *dst, const void *src, size_t cnt)
{
	if (__builtin_constant_p(cnt)) {
		switch (cnt) {
		case 4:
			asm ("movntil %1, %0" : "=m"(*(u32 *)dst) : "r"(*(u32 *)src));
			return;
		case 8:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			return;
		case 16:
			asm ("movntiq %1, %0" : "=m"(*(u64 *)dst) : "r"(*(u64 *)src));
			asm ("movntiq %1, %0" : "=m"(*(u64 *)(dst + 8)) : "r"(*(u64 *)(src + 8)));
			return;
		}
	}
	__memcpy_flushcache(dst, src, cnt);
}
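
/*
 * Illustrative sketch (not part of the upstream header): memcpy_flushcache()
 * is meant for writes that must not linger in the CPU cache, e.g. updates to
 * persistent memory. Constant 4/8/16-byte copies are inlined as movnti
 * non-temporal stores; any other size falls back to __memcpy_flushcache().
 * The structure and function names below are hypothetical.
 */
#if 0	/* example only, not built */
struct example_log_entry {
	u64 seq;
	u64 payload;
};

static void example_append_log(struct example_log_entry *pmem_slot,
			       const struct example_log_entry *ent)
{
	/* constant 16-byte copy: compiles to two movntiq stores */
	memcpy_flushcache(pmem_slot, ent, sizeof(*ent));
}
#endif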
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_STRING_64_H */