// Serenity Operating System
1/*
2 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice, this
9 * list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
16 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
21 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
22 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
23 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
24 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 */
26
27#include <AK/Assertions.h>
28#include <AK/String.h>
29#include <AK/Types.h>
30#include <LibBareMetal/StdLib.h>
31
32#ifdef KERNEL
33# include <Kernel/Arch/i386/CPU.h>
34# include <Kernel/Heap/kmalloc.h>
35# include <Kernel/VM/MemoryManager.h>
36#endif
37
38#ifdef KERNEL
// Copy a NUL-terminated string (reading at most user_str_size bytes) from
// userspace memory into a kernel-owned String.
// NOTE(review): unlike copy_to_user()/copy_from_user() below, this does not
// ASSERT that [user_str, user_str + user_str_size) lies in the user address
// range — confirm that all callers validate the pointer beforehand.
String copy_string_from_user(const char* user_str, size_t user_str_size)
{
    // Allow the kernel to access user pages for the duration of this scope
    // (presumably by lifting SMAP protection — see SmapDisabler in CPU.h).
    Kernel::SmapDisabler disabler;
    // Stop at the first NUL or after user_str_size bytes, whichever is first.
    size_t length = strnlen(user_str, user_str_size);
    return String(user_str, length);
}
45#endif
46
47extern "C" {
48
49#ifdef KERNEL
// Copy n bytes from kernel memory into userspace memory.
// Asserts that the destination range lies entirely in user address space.
void copy_to_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    ASSERT(Kernel::is_user_range(VirtualAddress(dest_ptr), n));
    // Allow the kernel to write to user pages for this scope (SmapDisabler
    // presumably toggles SMAP enforcement — confirm in CPU.h).
    Kernel::SmapDisabler disabler;
    memcpy(dest_ptr, src_ptr, n);
}
56
// Copy n bytes from userspace memory into kernel memory.
// Asserts that the source range lies entirely in user address space.
void copy_from_user(void* dest_ptr, const void* src_ptr, size_t n)
{
    ASSERT(Kernel::is_user_range(VirtualAddress(src_ptr), n));
    // Allow the kernel to read user pages for this scope (see SmapDisabler).
    Kernel::SmapDisabler disabler;
    memcpy(dest_ptr, src_ptr, n);
}
63#endif
64
// Copy n bytes from src_ptr to dest_ptr; the regions must not overlap (use
// memmove() for overlapping copies). When both pointers are 4-byte aligned
// and the copy is large enough, the bulk is moved word-wise with "rep movsl",
// then any remainder byte-wise with "rep movsb". Returns dest_ptr.
void* memcpy(void* dest_ptr, const void* src_ptr, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    size_t src = (size_t)src_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && !(src & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        // Keep size_ts intact for the remainder computation below; rep movsl
        // counts down its own copy in `count`.
        size_t count = size_ts;
        // "rep movsl" advances ESI/EDI and decrements ECX, so all three
        // operands must be read-write ("+"); passing them as input-only
        // operands (as before) is undefined behavior per the GCC extended-asm
        // rules and lets the compiler assume the registers are unchanged.
        asm volatile(
            "rep movsl\n"
            : "+S"(src), "+D"(dest), "+c"(count)
            :
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    // Copy the (unaligned or leftover) tail one byte at a time.
    asm volatile(
        "rep movsb\n"
        : "+S"(src), "+D"(dest), "+c"(n)
        :
        : "memory");
    return dest_ptr;
}
86
87void* memmove(void* dest, const void* src, size_t n)
88{
89 if (dest < src)
90 return memcpy(dest, src, n);
91
92 u8* pd = (u8*)dest;
93 const u8* ps = (const u8*)src;
94 for (pd += n, ps += n; n--;)
95 *--pd = *--ps;
96 return dest;
97}
98
// Copy src, including its terminating NUL byte, into dest.
// The caller must guarantee dest is large enough. Returns dest.
char* strcpy(char* dest, const char* src)
{
    size_t i = 0;
    do {
        dest[i] = src[i];
    } while (src[i++] != '\0');
    return dest;
}
107
// Copy at most n bytes of src into dest, then NUL-pad dest up to n bytes.
// Note (standard strncpy semantics): dest is NOT NUL-terminated when src is
// at least n bytes long. Returns dest.
char* strncpy(char* dest, const char* src, size_t n)
{
    size_t index = 0;
    while (index < n && src[index] != '\0') {
        dest[index] = src[index];
        ++index;
    }
    while (index < n)
        dest[index++] = '\0';
    return dest;
}
117
118#ifdef KERNEL
// Fill n bytes of userspace memory with the byte value c.
// Asserts that the destination range lies entirely in user address space.
void memset_user(void* dest_ptr, int c, size_t n)
{
    ASSERT(Kernel::is_user_range(VirtualAddress(dest_ptr), n));
    // Allow the kernel to write to user pages for this scope (see SmapDisabler).
    Kernel::SmapDisabler disabler;
    memset(dest_ptr, c, n);
}
125#endif
126
// Fill n bytes at dest_ptr with the byte value (unsigned char)c.
// For 4-byte-aligned destinations and large enough fills, the bulk is stored
// word-wise with "rep stosl", then any remainder byte-wise with "rep stosb".
// Returns dest_ptr.
void* memset(void* dest_ptr, int c, size_t n)
{
    size_t dest = (size_t)dest_ptr;
    // FIXME: Support starting at an unaligned address.
    if (!(dest & 0x3) && n >= 12) {
        size_t size_ts = n / sizeof(size_t);
        // Keep size_ts intact for the remainder computation; rep stosl counts
        // down its own store count in `count`.
        size_t count = size_ts;
        // Broadcast the fill byte into all four bytes of a 32-bit word.
        size_t expanded_c = (unsigned char)c;
        expanded_c |= expanded_c << 8;
        expanded_c |= expanded_c << 16;
        // "rep stosl" advances EDI and decrements ECX, so both must be
        // read-write ("+") operands; the previous input-only "c"(size_ts)
        // operand was undefined behavior per the GCC extended-asm rules.
        asm volatile(
            "rep stosl\n"
            : "+D"(dest), "+c"(count)
            : "a"(expanded_c)
            : "memory");
        n -= size_ts * sizeof(size_t);
        if (n == 0)
            return dest_ptr;
    }
    // Store the (unaligned or leftover) tail one byte at a time.
    asm volatile(
        "rep stosb\n"
        : "+D"(dest), "+c"(n)
        : "a"(c)
        : "memory");
    return dest_ptr;
}
152
// Return a pointer to the last occurrence of (char)ch in str, or nullptr if
// it does not occur. Per the C standard, the terminating NUL is considered
// part of the string, so searching for '\0' yields a pointer to the
// terminator (the previous implementation returned nullptr in that case).
char* strrchr(const char* str, int ch)
{
    char* last = nullptr;
    char c;
    // Remember the most recent match while scanning forward.
    for (; (c = *str); ++str) {
        if (c == ch)
            last = const_cast<char*>(str);
    }
    // str now points at the terminator; report it when that's what was asked for.
    if ((char)ch == '\0')
        return const_cast<char*>(str);
    return last;
}
163
// Return the number of bytes in str before its terminating NUL.
size_t strlen(const char* str)
{
    const char* cursor = str;
    while (*cursor != '\0')
        ++cursor;
    return (size_t)(cursor - str);
}
171
// Return the length of str, but never look past the first maxlen bytes.
// Yields maxlen when no NUL occurs within that window.
size_t strnlen(const char* str, size_t maxlen)
{
    size_t count = 0;
    while (count < maxlen && str[count] != '\0')
        ++count;
    return count;
}
179
// Lexicographically compare two NUL-terminated strings as unsigned bytes.
// Returns 0 when equal, -1 when s1 sorts first, 1 when s2 sorts first.
int strcmp(const char* s1, const char* s2)
{
    while (*s1 == *s2) {
        if (*s1 == '\0')
            return 0;
        ++s1;
        ++s2;
    }
    return (unsigned char)*s1 < (unsigned char)*s2 ? -1 : 1;
}
188
189char* strdup(const char* str)
190{
191 size_t len = strlen(str);
192 char* new_str = (char*)kmalloc(len + 1);
193 strcpy(new_str, str);
194 return new_str;
195}
196
// Compare n bytes of two buffers as unsigned values.
// Returns 0 when equal, -1/1 according to the first differing byte.
int memcmp(const void* v1, const void* v2, size_t n)
{
    auto* p1 = (const unsigned char*)v1;
    auto* p2 = (const unsigned char*)v2;
    for (size_t i = 0; i < n; ++i) {
        if (p1[i] != p2[i])
            return p1[i] < p2[i] ? -1 : 1;
    }
    return 0;
}
207
// Compare at most n bytes of two NUL-terminated strings as unsigned values.
// Returns 0 when the examined prefixes are equal, otherwise the difference
// of the first mismatching pair of bytes.
int strncmp(const char* s1, const char* s2, size_t n)
{
    for (size_t i = 0; i < n; ++i) {
        unsigned char c1 = s1[i];
        unsigned char c2 = s2[i];
        if (c1 != c2)
            return c1 - c2;
        // Both strings ended together within the window.
        if (c1 == '\0')
            break;
    }
    return 0;
}
220
221char* strstr(const char* haystack, const char* needle)
222{
223 char nch;
224 char hch;
225
226 if ((nch = *needle++) != 0) {
227 size_t len = strlen(needle);
228 do {
229 do {
230 if ((hch = *haystack++) == 0)
231 return nullptr;
232 } while (hch != nch);
233 } while (strncmp(haystack, needle, len) != 0);
234 --haystack;
235 }
236 return const_cast<char*>(haystack);
237}
238
// Itanium C++ ABI hook: compiler-generated vtables point pure virtual slots
// here; reaching it means a pure virtual was called (typically from a
// constructor or destructor), which is always a bug.
[[noreturn]] void __cxa_pure_virtual()
{
    ASSERT_NOT_REACHED();
}
243
244#ifdef KERNEL
// Resize an allocation from the kmalloc family, forwarding to the kernel
// heap's krealloc().
void* realloc(void* p, size_t s)
{
    return krealloc(p, s);
}
249#endif
250
251void free(void* p)
252{
253 return kfree(p);
254}
255
// Called by -fstack-protector instrumentation when a function's stack canary
// has been corrupted; treat as a fatal error.
[[noreturn]] void __stack_chk_fail()
{
    ASSERT_NOT_REACHED();
}
260
// Hidden-visibility variant of __stack_chk_fail used by some i386 codegen
// for local (non-PLT) canary-failure calls; same fatal handling.
[[noreturn]] void __stack_chk_fail_local()
{
    ASSERT_NOT_REACHED();
}
265}