Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v2.6.12-rc6 830 lines 24 kB view raw
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 1997, 1998, 1999, 2000, 03, 04 by Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/thread_info.h>
#include <asm-generic/uaccess.h>

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#ifdef CONFIG_MIPS32

#define __UA_LIMIT	0x80000000UL

#define __UA_ADDR	".word"
#define __UA_LA		"la"
#define __UA_ADDU	"addu"
#define __UA_t0		"$8"
#define __UA_t1		"$9"

#endif /* CONFIG_MIPS32 */

#ifdef CONFIG_MIPS64

#define __UA_LIMIT	(- TASK_SIZE)

#define __UA_ADDR	".dword"
#define __UA_LA		"dla"
#define __UA_ADDU	"daddu"
#define __UA_t0		"$12"
#define __UA_t1		"$13"

#endif /* CONFIG_MIPS64 */

/*
 * USER_DS is a bitmask that has the bits set that may not be set in a valid
 * userspace address.  Note that we limit 32-bit userspace to 0x7fff8000 but
 * the arithmetic we're doing only works if the limit is a power of two, so
 * we use 0x80000000 here on 32-bit kernels.  If a process passes an invalid
 * address in this range it's the process's problem, not ours :-)
 */

#define KERNEL_DS	((mm_segment_t) { 0UL })
#define USER_DS		((mm_segment_t) { __UA_LIMIT })

#define VERIFY_READ    0
#define VERIFY_WRITE   1

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)


/*
 * Is an address valid? This does a straightforward calculation rather
 * than tests.
 *
 * Address valid if:
 *  - "addr" doesn't have any high-bits set
 *  - AND "size" doesn't have any high-bits set
 *  - AND "addr+size" doesn't have any high-bits set
 *  - OR we are in kernel mode.
 *
 * __ua_size() is a trick to avoid runtime checking of positive constant
 * sizes; for those we already know at compile time that the size is ok.
 */
#define __ua_size(size)							\
	((__builtin_constant_p(size) && (signed long) (size) > 0) ? 0 : (size))

/*
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE.  Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */

#define __access_mask get_fs().seg

#define __access_ok(addr, size, mask)					\
	(((signed long)((mask) & ((addr) | ((addr) + (size)) | __ua_size(size)))) == 0)

#define access_ok(type, addr, size)					\
	likely(__access_ok((unsigned long)(addr), (size),__access_mask))

/*
 * verify_area: - Obsolete/deprecated and will go away soon,
 *                use access_ok() instead.
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only.  This function may sleep.
 *
 * This function has been replaced by access_ok().
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns zero if the memory block may be valid, -EFAULT
 * if it is definitely invalid.
 *
 * See access_ok() for more details.
 */
static inline int __deprecated verify_area(int type, const void * addr, unsigned long size)
{
	return access_ok(type, addr, size) ? 0 : -EFAULT;
}

/*
 * put_user: - Write a simple value into user space.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define put_user(x,ptr)	\
	__put_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * get_user: - Get a simple variable from user space.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) \
	__get_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only.  This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space.  It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * __m() casts a user address to a pointer to a deliberately oversized
 * struct, so that the "o" (memory operand) constraints below tell gcc
 * the inline asm may touch a whole region, not just one word.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef __mips64
#define __GET_USER_DW(__gu_err) __get_user_asm("ld", __gu_err)
#else
#define __GET_USER_DW(__gu_err) __get_user_asm_ll32(__gu_err)
#endif

#define __get_user_nocheck(x,ptr,size)					\
({									\
	__typeof(*(ptr)) __gu_val = 0;					\
	long __gu_addr;							\
	long __gu_err = 0;						\
									\
	might_sleep();							\
	__gu_addr = (long) (ptr);					\
	switch (size) {							\
	case 1: __get_user_asm("lb", __gu_err); break;			\
	case 2: __get_user_asm("lh", __gu_err); break;			\
	case 4: __get_user_asm("lw", __gu_err); break;			\
	case 8: __GET_USER_DW(__gu_err); break;				\
	default: __get_user_unknown(); break;				\
	}								\
	x = (__typeof__(*(ptr))) __gu_val;				\
	__gu_err;							\
})

#define __get_user_check(x,ptr,size)					\
({									\
	__typeof__(*(ptr)) __gu_val = 0;				\
	long __gu_addr;							\
	long __gu_err;							\
									\
	might_sleep();							\
	__gu_addr = (long) (ptr);					\
	__gu_err = access_ok(VERIFY_READ, (void *) __gu_addr, size)	\
				? 0 : -EFAULT;				\
									\
	if (likely(!__gu_err)) {					\
		switch (size) {						\
		case 1: __get_user_asm("lb", __gu_err); break;		\
		case 2: __get_user_asm("lh", __gu_err); break;		\
		case 4: __get_user_asm("lw", __gu_err); break;		\
		case 8: __GET_USER_DW(__gu_err); break;			\
		default: __get_user_unknown(); break;			\
		}							\
	}								\
	x = (__typeof__(*(ptr))) __gu_val;				\
	__gu_err;							\
})

#define __get_user_asm(insn,__gu_err)					\
({									\
	__asm__ __volatile__(						\
	"1:	" insn "	%1, %3				\n"	\
	"2:							\n"	\
	"	.section .fixup,\"ax\"				\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section __ex_table,\"a\"			\n"	\
	"	"__UA_ADDR "\t1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=r" (__gu_val)				\
	: "0" (__gu_err), "o" (__m(__gu_addr)), "i" (-EFAULT));		\
})

/*
 * Get a long long 64 using 32 bit registers.
 */
#define __get_user_asm_ll32(__gu_err)					\
({									\
	__asm__ __volatile__(						\
	"1:	lw	%1, %3					\n"	\
	"2:	lw	%D1, %4					\n"	\
	"	move	%0, $0					\n"	\
	"3:	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %5					\n"	\
	"	move	%1, $0					\n"	\
	"	move	%D1, $0					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous					\n"	\
	: "=r" (__gu_err), "=&r" (__gu_val)				\
	: "0" (__gu_err), "o" (__m(__gu_addr)),				\
	  "o" (__m(__gu_addr + 4)), "i" (-EFAULT));			\
})

extern void __get_user_unknown(void);

/*
 * Yuck.  We need two variants, one for 64bit operation and one
 * for 32 bit mode and old iron.
 */
#ifdef __mips64
#define __PUT_USER_DW(__pu_val) __put_user_asm("sd", __pu_val)
#else
#define __PUT_USER_DW(__pu_val) __put_user_asm_ll32(__pu_val)
#endif

#define __put_user_nocheck(x,ptr,size)					\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	long __pu_addr;							\
	long __pu_err = 0;						\
									\
	might_sleep();							\
	__pu_val = (x);							\
	__pu_addr = (long) (ptr);					\
	switch (size) {							\
	case 1: __put_user_asm("sb", __pu_val); break;			\
	case 2: __put_user_asm("sh", __pu_val); break;			\
	case 4: __put_user_asm("sw", __pu_val); break;			\
	case 8: __PUT_USER_DW(__pu_val); break;				\
	default: __put_user_unknown(); break;				\
	}								\
	__pu_err;							\
})

#define __put_user_check(x,ptr,size)					\
({									\
	__typeof__(*(ptr)) __pu_val;					\
	long __pu_addr;							\
	long __pu_err;							\
									\
	might_sleep();							\
	__pu_val = (x);							\
	__pu_addr = (long) (ptr);					\
	__pu_err = access_ok(VERIFY_WRITE, (void *) __pu_addr, size)	\
				? 0 : -EFAULT;				\
									\
	if (likely(!__pu_err)) {					\
		switch (size) {						\
		case 1: __put_user_asm("sb", __pu_val); break;		\
		case 2: __put_user_asm("sh", __pu_val); break;		\
		case 4: __put_user_asm("sw", __pu_val); break;		\
		case 8: __PUT_USER_DW(__pu_val); break;			\
		default: __put_user_unknown(); break;			\
		}							\
	}								\
	__pu_err;							\
})

#define __put_user_asm(insn, __pu_val)					\
({									\
	__asm__ __volatile__(						\
	"1:	" insn "	%z2, %3		# __put_user_asm\n"	\
	"2:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"3:	li	%0, %4					\n"	\
	"	j	2b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 3b				\n"	\
	"	.previous					\n"	\
	: "=r" (__pu_err)						\
	: "0" (__pu_err), "Jr" (__pu_val), "o" (__m(__pu_addr)),	\
	  "i" (-EFAULT));						\
})

#define __put_user_asm_ll32(__pu_val)					\
({									\
	__asm__ __volatile__(						\
	"1:	sw	%2, %3		# __put_user_asm_ll32	\n"	\
	"2:	sw	%D2, %4					\n"	\
	"3:							\n"	\
	"	.section	.fixup,\"ax\"			\n"	\
	"4:	li	%0, %5					\n"	\
	"	j	3b					\n"	\
	"	.previous					\n"	\
	"	.section	__ex_table,\"a\"		\n"	\
	"	" __UA_ADDR "	1b, 4b				\n"	\
	"	" __UA_ADDR "	2b, 4b				\n"	\
	"	.previous"						\
	: "=r" (__pu_err)						\
	: "0" (__pu_err), "r" (__pu_val), "o" (__m(__pu_addr)),		\
	  "o" (__m(__pu_addr + 4)), "i" (-EFAULT));			\
})

extern void __put_user_unknown(void);

/*
 * We're generating jump to subroutines which will be outside the range of
 * jump instructions
 */
#ifdef MODULE
#define __MODULE_JAL(destination)					\
	".set\tnoat\n\t"						\
	__UA_LA "\t$1, " #destination "\n\t"				\
	"jalr\t$1\n\t"							\
	".set\tat\n\t"
#else
#define __MODULE_JAL(destination)					\
	"jal\t" #destination "\n\t"
#endif

extern size_t __copy_user(void *__to, const void *__from, size_t __n);

#define __invoke_copy_to_user(to,from,n)				\
({									\
	register void *__cu_to_r __asm__ ("$4");			\
	register const void *__cu_from_r __asm__ ("$5");		\
	register long __cu_len_r __asm__ ("$6");			\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	__MODULE_JAL(__copy_user)					\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  "memory");							\
	__cu_len_r;							\
})

/*
 * __copy_to_user: - Copy a block of data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define __copy_to_user(to,from,n)					\
({									\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len);	\
	__cu_len;							\
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/*
 * copy_to_user: - Copy a block of data into user space.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from kernel space to user space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
#define copy_to_user(to,from,n)						\
({									\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_WRITE, __cu_to, __cu_len))			\
		__cu_len = __invoke_copy_to_user(__cu_to, __cu_from,	\
						 __cu_len);		\
	__cu_len;							\
})

#define __invoke_copy_from_user(to,from,n)				\
({									\
	register void *__cu_to_r __asm__ ("$4");			\
	register const void *__cu_from_r __asm__ ("$5");		\
	register long __cu_len_r __asm__ ("$6");			\
									\
	__cu_to_r = (to);						\
	__cu_from_r = (from);						\
	__cu_len_r = (n);						\
	__asm__ __volatile__(						\
	".set\tnoreorder\n\t"						\
	__MODULE_JAL(__copy_user)					\
	".set\tnoat\n\t"						\
	__UA_ADDU "\t$1, %1, %2\n\t"					\
	".set\tat\n\t"							\
	".set\treorder"							\
	: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r)	\
	:								\
	: "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31",		\
	  "memory");							\
	__cu_len_r;							\
})

/*
 * __copy_from_user: - Copy a block of data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define __copy_from_user(to,from,n)					\
({									\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,		\
	                                   __cu_len);			\
	__cu_len;							\
})

/*
 * copy_from_user: - Copy a block of data from user space.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only.  This function may sleep.
 *
 * Copy data from user space to kernel space.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 */
#define copy_from_user(to,from,n)					\
({									\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (access_ok(VERIFY_READ, __cu_from, __cu_len))		\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
		                                   __cu_len);		\
	__cu_len;							\
})

#define __copy_in_user(to, from, n)	__copy_from_user(to, from, n)

#define copy_in_user(to,from,n)						\
({									\
	void *__cu_to;							\
	const void *__cu_from;						\
	long __cu_len;							\
									\
	might_sleep();							\
	__cu_to = (to);							\
	__cu_from = (from);						\
	__cu_len = (n);							\
	if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&	\
	           access_ok(VERIFY_WRITE, __cu_to, __cu_len)))		\
		__cu_len = __invoke_copy_from_user(__cu_to, __cu_from,	\
		                                   __cu_len);		\
	__cu_len;							\
})

/*
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to:   Destination address, in user space.
 * @n:    Number of bytes to zero.
 *
 * Zero a block of memory in user space.  Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
static inline __kernel_size_t
__clear_user(void *addr, __kernel_size_t size)
{
	__kernel_size_t res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, $0\n\t"
		"move\t$6, %2\n\t"
		__MODULE_JAL(__bzero)
		"move\t%0, $6"
		: "=r" (res)
		: "r" (addr), "r" (size)
		: "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");

	return res;
}

#define clear_user(addr,n)						\
({									\
	void * __cl_addr = (addr);					\
	unsigned long __cl_size = (n);					\
	if (__cl_size && access_ok(VERIFY_WRITE,			\
		((unsigned long)(__cl_addr)), __cl_size))		\
		__cl_size = __clear_user(__cl_addr, __cl_size);		\
	__cl_size;							\
})

/*
 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
__strncpy_from_user(char *__to, const char *__from, long __len)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}

/*
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
static inline long
strncpy_from_user(char *__to, const char *__from, long __len)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		"move\t$6, %3\n\t"
		__MODULE_JAL(__strncpy_from_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (__to), "r" (__from), "r" (__len)
		: "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char *s)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/*
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
static inline long strlen_user(const char *s)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		__MODULE_JAL(__strlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s)
		: "$2", "$4", __UA_t0, "$31");

	return res;
}

/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strnlen_user(const char *s, long n)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_nocheck_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}

/*
 * strnlen_user: - Get the size of a string in user space, with a limit.
 * @str: The string to measure.
 * @n:   Maximum number of bytes to examine.
 *
 * Context: User context only.  This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space, examining at
 * most @n bytes.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 */
static inline long strnlen_user(const char *s, long n)
{
	long res;

	might_sleep();
	__asm__ __volatile__(
		"move\t$4, %1\n\t"
		"move\t$5, %2\n\t"
		__MODULE_JAL(__strnlen_user_asm)
		"move\t%0, $2"
		: "=r" (res)
		: "r" (s), "r" (n)
		: "$2", "$4", "$5", __UA_t0, "$31");

	return res;
}

/*
 * One entry per potentially-faulting user access: insn is the address of
 * the faulting instruction, nextinsn the fixup handler to branch to.
 */
struct exception_table_entry
{
	unsigned long insn;
	unsigned long nextinsn;
};

extern int fixup_exception(struct pt_regs *regs);

#endif /* _ASM_UACCESS_H */