Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux
At tag v2.6.20 — 571 lines, 14 kB (view raw)
/* $Id: uaccess.h,v 1.11 2003/10/13 07:21:20 lethal Exp $
 *
 * User space memory access functions
 *
 * Copyright (C) 1999, 2002  Niibe Yutaka
 * Copyright (C) 2003  Paul Mundt
 *
 * Based on:
 *     MIPS implementation version 1.15 by
 *              Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *     and i386 version.
 */
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ 0
#define VERIFY_WRITE 1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed, with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

/* KERNEL_DS: no limit; USER_DS: everything below the kernel mapping. */
#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFUL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define segment_eq(a,b)	((a).seg == (b).seg)

#define get_ds()	(KERNEL_DS)

#if !defined(CONFIG_MMU)
/* NOMMU is always true */
#define __addr_ok(addr) (1)

/* Without an MMU there is no per-thread address limit to track. */
static inline mm_segment_t get_fs(void)
{
	return USER_DS;
}

static inline void set_fs(mm_segment_t s)
{
}

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * If we don't have an MMU (or if its disabled) the only thing we really have
 * to look out for is if the address resides somewhere outside of what
 * available RAM we have.
 *
 * TODO: This check could probably also stand to be restricted somewhat more..
 * though it still does the Right Thing(tm) for the time being.
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	/* RAM bounds are set up elsewhere (platform init). */
	extern unsigned long memory_start, memory_end;

	/*
	 * NOTE(review): (addr + size) may wrap for hostile values; the
	 * MMU variant below handles carry explicitly, this one does not.
	 */
	return ((addr >= memory_start) && ((addr + size) < memory_end));
}
#else /* CONFIG_MMU */
#define __addr_ok(addr) \
	((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * We do three checks:
 * (1) is it user space?
 * (2) addr + size --> carry?
 * (3) addr + size >= 0x80000000  (PAGE_OFFSET)
 *
 * (1) (2) (3) | RESULT
 *  0   0   0  |  ok
 *  0   0   1  |  ok
 *  0   1   0  |  bad
 *  0   1   1  |  bad
 *  1   0   0  |  ok
 *  1   0   1  |  bad
 *  1   1   0  |  bad
 *  1   1   1  |  bad
 */
static inline int __access_ok(unsigned long addr, unsigned long size)
{
	unsigned long flag, tmp;

	/*
	 * r7_bank holds the current thread_info pointer; the word at
	 * offset 8 is presumably addr_limit (matches get_fs() above --
	 * TODO confirm against struct thread_info layout).
	 * addc computes addr+size and leaves the carry in T; the two
	 * rotcl's fold the carry and the sign bit of the masked sum
	 * into the low bits of %0, which must all be clear for "ok"
	 * (the truth table above).
	 */
	__asm__("stc	r7_bank, %0\n\t"
		"mov.l	@(8,%0), %0\n\t"
		"clrt\n\t"
		"addc	%2, %1\n\t"
		"and	%1, %0\n\t"
		"rotcl	%0\n\t"
		"rotcl	%0\n\t"
		"and	#3, %0"
		: "=&z" (flag), "=r" (tmp)
		: "r" (addr), "1" (size)
		: "t");

	return flag == 0;
}
#endif /* CONFIG_MMU */

/* 'type' (VERIFY_READ/VERIFY_WRITE) is ignored; both checks are identical. */
static inline int access_ok(int type, const void __user *p, unsigned long size)
{
	unsigned long addr = (unsigned long)p;
	return __access_ok(addr, size);
}

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Careful to not
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr)	__get_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) \
	__get_user_nocheck((x),(ptr),sizeof(*(ptr)))

/*
 * Dummy oversized type so the "m" asm constraints below can refer to
 * the user address without gcc knowing (or caring about) the real
 * access width.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

/* Dispatch on access width; sets retval to 0 or -EFAULT via the asm. */
#define __get_user_size(x,ptr,size,retval)			\
do {								\
	retval = 0;						\
	switch (size) {						\
	case 1:							\
		__get_user_asm(x, ptr, retval, "b");		\
		break;						\
	case 2:							\
		__get_user_asm(x, ptr, retval, "w");		\
		break;						\
	case 4:							\
		__get_user_asm(x, ptr, retval, "l");		\
		break;						\
	default:						\
		__get_user_unknown();				\
		break;						\
	}							\
} while (0)

#define __get_user_nocheck(x,ptr,size)				\
({								\
	long __gu_err, __gu_val;				\
	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

#ifdef CONFIG_MMU
#define __get_user_check(x,ptr,size)				\
({								\
	long __gu_err, __gu_val;				\
	switch (size) {						\
	case 1:							\
		__get_user_1(__gu_val, (ptr), __gu_err);	\
		break;						\
	case 2:							\
		__get_user_2(__gu_val, (ptr), __gu_err);	\
		break;						\
	case 4:							\
		__get_user_4(__gu_val, (ptr), __gu_err);	\
		break;						\
	default:						\
		__get_user_unknown();				\
		break;						\
	}							\
								\
	(x) = (__typeof__(*(ptr)))__gu_val;			\
	__gu_err;						\
})

/*
 * Fused address-check + load (check inlined rather than calling
 * __access_ok).  Masks the address with addr_limit (thread_info + 8
 * via r7_bank, see __access_ok above) and tests the sign bit: positive
 * means the address is below the limit, so take the load at 1:.
 * Otherwise fall through to 0:, which returns -EFAULT (#-14 == -EFAULT)
 * and zeroes the result.  A faulting load at 1: is redirected to 0: by
 * the __ex_table entry.
 */
#define __get_user_1(x,addr,err) ({		\
__asm__("stc	r7_bank, %1\n\t"		\
	"mov.l	@(8,%1), %1\n\t"		\
	"and	%2, %1\n\t"			\
	"cmp/pz	%1\n\t"				\
	"bt/s	1f\n\t"				\
	" mov	#0, %0\n\t"			\
	"0:\n"					\
	"mov	#-14, %0\n\t"			\
	"bra	2f\n\t"				\
	" mov	#0, %1\n"			\
	"1:\n\t"				\
	"mov.b	@%2, %1\n\t"			\
	"extu.b	%1, %1\n"			\
	"2:\n"					\
	".section	__ex_table,\"a\"\n\t"	\
	".long	1b, 0b\n\t"			\
	".previous"				\
	: "=&r" (err), "=&r" (x)		\
	: "r" (addr)				\
	: "t");					\
})

/* As __get_user_1, but 16-bit load with zero-extension. */
#define __get_user_2(x,addr,err) ({		\
__asm__("stc	r7_bank, %1\n\t"		\
	"mov.l	@(8,%1), %1\n\t"		\
	"and	%2, %1\n\t"			\
	"cmp/pz	%1\n\t"				\
	"bt/s	1f\n\t"				\
	" mov	#0, %0\n\t"			\
	"0:\n"					\
	"mov	#-14, %0\n\t"			\
	"bra	2f\n\t"				\
	" mov	#0, %1\n"			\
	"1:\n\t"				\
	"mov.w	@%2, %1\n\t"			\
	"extu.w	%1, %1\n"			\
	"2:\n"					\
	".section	__ex_table,\"a\"\n\t"	\
	".long	1b, 0b\n\t"			\
	".previous"				\
	: "=&r" (err), "=&r" (x)		\
	: "r" (addr)				\
	: "t");					\
})

/* As __get_user_1, but a full 32-bit load (no extension needed). */
#define __get_user_4(x,addr,err) ({		\
__asm__("stc	r7_bank, %1\n\t"		\
	"mov.l	@(8,%1), %1\n\t"		\
	"and	%2, %1\n\t"			\
	"cmp/pz	%1\n\t"				\
	"bt/s	1f\n\t"				\
	" mov	#0, %0\n\t"			\
	"0:\n"					\
	"mov	#-14, %0\n\t"			\
	"bra	2f\n\t"				\
	" mov	#0, %1\n"			\
	"1:\n\t"				\
	"mov.l	@%2, %1\n\t"			\
	"2:\n"					\
	".section	__ex_table,\"a\"\n\t"	\
	".long	1b, 0b\n\t"			\
	".previous"				\
	: "=&r" (err), "=&r" (x)		\
	: "r" (addr)				\
	: "t");					\
})
#else /* CONFIG_MMU */
/* No MMU: a plain C range check suffices before the unchecked access. */
#define __get_user_check(x,ptr,size)					\
({									\
	long __gu_err, __gu_val;					\
	if (__access_ok((unsigned long)(ptr), (size))) {		\
		__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
		(x) = (__typeof__(*(ptr)))__gu_val;			\
	} else								\
		__gu_err = -EFAULT;					\
	__gu_err;							\
})
#endif

/*
 * Unchecked load: the access at 1: is covered by an __ex_table entry;
 * on a fault the fixup at 3: zeroes the result, sets err = -EFAULT and
 * jumps back to 2:.
 */
#define __get_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%2, %1\n\t" \
	"mov	#0, %0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"mov	#0, %1\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3, %0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (err), "=&r" (x) \
	:"m" (__m(addr)), "i" (-EFAULT)); })

/* Deliberately undefined; referencing it breaks the link for bad sizes. */
extern void __get_user_unknown(void);

/*
 * Dispatch on store width.  Note: 8-byte stores are supported here but
 * not in __get_user_size above.
 */
#define __put_user_size(x,ptr,size,retval)		\
do {							\
	retval = 0;					\
	switch (size) {					\
	case 1:						\
		__put_user_asm(x, ptr, retval, "b");	\
		break;					\
	case 2:						\
		__put_user_asm(x, ptr, retval, "w");	\
		break;					\
	case 4:						\
		__put_user_asm(x, ptr, retval, "l");	\
		break;					\
	case 8:						\
		__put_user_u64(x, ptr, retval);		\
		break;					\
	default:					\
		__put_user_unknown();			\
	}						\
} while (0)

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__put_user_size((x),(ptr),(size),__pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x,ptr,size)				\
({								\
	long __pu_err = -EFAULT;				\
	__typeof__(*(ptr)) *__pu_addr = (ptr);			\
								\
	if (__access_ok((unsigned long)__pu_addr,size))		\
		__put_user_size((x),__pu_addr,(size),__pu_err);	\
	__pu_err;						\
})

/*
 * Unchecked store, mirror image of __get_user_asm: a fault at 1: lands
 * in the fixup at 3:, which sets err = -EFAULT and resumes at 2:.
 */
#define __put_user_asm(x, addr, err, insn) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov." insn "	%1, %2\n\t" \
	"mov	#0, %0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f, %0\n\t" \
	"jmp	@%0\n\t" \
	"mov	%3, %0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	:"=&r" (err) \
	:"r" (x), "m" (__m(addr)), "i" (-EFAULT) \
	:"memory"); })

/*
 * 64-bit store as two 32-bit stores; word order depends on endianness.
 * %R/%S select the two 32-bit halves of the 64-bit register operand and
 * %T the following memory word (GCC SH operand modifiers -- TODO
 * confirm exact low/high semantics against the GCC docs).  Only the
 * first store (1:) carries an __ex_table entry.
 */
#if defined(__LITTLE_ENDIAN__)
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%R1,%2\n\t" \
	"mov.l	%S1,%T2\n\t" \
	"mov	#0,%0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f,%0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3,%0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
	: "memory"); })
#else
#define __put_user_u64(val,addr,retval) \
({ \
__asm__ __volatile__( \
	"1:\n\t" \
	"mov.l	%S1,%2\n\t" \
	"mov.l	%R1,%T2\n\t" \
	"mov	#0,%0\n" \
	"2:\n" \
	".section	.fixup,\"ax\"\n" \
	"3:\n\t" \
	"nop\n\t" \
	"mov.l	4f,%0\n\t" \
	"jmp	@%0\n\t" \
	" mov	%3,%0\n" \
	"4:	.long	2b\n\t" \
	".previous\n" \
	".section	__ex_table,\"a\"\n\t" \
	".long	1b, 3b\n\t" \
	".previous" \
	: "=r" (retval) \
	: "r" (val), "m" (__m(addr)), "i" (-EFAULT) \
	: "memory"); })
#endif

/* Deliberately undefined; referencing it breaks the link for bad sizes. */
extern void __put_user_unknown(void);

/* Generic arbitrary sized copy.  */
/* Return the number of bytes NOT copied */
extern __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);

/*
 * Checked copies: on a failed access check the whole length n is
 * reported as "not copied".
 */
#define copy_to_user(to,from,n) ({ \
void *__copy_to = (void *) (to); \
__kernel_size_t __copy_size = (__kernel_size_t) (n); \
__kernel_size_t __copy_res; \
if(__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
} else __copy_res = __copy_size; \
__copy_res; })

/* NOTE(review): the (void *) casts silently strip const/__user. */
#define __copy_to_user(to,from,n)		\
	__copy_user((void *)(to),		\
		    (void *)(from), n)

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#define copy_from_user(to,from,n) ({ \
void *__copy_to = (void *) (to); \
void *__copy_from = (void *) (from); \
__kernel_size_t __copy_size = (__kernel_size_t) (n); \
__kernel_size_t __copy_res; \
if(__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
} else __copy_res = __copy_size; \
__copy_res; })

#define __copy_from_user(to,from,n)		\
	__copy_user((void *)(to),		\
		    (void *)(from), n)

/*
 * Clear the area and return remaining number of bytes
 * (on failure.  Usually it's 0.)
 */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

/* On a failed access check, n is returned unchanged (nothing cleared). */
#define clear_user(addr,n) ({ \
void * __cl_addr = (addr); \
unsigned long __cl_size = (n); \
if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
__cl_size = __clear_user(__cl_addr, __cl_size); \
__cl_size; })

/*
 * Copy up to __count bytes from user __src to kernel __dest, stopping
 * after the NUL.  Returns the number of bytes copied (computed as
 * __count minus the remaining loop counter), or -EFAULT via the fixup
 * if the faulting load at 9: trips the __ex_table entry.  The bt/s at
 * the top executes the following mov.b store in its delay slot.
 */
static __inline__ int
__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
{
	__kernel_size_t res;
	unsigned long __dummy, _d, _s;

	__asm__ __volatile__(
		"9:\n"
		"mov.b	@%2+, %1\n\t"
		"cmp/eq	#0, %1\n\t"
		"bt/s	2f\n"
		"1:\n"
		"mov.b	%1, @%3\n\t"
		"dt	%7\n\t"
		"bf/s	9b\n\t"
		" add	#1, %3\n\t"
		"2:\n\t"
		"sub	%7, %0\n"
		"3:\n"
		".section .fixup,\"ax\"\n"
		"4:\n\t"
		"mov.l	5f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	%8, %0\n\t"
		".balign 4\n"
		"5:	.long 3b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 9b,4b\n"
		".previous"
		: "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d)
		: "0" (__count), "2" (__src), "3" (__dest), "r" (__count),
		  "i" (-EFAULT)
		: "memory", "t");

	return res;
}

#define strncpy_from_user(dest,src,count) ({ \
unsigned long __sfu_src = (unsigned long) (src); \
int __sfu_count = (int) (count); \
long __sfu_res = -EFAULT; \
if(__access_ok(__sfu_src, __sfu_count)) { \
__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
} __sfu_res; })

/*
 * Return the size of a string (including the ending 0!)
 */
/*
 * Scans at most __n bytes; returns index+1 of the NUL, __n if no NUL
 * was found, or 0 if the load at 1: faults (see the fixup at 3:).
 */
static __inline__ long __strnlen_user(const char __user *__s, long __n)
{
	unsigned long res;
	unsigned long __dummy;

	__asm__ __volatile__(
		"9:\n"
		"cmp/eq	%4, %0\n\t"
		"bt	2f\n"
		"1:\t"
		"mov.b	@(%0,%3), %1\n\t"
		"tst	%1, %1\n\t"
		"bf/s	9b\n\t"
		" add	#1, %0\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:\n\t"
		"mov.l	4f, %1\n\t"
		"jmp	@%1\n\t"
		" mov	#0, %0\n"
		".balign 4\n"
		"4:	.long 2b\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.balign 4\n"
		"	.long 1b,3b\n"
		".previous"
		: "=z" (res), "=&r" (__dummy)
		: "0" (0), "r" (__s), "r" (__n)
		: "t");
	return res;
}

/* Returns 0 (not -EFAULT) for an out-of-range pointer, like __strnlen_user. */
static __inline__ long strnlen_user(const char __user *s, long n)
{
	if (!__addr_ok(s))
		return 0;
	else
		return __strnlen_user(s, n);
}

#define strlen_user(str)	strnlen_user(str, ~0UL >> 1)

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue.  No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path.  This means when everything is well,
 * we don't even have to jump over them.  Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

#endif /* __ASM_SH_UACCESS_H */