Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.37-rc6 · 580 lines · 18 kB
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 */

#ifndef _ASM_TILE_UACCESS_H
#define _ASM_TILE_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm-generic/uaccess-unaligned.h>
#include <asm/processor.h>
#include <asm/page.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not. If get_fs() == USER_DS, checking is performed; with
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons, these macros are grossly misnamed.
 */
#define MAKE_MM_SEG(a)	((mm_segment_t) { (a) })

#define KERNEL_DS	MAKE_MM_SEG(-1UL)
#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a, b) ((a).seg == (b).seg)

#ifndef __tilegx__
/*
 * We could allow mapping all 16 MB at 0xfc000000, but we set up a
 * special hack in arch_setup_additional_pages() to auto-create a mapping
 * for the first 16 KB, and it would seem strange to have different
 * user-accessible semantics for memory at 0xfc000000 and above 0xfc004000.
 */
static inline int is_arch_mappable_range(unsigned long addr,
					 unsigned long size)
{
	return (addr >= MEM_USER_INTRPT &&
		addr < (MEM_USER_INTRPT + INTRPT_SIZE) &&
		size <= (MEM_USER_INTRPT + INTRPT_SIZE) - addr);
}
#define is_arch_mappable_range is_arch_mappable_range
#else
#define is_arch_mappable_range(addr, size) 0
#endif

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
int __range_ok(unsigned long addr, unsigned long size);

/**
 * access_ok: - Checks if a user space pointer is valid
 * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
 *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
 *        to write to a block, it is always safe to read from it.
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 */
#define access_ok(type, addr, size) ({					\
	__chk_user_ptr(addr);						\
	likely(__range_ok((unsigned long)(addr), (size)) == 0);	\
})

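/*
 * Usage sketch (illustrative, not part of the original header):
 * validating a user pointer before touching it. The helper name is
 * hypothetical. access_ok() only range-checks the pointer, so the
 * actual access can still fault and the routines below can still
 * report an error.
 */
static inline int example_check_user_buf(void __user *buf, size_t len)
{
	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;	/* definitely not a valid user range */
	return 0;		/* plausible; the access may still fault */
}
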
/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry {
	unsigned long insn, fixup;
};

extern int fixup_exception(struct pt_regs *regs);

/*
 * We return the __get_user_N function results in a structure,
 * thus in r0 and r1. If "err" is zero, "val" is the result
 * of the read; otherwise, "err" is -EFAULT.
 *
 * We rarely need 8-byte values on a 32-bit architecture, but
 * we size the structure to accommodate. In practice, for the
 * smaller reads, we can zero the high word for free, and
 * the caller will ignore it by virtue of casting anyway.
 */
struct __get_user {
	unsigned long long val;
	int err;
};

/*
 * FIXME: we should express these as inline extended assembler, since
 * they're fundamentally just a variable dereference and some
 * supporting exception_table gunk. Note that (a la i386) we can
 * extend the copy_to_user and copy_from_user routines to call into
 * such extended assembler routines, though we will have to use a
 * different return code in that case (1, 2, or 4, rather than -EFAULT).
 */
extern struct __get_user __get_user_1(const void __user *);
extern struct __get_user __get_user_2(const void __user *);
extern struct __get_user __get_user_4(const void __user *);
extern struct __get_user __get_user_8(const void __user *);
extern int __put_user_1(long, void __user *);
extern int __put_user_2(long, void __user *);
extern int __put_user_4(long, void __user *);
extern int __put_user_8(long long, void __user *);

/* Unimplemented routines to cause linker failures */
extern struct __get_user __get_user_bad(void);
extern int __put_user_bad(void);

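/*
 * Sketch (illustrative, not part of the original header) of how a fault
 * handler consults the exception table: if the faulting PC has an entry,
 * execution resumes at the fixup address. search_exception_tables() is
 * the generic kernel helper; this function is a hypothetical
 * simplification of fixup_exception().
 */
static inline int example_fixup(struct pt_regs *regs)
{
	const struct exception_table_entry *fix;

	fix = search_exception_tables(regs->pc);
	if (fix) {
		regs->pc = fix->fixup;	/* resume at the out-of-line fixup */
		return 1;
	}
	return 0;			/* no entry: the fault is genuine */
}
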
/*
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 */
/**
 * __get_user: - Get a simple variable from user space, with less checking.
 * @x:   Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Returns zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 */
#define __get_user(x, ptr)						\
({	struct __get_user __ret;					\
	__typeof__(*(ptr)) const __user *__gu_addr = (ptr);		\
	__chk_user_ptr(__gu_addr);					\
	switch (sizeof(*(__gu_addr))) {					\
	case 1:								\
		__ret = __get_user_1(__gu_addr);			\
		break;							\
	case 2:								\
		__ret = __get_user_2(__gu_addr);			\
		break;							\
	case 4:								\
		__ret = __get_user_4(__gu_addr);			\
		break;							\
	case 8:								\
		__ret = __get_user_8(__gu_addr);			\
		break;							\
	default:							\
		__ret = __get_user_bad();				\
		break;							\
	}								\
	(x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \
		__ret.val;						\
	__ret.err;							\
})

/**
 * __put_user: - Write a simple value into user space, with less checking.
 * @x:   Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Returns zero on success, or -EFAULT on error.
 *
 * Implementation note: The "case 8" logic of casting to the type of
 * the result of subtracting the value from itself is basically a way
 * of keeping all integer types the same, but casting any pointers to
 * ptrdiff_t, i.e. also an integer type. This way there are no
 * questionable casts seen by the compiler on an ILP32 platform.
 */
#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	typeof(*__pu_addr) __pu_val = (x);				\
	__chk_user_ptr(__pu_addr);					\
	switch (sizeof(__pu_val)) {					\
	case 1:								\
		__pu_err = __put_user_1((long)__pu_val, __pu_addr);	\
		break;							\
	case 2:								\
		__pu_err = __put_user_2((long)__pu_val, __pu_addr);	\
		break;							\
	case 4:								\
		__pu_err = __put_user_4((long)__pu_val, __pu_addr);	\
		break;							\
	case 8:								\
		__pu_err =						\
		  __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val, \
			__pu_addr);					\
		break;							\
	default:							\
		__pu_err = __put_user_bad();				\
		break;							\
	}								\
	__pu_err;							\
})

/*
 * The versions of get_user and put_user without initial underscores
 * check the address of their arguments to make sure they are not
 * in kernel space.
 */
#define put_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) __user *__Pu_addr = (ptr);			\
	access_ok(VERIFY_WRITE, (__Pu_addr), sizeof(*(__Pu_addr))) ?	\
		__put_user((x), (__Pu_addr)) :				\
		-EFAULT;						\
})

#define get_user(x, ptr)						\
({									\
	__typeof__(*(ptr)) const __user *__Gu_addr = (ptr);		\
	access_ok(VERIFY_READ, (__Gu_addr), sizeof(*(__Gu_addr))) ?	\
		__get_user((x), (__Gu_addr)) :				\
		((x) = 0, -EFAULT);					\
})

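/*
 * Usage sketch (illustrative, not part of the original header): a
 * hypothetical helper that reads an int from user space, doubles it,
 * and writes it back. get_user()/put_user() perform the access_ok()
 * check internally, so no separate check is needed here.
 */
static inline long example_double_in_place(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))
		return -EFAULT;		/* bad pointer or faulting read */
	val *= 2;
	return put_user(val, uptr);	/* 0 on success, -EFAULT on fault */
}
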
/**
 * __copy_to_user() - copy data into user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in kernel space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from kernel space to user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * An alternate version - __copy_to_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable().
 */
extern unsigned long __must_check __copy_to_user_inatomic(
	void __user *to, const void *from, unsigned long n);

static inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
	might_fault();
	return __copy_to_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_to_user(void __user *to, const void *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n))
		n = __copy_to_user(to, from, n);
	return n;
}

/**
 * __copy_from_user() - copy data from user space, with less checking.
 * @to:   Destination address, in kernel space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to kernel space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 *
 * If some data could not be copied, this function will pad the copied
 * data to the requested size using zero bytes.
 *
 * An alternate version - __copy_from_user_inatomic() - is designed
 * to be called from atomic context, typically bracketed by calls
 * to pagefault_disable() and pagefault_enable(). This version
 * does *NOT* pad with zeros.
 */
extern unsigned long __must_check __copy_from_user_inatomic(
	void *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __copy_from_user_zeroing(
	void *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
	might_fault();
	return __copy_from_user_zeroing(to, from, n);
}

static inline unsigned long __must_check
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_READ, from, n))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);
	return n;
}

#ifdef CONFIG_DEBUG_COPY_FROM_USER
extern void copy_from_user_overflow(void)
	__compiletime_warning("copy_from_user() size is not provably correct");

static inline unsigned long __must_check copy_from_user(void *to,
					  const void __user *from,
					  unsigned long n)
{
	int sz = __compiletime_object_size(to);

	if (likely(sz == -1 || sz >= n))
		n = _copy_from_user(to, from, n);
	else
		copy_from_user_overflow();

	return n;
}
#else
#define copy_from_user _copy_from_user
#endif

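/*
 * Usage sketch (illustrative, not part of the original header): pulling
 * a fixed-size structure in from user space, e.g. in an ioctl handler.
 * "struct example_req" and the helper are hypothetical. A nonzero
 * return from copy_from_user() means that many trailing bytes could
 * not be copied (and were zero-filled).
 */
struct example_req {
	unsigned long addr;
	unsigned long len;
};

static inline long example_fetch_req(struct example_req *req,
				     const void __user *arg)
{
	if (copy_from_user(req, arg, sizeof(*req)))
		return -EFAULT;
	return 0;
}
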
#ifdef __tilegx__
/**
 * __copy_in_user() - copy data within user space, with less checking.
 * @to:   Destination address, in user space.
 * @from: Source address, in user space.
 * @n:    Number of bytes to copy.
 *
 * Context: User context only. This function may sleep.
 *
 * Copy data from user space to user space. Caller must check
 * the specified blocks with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be copied.
 * On success, this will be zero.
 */
extern unsigned long __copy_in_user_inatomic(
	void __user *to, const void __user *from, unsigned long n);

static inline unsigned long __must_check
__copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	might_sleep();
	return __copy_in_user_inatomic(to, from, n);
}

static inline unsigned long __must_check
copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
		n = __copy_in_user(to, from, n);
	return n;
}
#endif


/**
 * strlen_user: - Get the size of a string in user space.
 * @str: The string to measure.
 *
 * Context: User context only. This function may sleep.
 *
 * Get the size of a NUL-terminated string in user space.
 *
 * Returns the size of the string INCLUDING the terminating NUL.
 * On exception, returns 0.
 *
 * If there is a limit on the length of a valid string, you may wish to
 * consider using strnlen_user() instead.
 */
extern long strnlen_user_asm(const char __user *str, long n);
static inline long __must_check strnlen_user(const char __user *str, long n)
{
	might_fault();
	return strnlen_user_asm(str, n);
}
#define strlen_user(str) strnlen_user(str, LONG_MAX)

/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
 * @dst:   Destination address, in kernel space. This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 * Caller must check the specified block with access_ok() before calling
 * this function.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
extern long strncpy_from_user_asm(char *dst, const char __user *src, long);
static inline long __must_check __strncpy_from_user(
	char *dst, const char __user *src, long count)
{
	might_fault();
	return strncpy_from_user_asm(dst, src, count);
}
static inline long __must_check strncpy_from_user(
	char *dst, const char __user *src, long count)
{
	if (access_ok(VERIFY_READ, src, 1))
		return __strncpy_from_user(dst, src, count);
	return -EFAULT;
}

/**
 * clear_user: - Zero a block of memory in user space.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
extern unsigned long clear_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __clear_user(
	void __user *mem, unsigned long len)
{
	might_fault();
	return clear_user_asm(mem, len);
}
static inline unsigned long __must_check clear_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __clear_user(mem, len);
	return len;
}

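/*
 * Usage sketch (illustrative, not part of the original header): copying
 * a pathname-style string from user space into a hypothetical fixed-size
 * kernel buffer, treating truncation as an error. Per the semantics
 * above, a return equal to @count means the string did not fit.
 */
static inline long example_get_name(char *buf, long buflen,
				    const char __user *uname)
{
	long len = strncpy_from_user(buf, uname, buflen);

	if (len < 0)
		return len;		/* -EFAULT from the copy */
	if (len == buflen)
		return -ENAMETOOLONG;	/* no room for the trailing NUL */
	return len;
}
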
/**
 * flush_user: - Flush a block of memory in user space from cache.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to flush.
 *
 * Returns number of bytes that could not be flushed.
 * On success, this will be zero.
 */
extern unsigned long flush_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __flush_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = flush_user_asm(mem, len);
	mb_incoherent();
	return retval;
}

static inline unsigned long __must_check flush_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __flush_user(mem, len);
	return len;
}

/**
 * inv_user: - Invalidate a block of memory in user space from cache.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be invalidated.
 * On success, this will be zero.
 *
 * Note that on Tile64, the "inv" operation is in fact a
 * "flush and invalidate", so cache write-backs will occur prior
 * to the cache being marked invalid.
 */
extern unsigned long inv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __inv_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = inv_user_asm(mem, len);
	mb_incoherent();
	return retval;
}
static inline unsigned long __must_check inv_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __inv_user(mem, len);
	return len;
}

/**
 * finv_user: - Flush-inval a block of memory in user space from cache.
 * @mem: Destination address, in user space.
 * @len: Number of bytes to invalidate.
 *
 * Returns number of bytes that could not be flush-invalidated.
 * On success, this will be zero.
 */
extern unsigned long finv_user_asm(void __user *mem, unsigned long len);
static inline unsigned long __must_check __finv_user(
	void __user *mem, unsigned long len)
{
	int retval;

	might_fault();
	retval = finv_user_asm(mem, len);
	mb_incoherent();
	return retval;
}
static inline unsigned long __must_check finv_user(
	void __user *mem, unsigned long len)
{
	if (access_ok(VERIFY_WRITE, mem, len))
		return __finv_user(mem, len);
	return len;
}

#endif /* _ASM_TILE_UACCESS_H */
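/*
 * Usage sketch (illustrative, shown after the header for clarity; it is
 * not part of the original file): writing back a user buffer before a
 * hypothetical non-coherent device reads it. flush_user() returns the
 * number of bytes it could not flush, so any nonzero result is an error.
 */
static inline long example_flush_for_device(void __user *buf,
					    unsigned long len)
{
	if (flush_user(buf, len))
		return -EFAULT;		/* some bytes were not flushed */
	return 0;			/* buffer is now visible to the device */
}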