at v2.6.13-rc7 400 lines 12 kB view raw
1#ifdef __KERNEL__ 2#ifndef _PPC_UACCESS_H 3#define _PPC_UACCESS_H 4 5#ifndef __ASSEMBLY__ 6#include <linux/sched.h> 7#include <linux/errno.h> 8#include <asm/processor.h> 9 10#define VERIFY_READ 0 11#define VERIFY_WRITE 1 12 13/* 14 * The fs value determines whether argument validity checking should be 15 * performed or not. If get_fs() == USER_DS, checking is performed, with 16 * get_fs() == KERNEL_DS, checking is bypassed. 17 * 18 * For historical reasons, these macros are grossly misnamed. 19 * 20 * The fs/ds values are now the highest legal address in the "segment". 21 * This simplifies the checking in the routines below. 22 */ 23 24#define KERNEL_DS ((mm_segment_t) { ~0UL }) 25#define USER_DS ((mm_segment_t) { TASK_SIZE - 1 }) 26 27#define get_ds() (KERNEL_DS) 28#define get_fs() (current->thread.fs) 29#define set_fs(val) (current->thread.fs = (val)) 30 31#define segment_eq(a,b) ((a).seg == (b).seg) 32 33#define __access_ok(addr,size) \ 34 ((addr) <= current->thread.fs.seg \ 35 && ((size) == 0 || (size) - 1 <= current->thread.fs.seg - (addr))) 36 37#define access_ok(type, addr, size) \ 38 (__chk_user_ptr(addr),__access_ok((unsigned long)(addr),(size))) 39 40/* this function will go away soon - use access_ok() instead */ 41extern inline int __deprecated verify_area(int type, const void __user * addr, unsigned long size) 42{ 43 return access_ok(type, addr, size) ? 0 : -EFAULT; 44} 45 46 47/* 48 * The exception table consists of pairs of addresses: the first is the 49 * address of an instruction that is allowed to fault, and the second is 50 * the address at which the program should continue. No registers are 51 * modified, so it is entirely up to the continuation code to figure out 52 * what to do. 53 * 54 * All the routines below use bits of fixup code that are out of line 55 * with the main instruction path. This means when everything is well, 56 * we don't even have to jump over them. Further, they do not intrude 57 * on our cache or tlb entries. 58 */ 59 60struct exception_table_entry 61{ 62 unsigned long insn, fixup; 63}; 64 65/* 66 * These are the main single-value transfer routines. They automatically 67 * use the right size if we just have the right pointer type. 68 * 69 * This gets kind of ugly. We want to return _two_ values in "get_user()" 70 * and yet we don't want to do any pointers, because that is too much 71 * of a performance impact. Thus we have a few rather ugly macros here, 72 * and hide all the ugliness from the user. 73 * 74 * The "__xxx" versions of the user access functions are versions that 75 * do not verify the address space, that must have been done previously 76 * with a separate "access_ok()" call (this is used when we do multiple 77 * accesses to the same area of user memory). 78 * 79 * As we use the same address space for kernel and user data on the 80 * PowerPC, we can just do these as direct assignments. (Of course, the 81 * exception handling means that it's no longer "just"...) 82 * 83 * The "user64" versions of the user access functions are versions that 84 * allow access of 64-bit data. The "get_user" functions do not 85 * properly handle 64-bit data because the value gets down cast to a long. 86 * The "put_user" functions already handle 64-bit data properly but we add 87 * "user64" versions for completeness 88 */ 89#define get_user(x,ptr) \ 90 __get_user_check((x),(ptr),sizeof(*(ptr))) 91#define get_user64(x,ptr) \ 92 __get_user64_check((x),(ptr),sizeof(*(ptr))) 93#define put_user(x,ptr) \ 94 __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) 95#define put_user64(x,ptr) put_user(x,ptr) 96 97#define __get_user(x,ptr) \ 98 __get_user_nocheck((x),(ptr),sizeof(*(ptr))) 99#define __get_user64(x,ptr) \ 100 __get_user64_nocheck((x),(ptr),sizeof(*(ptr))) 101#define __put_user(x,ptr) \ 102 __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) 103#define __put_user64(x,ptr) __put_user(x,ptr) 104 105extern long __put_user_bad(void); 106 107#define __put_user_nocheck(x,ptr,size) \ 108({ \ 109 long __pu_err; \ 110 __chk_user_ptr(ptr); \ 111 __put_user_size((x),(ptr),(size),__pu_err); \ 112 __pu_err; \ 113}) 114 115#define __put_user_check(x,ptr,size) \ 116({ \ 117 long __pu_err = -EFAULT; \ 118 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 119 if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ 120 __put_user_size((x),__pu_addr,(size),__pu_err); \ 121 __pu_err; \ 122}) 123 124#define __put_user_size(x,ptr,size,retval) \ 125do { \ 126 retval = 0; \ 127 switch (size) { \ 128 case 1: \ 129 __put_user_asm(x, ptr, retval, "stb"); \ 130 break; \ 131 case 2: \ 132 __put_user_asm(x, ptr, retval, "sth"); \ 133 break; \ 134 case 4: \ 135 __put_user_asm(x, ptr, retval, "stw"); \ 136 break; \ 137 case 8: \ 138 __put_user_asm2(x, ptr, retval); \ 139 break; \ 140 default: \ 141 __put_user_bad(); \ 142 } \ 143} while (0) 144 145/* 146 * We don't tell gcc that we are accessing memory, but this is OK 147 * because we do not write to any memory gcc knows about, so there 148 * are no aliasing issues. 149 */ 150#define __put_user_asm(x, addr, err, op) \ 151 __asm__ __volatile__( \ 152 "1: "op" %1,0(%2)\n" \ 153 "2:\n" \ 154 ".section .fixup,\"ax\"\n" \ 155 "3: li %0,%3\n" \ 156 " b 2b\n" \ 157 ".previous\n" \ 158 ".section __ex_table,\"a\"\n" \ 159 " .align 2\n" \ 160 " .long 1b,3b\n" \ 161 ".previous" \ 162 : "=r" (err) \ 163 : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) 164 165#define __put_user_asm2(x, addr, err) \ 166 __asm__ __volatile__( \ 167 "1: stw %1,0(%2)\n" \ 168 "2: stw %1+1,4(%2)\n" \ 169 "3:\n" \ 170 ".section .fixup,\"ax\"\n" \ 171 "4: li %0,%3\n" \ 172 " b 3b\n" \ 173 ".previous\n" \ 174 ".section __ex_table,\"a\"\n" \ 175 " .align 2\n" \ 176 " .long 1b,4b\n" \ 177 " .long 2b,4b\n" \ 178 ".previous" \ 179 : "=r" (err) \ 180 : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err)) 181 182#define __get_user_nocheck(x, ptr, size) \ 183({ \ 184 long __gu_err; \ 185 unsigned long __gu_val; \ 186 __chk_user_ptr(ptr); \ 187 __get_user_size(__gu_val, (ptr), (size), __gu_err); \ 188 (x) = (__typeof__(*(ptr)))__gu_val; \ 189 __gu_err; \ 190}) 191 192#define __get_user64_nocheck(x, ptr, size) \ 193({ \ 194 long __gu_err; \ 195 long long __gu_val; \ 196 __chk_user_ptr(ptr); \ 197 __get_user_size64(__gu_val, (ptr), (size), __gu_err); \ 198 (x) = (__typeof__(*(ptr)))__gu_val; \ 199 __gu_err; \ 200}) 201 202#define __get_user_check(x, ptr, size) \ 203({ \ 204 long __gu_err = -EFAULT; \ 205 unsigned long __gu_val = 0; \ 206 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 207 if (access_ok(VERIFY_READ, __gu_addr, (size))) \ 208 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ 209 (x) = (__typeof__(*(ptr)))__gu_val; \ 210 __gu_err; \ 211}) 212 213#define __get_user64_check(x, ptr, size) \ 214({ \ 215 long __gu_err = -EFAULT; \ 216 long long __gu_val = 0; \ 217 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 218 if (access_ok(VERIFY_READ, __gu_addr, (size))) \ 219 __get_user_size64(__gu_val, __gu_addr, (size), __gu_err); \ 220 (x) = (__typeof__(*(ptr)))__gu_val; \ 221 __gu_err; \ 222}) 223 224extern long __get_user_bad(void); 225 226#define __get_user_size(x, ptr, size, retval) \ 227do { \ 228 retval = 0; \ 229 switch (size) { \ 230 case 1: \ 231 __get_user_asm(x, ptr, retval, "lbz"); \ 232 break; \ 233 case 2: \ 234 __get_user_asm(x, ptr, retval, "lhz"); \ 235 break; \ 236 case 4: \ 237 __get_user_asm(x, ptr, retval, "lwz"); \ 238 break; \ 239 default: \ 240 x = __get_user_bad(); \ 241 } \ 242} while (0) 243 244#define __get_user_size64(x, ptr, size, retval) \ 245do { \ 246 retval = 0; \ 247 switch (size) { \ 248 case 1: \ 249 __get_user_asm(x, ptr, retval, "lbz"); \ 250 break; \ 251 case 2: \ 252 __get_user_asm(x, ptr, retval, "lhz"); \ 253 break; \ 254 case 4: \ 255 __get_user_asm(x, ptr, retval, "lwz"); \ 256 break; \ 257 case 8: \ 258 __get_user_asm2(x, ptr, retval); \ 259 break; \ 260 default: \ 261 x = __get_user_bad(); \ 262 } \ 263} while (0) 264 265#define __get_user_asm(x, addr, err, op) \ 266 __asm__ __volatile__( \ 267 "1: "op" %1,0(%2)\n" \ 268 "2:\n" \ 269 ".section .fixup,\"ax\"\n" \ 270 "3: li %0,%3\n" \ 271 " li %1,0\n" \ 272 " b 2b\n" \ 273 ".previous\n" \ 274 ".section __ex_table,\"a\"\n" \ 275 " .align 2\n" \ 276 " .long 1b,3b\n" \ 277 ".previous" \ 278 : "=r"(err), "=r"(x) \ 279 : "b"(addr), "i"(-EFAULT), "0"(err)) 280 281#define __get_user_asm2(x, addr, err) \ 282 __asm__ __volatile__( \ 283 "1: lwz %1,0(%2)\n" \ 284 "2: lwz %1+1,4(%2)\n" \ 285 "3:\n" \ 286 ".section .fixup,\"ax\"\n" \ 287 "4: li %0,%3\n" \ 288 " li %1,0\n" \ 289 " li %1+1,0\n" \ 290 " b 3b\n" \ 291 ".previous\n" \ 292 ".section __ex_table,\"a\"\n" \ 293 " .align 2\n" \ 294 " .long 1b,4b\n" \ 295 " .long 2b,4b\n" \ 296 ".previous" \ 297 : "=r"(err), "=&r"(x) \ 298 : "b"(addr), "i"(-EFAULT), "0"(err)) 299 300/* more complex routines */ 301 302extern int __copy_tofrom_user(void __user *to, const void __user *from, 303 unsigned long size); 304 305extern inline unsigned long 306copy_from_user(void *to, const void __user *from, unsigned long n) 307{ 308 unsigned long over; 309 310 if (access_ok(VERIFY_READ, from, n)) 311 return __copy_tofrom_user((__force void __user *)to, from, n); 312 if ((unsigned long)from < TASK_SIZE) { 313 over = (unsigned long)from + n - TASK_SIZE; 314 return __copy_tofrom_user((__force void __user *)to, from, n - over) + over; 315 } 316 return n; 317} 318 319extern inline unsigned long 320copy_to_user(void __user *to, const void *from, unsigned long n) 321{ 322 unsigned long over; 323 324 if (access_ok(VERIFY_WRITE, to, n)) 325 return __copy_tofrom_user(to, (__force void __user *) from, n); 326 if ((unsigned long)to < TASK_SIZE) { 327 over = (unsigned long)to + n - TASK_SIZE; 328 return __copy_tofrom_user(to, (__force void __user *) from, n - over) + over; 329 } 330 return n; 331} 332 333static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long size) 334{ 335 return __copy_tofrom_user((__force void __user *)to, from, size); 336} 337 338static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long size) 339{ 340 return __copy_tofrom_user(to, (__force void __user *)from, size); 341} 342 343#define __copy_to_user_inatomic __copy_to_user 344#define __copy_from_user_inatomic __copy_from_user 345 346extern unsigned long __clear_user(void __user *addr, unsigned long size); 347 348extern inline unsigned long 349clear_user(void __user *addr, unsigned long size) 350{ 351 if (access_ok(VERIFY_WRITE, addr, size)) 352 return __clear_user(addr, size); 353 if ((unsigned long)addr < TASK_SIZE) { 354 unsigned long over = (unsigned long)addr + size - TASK_SIZE; 355 return __clear_user(addr, size - over) + over; 356 } 357 return size; 358} 359 360extern int __strncpy_from_user(char *dst, const char __user *src, long count); 361 362extern inline long 363strncpy_from_user(char *dst, const char __user *src, long count) 364{ 365 if (access_ok(VERIFY_READ, src, 1)) 366 return __strncpy_from_user(dst, src, count); 367 return -EFAULT; 368} 369 370/* 371 * Return the size of a string (including the ending 0) 372 * 373 * Return 0 for error 374 */ 375 376extern int __strnlen_user(const char __user *str, long len, unsigned long top); 377 378/* 379 * Returns the length of the string at str (including the null byte), 380 * or 0 if we hit a page we can't access, 381 * or something > len if we didn't find a null byte. 382 * 383 * The `top' parameter to __strnlen_user is to make sure that 384 * we can never overflow from the user area into kernel space. 385 */ 386extern __inline__ int strnlen_user(const char __user *str, long len) 387{ 388 unsigned long top = current->thread.fs.seg; 389 390 if ((unsigned long)str > top) 391 return 0; 392 return __strnlen_user(str, len, top); 393} 394 395#define strlen_user(str) strnlen_user((str), 0x7ffffffe) 396 397#endif /* __ASSEMBLY__ */ 398 399#endif /* _PPC_UACCESS_H */ 400#endif /* __KERNEL__ */