Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
include/asm-sh64/uaccess.h at 27d30b0f4e0c8e63f48ef400a64fc073b71bfe59 (316 lines, 9.3 kB)

#ifndef __ASM_SH64_UACCESS_H
#define __ASM_SH64_UACCESS_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/uaccess.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 * User space memory access functions
 *
 * Copyright (C) 1999  Niibe Yutaka
 *
 * Based on:
 *     MIPS implementation version 1.15 by
 *         Copyright (C) 1996, 1997, 1998 by Ralf Baechle
 *     and i386 version.
 */

#include <linux/errno.h>
#include <linux/sched.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/*
 * The fs value determines whether argument validity checking should be
 * performed or not.  If get_fs() == USER_DS, checking is performed; if
 * get_fs() == KERNEL_DS, checking is bypassed.
 *
 * For historical reasons (Data Segment Register?), these macros are misnamed.
 */

#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })

#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFF)
#define USER_DS		MAKE_MM_SEG(0x80000000)

#define get_ds()	(KERNEL_DS)
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))

#define segment_eq(a,b)	((a).seg == (b).seg)

#define __addr_ok(addr) ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg))

/*
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 *
 * sum := addr + size;  carry? --> flag = true;
 * if (sum >= addr_limit) flag = true;
 */
#define __range_ok(addr,size) (((unsigned long) (addr) + (size) < (current_thread_info()->addr_limit.seg)) ? 0 : 1)

#define access_ok(type,addr,size)	(__range_ok(addr,size) == 0)
#define __access_ok(addr,size)		(__range_ok(addr,size) == 0)

/*
 * Uh, these should become the main single-value transfer routines ...
 * They automatically use the right size if we just have the right
 * pointer type ...
 *
 * As MIPS uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x,ptr)	__put_user_check((x),(ptr),sizeof(*(ptr)))
#define get_user(x,ptr)	__get_user_check((x),(ptr),sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr)))
#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)))
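
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): a single access_ok() can validate a whole user range up
 * front, after which the cheaper non-checking __get_user() variant is
 * safe for each element.  The function and variable names below are
 * hypothetical.
 */
#if 0
static int example_get_pair(int *uptr, int *a, int *b)
{
	/* validate the whole two-int range once */
	if (!access_ok(VERIFY_READ, uptr, 2 * sizeof(int)))
		return -EFAULT;

	/* the range is already checked, so skip per-access checks */
	if (__get_user(*a, uptr) || __get_user(*b, uptr + 1))
		return -EFAULT;

	return 0;
}
#endif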

/*
 * The "xxx_ret" versions return the constant specified in the third
 * argument if something bad happens.  These macros can be optimized for
 * the common case of just returning from the function in which xxx_ret
 * is used.
 */

#define put_user_ret(x,ptr,ret) ({ \
	if (put_user(x,ptr)) return ret; })

#define get_user_ret(x,ptr,ret) ({ \
	if (get_user(x,ptr)) return ret; })

#define __put_user_ret(x,ptr,ret) ({ \
	if (__put_user(x,ptr)) return ret; })

#define __get_user_ret(x,ptr,ret) ({ \
	if (__get_user(x,ptr)) return ret; })

struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#define __get_user_size(x,ptr,size,retval)		\
do {							\
	retval = 0;					\
	switch (size) {					\
	case 1:						\
		retval = __get_user_asm_b(x, ptr);	\
		break;					\
	case 2:						\
		retval = __get_user_asm_w(x, ptr);	\
		break;					\
	case 4:						\
		retval = __get_user_asm_l(x, ptr);	\
		break;					\
	case 8:						\
		retval = __get_user_asm_q(x, ptr);	\
		break;					\
	default:					\
		__get_user_unknown();			\
		break;					\
	}						\
} while (0)

#define __get_user_nocheck(x,ptr,size)			\
({							\
	long __gu_err, __gu_val;			\
	__get_user_size((void *)&__gu_val, (long)(ptr),	\
			(size), __gu_err);		\
	(x) = (__typeof__(*(ptr)))__gu_val;		\
	__gu_err;					\
})

#define __get_user_check(x,ptr,size)			\
({							\
	long __gu_addr = (long)(ptr);			\
	long __gu_err = -EFAULT, __gu_val;		\
	if (__access_ok(__gu_addr, (size)))		\
		__get_user_size((void *)&__gu_val, __gu_addr, \
				(size), __gu_err);	\
	(x) = (__typeof__(*(ptr))) __gu_val;		\
	__gu_err;					\
})

extern long __get_user_asm_b(void *, long);
extern long __get_user_asm_w(void *, long);
extern long __get_user_asm_l(void *, long);
extern long __get_user_asm_q(void *, long);
extern void __get_user_unknown(void);

#define __put_user_size(x,ptr,size,retval)		\
do {							\
	retval = 0;					\
	switch (size) {					\
	case 1:						\
		retval = __put_user_asm_b(x, ptr);	\
		break;					\
	case 2:						\
		retval = __put_user_asm_w(x, ptr);	\
		break;					\
	case 4:						\
		retval = __put_user_asm_l(x, ptr);	\
		break;					\
	case 8:						\
		retval = __put_user_asm_q(x, ptr);	\
		break;					\
	default:					\
		__put_user_unknown();			\
	}						\
} while (0)

#define __put_user_nocheck(x,ptr,size)			\
({							\
	long __pu_err;					\
	__typeof__(*(ptr)) __pu_val = (x);		\
	__put_user_size((void *)&__pu_val, (long)(ptr), (size), __pu_err); \
	__pu_err;					\
})

#define __put_user_check(x,ptr,size)			\
({							\
	long __pu_err = -EFAULT;			\
	long __pu_addr = (long)(ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);		\
							\
	if (__access_ok(__pu_addr, (size)))		\
		__put_user_size((void *)&__pu_val, __pu_addr, (size), __pu_err); \
	__pu_err;					\
})

extern long __put_user_asm_b(void *, long);
extern long __put_user_asm_w(void *, long);
extern long __put_user_asm_l(void *, long);
extern long __put_user_asm_q(void *, long);
extern void __put_user_unknown(void);
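
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): the *_ret helpers above return from the *enclosing*
 * function on a fault, so a caller can skip explicit error plumbing.
 * For an int, put_user() dispatches through __put_user_size() to
 * __put_user_asm_l() (the 4-byte case).  The names below are
 * hypothetical.
 */
#if 0
static long example_sys_getval(int *uresult)
{
	int val = 42;	/* value handed back to user space */

	/* expands to: if (put_user(val, uresult)) return -EFAULT; */
	put_user_ret(val, uresult, -EFAULT);
	return 0;
}
#endif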

/* Generic arbitrary sized copy. */
/* Returns the number of bytes NOT copied. */
/* XXX: should be optimized to copy in 4-byte chunks plus the remainder. */
extern __kernel_size_t __copy_user(void *__to, const void *__from, __kernel_size_t __n);

#define copy_to_user(to,from,n) ({				\
	void *__copy_to = (void *) (to);			\
	__kernel_size_t __copy_size = (__kernel_size_t) (n);	\
	__kernel_size_t __copy_res;				\
	if (__copy_size && __access_ok((unsigned long)__copy_to, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, (void *) (from), __copy_size); \
	} else __copy_res = __copy_size;			\
	__copy_res; })

#define copy_to_user_ret(to,from,n,retval) ({	\
	if (copy_to_user(to,from,n))		\
		return retval;			\
})

#define __copy_to_user(to,from,n)	\
	__copy_user((void *)(to),	\
		    (void *)(from), n)

#define __copy_to_user_ret(to,from,n,retval) ({	\
	if (__copy_to_user(to,from,n))		\
		return retval;			\
})

#define copy_from_user(to,from,n) ({				\
	void *__copy_to = (void *) (to);			\
	void *__copy_from = (void *) (from);			\
	__kernel_size_t __copy_size = (__kernel_size_t) (n);	\
	__kernel_size_t __copy_res;				\
	if (__copy_size && __access_ok((unsigned long)__copy_from, __copy_size)) { \
		__copy_res = __copy_user(__copy_to, __copy_from, __copy_size); \
	} else __copy_res = __copy_size;			\
	__copy_res; })

#define copy_from_user_ret(to,from,n,retval) ({	\
	if (copy_from_user(to,from,n))		\
		return retval;			\
})

#define __copy_from_user(to,from,n)	\
	__copy_user((void *)(to),	\
		    (void *)(from), n)

#define __copy_from_user_ret(to,from,n,retval) ({	\
	if (__copy_from_user(to,from,n))	\
		return retval;			\
})

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

/* XXX: not sure this works well; should be optimized to clear in
   4-byte chunks plus the remainder. */
extern __kernel_size_t __clear_user(void *addr, __kernel_size_t size);

#define clear_user(addr,n) ({					\
	void *__cl_addr = (addr);				\
	unsigned long __cl_size = (n);				\
	if (__cl_size && __access_ok(((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size);	\
	__cl_size; })

extern int __strncpy_from_user(unsigned long __dest, unsigned long __src, int __count);

#define strncpy_from_user(dest,src,count) ({		\
	unsigned long __sfu_src = (unsigned long) (src); \
	int __sfu_count = (int) (count);		\
	long __sfu_res = -EFAULT;			\
	if (__access_ok(__sfu_src, __sfu_count)) {	\
		__sfu_res = __strncpy_from_user((unsigned long) (dest), __sfu_src, __sfu_count); \
	}						\
	__sfu_res; })

#define strlen_user(str) strnlen_user(str, ~0UL >> 1)

/*
 * Returns the size of a string (including the terminating NUL!)
 */
extern long __strnlen_user(const char *__s, long __n);

static inline long strnlen_user(const char *s, long n)
{
	if (!__addr_ok(s))
		return 0;
	else
		return __strnlen_user(s, n);
}
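
/*
 * Illustrative sketch (editor's addition, not part of the original
 * header): the usual bounce-buffer pattern with the copy routines
 * above.  Both copy_from_user() and copy_to_user() return the number
 * of bytes NOT copied, so any nonzero result means a fault.  All names
 * below are hypothetical.
 */
#if 0
static int example_copy_roundtrip(char *ubuf, size_t len)
{
	char kbuf[64];

	if (len > sizeof(kbuf))
		return -EINVAL;
	if (copy_from_user(kbuf, ubuf, len))
		return -EFAULT;		/* some bytes were left uncopied */

	/* ... operate on kbuf ... */

	if (copy_to_user(ubuf, kbuf, len))
		return -EFAULT;
	return 0;
}
#endif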

struct exception_table_entry
{
	unsigned long insn, fixup;
};

#define ARCH_HAS_SEARCH_EXTABLE

/* If gcc inlines memset, it will use st.q instructions.  Therefore, we need
   kmalloc allocations to be 8-byte aligned.  Without this, the alignment
   becomes BYTES_PER_WORD, i.e. only 4 (since sizeof(long)==sizeof(void*)==4
   on sh64 at the moment). */
#define ARCH_KMALLOC_MINALIGN 8

/*
 * We want 8-byte alignment for the slab caches as well, otherwise we have
 * the same BYTES_PER_WORD (sizeof(void *)) min align in kmem_cache_create().
 */
#define ARCH_SLAB_MINALIGN 8

/* Returns 0 if no exception is found, and the fixup address otherwise. */
extern unsigned long search_exception_table(unsigned long addr);
extern const struct exception_table_entry *search_exception_tables(unsigned long addr);

#endif /* __ASM_SH64_UACCESS_H */
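
/*
 * Editor's note (appended; not part of the original header): each
 * exception_table_entry pairs the address of an instruction that may
 * fault on a bad user pointer (insn) with the address of its recovery
 * code (fixup).  A page fault handler can resolve such faults roughly
 * as sketched below; struct pt_regs and its pc field are assumptions
 * about the surrounding arch code, and the function name is
 * hypothetical.
 */
#if 0
static int example_fixup_exception(struct pt_regs *regs)
{
	unsigned long fixup = search_exception_table(regs->pc);

	if (!fixup)
		return 0;	/* no fixup: a genuine bad access */
	regs->pc = fixup;	/* resume at the recovery code instead */
	return 1;
}
#endif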