Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
asm-m68k uaccess.h at v2.6.20-rc4 (367 lines, 9.9 kB)

#ifndef __M68K_UACCESS_H
#define __M68K_UACCESS_H

/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <asm/segment.h>

#define VERIFY_READ	0
#define VERIFY_WRITE	1

/* We let the MMU do all checking */
#define access_ok(type,addr,size)	1

/*
 * The exception table consists of pairs of addresses: the first is the
 * address of an instruction that is allowed to fault, and the second is
 * the address at which the program should continue. No registers are
 * modified, so it is entirely up to the continuation code to figure out
 * what to do.
 *
 * All the routines below use bits of fixup code that are out of line
 * with the main instruction path. This means when everything is well,
 * we don't even have to jump over them. Further, they do not intrude
 * on our cache or tlb entries.
 */

struct exception_table_entry
{
	unsigned long insn, fixup;
};

extern int __put_user_bad(void);
extern int __get_user_bad(void);

#define __put_user_asm(res, x, ptr, bwl, reg, err)	\
asm volatile ("\n"					\
	"1:	moves."#bwl"	%2,%1\n"		\
	"2:\n"						\
	"	.section .fixup,\"ax\"\n"		\
	"	.even\n"				\
	"10:	moveq.l	%3,%0\n"			\
	"	jra 2b\n"				\
	"	.previous\n"				\
	"\n"						\
	"	.section __ex_table,\"a\"\n"		\
	"	.align	4\n"				\
	"	.long	1b,10b\n"			\
	"	.long	2b,10b\n"			\
	"	.previous"				\
	: "+d" (res), "=m" (*(ptr))			\
	: #reg (x), "i" (err))

/*
 * These are the main single-value transfer routines. They automatically
 * use the right size if we just have the right pointer type.
 */

#define __put_user(x, ptr)						\
({									\
	typeof(*(ptr)) __pu_val = (x);					\
	int __pu_err = 0;						\
	__chk_user_ptr(ptr);						\
	switch (sizeof (*(ptr))) {					\
	case 1:								\
		__put_user_asm(__pu_err, __pu_val, ptr, b, d, -EFAULT);	\
		break;							\
	case 2:								\
		__put_user_asm(__pu_err, __pu_val, ptr, w, d, -EFAULT);	\
		break;							\
	case 4:								\
		__put_user_asm(__pu_err, __pu_val, ptr, l, r, -EFAULT);	\
		break;							\
	case 8:								\
	    {								\
		const void __user *__pu_ptr = (ptr);			\
		asm volatile ("\n"					\
			"1:	moves.l	%2,(%1)+\n"			\
			"2:	moves.l	%R2,(%1)\n"			\
			"3:\n"						\
			"	.section .fixup,\"ax\"\n"		\
			"	.even\n"				\
			"10:	movel	%3,%0\n"			\
			"	jra	3b\n"				\
			"	.previous\n"				\
			"\n"						\
			"	.section __ex_table,\"a\"\n"		\
			"	.align	4\n"				\
			"	.long	1b,10b\n"			\
			"	.long	2b,10b\n"			\
			"	.long	3b,10b\n"			\
			"	.previous"				\
			: "+d" (__pu_err), "+a" (__pu_ptr)		\
			: "r" (__pu_val), "i" (-EFAULT)			\
			: "memory");					\
		break;							\
	    }								\
	default:							\
		__pu_err = __put_user_bad();				\
		break;							\
	}								\
	__pu_err;							\
})
#define put_user(x, ptr)	__put_user(x, ptr)
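
/*
 * Illustrative sketch, not part of the original header: a typical put_user()
 * call site, such as an ioctl handler returning a value to user space. The
 * function and variable names are hypothetical; only put_user() itself comes
 * from this file. sizeof(*argp) selects the 4-byte variant above, and a
 * faulting store is redirected to the .fixup section, which makes the macro
 * evaluate to -EFAULT.
 */
#if 0
static int example_read_counter(u32 __user *argp)
{
	u32 counter = 42;	/* kernel-side value handed back to user space */

	/* evaluates to 0 on success, -EFAULT if the user pointer faults */
	return put_user(counter, argp);
}
#endif
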
#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({	\
	type __gu_val;						\
	asm volatile ("\n"					\
		"1:	moves."#bwl"	%2,%1\n"		\
		"2:\n"						\
		"	.section .fixup,\"ax\"\n"		\
		"	.even\n"				\
		"10:	move.l	%3,%0\n"			\
		"	sub."#bwl"	%1,%1\n"		\
		"	jra	2b\n"				\
		"	.previous\n"				\
		"\n"						\
		"	.section __ex_table,\"a\"\n"		\
		"	.align	4\n"				\
		"	.long	1b,10b\n"			\
		"	.previous"				\
		: "+d" (res), "=&" #reg (__gu_val)		\
		: "m" (*(ptr)), "i" (err));			\
	(x) = (typeof(*(ptr)))(unsigned long)__gu_val;		\
})

#define __get_user(x, ptr)					\
({								\
	int __gu_err = 0;					\
	__chk_user_ptr(ptr);					\
	switch (sizeof(*(ptr))) {				\
	case 1:							\
		__get_user_asm(__gu_err, x, ptr, u8, b, d, -EFAULT);	\
		break;						\
	case 2:							\
		__get_user_asm(__gu_err, x, ptr, u16, w, d, -EFAULT);	\
		break;						\
	case 4:							\
		__get_user_asm(__gu_err, x, ptr, u32, l, r, -EFAULT);	\
		break;						\
/*	case 8:	disabled because gcc-4.1 has a broken typeof	\
	    {							\
		const void *__gu_ptr = (ptr);			\
		u64 __gu_val;					\
		asm volatile ("\n"				\
			"1:	moves.l	(%2)+,%1\n"		\
			"2:	moves.l	(%2),%R1\n"		\
			"3:\n"					\
			"	.section .fixup,\"ax\"\n"	\
			"	.even\n"			\
			"10:	move.l	%3,%0\n"		\
			"	sub.l	%1,%1\n"		\
			"	sub.l	%R1,%R1\n"		\
			"	jra	3b\n"			\
			"	.previous\n"			\
			"\n"					\
			"	.section __ex_table,\"a\"\n"	\
			"	.align	4\n"			\
			"	.long	1b,10b\n"		\
			"	.long	2b,10b\n"		\
			"	.previous"			\
			: "+d" (__gu_err), "=&r" (__gu_val),	\
			  "+a" (__gu_ptr)			\
			: "i" (-EFAULT)				\
			: "memory");				\
		(x) = (typeof(*(ptr)))__gu_val;			\
		break;						\
	    }	*/						\
	default:						\
		__gu_err = __get_user_bad();			\
		break;						\
	}							\
	__gu_err;						\
})
#define get_user(x, ptr) __get_user(x, ptr)
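
/*
 * Illustrative sketch, not part of the original header: a typical get_user()
 * call site. Names are hypothetical. Note that on a faulting load the .fixup
 * code above first zeroes the destination register (the sub instruction), so
 * the caller never sees stale data, and the macro evaluates to -EFAULT.
 */
#if 0
static int example_set_threshold(const u32 __user *argp, u32 *threshold)
{
	u32 val;

	if (get_user(val, argp))	/* nonzero means -EFAULT */
		return -EFAULT;
	*threshold = val;
	return 0;
}
#endif
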
unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n);
unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n);

#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\
	asm volatile ("\n"						\
		"1:	moves."#s1"	(%2)+,%3\n"			\
		"	move."#s1"	%3,(%1)+\n"			\
		"2:	moves."#s2"	(%2)+,%3\n"			\
		"	move."#s2"	%3,(%1)+\n"			\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"3:	moves."#s3"	(%2)+,%3\n"			\
		"	move."#s3"	%3,(%1)+\n"			\
		"	.endif\n"					\
		"4:\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	1b,10f\n"				\
		"	.long	2b,20f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	3b,30f\n"				\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"10:	clr."#s1"	(%1)+\n"			\
		"20:	clr."#s2"	(%1)+\n"			\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"30:	clr."#s3"	(%1)+\n"			\
		"	.endif\n"					\
		"	moveq.l	#"#n",%0\n"				\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")

static __always_inline unsigned long
__constant_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1);
		break;
	case 2:
		__get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, d, 2);
		break;
	case 3:
		__constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4);
		break;
	case 5:
		__constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* we limit the inlined version to 3 moves */
		return __generic_copy_from_user(to, from, n);
	}

	return res;
}

#define __constant_copy_to_user_asm(res, to, from, tmp, n, s1, s2, s3)	\
	asm volatile ("\n"						\
		"	move."#s1"	(%2)+,%3\n"			\
		"11:	moves."#s1"	%3,(%1)+\n"			\
		"12:	move."#s2"	(%2)+,%3\n"			\
		"21:	moves."#s2"	%3,(%1)+\n"			\
		"22:\n"							\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	move."#s3"	(%2)+,%3\n"			\
		"31:	moves."#s3"	%3,(%1)+\n"			\
		"32:\n"							\
		"	.endif\n"					\
		"4:\n"							\
		"\n"							\
		"	.section __ex_table,\"a\"\n"			\
		"	.align	4\n"					\
		"	.long	11b,5f\n"				\
		"	.long	12b,5f\n"				\
		"	.long	21b,5f\n"				\
		"	.long	22b,5f\n"				\
		"	.ifnc	\""#s3"\",\"\"\n"			\
		"	.long	31b,5f\n"				\
		"	.long	32b,5f\n"				\
		"	.endif\n"					\
		"	.previous\n"					\
		"\n"							\
		"	.section .fixup,\"ax\"\n"			\
		"	.even\n"					\
		"5:	moveq.l	#"#n",%0\n"				\
		"	jra	4b\n"					\
		"	.previous\n"					\
		: "+d" (res), "+a" (to), "+a" (from), "=&d" (tmp)	\
		: : "memory")

static __always_inline unsigned long
__constant_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long res = 0, tmp;

	switch (n) {
	case 1:
		__put_user_asm(res, *(u8 *)from, (u8 __user *)to, b, d, 1);
		break;
	case 2:
		__put_user_asm(res, *(u16 *)from, (u16 __user *)to, w, d, 2);
		break;
	case 3:
		__constant_copy_to_user_asm(res, to, from, tmp, 3, w, b,);
		break;
	case 4:
		__put_user_asm(res, *(u32 *)from, (u32 __user *)to, l, r, 4);
		break;
	case 5:
		__constant_copy_to_user_asm(res, to, from, tmp, 5, l, b,);
		break;
	case 6:
		__constant_copy_to_user_asm(res, to, from, tmp, 6, l, w,);
		break;
	case 7:
		__constant_copy_to_user_asm(res, to, from, tmp, 7, l, w, b);
		break;
	case 8:
		__constant_copy_to_user_asm(res, to, from, tmp, 8, l, l,);
		break;
	case 9:
		__constant_copy_to_user_asm(res, to, from, tmp, 9, l, l, b);
		break;
	case 10:
		__constant_copy_to_user_asm(res, to, from, tmp, 10, l, l, w);
		break;
	case 12:
		__constant_copy_to_user_asm(res, to, from, tmp, 12, l, l, l);
		break;
	default:
		/* limit the inlined version to 3 moves */
		return __generic_copy_to_user(to, from, n);
	}

	return res;
}

#define __copy_from_user(to, from, n)		\
(__builtin_constant_p(n) ?			\
 __constant_copy_from_user(to, from, n) :	\
 __generic_copy_from_user(to, from, n))

#define __copy_to_user(to, from, n)		\
(__builtin_constant_p(n) ?			\
 __constant_copy_to_user(to, from, n) :	\
 __generic_copy_to_user(to, from, n))

#define __copy_to_user_inatomic __copy_to_user
#define __copy_from_user_inatomic __copy_from_user

#define copy_from_user(to, from, n)	__copy_from_user(to, from, n)
#define copy_to_user(to, from, n)	__copy_to_user(to, from, n)

long strncpy_from_user(char *dst, const char __user *src, long count);
long strnlen_user(const char __user *src, long n);
unsigned long clear_user(void __user *to, unsigned long n);

#define strlen_user(str) strnlen_user(str, 32767)

#endif /* __M68K_UACCESS_H */
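
/*
 * Illustrative sketch, not part of the original header: copying a small,
 * constant-sized structure from user space. Because sizeof(args) is a
 * compile-time constant, __builtin_constant_p() routes the call through the
 * inlined __constant_copy_from_user() above (case 8: two longword moves);
 * a variable length would fall back to __generic_copy_from_user(). The
 * struct and function names are hypothetical. A nonzero return means some
 * bytes could not be copied.
 */
#if 0
struct example_args {
	u32 offset;
	u32 length;
};

static int example_parse_args(const struct example_args __user *uarg)
{
	struct example_args args;

	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;
	/* ... use args.offset and args.length ... */
	return 0;
}
#endif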