Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'metag-for-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag

Pull metag updates from James Hogan:
"These patches primarily make some usercopy improvements (following on
from the recent usercopy fixes):

- reformat and simplify rapf copy loops

- add 64-bit get_user support

And fix a couple more uaccess issues, partly pointed out by Al:

- fix serious shortcomings in access_ok()

- fix strncpy_from_user() address validation

Also included is a trivial removal of a redundant increment"

* tag 'metag-for-v4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag:
metag/mm: Drop pointless increment
metag/uaccess: Check access_ok in strncpy_from_user
metag/uaccess: Fix access_ok()
metag/usercopy: Add 64-bit get_user support
metag/usercopy: Simplify rapf loop fixup corner case
metag/usercopy: Reformat rapf loop inline asm

+143 -152
+38 -20
arch/metag/include/asm/uaccess.h
··· 24 24 25 25 #define segment_eq(a, b) ((a).seg == (b).seg) 26 26 27 - #define __kernel_ok (uaccess_kernel()) 28 - /* 29 - * Explicitly allow NULL pointers here. Parts of the kernel such 30 - * as readv/writev use access_ok to validate pointers, but want 31 - * to allow NULL pointers for various reasons. NULL pointers are 32 - * safe to allow through because the first page is not mappable on 33 - * Meta. 34 - * 35 - * We also wish to avoid letting user code access the system area 36 - * and the kernel half of the address space. 37 - */ 38 - #define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \ 39 - ((addr) > PAGE_OFFSET && \ 40 - (addr) < LINCORE_BASE)) 41 - 42 27 static inline int __access_ok(unsigned long addr, unsigned long size) 43 28 { 44 - return __kernel_ok || !__user_bad(addr, size); 29 + /* 30 + * Allow access to the user mapped memory area, but not the system area 31 + * before it. The check extends to the top of the address space when 32 + * kernel access is allowed (there's no real reason to user copy to the 33 + * system area in any case). 34 + */ 35 + if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg && 36 + size <= get_fs().seg - addr)) 37 + return true; 38 + /* 39 + * Explicitly allow NULL pointers here. Parts of the kernel such 40 + * as readv/writev use access_ok to validate pointers, but want 41 + * to allow NULL pointers for various reasons. NULL pointers are 42 + * safe to allow through because the first page is not mappable on 43 + * Meta. 44 + */ 45 + if (!addr) 46 + return true; 47 + /* Allow access to core code memory area... */ 48 + if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT && 49 + size <= LINCORE_CODE_LIMIT + 1 - addr) 50 + return true; 51 + /* ... but no other areas. 
*/ 52 + return false; 45 53 } 46 54 47 55 #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \ ··· 121 113 122 114 #define __get_user_nocheck(x, ptr, size) \ 123 115 ({ \ 124 - long __gu_err, __gu_val; \ 116 + long __gu_err; \ 117 + long long __gu_val; \ 125 118 __get_user_size(__gu_val, (ptr), (size), __gu_err); \ 126 119 (x) = (__force __typeof__(*(ptr)))__gu_val; \ 127 120 __gu_err; \ ··· 130 121 131 122 #define __get_user_check(x, ptr, size) \ 132 123 ({ \ 133 - long __gu_err = -EFAULT, __gu_val = 0; \ 124 + long __gu_err = -EFAULT; \ 125 + long long __gu_val = 0; \ 134 126 const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ 135 127 if (access_ok(VERIFY_READ, __gu_addr, size)) \ 136 128 __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ ··· 142 132 extern unsigned char __get_user_asm_b(const void __user *addr, long *err); 143 133 extern unsigned short __get_user_asm_w(const void __user *addr, long *err); 144 134 extern unsigned int __get_user_asm_d(const void __user *addr, long *err); 135 + extern unsigned long long __get_user_asm_l(const void __user *addr, long *err); 145 136 146 137 #define __get_user_size(x, ptr, size, retval) \ 147 138 do { \ ··· 154 143 x = __get_user_asm_w(ptr, &retval); break; \ 155 144 case 4: \ 156 145 x = __get_user_asm_d(ptr, &retval); break; \ 146 + case 8: \ 147 + x = __get_user_asm_l(ptr, &retval); break; \ 157 148 default: \ 158 149 (x) = __get_user_bad(); \ 159 150 } \ ··· 174 161 extern long __must_check __strncpy_from_user(char *dst, const char __user *src, 175 162 long count); 176 163 177 - #define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count) 178 - 164 + static inline long 165 + strncpy_from_user(char *dst, const char __user *src, long count) 166 + { 167 + if (!access_ok(VERIFY_READ, src, 1)) 168 + return -EFAULT; 169 + return __strncpy_from_user(dst, src, count); 170 + } 179 171 /* 180 172 * Return the size of a string (including the ending 0) 181 173 *
+105 -131
arch/metag/lib/usercopy.c
··· 246 246 #define __asm_copy_user_64bit_rapf_loop( \ 247 247 to, from, ret, n, id, FIXUP) \ 248 248 asm volatile ( \ 249 - ".balign 8\n" \ 250 - "MOV RAPF, %1\n" \ 251 - "MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \ 252 - "MOV D0Ar6, #0\n" \ 253 - "LSR D1Ar5, %3, #6\n" \ 254 - "SUB TXRPT, D1Ar5, #2\n" \ 255 - "MOV RAPF, %1\n" \ 249 + ".balign 8\n" \ 250 + " MOV RAPF, %1\n" \ 251 + " MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \ 252 + " MOV D0Ar6, #0\n" \ 253 + " LSR D1Ar5, %3, #6\n" \ 254 + " SUB TXRPT, D1Ar5, #2\n" \ 255 + " MOV RAPF, %1\n" \ 256 256 "$Lloop"id":\n" \ 257 - "ADD RAPF, %1, #64\n" \ 258 - "21:\n" \ 259 - "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 260 - "22:\n" \ 261 - "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 262 - "23:\n" \ 263 - "SUB %3, %3, #32\n" \ 264 - "24:\n" \ 265 - "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 266 - "25:\n" \ 267 - "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 268 - "26:\n" \ 269 - "SUB %3, %3, #32\n" \ 270 - "DCACHE [%1+#-64], D0Ar6\n" \ 271 - "BR $Lloop"id"\n" \ 257 + " ADD RAPF, %1, #64\n" \ 258 + "21: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 259 + "22: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 260 + "23: SUB %3, %3, #32\n" \ 261 + "24: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 262 + "25: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 263 + "26: SUB %3, %3, #32\n" \ 264 + " DCACHE [%1+#-64], D0Ar6\n" \ 265 + " BR $Lloop"id"\n" \ 272 266 \ 273 - "MOV RAPF, %1\n" \ 274 - "27:\n" \ 275 - "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 276 - "28:\n" \ 277 - "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 278 - "29:\n" \ 279 - "SUB %3, %3, #32\n" \ 280 - "30:\n" \ 281 - "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 282 - "31:\n" \ 283 - "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 284 - "32:\n" \ 285 - "SUB %0, %0, #8\n" \ 286 - "33:\n" \ 287 - "SETL [%0++], D0.7, D1.7\n" \ 288 - "SUB %3, %3, #32\n" \ 289 - "1:" \ 290 - "DCACHE [%1+#-64], D0Ar6\n" \ 291 - "GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \ 292 - "GETL D0FrT, D1RtP, 
[A0StP+#-32]\n" \ 293 - "GETL D0.5, D1.5, [A0StP+#-24]\n" \ 294 - "GETL D0.6, D1.6, [A0StP+#-16]\n" \ 295 - "GETL D0.7, D1.7, [A0StP+#-8]\n" \ 296 - "SUB A0StP, A0StP, #40\n" \ 267 + " MOV RAPF, %1\n" \ 268 + "27: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 269 + "28: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 270 + "29: SUB %3, %3, #32\n" \ 271 + "30: MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 272 + "31: MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 273 + "32: SETL [%0+#-8], D0.7, D1.7\n" \ 274 + " SUB %3, %3, #32\n" \ 275 + "1: DCACHE [%1+#-64], D0Ar6\n" \ 276 + " GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \ 277 + " GETL D0FrT, D1RtP, [A0StP+#-32]\n" \ 278 + " GETL D0.5, D1.5, [A0StP+#-24]\n" \ 279 + " GETL D0.6, D1.6, [A0StP+#-16]\n" \ 280 + " GETL D0.7, D1.7, [A0StP+#-8]\n" \ 281 + " SUB A0StP, A0StP, #40\n" \ 297 282 " .section .fixup,\"ax\"\n" \ 298 - "4:\n" \ 299 - " ADD %0, %0, #8\n" \ 300 - "3:\n" \ 301 - " MOV D0Ar2, TXSTATUS\n" \ 283 + "3: MOV D0Ar2, TXSTATUS\n" \ 302 284 " MOV D1Ar1, TXSTATUS\n" \ 303 285 " AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \ 304 286 " MOV TXSTATUS, D1Ar1\n" \ 305 287 FIXUP \ 306 - " MOVT D0Ar2,#HI(1b)\n" \ 307 - " JUMP D0Ar2,#LO(1b)\n" \ 288 + " MOVT D0Ar2, #HI(1b)\n" \ 289 + " JUMP D0Ar2, #LO(1b)\n" \ 308 290 " .previous\n" \ 309 291 " .section __ex_table,\"a\"\n" \ 310 292 " .long 21b,3b\n" \ ··· 301 319 " .long 30b,3b\n" \ 302 320 " .long 31b,3b\n" \ 303 321 " .long 32b,3b\n" \ 304 - " .long 33b,4b\n" \ 305 322 " .previous\n" \ 306 323 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ 307 324 : "0" (to), "1" (from), "2" (ret), "3" (n) \ ··· 378 397 #define __asm_copy_user_32bit_rapf_loop( \ 379 398 to, from, ret, n, id, FIXUP) \ 380 399 asm volatile ( \ 381 - ".balign 8\n" \ 382 - "MOV RAPF, %1\n" \ 383 - "MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \ 384 - "MOV D0Ar6, #0\n" \ 385 - "LSR D1Ar5, %3, #6\n" \ 386 - "SUB TXRPT, D1Ar5, #2\n" \ 387 - "MOV RAPF, %1\n" \ 388 - "$Lloop"id":\n" \ 389 - "ADD RAPF, %1, #64\n" \ 390 - "21:\n" \ 391 - 
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 392 - "22:\n" \ 393 - "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 394 - "23:\n" \ 395 - "SUB %3, %3, #16\n" \ 396 - "24:\n" \ 397 - "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 398 - "25:\n" \ 399 - "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 400 - "26:\n" \ 401 - "SUB %3, %3, #16\n" \ 402 - "27:\n" \ 403 - "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 404 - "28:\n" \ 405 - "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 406 - "29:\n" \ 407 - "SUB %3, %3, #16\n" \ 408 - "30:\n" \ 409 - "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 410 - "31:\n" \ 411 - "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 412 - "32:\n" \ 413 - "SUB %3, %3, #16\n" \ 414 - "DCACHE [%1+#-64], D0Ar6\n" \ 415 - "BR $Lloop"id"\n" \ 400 + ".balign 8\n" \ 401 + " MOV RAPF, %1\n" \ 402 + " MSETL [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n" \ 403 + " MOV D0Ar6, #0\n" \ 404 + " LSR D1Ar5, %3, #6\n" \ 405 + " SUB TXRPT, D1Ar5, #2\n" \ 406 + " MOV RAPF, %1\n" \ 407 + "$Lloop"id":\n" \ 408 + " ADD RAPF, %1, #64\n" \ 409 + "21: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 410 + "22: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 411 + "23: SUB %3, %3, #16\n" \ 412 + "24: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 413 + "25: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 414 + "26: SUB %3, %3, #16\n" \ 415 + "27: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 416 + "28: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 417 + "29: SUB %3, %3, #16\n" \ 418 + "30: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 419 + "31: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 420 + "32: SUB %3, %3, #16\n" \ 421 + " DCACHE [%1+#-64], D0Ar6\n" \ 422 + " BR $Lloop"id"\n" \ 416 423 \ 417 - "MOV RAPF, %1\n" \ 418 - "33:\n" \ 419 - "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 420 - "34:\n" \ 421 - "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 422 - "35:\n" \ 423 - "SUB %3, %3, #16\n" \ 424 - "36:\n" \ 425 - "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 426 - "37:\n" \ 427 - "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 428 - 
"38:\n" \ 429 - "SUB %3, %3, #16\n" \ 430 - "39:\n" \ 431 - "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 432 - "40:\n" \ 433 - "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 434 - "41:\n" \ 435 - "SUB %3, %3, #16\n" \ 436 - "42:\n" \ 437 - "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 438 - "43:\n" \ 439 - "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 440 - "44:\n" \ 441 - "SUB %0, %0, #4\n" \ 442 - "45:\n" \ 443 - "SETD [%0++], D0.7\n" \ 444 - "SUB %3, %3, #16\n" \ 445 - "1:" \ 446 - "DCACHE [%1+#-64], D0Ar6\n" \ 447 - "GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \ 448 - "GETL D0FrT, D1RtP, [A0StP+#-32]\n" \ 449 - "GETL D0.5, D1.5, [A0StP+#-24]\n" \ 450 - "GETL D0.6, D1.6, [A0StP+#-16]\n" \ 451 - "GETL D0.7, D1.7, [A0StP+#-8]\n" \ 452 - "SUB A0StP, A0StP, #40\n" \ 424 + " MOV RAPF, %1\n" \ 425 + "33: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 426 + "34: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 427 + "35: SUB %3, %3, #16\n" \ 428 + "36: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 429 + "37: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 430 + "38: SUB %3, %3, #16\n" \ 431 + "39: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 432 + "40: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 433 + "41: SUB %3, %3, #16\n" \ 434 + "42: MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ 435 + "43: MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ 436 + "44: SETD [%0+#-4], D0.7\n" \ 437 + " SUB %3, %3, #16\n" \ 438 + "1: DCACHE [%1+#-64], D0Ar6\n" \ 439 + " GETL D0Ar6, D1Ar5, [A0StP+#-40]\n" \ 440 + " GETL D0FrT, D1RtP, [A0StP+#-32]\n" \ 441 + " GETL D0.5, D1.5, [A0StP+#-24]\n" \ 442 + " GETL D0.6, D1.6, [A0StP+#-16]\n" \ 443 + " GETL D0.7, D1.7, [A0StP+#-8]\n" \ 444 + " SUB A0StP, A0StP, #40\n" \ 453 445 " .section .fixup,\"ax\"\n" \ 454 - "4:\n" \ 455 - " ADD %0, %0, #4\n" \ 456 - "3:\n" \ 457 - " MOV D0Ar2, TXSTATUS\n" \ 446 + "3: MOV D0Ar2, TXSTATUS\n" \ 458 447 " MOV D1Ar1, TXSTATUS\n" \ 459 448 " AND D1Ar1, D1Ar1, #0xFFFFF8FF\n" \ 460 449 " MOV TXSTATUS, D1Ar1\n" \ 461 450 FIXUP \ 462 - " MOVT D0Ar2,#HI(1b)\n" \ 
463 - " JUMP D0Ar2,#LO(1b)\n" \ 451 + " MOVT D0Ar2, #HI(1b)\n" \ 452 + " JUMP D0Ar2, #LO(1b)\n" \ 464 453 " .previous\n" \ 465 454 " .section __ex_table,\"a\"\n" \ 466 455 " .long 21b,3b\n" \ ··· 457 506 " .long 42b,3b\n" \ 458 507 " .long 43b,3b\n" \ 459 508 " .long 44b,3b\n" \ 460 - " .long 45b,4b\n" \ 461 509 " .previous\n" \ 462 510 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ 463 511 : "0" (to), "1" (from), "2" (ret), "3" (n) \ ··· 1043 1093 return x; 1044 1094 } 1045 1095 EXPORT_SYMBOL(__get_user_asm_d); 1096 + 1097 + unsigned long long __get_user_asm_l(const void __user *addr, long *err) 1098 + { 1099 + register unsigned long long x asm ("D0Re0") = 0; 1100 + asm volatile ( 1101 + " GETL %0,%t0,[%2]\n" 1102 + "1:\n" 1103 + " GETL %0,%t0,[%2]\n" 1104 + "2:\n" 1105 + " .section .fixup,\"ax\"\n" 1106 + "3: MOV D0FrT,%3\n" 1107 + " SETD [%1],D0FrT\n" 1108 + " MOVT D0FrT,#HI(2b)\n" 1109 + " JUMP D0FrT,#LO(2b)\n" 1110 + " .previous\n" 1111 + " .section __ex_table,\"a\"\n" 1112 + " .long 1b,3b\n" 1113 + " .previous\n" 1114 + : "=r" (x) 1115 + : "r" (err), "r" (addr), "P" (-EFAULT) 1116 + : "D0FrT"); 1117 + return x; 1118 + } 1119 + EXPORT_SYMBOL(__get_user_asm_l); 1046 1120 1047 1121 long __put_user_asm_b(unsigned int x, void __user *addr) 1048 1122 {
-1
arch/metag/mm/mmu-meta1.c
··· 152 152 153 153 p_swapper_pg_dir++; 154 154 addr += PGDIR_SIZE; 155 - entry++; 156 155 } 157 156 }