Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm

Pull ARM fixes from Russell King:
"Just two fixes: wire up the new system calls added during the last
merge window, and fix another user access site"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
ARM: alignment: fix alignment handling for uaccess changes
ARM: wire up new syscalls

Changed files: +30 -6
arch/arm/include/asm/unistd.h  (+1 -1)
···
  * This may need to be greater than __NR_last_syscall+1 in order to
  * account for the padding in the syscall table
  */
-#define __NR_syscalls		(388)
+#define __NR_syscalls		(392)
 
 /*
  * *NOTE*: This is a ghost syscall private to the kernel. Only the
arch/arm/include/uapi/asm/unistd.h  (+2)
···
 #define __NR_memfd_create		(__NR_SYSCALL_BASE+385)
 #define __NR_bpf			(__NR_SYSCALL_BASE+386)
 #define __NR_execveat			(__NR_SYSCALL_BASE+387)
+#define __NR_userfaultfd		(__NR_SYSCALL_BASE+388)
+#define __NR_membarrier			(__NR_SYSCALL_BASE+389)
 
 /*
  * The following SWIs are ARM private.
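For illustration only (not part of this merge): once __NR_userfaultfd and __NR_membarrier exist in the table, userspace can reach them by number through syscall(2) even before libc grows wrappers. A minimal sketch, assuming an ARM EABI kernel built from this tree; the literal 0 passed to membarrier is MEMBARRIER_CMD_QUERY from linux/membarrier.h, which asks the kernel for the mask of supported commands:

/* sketch: probe the newly wired-up membarrier syscall by number */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_membarrier
#define __NR_membarrier 389	/* EABI value from the hunk above; __NR_SYSCALL_BASE is 0 for EABI */
#endif

int main(void)
{
	/* cmd 0 == MEMBARRIER_CMD_QUERY: returns a bitmask of supported commands */
	long mask = syscall(__NR_membarrier, 0, 0);

	if (mask < 0)
		perror("membarrier");	/* ENOSYS on a kernel without this table entry */
	else
		printf("supported membarrier commands: 0x%lx\n", mask);
	return 0;
}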
arch/arm/kernel/calls.S  (+2)
···
 /* 385 */	CALL(sys_memfd_create)
		CALL(sys_bpf)
		CALL(sys_execveat)
+		CALL(sys_userfaultfd)
+		CALL(sys_membarrier)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
 #define syscalls_counted
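A side note on the arithmetic (not part of the commit): the .equ above pads the call table to a multiple of four entries, which is why the first hunk bumps __NR_syscalls from 388 to 392 rather than to 390. The two new CALL() entries take the real count to 390, and the padded table then needs two extra slots. A quick standalone check of that expression:

/* mirrors the rounding used in calls.S: pad the table to a multiple of 4 */
#include <stdio.h>

int main(void)
{
	int nr_syscalls = 388 + 2;			/* sys_userfaultfd + sys_membarrier added */
	int padded      = (nr_syscalls + 3) & ~3;	/* same expression as syscalls_padding uses */

	printf("entries: %d, padded size: %d, padding slots: %d\n",
	       nr_syscalls, padded, padded - nr_syscalls);	/* prints 390, 392, 2 */
	return 0;
}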
arch/arm/mm/alignment.c  (+25 -5)
···
  user:
 	if (LDST_L_BIT(instr)) {
 		unsigned long val;
+		unsigned int __ua_flags = uaccess_save_and_enable();
+
 		get16t_unaligned_check(val, addr);
+		uaccess_restore(__ua_flags);
 
 		/* signed half-word? */
 		if (instr & 0x40)
 			val = (signed long)((signed short) val);
 
 		regs->uregs[rd] = val;
-	} else
+	} else {
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		put16t_unaligned_check(regs->uregs[rd], addr);
+		uaccess_restore(__ua_flags);
+	}
 
 	return TYPE_LDST;
···
 
  user:
 	if (load) {
-		unsigned long val;
+		unsigned long val, val2;
+		unsigned int __ua_flags = uaccess_save_and_enable();
+
 		get32t_unaligned_check(val, addr);
+		get32t_unaligned_check(val2, addr + 4);
+
+		uaccess_restore(__ua_flags);
+
 		regs->uregs[rd] = val;
-		get32t_unaligned_check(val, addr + 4);
-		regs->uregs[rd2] = val;
+		regs->uregs[rd2] = val2;
 	} else {
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		put32t_unaligned_check(regs->uregs[rd], addr);
 		put32t_unaligned_check(regs->uregs[rd2], addr + 4);
+		uaccess_restore(__ua_flags);
 	}
 
 	return TYPE_LDST;
···
  trans:
 	if (LDST_L_BIT(instr)) {
 		unsigned int val;
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		get32t_unaligned_check(val, addr);
+		uaccess_restore(__ua_flags);
 		regs->uregs[rd] = val;
-	} else
+	} else {
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		put32t_unaligned_check(regs->uregs[rd], addr);
+		uaccess_restore(__ua_flags);
+	}
 	return TYPE_LDST;
 
  fault:
···
 #endif
 
 	if (user_mode(regs)) {
+		unsigned int __ua_flags = uaccess_save_and_enable();
 		for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
 		     regbits >>= 1, rd += 1)
 			if (regbits & 1) {
···
 					put32t_unaligned_check(regs->uregs[rd], eaddr);
 				eaddr += 4;
 			}
+		uaccess_restore(__ua_flags);
 	} else {
 		for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
 		     regbits >>= 1, rd += 1)
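Background for the pattern every hunk above applies (an editorial sketch, not buildable kernel code): with the uaccess changes this fix targets, user memory is only accessible inside an explicit uaccess_save_and_enable()/uaccess_restore() window, so each unaligned get*t/put*t fixup now has to be bracketed by that pair. The standalone program below only mimics the bracketing shape; the mock_* helpers are illustrative stand-ins for the real kernel primitives:

/* standalone mock of the save/enable ... access ... restore bracketing
 * shown in the hunks above; the mock_* names are stand-ins, not the real
 * uaccess_save_and_enable()/uaccess_restore() primitives */
#include <stdio.h>

static unsigned int mock_uaccess_save_and_enable(void)
{
	printf("user access window opened\n");
	return 0;				/* stand-in for the saved uaccess state */
}

static void mock_uaccess_restore(unsigned int flags)
{
	(void)flags;
	printf("user access window closed\n");
}

/* stand-in for get16t_unaligned_check(): byte-wise halfword load */
static void mock_get16t(unsigned long *val, const unsigned char *addr)
{
	*val = addr[0] | ((unsigned long)addr[1] << 8);
}

int main(void)
{
	unsigned char user_mem[] = { 0x34, 0x12 };	/* pretend userspace buffer */
	unsigned long val;

	unsigned int flags = mock_uaccess_save_and_enable();
	mock_get16t(&val, user_mem);			/* access only inside the window */
	mock_uaccess_restore(flags);

	printf("loaded halfword: 0x%04lx\n", val);
	return 0;
}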