Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: Avoid premature usercopy failure

Al reminds us that the usercopy API must only return complete failure
if absolutely nothing could be copied. Currently, if userspace does
something silly like giving us an unaligned pointer to Device memory,
or a size which overruns MTE tag bounds, we may fail to honour that
requirement when faulting on a multi-byte access even though a smaller
access could have succeeded.

Add a mitigation to the fixup routines to fall back to a single-byte
copy if we faulted on a larger access before anything has been written
to the destination, to guarantee making *some* forward progress. We
needn't be too concerned about the overall performance since this should
only occur when callers are doing something a bit dodgy in the first
place. Particularly broken userspace might still be able to trick
generic_perform_write() into an infinite loop by targeting write() at
an mmap() of some read-only device register where the fault-in load
succeeds but any store synchronously aborts such that copy_to_user() is
genuinely unable to make progress, but, well, don't do that...

CC: stable@vger.kernel.org
Reported-by: Chen Huang <chenhuang5@huawei.com>
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/dc03d5c675731a1f24a62417dba5429ad744234e.1626098433.git.robin.murphy@arm.com
Signed-off-by: Will Deacon <will@kernel.org>

Authored by Robin Murphy and committed by Will Deacon
commit 295cf156 (parent 8cdd23c2)

+35 -13
+10 -3
arch/arm64/lib/copy_from_user.S
 	.endm
 
 	.macro ldrh1 reg, ptr, val
-	user_ldst 9998f, ldtrh, \reg, \ptr, \val
+	user_ldst 9997f, ldtrh, \reg, \ptr, \val
 	.endm
 
 	.macro strh1 reg, ptr, val
···
 	.endm
 
 	.macro ldr1 reg, ptr, val
-	user_ldst 9998f, ldtr, \reg, \ptr, \val
+	user_ldst 9997f, ldtr, \reg, \ptr, \val
 	.endm
 
 	.macro str1 reg, ptr, val
···
 	.endm
 
 	.macro ldp1 reg1, reg2, ptr, val
-	user_ldp 9998f, \reg1, \reg2, \ptr, \val
+	user_ldp 9997f, \reg1, \reg2, \ptr, \val
 	.endm
 
 	.macro stp1 reg1, reg2, ptr, val
···
 	.endm
 
 end	.req	x5
+srcin	.req	x15
 SYM_FUNC_START(__arch_copy_from_user)
 	add	end, x0, x2
+	mov	srcin, x1
 #include "copy_template.S"
 	mov	x0, #0			// Nothing to copy
 	ret
···
 	.section .fixup,"ax"
 	.align	2
+9997:	cmp	dst, dstin
+	b.ne	9998f
+	// Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+	strb	tmp1w, [dst], #1
 9998:	sub	x0, end, dst		// bytes not copied
 	ret
 	.previous
+14 -7
arch/arm64/lib/copy_in_user.S
 	.endm
 
 	.macro ldrh1 reg, ptr, val
-	user_ldst 9998f, ldtrh, \reg, \ptr, \val
+	user_ldst 9997f, ldtrh, \reg, \ptr, \val
 	.endm
 
 	.macro strh1 reg, ptr, val
-	user_ldst 9998f, sttrh, \reg, \ptr, \val
+	user_ldst 9997f, sttrh, \reg, \ptr, \val
 	.endm
 
 	.macro ldr1 reg, ptr, val
-	user_ldst 9998f, ldtr, \reg, \ptr, \val
+	user_ldst 9997f, ldtr, \reg, \ptr, \val
 	.endm
 
 	.macro str1 reg, ptr, val
-	user_ldst 9998f, sttr, \reg, \ptr, \val
+	user_ldst 9997f, sttr, \reg, \ptr, \val
 	.endm
 
 	.macro ldp1 reg1, reg2, ptr, val
-	user_ldp 9998f, \reg1, \reg2, \ptr, \val
+	user_ldp 9997f, \reg1, \reg2, \ptr, \val
 	.endm
 
 	.macro stp1 reg1, reg2, ptr, val
-	user_stp 9998f, \reg1, \reg2, \ptr, \val
+	user_stp 9997f, \reg1, \reg2, \ptr, \val
 	.endm
 
 end	.req	x5
-
+srcin	.req	x15
 SYM_FUNC_START(__arch_copy_in_user)
 	add	end, x0, x2
+	mov	srcin, x1
 #include "copy_template.S"
 	mov	x0, #0
 	ret
···
 	.section .fixup,"ax"
 	.align	2
+9997:	cmp	dst, dstin
+	b.ne	9998f
+	// Before being absolutely sure we couldn't copy anything, try harder
+USER(9998f, ldtrb tmp1w, [srcin])
+USER(9998f, sttrb tmp1w, [dst])
+	add	dst, dst, #1
 9998:	sub	x0, end, dst		// bytes not copied
 	ret
 	.previous
+11 -3
arch/arm64/lib/copy_to_user.S
 	.endm
 
 	.macro strh1 reg, ptr, val
-	user_ldst 9998f, sttrh, \reg, \ptr, \val
+	user_ldst 9997f, sttrh, \reg, \ptr, \val
 	.endm
 
 	.macro ldr1 reg, ptr, val
···
 	.endm
 
 	.macro str1 reg, ptr, val
-	user_ldst 9998f, sttr, \reg, \ptr, \val
+	user_ldst 9997f, sttr, \reg, \ptr, \val
 	.endm
 
 	.macro ldp1 reg1, reg2, ptr, val
···
 	.endm
 
 	.macro stp1 reg1, reg2, ptr, val
-	user_stp 9998f, \reg1, \reg2, \ptr, \val
+	user_stp 9997f, \reg1, \reg2, \ptr, \val
 	.endm
 
 end	.req	x5
+srcin	.req	x15
 SYM_FUNC_START(__arch_copy_to_user)
 	add	end, x0, x2
+	mov	srcin, x1
 #include "copy_template.S"
 	mov	x0, #0
 	ret
···
 	.section .fixup,"ax"
 	.align	2
+9997:	cmp	dst, dstin
+	b.ne	9998f
+	// Before being absolutely sure we couldn't copy anything, try harder
+	ldrb	tmp1w, [srcin]
+USER(9998f, sttrb tmp1w, [dst])
+	add	dst, dst, #1
 9998:	sub	x0, end, dst		// bytes not copied
 	ret
 	.previous