Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

arm64: mte: Enable TCO in functions that can read beyond buffer limits

load_unaligned_zeropad() and __get/put_kernel_nofault() functions can
read past some buffer limits which may include some MTE granule with a
different tag.

When MTE async mode is enabled, if a load operation crosses the boundaries
and the next granule has a different tag, the PE sets the TFSR_EL1.TF1 bit
as if an asynchronous tag fault had happened.

Enable Tag Check Override (TCO) in these functions before the load and
disable it afterwards to prevent this from happening.

Note: The same condition can be hit in MTE sync mode but we deal with it
through the exception handling.
In the current implementation, the mte_async_mode flag is set only at boot
time, but in the future kasan might acquire some runtime features that
change the mode dynamically; hence we disable it when sync mode is
selected, to be future-proof.

Cc: Will Deacon <will@kernel.org>
Reported-by: Branislav Rankov <Branislav.Rankov@arm.com>
Tested-by: Branislav Rankov <Branislav.Rankov@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Andrey Konovalov <andreyknvl@google.com>
Tested-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Link: https://lore.kernel.org/r/20210315132019.33202-6-vincenzo.frascino@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>

authored by

Vincenzo Frascino and committed by
Catalin Marinas
e60beb95 8f7b5054

+63
+15
arch/arm64/include/asm/mte.h
··· 90 90 91 91 #endif /* CONFIG_ARM64_MTE */ 92 92 93 + #ifdef CONFIG_KASAN_HW_TAGS 94 + /* Whether the MTE asynchronous mode is enabled. */ 95 + DECLARE_STATIC_KEY_FALSE(mte_async_mode); 96 + 97 + static inline bool system_uses_mte_async_mode(void) 98 + { 99 + return static_branch_unlikely(&mte_async_mode); 100 + } 101 + #else 102 + static inline bool system_uses_mte_async_mode(void) 103 + { 104 + return false; 105 + } 106 + #endif /* CONFIG_KASAN_HW_TAGS */ 107 + 93 108 #endif /* __ASSEMBLY__ */ 94 109 #endif /* __ASM_MTE_H */
+22
arch/arm64/include/asm/uaccess.h
··· 20 20 21 21 #include <asm/cpufeature.h> 22 22 #include <asm/mmu.h> 23 + #include <asm/mte.h> 23 24 #include <asm/ptrace.h> 24 25 #include <asm/memory.h> 25 26 #include <asm/extable.h> ··· 189 188 ARM64_MTE, CONFIG_KASAN_HW_TAGS)); 190 189 } 191 190 191 + /* 192 + * These functions disable tag checking only if in MTE async mode 193 + * since the sync mode generates exceptions synchronously and the 194 + * nofault or load_unaligned_zeropad can handle them. 195 + */ 196 + static inline void __uaccess_disable_tco_async(void) 197 + { 198 + if (system_uses_mte_async_mode()) 199 + __uaccess_disable_tco(); 200 + } 201 + 202 + static inline void __uaccess_enable_tco_async(void) 203 + { 204 + if (system_uses_mte_async_mode()) 205 + __uaccess_enable_tco(); 206 + } 207 + 192 208 static inline void uaccess_disable_privileged(void) 193 209 { 194 210 __uaccess_disable_tco(); ··· 325 307 do { \ 326 308 int __gkn_err = 0; \ 327 309 \ 310 + __uaccess_enable_tco_async(); \ 328 311 __raw_get_mem("ldr", *((type *)(dst)), \ 329 312 (__force type *)(src), __gkn_err); \ 313 + __uaccess_disable_tco_async(); \ 330 314 if (unlikely(__gkn_err)) \ 331 315 goto err_label; \ 332 316 } while (0) ··· 400 380 do { \ 401 381 int __pkn_err = 0; \ 402 382 \ 383 + __uaccess_enable_tco_async(); \ 403 384 __raw_put_mem("str", *((type *)(src)), \ 404 385 (__force type *)(dst), __pkn_err); \ 386 + __uaccess_disable_tco_async(); \ 405 387 if (unlikely(__pkn_err)) \ 406 388 goto err_label; \ 407 389 } while(0)
+4
arch/arm64/include/asm/word-at-a-time.h
··· 55 55 { 56 56 unsigned long ret, offset; 57 57 58 + __uaccess_enable_tco_async(); 59 + 58 60 /* Load word from unaligned pointer addr */ 59 61 asm( 60 62 "1: ldr %0, %3\n" ··· 77 75 _ASM_EXTABLE(1b, 3b) 78 76 : "=&r" (ret), "=&r" (offset) 79 77 : "r" (addr), "Q" (*(unsigned long *)addr)); 78 + 79 + __uaccess_disable_tco_async(); 80 80 81 81 return ret; 82 82 }
+22
arch/arm64/kernel/mte.c
··· 26 26 27 27 static bool report_fault_once = true; 28 28 29 + /* Whether the MTE asynchronous mode is enabled. */ 30 + DEFINE_STATIC_KEY_FALSE(mte_async_mode); 31 + EXPORT_SYMBOL_GPL(mte_async_mode); 32 + 29 33 static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap) 30 34 { 31 35 pte_t old_pte = READ_ONCE(*ptep); ··· 122 118 123 119 void mte_enable_kernel_sync(void) 124 120 { 121 + /* 122 + * Make sure we enter this function when no PE has set 123 + * async mode previously. 124 + */ 125 + WARN_ONCE(system_uses_mte_async_mode(), 126 + "MTE async mode enabled system wide!"); 127 + 125 128 __mte_enable_kernel("synchronous", SCTLR_ELx_TCF_SYNC); 126 129 } 127 130 128 131 void mte_enable_kernel_async(void) 129 132 { 130 133 __mte_enable_kernel("asynchronous", SCTLR_ELx_TCF_ASYNC); 134 + 135 + /* 136 + * MTE async mode is set system wide by the first PE that 137 + * executes this function. 138 + * 139 + * Note: If in future KASAN acquires a runtime switching 140 + * mode in between sync and async, this strategy needs 141 + * to be reviewed. 142 + */ 143 + if (!system_uses_mte_async_mode()) 144 + static_branch_enable(&mte_async_mode); 131 145 } 132 146 133 147 void mte_set_report_once(bool state)