Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mm: Introduce untagged_addr_remote()

untagged_addr() removes tags/metadata from the address and brings it to
the canonical form. The helper is implemented on arm64 and sparc. Both of
them do untagging based on global rules.

However, Linear Address Masking (LAM) on x86 introduces per-process
settings for untagging. As a result, untagged_addr() is now only
suitable for untagging addresses for the current process.

The new helper untagged_addr_remote() has to be used when the address
targets a remote process. It requires the mmap lock for the target mm
to be taken.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Alexander Potapenko <glider@google.com>
Link: https://lore.kernel.org/all/20230312112612.31869-6-kirill.shutemov%40linux.intel.com

authored by

Kirill A. Shutemov and committed by
Dave Hansen
428e106a 82721d8b

+43 -23
+2
arch/sparc/include/asm/uaccess_64.h
··· 8 8 9 9 #include <linux/compiler.h> 10 10 #include <linux/string.h> 11 + #include <linux/mm_types.h> 11 12 #include <asm/asi.h> 12 13 #include <asm/spitfire.h> 14 + #include <asm/pgtable.h> 13 15 14 16 #include <asm/processor.h> 15 17 #include <asm-generic/access_ok.h>
+1 -1
drivers/vfio/vfio_iommu_type1.c
··· 580 580 goto done; 581 581 } 582 582 583 - vaddr = untagged_addr(vaddr); 583 + vaddr = untagged_addr_remote(mm, vaddr); 584 584 585 585 retry: 586 586 vma = vma_lookup(mm, vaddr);
+7 -2
fs/proc/task_mmu.c
··· 1689 1689 1690 1690 /* watch out for wraparound */ 1691 1691 start_vaddr = end_vaddr; 1692 - if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) 1693 - start_vaddr = untagged_addr(svpfn << PAGE_SHIFT); 1692 + if (svpfn <= (ULONG_MAX >> PAGE_SHIFT)) { 1693 + ret = mmap_read_lock_killable(mm); 1694 + if (ret) 1695 + goto out_free; 1696 + start_vaddr = untagged_addr_remote(mm, svpfn << PAGE_SHIFT); 1697 + mmap_read_unlock(mm); 1698 + } 1694 1699 1695 1700 /* Ensure the address is inside the task */ 1696 1701 if (start_vaddr > mm->task_size)
-11
include/linux/mm.h
··· 96 96 #include <asm/page.h> 97 97 #include <asm/processor.h> 98 98 99 - /* 100 - * Architectures that support memory tagging (assigning tags to memory regions, 101 - * embedding these tags into addresses that point to these memory regions, and 102 - * checking that the memory and the pointer tags match on memory accesses) 103 - * redefine this macro to strip tags from pointers. 104 - * It's defined as noop for architectures that don't support memory tagging. 105 - */ 106 - #ifndef untagged_addr 107 - #define untagged_addr(addr) (addr) 108 - #endif 109 - 110 99 #ifndef __pa_symbol 111 100 #define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0)) 112 101 #endif
+22
include/linux/uaccess.h
··· 11 11 #include <asm/uaccess.h> 12 12 13 13 /* 14 + * Architectures that support memory tagging (assigning tags to memory regions, 15 + * embedding these tags into addresses that point to these memory regions, and 16 + * checking that the memory and the pointer tags match on memory accesses) 17 + * redefine this macro to strip tags from pointers. 18 + * 19 + * Passing down mm_struct allows to define untagging rules on per-process 20 + * basis. 21 + * 22 + * It's defined as noop for architectures that don't support memory tagging. 23 + */ 24 + #ifndef untagged_addr 25 + #define untagged_addr(addr) (addr) 26 + #endif 27 + 28 + #ifndef untagged_addr_remote 29 + #define untagged_addr_remote(mm, addr) ({ \ 30 + mmap_assert_locked(mm); \ 31 + untagged_addr(addr); \ 32 + }) 33 + #endif 34 + 35 + /* 14 36 * Architectures should provide two primitives (raw_copy_{to,from}_user()) 15 37 * and get rid of their private instances of copy_{to,from}_user() and 16 38 * __copy_{to,from}_user{,_inatomic}().
+2 -2
mm/gup.c
··· 1085 1085 if (!nr_pages) 1086 1086 return 0; 1087 1087 1088 - start = untagged_addr(start); 1088 + start = untagged_addr_remote(mm, start); 1089 1089 1090 1090 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN))); 1091 1091 ··· 1259 1259 struct vm_area_struct *vma; 1260 1260 vm_fault_t ret; 1261 1261 1262 - address = untagged_addr(address); 1262 + address = untagged_addr_remote(mm, address); 1263 1263 1264 1264 if (unlocked) 1265 1265 fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+3 -2
mm/madvise.c
··· 1402 1402 size_t len; 1403 1403 struct blk_plug plug; 1404 1404 1405 - start = untagged_addr(start); 1406 - 1407 1405 if (!madvise_behavior_valid(behavior)) 1408 1406 return -EINVAL; 1409 1407 ··· 1432 1434 } else { 1433 1435 mmap_read_lock(mm); 1434 1436 } 1437 + 1438 + start = untagged_addr_remote(mm, start); 1439 + end = start + len; 1435 1440 1436 1441 blk_start_plug(&plug); 1437 1442 error = madvise_walk_vmas(mm, start, end, behavior,
+6 -5
mm/migrate.c
··· 2097 2097 * target node 2098 2098 * 1 - when it has been queued 2099 2099 */ 2100 - static int add_page_for_migration(struct mm_struct *mm, unsigned long addr, 2100 + static int add_page_for_migration(struct mm_struct *mm, const void __user *p, 2101 2101 int node, struct list_head *pagelist, bool migrate_all) 2102 2102 { 2103 2103 struct vm_area_struct *vma; 2104 + unsigned long addr; 2104 2105 struct page *page; 2105 2106 int err; 2106 2107 bool isolated; 2107 2108 2108 2109 mmap_read_lock(mm); 2110 + addr = (unsigned long)untagged_addr_remote(mm, p); 2111 + 2109 2112 err = -EFAULT; 2110 2113 vma = vma_lookup(mm, addr); 2111 2114 if (!vma || !vma_migratable(vma)) ··· 2214 2211 2215 2212 for (i = start = 0; i < nr_pages; i++) { 2216 2213 const void __user *p; 2217 - unsigned long addr; 2218 2214 int node; 2219 2215 2220 2216 err = -EFAULT; ··· 2221 2219 goto out_flush; 2222 2220 if (get_user(node, nodes + i)) 2223 2221 goto out_flush; 2224 - addr = (unsigned long)untagged_addr(p); 2225 2222 2226 2223 err = -ENODEV; 2227 2224 if (node < 0 || node >= MAX_NUMNODES) ··· 2248 2247 * Errors in the page lookup or isolation are not fatal and we simply 2249 2248 * report them via status 2250 2249 */ 2251 - err = add_page_for_migration(mm, addr, current_node, 2252 - &pagelist, flags & MPOL_MF_MOVE_ALL); 2250 + err = add_page_for_migration(mm, p, current_node, &pagelist, 2251 + flags & MPOL_MF_MOVE_ALL); 2253 2252 2254 2253 if (err > 0) { 2255 2254 /* The page is successfully queued for migration */