Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'mm-hotfixes-stable-2023-10-01-08-34' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"Fourteen hotfixes, eleven of which are cc:stable. The remainder
pertain to issues which were introduced after 6.5"

* tag 'mm-hotfixes-stable-2023-10-01-08-34' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm:
crash: add lock to serialize crash hotplug handling
selftests/mm: fix awk usage in charge_reserved_hugetlb.sh and hugetlb_reparenting_test.sh that may cause error
mm: mempolicy: keep VMA walk if both MPOL_MF_STRICT and MPOL_MF_MOVE are specified
mm/damon/vaddr-test: fix memory leak in damon_do_test_apply_three_regions()
mm, memcg: reconsider kmem.limit_in_bytes deprecation
mm: zswap: fix potential memory corruption on duplicate store
arm64: hugetlb: fix set_huge_pte_at() to work with all swap entries
mm: hugetlb: add huge page size param to set_huge_pte_at()
maple_tree: add MAS_UNDERFLOW and MAS_OVERFLOW states
maple_tree: add mas_is_active() to detect in-tree walks
nilfs2: fix potential use after free in nilfs_gccache_submit_read_data()
mm: abstract moving to the next PFN
mm: report success more often from filemap_map_folio_range()
fs: binfmt_elf_efpic: fix personality for ELF-FDPIC

+458 -172
+7
Documentation/admin-guide/cgroup-v1/memory.rst
··· 92 memory.oom_control set/show oom controls. 93 memory.numa_stat show the number of memory usage per numa 94 node 95 memory.kmem.usage_in_bytes show current kernel memory allocation 96 memory.kmem.failcnt show the number of kernel memory usage 97 hits limits
··· 92 memory.oom_control set/show oom controls. 93 memory.numa_stat show the number of memory usage per numa 94 node 95 + memory.kmem.limit_in_bytes Deprecated knob to set and read the kernel 96 + memory hard limit. Kernel hard limit is not 97 + supported since 5.16. Writing any value to 98 + do file will not have any effect same as if 99 + nokmem kernel parameter was specified. 100 + Kernel memory is still charged and reported 101 + by memory.kmem.usage_in_bytes. 102 memory.kmem.usage_in_bytes show current kernel memory allocation 103 memory.kmem.failcnt show the number of kernel memory usage 104 hits limits
+1 -1
arch/arm64/include/asm/hugetlb.h
··· 28 #define arch_make_huge_pte arch_make_huge_pte 29 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 30 extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 31 - pte_t *ptep, pte_t pte); 32 #define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS 33 extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, 34 unsigned long addr, pte_t *ptep,
··· 28 #define arch_make_huge_pte arch_make_huge_pte 29 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 30 extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 31 + pte_t *ptep, pte_t pte, unsigned long sz); 32 #define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS 33 extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, 34 unsigned long addr, pte_t *ptep,
+7 -16
arch/arm64/mm/hugetlbpage.c
··· 241 flush_tlb_range(&vma, saddr, addr); 242 } 243 244 - static inline struct folio *hugetlb_swap_entry_to_folio(swp_entry_t entry) 245 - { 246 - VM_BUG_ON(!is_migration_entry(entry) && !is_hwpoison_entry(entry)); 247 - 248 - return page_folio(pfn_to_page(swp_offset_pfn(entry))); 249 - } 250 - 251 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 252 - pte_t *ptep, pte_t pte) 253 { 254 size_t pgsize; 255 int i; ··· 250 unsigned long pfn, dpfn; 251 pgprot_t hugeprot; 252 253 if (!pte_present(pte)) { 254 - struct folio *folio; 255 - 256 - folio = hugetlb_swap_entry_to_folio(pte_to_swp_entry(pte)); 257 - ncontig = num_contig_ptes(folio_size(folio), &pgsize); 258 - 259 - for (i = 0; i < ncontig; i++, ptep++) 260 set_pte_at(mm, addr, ptep, pte); 261 return; 262 } ··· 263 return; 264 } 265 266 - ncontig = find_num_contig(mm, addr, ptep, &pgsize); 267 pfn = pte_pfn(pte); 268 dpfn = pgsize >> PAGE_SHIFT; 269 hugeprot = pte_pgprot(pte); ··· 560 void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, 561 pte_t old_pte, pte_t pte) 562 { 563 - set_huge_pte_at(vma->vm_mm, addr, ptep, pte); 564 }
··· 241 flush_tlb_range(&vma, saddr, addr); 242 } 243 244 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 245 + pte_t *ptep, pte_t pte, unsigned long sz) 246 { 247 size_t pgsize; 248 int i; ··· 257 unsigned long pfn, dpfn; 258 pgprot_t hugeprot; 259 260 + ncontig = num_contig_ptes(sz, &pgsize); 261 + 262 if (!pte_present(pte)) { 263 + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) 264 set_pte_at(mm, addr, ptep, pte); 265 return; 266 } ··· 273 return; 274 } 275 276 pfn = pte_pfn(pte); 277 dpfn = pgsize >> PAGE_SHIFT; 278 hugeprot = pte_pgprot(pte); ··· 571 void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep, 572 pte_t old_pte, pte_t pte) 573 { 574 + unsigned long psize = huge_page_size(hstate_vma(vma)); 575 + 576 + set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize); 577 }
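The arm64 change reads most clearly from the caller's side: the huge page size now travels with the PTE instead of being re-derived from a folio, which only existed for migration and hwpoison swap entries and tripped the VM_BUG_ON in the deleted hugetlb_swap_entry_to_folio() for anything else (such as pte markers). A minimal sketch of the new calling convention, using only names from the diffs above; vma, addr, ptep and pte stand in for a real mapping:

    /* Callers now pass the mapping size explicitly. */
    unsigned long psize = huge_page_size(hstate_vma(vma));  /* e.g. 2M */
    set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);

    /* arm64 derives the contig-PTE geometry from sz alone:
     *   ncontig = num_contig_ptes(sz, &pgsize);
     * so non-present entries are now written to all ncontig slots,
     * with addr advancing by pgsize on each step. */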
+1 -1
arch/parisc/include/asm/hugetlb.h
··· 6 7 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 8 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 9 - pte_t *ptep, pte_t pte); 10 11 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR 12 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
··· 6 7 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 8 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 9 + pte_t *ptep, pte_t pte, unsigned long sz); 10 11 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR 12 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+1 -1
arch/parisc/mm/hugetlbpage.c
··· 140 } 141 142 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 143 - pte_t *ptep, pte_t entry) 144 { 145 __set_huge_pte_at(mm, addr, ptep, entry); 146 }
··· 140 } 141 142 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 143 + pte_t *ptep, pte_t entry, unsigned long sz) 144 { 145 __set_huge_pte_at(mm, addr, ptep, entry); 146 }
+2 -1
arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h
··· 46 } 47 48 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 49 - void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); 50 51 #define __HAVE_ARCH_HUGE_PTE_CLEAR 52 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
··· 46 } 47 48 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 49 + void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, 50 + pte_t pte, unsigned long sz); 51 52 #define __HAVE_ARCH_HUGE_PTE_CLEAR 53 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+4 -1
arch/powerpc/mm/book3s64/hugetlbpage.c
··· 143 void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, 144 pte_t *ptep, pte_t old_pte, pte_t pte) 145 { 146 147 if (radix_enabled()) 148 return radix__huge_ptep_modify_prot_commit(vma, addr, ptep, 149 old_pte, pte); 150 - set_huge_pte_at(vma->vm_mm, addr, ptep, pte); 151 } 152 153 void __init hugetlbpage_init_defaultsize(void)
··· 143 void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, 144 pte_t *ptep, pte_t old_pte, pte_t pte) 145 { 146 + unsigned long psize; 147 148 if (radix_enabled()) 149 return radix__huge_ptep_modify_prot_commit(vma, addr, ptep, 150 old_pte, pte); 151 + 152 + psize = huge_page_size(hstate_vma(vma)); 153 + set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize); 154 } 155 156 void __init hugetlbpage_init_defaultsize(void)
+2 -1
arch/powerpc/mm/book3s64/radix_hugetlbpage.c
··· 47 pte_t old_pte, pte_t pte) 48 { 49 struct mm_struct *mm = vma->vm_mm; 50 51 /* 52 * POWER9 NMMU must flush the TLB after clearing the PTE before ··· 59 atomic_read(&mm->context.copros) > 0) 60 radix__flush_hugetlb_page(vma, addr); 61 62 - set_huge_pte_at(vma->vm_mm, addr, ptep, pte); 63 }
··· 47 pte_t old_pte, pte_t pte) 48 { 49 struct mm_struct *mm = vma->vm_mm; 50 + unsigned long psize = huge_page_size(hstate_vma(vma)); 51 52 /* 53 * POWER9 NMMU must flush the TLB after clearing the PTE before ··· 58 atomic_read(&mm->context.copros) > 0) 59 radix__flush_hugetlb_page(vma, addr); 60 61 + set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize); 62 }
+2 -1
arch/powerpc/mm/nohash/8xx.c
··· 91 if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot))) 92 return -EINVAL; 93 94 - set_huge_pte_at(&init_mm, va, ptep, pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot))); 95 96 return 0; 97 }
··· 91 if (new && WARN_ON(pte_present(*ptep) && pgprot_val(prot))) 92 return -EINVAL; 93 94 + set_huge_pte_at(&init_mm, va, ptep, 95 + pte_mkhuge(pfn_pte(pa >> PAGE_SHIFT, prot)), psize); 96 97 return 0; 98 }
+2 -1
arch/powerpc/mm/pgtable.c
··· 288 } 289 290 #if defined(CONFIG_PPC_8xx) 291 - void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) 292 { 293 pmd_t *pmd = pmd_off(mm, addr); 294 pte_basic_t val;
··· 288 } 289 290 #if defined(CONFIG_PPC_8xx) 291 + void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, 292 + pte_t pte, unsigned long sz) 293 { 294 pmd_t *pmd = pmd_off(mm, addr); 295 pte_basic_t val;
+2 -1
arch/riscv/include/asm/hugetlb.h
··· 18 19 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 20 void set_huge_pte_at(struct mm_struct *mm, 21 - unsigned long addr, pte_t *ptep, pte_t pte); 22 23 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR 24 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
··· 18 19 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 20 void set_huge_pte_at(struct mm_struct *mm, 21 + unsigned long addr, pte_t *ptep, pte_t pte, 22 + unsigned long sz); 23 24 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR 25 pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+2 -1
arch/riscv/mm/hugetlbpage.c
··· 180 void set_huge_pte_at(struct mm_struct *mm, 181 unsigned long addr, 182 pte_t *ptep, 183 - pte_t pte) 184 { 185 int i, pte_num; 186
··· 180 void set_huge_pte_at(struct mm_struct *mm, 181 unsigned long addr, 182 pte_t *ptep, 183 + pte_t pte, 184 + unsigned long sz) 185 { 186 int i, pte_num; 187
+4 -2
arch/s390/include/asm/hugetlb.h
··· 16 #define hugepages_supported() (MACHINE_HAS_EDAT1) 17 18 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 19 pte_t *ptep, pte_t pte); 20 pte_t huge_ptep_get(pte_t *ptep); 21 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, ··· 67 int changed = !pte_same(huge_ptep_get(ptep), pte); 68 if (changed) { 69 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); 70 - set_huge_pte_at(vma->vm_mm, addr, ptep, pte); 71 } 72 return changed; 73 } ··· 76 unsigned long addr, pte_t *ptep) 77 { 78 pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep); 79 - set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte)); 80 } 81 82 static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
··· 16 #define hugepages_supported() (MACHINE_HAS_EDAT1) 17 18 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 19 + pte_t *ptep, pte_t pte, unsigned long sz); 20 + void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 21 pte_t *ptep, pte_t pte); 22 pte_t huge_ptep_get(pte_t *ptep); 23 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, ··· 65 int changed = !pte_same(huge_ptep_get(ptep), pte); 66 if (changed) { 67 huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); 68 + __set_huge_pte_at(vma->vm_mm, addr, ptep, pte); 69 } 70 return changed; 71 } ··· 74 unsigned long addr, pte_t *ptep) 75 { 76 pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep); 77 + __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte)); 78 } 79 80 static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
+7 -1
arch/s390/mm/hugetlbpage.c
··· 142 __storage_key_init_range(paddr, paddr + size - 1); 143 } 144 145 - void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 146 pte_t *ptep, pte_t pte) 147 { 148 unsigned long rste; ··· 161 162 clear_huge_pte_skeys(mm, rste); 163 set_pte(ptep, __pte(rste)); 164 } 165 166 pte_t huge_ptep_get(pte_t *ptep)
··· 142 __storage_key_init_range(paddr, paddr + size - 1); 143 } 144 145 + void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 146 pte_t *ptep, pte_t pte) 147 { 148 unsigned long rste; ··· 161 162 clear_huge_pte_skeys(mm, rste); 163 set_pte(ptep, __pte(rste)); 164 + } 165 + 166 + void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 167 + pte_t *ptep, pte_t pte, unsigned long sz) 168 + { 169 + __set_huge_pte_at(mm, addr, ptep, pte); 170 } 171 172 pte_t huge_ptep_get(pte_t *ptep)
+4 -2
arch/sparc/include/asm/hugetlb.h
··· 14 15 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 16 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 17 pte_t *ptep, pte_t pte); 18 19 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR ··· 34 unsigned long addr, pte_t *ptep) 35 { 36 pte_t old_pte = *ptep; 37 - set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); 38 } 39 40 #define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS ··· 44 { 45 int changed = !pte_same(*ptep, pte); 46 if (changed) { 47 - set_huge_pte_at(vma->vm_mm, addr, ptep, pte); 48 flush_tlb_page(vma, addr); 49 } 50 return changed;
··· 14 15 #define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 16 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 17 + pte_t *ptep, pte_t pte, unsigned long sz); 18 + void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 19 pte_t *ptep, pte_t pte); 20 21 #define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR ··· 32 unsigned long addr, pte_t *ptep) 33 { 34 pte_t old_pte = *ptep; 35 + __set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte)); 36 } 37 38 #define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS ··· 42 { 43 int changed = !pte_same(*ptep, pte); 44 if (changed) { 45 + __set_huge_pte_at(vma->vm_mm, addr, ptep, pte); 46 flush_tlb_page(vma, addr); 47 } 48 return changed;
+7 -1
arch/sparc/mm/hugetlbpage.c
··· 328 return pte_offset_huge(pmd, addr); 329 } 330 331 - void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 332 pte_t *ptep, pte_t entry) 333 { 334 unsigned int nptes, orig_shift, shift; ··· 362 if (size == HPAGE_SIZE) 363 maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0, 364 orig_shift); 365 } 366 367 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
··· 328 return pte_offset_huge(pmd, addr); 329 } 330 331 + void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 332 pte_t *ptep, pte_t entry) 333 { 334 unsigned int nptes, orig_shift, shift; ··· 362 if (size == HPAGE_SIZE) 363 maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0, 364 orig_shift); 365 + } 366 + 367 + void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 368 + pte_t *ptep, pte_t entry, unsigned long sz) 369 + { 370 + __set_huge_pte_at(mm, addr, ptep, entry); 371 } 372 373 pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+8
arch/x86/include/asm/pgtable.h
··· 955 return a.pte == b.pte; 956 } 957 958 static inline int pte_present(pte_t a) 959 { 960 return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
··· 955 return a.pte == b.pte; 956 } 957 958 + static inline pte_t pte_next_pfn(pte_t pte) 959 + { 960 + if (__pte_needs_invert(pte_val(pte))) 961 + return __pte(pte_val(pte) - (1UL << PFN_PTE_SHIFT)); 962 + return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT)); 963 + } 964 + #define pte_next_pfn pte_next_pfn 965 + 966 static inline int pte_present(pte_t a) 967 { 968 return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
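A brief aside on why x86 overrides this helper rather than using the generic one added in include/linux/pgtable.h below: under the L1TF mitigation, some non-present PTEs store their PFN bits inverted, so stepping to the next PFN means subtracting at the raw-value level; the generic helper, which only adds 1 << PFN_PTE_SHIFT, would walk such PTEs backwards. A toy model (deliberately simplified, not the real x86 bit layout) of why the subtraction is correct:

    /* Toy encoding: the pfn occupies bits [12..63] and is stored inverted. */
    static inline unsigned long toy_next_inverted(unsigned long raw)
    {
            /* Since ~(a + b) == ~a - b, we have
             *   ~((pfn + 1) << 12) == ~(pfn << 12) - (1UL << 12),
             * so subtracting one pfn step advances the decoded pfn by one. */
            return raw - (1UL << 12);
    }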
+2 -3
fs/binfmt_elf_fdpic.c
··· 345 /* there's now no turning back... the old userspace image is dead, 346 * defunct, deceased, etc. 347 */ 348 if (elf_check_fdpic(&exec_params.hdr)) 349 - set_personality(PER_LINUX_FDPIC); 350 - else 351 - set_personality(PER_LINUX); 352 if (elf_read_implies_exec(&exec_params.hdr, executable_stack)) 353 current->personality |= READ_IMPLIES_EXEC; 354
··· 345 /* there's now no turning back... the old userspace image is dead, 346 * defunct, deceased, etc. 347 */ 348 + SET_PERSONALITY(exec_params.hdr); 349 if (elf_check_fdpic(&exec_params.hdr)) 350 + current->personality |= PER_LINUX_FDPIC; 351 if (elf_read_implies_exec(&exec_params.hdr, executable_stack)) 352 current->personality |= READ_IMPLIES_EXEC; 353
+3 -3
fs/nilfs2/gcinode.c
··· 73 struct the_nilfs *nilfs = inode->i_sb->s_fs_info; 74 75 err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn); 76 - if (unlikely(err)) { /* -EIO, -ENOMEM, -ENOENT */ 77 - brelse(bh); 78 goto failed; 79 - } 80 } 81 82 lock_buffer(bh); ··· 100 failed: 101 unlock_page(bh->b_page); 102 put_page(bh->b_page); 103 return err; 104 } 105
··· 73 struct the_nilfs *nilfs = inode->i_sb->s_fs_info; 74 75 err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn); 76 + if (unlikely(err)) /* -EIO, -ENOMEM, -ENOENT */ 77 goto failed; 78 } 79 80 lock_buffer(bh); ··· 102 failed: 103 unlock_page(bh->b_page); 104 put_page(bh->b_page); 105 + if (unlikely(err)) 106 + brelse(bh); 107 return err; 108 } 109
+1 -1
include/asm-generic/hugetlb.h
··· 76 77 #ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 78 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 79 - pte_t *ptep, pte_t pte) 80 { 81 set_pte_at(mm, addr, ptep, pte); 82 }
··· 76 77 #ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT 78 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 79 + pte_t *ptep, pte_t pte, unsigned long sz) 80 { 81 set_pte_at(mm, addr, ptep, pte); 82 }
+4 -2
include/linux/hugetlb.h
··· 984 unsigned long addr, pte_t *ptep, 985 pte_t old_pte, pte_t pte) 986 { 987 - set_huge_pte_at(vma->vm_mm, addr, ptep, pte); 988 } 989 #endif 990 ··· 1175 } 1176 1177 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 1178 - pte_t *ptep, pte_t pte) 1179 { 1180 } 1181
··· 984 unsigned long addr, pte_t *ptep, 985 pte_t old_pte, pte_t pte) 986 { 987 + unsigned long psize = huge_page_size(hstate_vma(vma)); 988 + 989 + set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize); 990 } 991 #endif 992 ··· 1173 } 1174 1175 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, 1176 + pte_t *ptep, pte_t pte, unsigned long sz) 1177 { 1178 } 1179
+11
include/linux/maple_tree.h
··· 428 #define MAS_ROOT ((struct maple_enode *)5UL) 429 #define MAS_NONE ((struct maple_enode *)9UL) 430 #define MAS_PAUSE ((struct maple_enode *)17UL) 431 #define MA_ERROR(err) \ 432 ((struct maple_enode *)(((unsigned long)err << 2) | 2UL)) 433 ··· 511 static inline bool mas_is_paused(const struct ma_state *mas) 512 { 513 return mas->node == MAS_PAUSE; 514 } 515 516 /**
··· 428 #define MAS_ROOT ((struct maple_enode *)5UL) 429 #define MAS_NONE ((struct maple_enode *)9UL) 430 #define MAS_PAUSE ((struct maple_enode *)17UL) 431 + #define MAS_OVERFLOW ((struct maple_enode *)33UL) 432 + #define MAS_UNDERFLOW ((struct maple_enode *)65UL) 433 #define MA_ERROR(err) \ 434 ((struct maple_enode *)(((unsigned long)err << 2) | 2UL)) 435 ··· 509 static inline bool mas_is_paused(const struct ma_state *mas) 510 { 511 return mas->node == MAS_PAUSE; 512 + } 513 + 514 + /* Check if the mas is pointing to a node or not */ 515 + static inline bool mas_is_active(struct ma_state *mas) 516 + { 517 + if ((unsigned long)mas->node >= MAPLE_RESERVED_RANGE) 518 + return true; 519 + 520 + return false; 521 } 522 523 /**
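mas_is_active() works because every state sentinel (MAS_START, MAS_ROOT, MAS_NONE, MAS_PAUSE and the new MAS_UNDERFLOW/MAS_OVERFLOW) is a small tagged constant below MAPLE_RESERVED_RANGE, while a genuine in-tree node is an ordinary pointer above it. A hedged sketch of how an iterator might consult it (tree contents and surrounding context elided):

    MA_STATE(mas, &tree, 0, 0);
    void *entry;

    rcu_read_lock();
    entry = mas_find(&mas, ULONG_MAX);
    if (mas_is_active(&mas)) {
            /* mas points at a real node; mas.index/mas.last give
             * the range occupied by entry */
    }
    rcu_read_unlock();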
+9 -1
include/linux/pgtable.h
··· 206 #endif 207 208 #ifndef set_ptes 209 /** 210 * set_ptes - Map consecutive pages to a contiguous range of addresses. 211 * @mm: Address space to map the pages into. ··· 239 if (--nr == 0) 240 break; 241 ptep++; 242 - pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT)); 243 } 244 arch_leave_lazy_mmu_mode(); 245 }
··· 206 #endif 207 208 #ifndef set_ptes 209 + 210 + #ifndef pte_next_pfn 211 + static inline pte_t pte_next_pfn(pte_t pte) 212 + { 213 + return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT)); 214 + } 215 + #endif 216 + 217 /** 218 * set_ptes - Map consecutive pages to a contiguous range of addresses. 219 * @mm: Address space to map the pages into. ··· 231 if (--nr == 0) 232 break; 233 ptep++; 234 + pte = pte_next_pfn(pte); 235 } 236 arch_leave_lazy_mmu_mode(); 237 }
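Together with the x86 override above, this makes set_ptes() advance through a folio via pte_next_pfn() instead of the old open-coded addition. A minimal sketch of the caller-visible contract (folio, prot and nr are placeholders for a real mapping):

    /* Map nr consecutive pages of a folio starting at addr; set_ptes()
     * derives each successive PTE with pte_next_pfn(). */
    pte_t pte = mk_pte(&folio->page, prot);  /* PTE for the first page */
    set_ptes(mm, addr, ptep, pte, nr);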
+17
kernel/crash_core.c
··· 740 #define pr_fmt(fmt) "crash hp: " fmt 741 742 /* 743 * This routine utilized when the crash_hotplug sysfs node is read. 744 * It reflects the kernel's ability/permission to update the crash 745 * elfcorehdr directly. ··· 759 { 760 int rc = 0; 761 762 /* Obtain lock while reading crash information */ 763 if (!kexec_trylock()) { 764 pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n"); 765 return 0; 766 } 767 if (kexec_crash_image) { ··· 774 } 775 /* Release lock now that update complete */ 776 kexec_unlock(); 777 778 return rc; 779 } ··· 797 { 798 struct kimage *image; 799 800 /* Obtain lock while changing crash information */ 801 if (!kexec_trylock()) { 802 pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n"); 803 return; 804 } 805 ··· 868 out: 869 /* Release lock now that update complete */ 870 kexec_unlock(); 871 } 872 873 static int crash_memhp_notifier(struct notifier_block *nb, unsigned long val, void *v)
··· 740 #define pr_fmt(fmt) "crash hp: " fmt 741 742 /* 743 + * Different than kexec/kdump loading/unloading/jumping/shrinking which 744 + * usually rarely happen, there will be many crash hotplug events notified 745 + * during one short period, e.g one memory board is hot added and memory 746 + * regions are online. So mutex lock __crash_hotplug_lock is used to 747 + * serialize the crash hotplug handling specifically. 748 + */ 749 + DEFINE_MUTEX(__crash_hotplug_lock); 750 + #define crash_hotplug_lock() mutex_lock(&__crash_hotplug_lock) 751 + #define crash_hotplug_unlock() mutex_unlock(&__crash_hotplug_lock) 752 + 753 + /* 754 * This routine utilized when the crash_hotplug sysfs node is read. 755 * It reflects the kernel's ability/permission to update the crash 756 * elfcorehdr directly. ··· 748 { 749 int rc = 0; 750 751 + crash_hotplug_lock(); 752 /* Obtain lock while reading crash information */ 753 if (!kexec_trylock()) { 754 pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n"); 755 + crash_hotplug_unlock(); 756 return 0; 757 } 758 if (kexec_crash_image) { ··· 761 } 762 /* Release lock now that update complete */ 763 kexec_unlock(); 764 + crash_hotplug_unlock(); 765 766 return rc; 767 } ··· 783 { 784 struct kimage *image; 785 786 + crash_hotplug_lock(); 787 /* Obtain lock while changing crash information */ 788 if (!kexec_trylock()) { 789 pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n"); 790 + crash_hotplug_unlock(); 791 return; 792 } 793 ··· 852 out: 853 /* Release lock now that update complete */ 854 kexec_unlock(); 855 + crash_hotplug_unlock(); 856 } 857 858 static int crash_memhp_notifier(struct notifier_block *nb, unsigned long val, void *v)
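The new mutex nests outside the existing kexec trylock on both paths. Condensed, with error handling and the actual elfcorehdr update elided, the ordering the patch establishes is:

    crash_hotplug_lock();          /* serialize hotplug events; may sleep */
    if (kexec_trylock()) {         /* still guards kexec_crash_image */
            if (kexec_crash_image) {
                    /* ... regenerate the elfcorehdr ... */
            }
            kexec_unlock();
    }
    crash_hotplug_unlock();

Because hotplug notifications can arrive in bursts (for instance when a hot-added memory board brings its regions online one by one), queueing them on a sleeping lock avoids the kexec_trylock() failures that would otherwise leave the elfcorehdr stale.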
+166 -61
lib/maple_tree.c
··· 256 return xa_is_err(mas->node); 257 } 258 259 static inline bool mas_searchable(struct ma_state *mas) 260 { 261 if (mas_is_none(mas)) ··· 4431 * 4432 * @mas: The maple state 4433 * @max: The minimum starting range 4434 * 4435 * Return: The entry in the previous slot which is possibly NULL 4436 */ 4437 - static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty) 4438 { 4439 void *entry; 4440 void __rcu **slots; ··· 4454 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4455 goto retry; 4456 4457 - again: 4458 if (mas->min <= min) { 4459 pivot = mas_safe_min(mas, pivots, mas->offset); ··· 4461 goto retry; 4462 4463 if (pivot <= min) 4464 - return NULL; 4465 } 4466 4467 if (likely(mas->offset)) { 4468 mas->offset--; 4469 mas->last = mas->index - 1; ··· 4476 } 4477 4478 if (mas_is_none(mas)) 4479 - return NULL; 4480 4481 mas->last = mas->max; 4482 node = mas_mn(mas); ··· 4493 if (likely(entry)) 4494 return entry; 4495 4496 - if (!empty) 4497 goto again; 4498 4499 return entry; 4500 } 4501 4502 /* ··· 4595 * @mas: The maple state 4596 * @max: The maximum starting range 4597 * @empty: Can be empty 4598 * 4599 * Return: The entry in the next slot which is possibly NULL 4600 */ 4601 - static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty) 4602 { 4603 void __rcu **slots; 4604 unsigned long *pivots; ··· 4620 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4621 goto retry; 4622 4623 - again: 4624 if (mas->max >= max) { 4625 if (likely(mas->offset < data_end)) 4626 pivot = pivots[mas->offset]; 4627 else 4628 - return NULL; /* must be mas->max */ 4629 4630 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4631 goto retry; 4632 4633 if (pivot >= max) 4634 - return NULL; 4635 } 4636 4637 if (likely(mas->offset < data_end)) { 4638 mas->index = pivots[mas->offset] + 1; 4639 mas->offset++; 4640 if (likely(mas->offset < data_end)) 4641 mas->last = pivots[mas->offset]; ··· 4647 goto retry; 4648 } 4649 4650 - if (mas_is_none(mas)) 4651 return NULL; 4652 4653 mas->offset = 0; 4654 mas->index = mas->min; ··· 4670 return entry; 4671 4672 if (!empty) { 4673 - if (!mas->offset) 4674 - data_end = 2; 4675 goto again; 4676 } 4677 4678 return entry; 4679 } 4680 4681 /* ··· 4693 * 4694 * Set the @mas->node to the next entry and the range_start to 4695 * the beginning value for the entry. Does not check beyond @limit. 4696 - * Sets @mas->index and @mas->last to the limit if it is hit. 4697 * Restarts on dead nodes. 4698 * 4699 * Return: the next entry or %NULL. 4700 */ 4701 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) 4702 { 4703 - if (mas->last >= limit) 4704 return NULL; 4705 4706 - return mas_next_slot(mas, limit, false); 4707 } 4708 4709 /* ··· 4882 { 4883 void *entry; 4884 4885 - if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas)) 4886 mas->node = MAS_START; 4887 retry: 4888 entry = mas_state_walk(mas); ··· 5339 5340 static void mas_wr_store_setup(struct ma_wr_state *wr_mas) 5341 { 5342 - if (mas_is_start(wr_mas->mas)) 5343 - return; 5344 5345 - if (unlikely(mas_is_paused(wr_mas->mas))) 5346 - goto reset; 5347 5348 - if (unlikely(mas_is_none(wr_mas->mas))) 5349 - goto reset; 5350 5351 /* 5352 * A less strict version of mas_is_span_wr() where we allow spanning ··· 5648 { 5649 bool was_none = mas_is_none(mas); 5650 5651 - if (mas_is_none(mas) || mas_is_paused(mas)) 5652 mas->node = MAS_START; 5653 5654 if (mas_is_start(mas)) 5655 *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ ··· 5685 5686 if (mas_is_none(mas)) 5687 return true; 5688 return false; 5689 } 5690 ··· 5708 return entry; 5709 5710 /* Retries on dead nodes handled by mas_next_slot */ 5711 - return mas_next_slot(mas, max, false); 5712 } 5713 EXPORT_SYMBOL_GPL(mas_next); 5714 ··· 5731 return entry; 5732 5733 /* Retries on dead nodes handled by mas_next_slot */ 5734 - return mas_next_slot(mas, max, true); 5735 } 5736 EXPORT_SYMBOL_GPL(mas_next_range); 5737 ··· 5762 static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min, 5763 void **entry) 5764 { 5765 - if (mas->index <= min) 5766 - goto none; 5767 - 5768 - if (mas_is_none(mas) || mas_is_paused(mas)) 5769 - mas->node = MAS_START; 5770 - 5771 - if (mas_is_start(mas)) { 5772 - mas_walk(mas); 5773 - if (!mas->index) 5774 - goto none; 5775 } 5776 5777 if (unlikely(mas_is_ptr(mas))) { 5778 if (!mas->index) ··· 5831 if (mas_prev_setup(mas, min, &entry)) 5832 return entry; 5833 5834 - return mas_prev_slot(mas, min, false); 5835 } 5836 EXPORT_SYMBOL_GPL(mas_prev); 5837 ··· 5854 if (mas_prev_setup(mas, min, &entry)) 5855 return entry; 5856 5857 - return mas_prev_slot(mas, min, true); 5858 } 5859 EXPORT_SYMBOL_GPL(mas_prev_range); 5860 ··· 5912 static inline bool mas_find_setup(struct ma_state *mas, unsigned long max, 5913 void **entry) 5914 { 5915 - *entry = NULL; 5916 5917 - if (unlikely(mas_is_none(mas))) { 5918 if (unlikely(mas->last >= max)) 5919 return true; 5920 5921 mas->index = mas->last; 5922 mas->node = MAS_START; 5923 - } else if (unlikely(mas_is_paused(mas))) { 5924 - if (unlikely(mas->last >= max)) 5925 return true; 5926 5927 mas->node = MAS_START; 5928 - mas->index = ++mas->last; 5929 - } else if (unlikely(mas_is_ptr(mas))) 5930 - goto ptr_out_of_range; 5931 5932 - if (unlikely(mas_is_start(mas))) { 5933 /* First run or continue */ 5934 if (mas->index > max) 5935 return true; ··· 5990 return entry; 5991 5992 /* Retries on dead nodes handled by mas_next_slot */ 5993 - return mas_next_slot(mas, max, false); 5994 } 5995 EXPORT_SYMBOL_GPL(mas_find); 5996 ··· 6008 */ 6009 void *mas_find_range(struct ma_state *mas, unsigned long max) 6010 { 6011 - void *entry; 6012 6013 if (mas_find_setup(mas, max, &entry)) 6014 return entry; 6015 6016 /* Retries on dead nodes handled by mas_next_slot */ 6017 - return mas_next_slot(mas, max, true); 6018 } 6019 EXPORT_SYMBOL_GPL(mas_find_range); 6020 ··· 6029 static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, 6030 void **entry) 6031 { 6032 - *entry = NULL; 6033 6034 - if (unlikely(mas_is_none(mas))) { 6035 - if (mas->index <= min) 6036 - goto none; 6037 - 6038 - mas->last = mas->index; 6039 - mas->node = MAS_START; 6040 } 6041 6042 - if (unlikely(mas_is_paused(mas))) { 6043 if (unlikely(mas->index <= min)) { 6044 mas->node = MAS_NONE; 6045 return true; 6046 } 6047 mas->node = MAS_START; 6048 mas->last = --mas->index; 6049 } 6050 6051 - if (unlikely(mas_is_start(mas))) { 6052 /* First run or continue */ 6053 if (mas->index < min) 6054 return true; ··· 6109 */ 6110 void *mas_find_rev(struct ma_state *mas, unsigned long min) 6111 { 6112 - void *entry; 6113 6114 if (mas_find_rev_setup(mas, min, &entry)) 6115 return entry; 6116 6117 /* Retries on dead nodes handled by mas_prev_slot */ 6118 - return mas_prev_slot(mas, min, false); 6119 6120 } 6121 EXPORT_SYMBOL_GPL(mas_find_rev); ··· 6135 */ 6136 void *mas_find_range_rev(struct ma_state *mas, unsigned long min) 6137 { 6138 - void *entry; 6139 6140 if (mas_find_rev_setup(mas, min, &entry)) 6141 return entry; 6142 6143 /* Retries on dead nodes handled by mas_prev_slot */ 6144 - return mas_prev_slot(mas, min, true); 6145 } 6146 EXPORT_SYMBOL_GPL(mas_find_range_rev); 6147
··· 256 return xa_is_err(mas->node); 257 } 258 259 + static __always_inline bool mas_is_overflow(struct ma_state *mas) 260 + { 261 + if (unlikely(mas->node == MAS_OVERFLOW)) 262 + return true; 263 + 264 + return false; 265 + } 266 + 267 + static __always_inline bool mas_is_underflow(struct ma_state *mas) 268 + { 269 + if (unlikely(mas->node == MAS_UNDERFLOW)) 270 + return true; 271 + 272 + return false; 273 + } 274 + 275 static inline bool mas_searchable(struct ma_state *mas) 276 { 277 if (mas_is_none(mas)) ··· 4415 * 4416 * @mas: The maple state 4417 * @max: The minimum starting range 4418 + * @empty: Can be empty 4419 + * @set_underflow: Set the @mas->node to underflow state on limit. 4420 * 4421 * Return: The entry in the previous slot which is possibly NULL 4422 */ 4423 + static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty, 4424 + bool set_underflow) 4425 { 4426 void *entry; 4427 void __rcu **slots; ··· 4435 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4436 goto retry; 4437 4438 if (mas->min <= min) { 4439 pivot = mas_safe_min(mas, pivots, mas->offset); ··· 4443 goto retry; 4444 4445 if (pivot <= min) 4446 + goto underflow; 4447 } 4448 4449 + again: 4450 if (likely(mas->offset)) { 4451 mas->offset--; 4452 mas->last = mas->index - 1; ··· 4457 } 4458 4459 if (mas_is_none(mas)) 4460 + goto underflow; 4461 4462 mas->last = mas->max; 4463 node = mas_mn(mas); ··· 4474 if (likely(entry)) 4475 return entry; 4476 4477 + if (!empty) { 4478 + if (mas->index <= min) 4479 + goto underflow; 4480 + 4481 goto again; 4482 + } 4483 4484 return entry; 4485 + 4486 + underflow: 4487 + if (set_underflow) 4488 + mas->node = MAS_UNDERFLOW; 4489 + return NULL; 4490 } 4491 4492 /* ··· 4567 * @mas: The maple state 4568 * @max: The maximum starting range 4569 * @empty: Can be empty 4570 + * @set_overflow: Should @mas->node be set to overflow when the limit is 4571 + * reached. 4572 * 4573 * Return: The entry in the next slot which is possibly NULL 4574 */ 4575 + static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty, 4576 + bool set_overflow) 4577 { 4578 void __rcu **slots; 4579 unsigned long *pivots; ··· 4589 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4590 goto retry; 4591 4592 if (mas->max >= max) { 4593 if (likely(mas->offset < data_end)) 4594 pivot = pivots[mas->offset]; 4595 else 4596 + goto overflow; 4597 4598 if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) 4599 goto retry; 4600 4601 if (pivot >= max) 4602 + goto overflow; 4603 } 4604 4605 if (likely(mas->offset < data_end)) { 4606 mas->index = pivots[mas->offset] + 1; 4607 + again: 4608 mas->offset++; 4609 if (likely(mas->offset < data_end)) 4610 mas->last = pivots[mas->offset]; ··· 4616 goto retry; 4617 } 4618 4619 + if (WARN_ON_ONCE(mas_is_none(mas))) { 4620 + mas->node = MAS_OVERFLOW; 4621 return NULL; 4622 + goto overflow; 4623 + } 4624 4625 mas->offset = 0; 4626 mas->index = mas->min; ··· 4636 return entry; 4637 4638 if (!empty) { 4639 + if (mas->last >= max) 4640 + goto overflow; 4641 + 4642 + mas->index = mas->last + 1; 4643 + /* Node cannot end on NULL, so it's safe to short-cut here */ 4644 goto again; 4645 } 4646 4647 return entry; 4648 + 4649 + overflow: 4650 + if (set_overflow) 4651 + mas->node = MAS_OVERFLOW; 4652 + return NULL; 4653 } 4654 4655 /* ··· 4651 * 4652 * Set the @mas->node to the next entry and the range_start to 4653 * the beginning value for the entry. Does not check beyond @limit. 4654 + * Sets @mas->index and @mas->last to the range, Does not update @mas->index and 4655 + * @mas->last on overflow. 4656 * Restarts on dead nodes. 4657 * 4658 * Return: the next entry or %NULL. 4659 */ 4660 static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) 4661 { 4662 + if (mas->last >= limit) { 4663 + mas->node = MAS_OVERFLOW; 4664 return NULL; 4665 + } 4666 4667 + return mas_next_slot(mas, limit, false, true); 4668 } 4669 4670 /* ··· 4837 { 4838 void *entry; 4839 4840 + if (!mas_is_active(mas) || !mas_is_start(mas)) 4841 mas->node = MAS_START; 4842 retry: 4843 entry = mas_state_walk(mas); ··· 5294 5295 static void mas_wr_store_setup(struct ma_wr_state *wr_mas) 5296 { 5297 + if (!mas_is_active(wr_mas->mas)) { 5298 + if (mas_is_start(wr_mas->mas)) 5299 + return; 5300 5301 + if (unlikely(mas_is_paused(wr_mas->mas))) 5302 + goto reset; 5303 5304 + if (unlikely(mas_is_none(wr_mas->mas))) 5305 + goto reset; 5306 + 5307 + if (unlikely(mas_is_overflow(wr_mas->mas))) 5308 + goto reset; 5309 + 5310 + if (unlikely(mas_is_underflow(wr_mas->mas))) 5311 + goto reset; 5312 + } 5313 5314 /* 5315 * A less strict version of mas_is_span_wr() where we allow spanning ··· 5595 { 5596 bool was_none = mas_is_none(mas); 5597 5598 + if (unlikely(mas->last >= max)) { 5599 + mas->node = MAS_OVERFLOW; 5600 + return true; 5601 + } 5602 + 5603 + if (mas_is_active(mas)) 5604 + return false; 5605 + 5606 + if (mas_is_none(mas) || mas_is_paused(mas)) { 5607 mas->node = MAS_START; 5608 + } else if (mas_is_overflow(mas)) { 5609 + /* Overflowed before, but the max changed */ 5610 + mas->node = MAS_START; 5611 + } else if (mas_is_underflow(mas)) { 5612 + mas->node = MAS_START; 5613 + *entry = mas_walk(mas); 5614 + if (*entry) 5615 + return true; 5616 + } 5617 5618 if (mas_is_start(mas)) 5619 *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ ··· 5615 5616 if (mas_is_none(mas)) 5617 return true; 5618 + 5619 return false; 5620 } 5621 ··· 5637 return entry; 5638 5639 /* Retries on dead nodes handled by mas_next_slot */ 5640 + return mas_next_slot(mas, max, false, true); 5641 } 5642 EXPORT_SYMBOL_GPL(mas_next); 5643 ··· 5660 return entry; 5661 5662 /* Retries on dead nodes handled by mas_next_slot */ 5663 + return mas_next_slot(mas, max, true, true); 5664 } 5665 EXPORT_SYMBOL_GPL(mas_next_range); 5666 ··· 5691 static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min, 5692 void **entry) 5693 { 5694 + if (unlikely(mas->index <= min)) { 5695 + mas->node = MAS_UNDERFLOW; 5696 + return true; 5697 } 5698 + 5699 + if (mas_is_active(mas)) 5700 + return false; 5701 + 5702 + if (mas_is_overflow(mas)) { 5703 + mas->node = MAS_START; 5704 + *entry = mas_walk(mas); 5705 + if (*entry) 5706 + return true; 5707 + } 5708 + 5709 + if (mas_is_none(mas) || mas_is_paused(mas)) { 5710 + mas->node = MAS_START; 5711 + } else if (mas_is_underflow(mas)) { 5712 + /* underflowed before but the min changed */ 5713 + mas->node = MAS_START; 5714 + } 5715 + 5716 + if (mas_is_start(mas)) 5717 + mas_walk(mas); 5718 5719 if (unlikely(mas_is_ptr(mas))) { 5720 if (!mas->index) ··· 5747 if (mas_prev_setup(mas, min, &entry)) 5748 return entry; 5749 5750 + return mas_prev_slot(mas, min, false, true); 5751 } 5752 EXPORT_SYMBOL_GPL(mas_prev); 5753 ··· 5770 if (mas_prev_setup(mas, min, &entry)) 5771 return entry; 5772 5773 + return mas_prev_slot(mas, min, true, true); 5774 } 5775 EXPORT_SYMBOL_GPL(mas_prev_range); 5776 ··· 5828 static inline bool mas_find_setup(struct ma_state *mas, unsigned long max, 5829 void **entry) 5830 { 5831 + if (mas_is_active(mas)) { 5832 + if (mas->last < max) 5833 + return false; 5834 5835 + return true; 5836 + } 5837 + 5838 + if (mas_is_paused(mas)) { 5839 + if (unlikely(mas->last >= max)) 5840 + return true; 5841 + 5842 + mas->index = ++mas->last; 5843 + mas->node = MAS_START; 5844 + } else if (mas_is_none(mas)) { 5845 if (unlikely(mas->last >= max)) 5846 return true; 5847 5848 mas->index = mas->last; 5849 mas->node = MAS_START; 5850 + } else if (mas_is_overflow(mas) || mas_is_underflow(mas)) { 5851 + if (mas->index > max) { 5852 + mas->node = MAS_OVERFLOW; 5853 return true; 5854 + } 5855 5856 mas->node = MAS_START; 5857 + } 5858 5859 + if (mas_is_start(mas)) { 5860 /* First run or continue */ 5861 if (mas->index > max) 5862 return true; ··· 5895 return entry; 5896 5897 /* Retries on dead nodes handled by mas_next_slot */ 5898 + return mas_next_slot(mas, max, false, false); 5899 } 5900 EXPORT_SYMBOL_GPL(mas_find); 5901 ··· 5913 */ 5914 void *mas_find_range(struct ma_state *mas, unsigned long max) 5915 { 5916 + void *entry = NULL; 5917 5918 if (mas_find_setup(mas, max, &entry)) 5919 return entry; 5920 5921 /* Retries on dead nodes handled by mas_next_slot */ 5922 + return mas_next_slot(mas, max, true, false); 5923 } 5924 EXPORT_SYMBOL_GPL(mas_find_range); 5925 ··· 5934 static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, 5935 void **entry) 5936 { 5937 + if (mas_is_active(mas)) { 5938 + if (mas->index > min) 5939 + return false; 5940 5941 + return true; 5942 } 5943 5944 + if (mas_is_paused(mas)) { 5945 if (unlikely(mas->index <= min)) { 5946 mas->node = MAS_NONE; 5947 return true; 5948 } 5949 mas->node = MAS_START; 5950 mas->last = --mas->index; 5951 + } else if (mas_is_none(mas)) { 5952 + if (mas->index <= min) 5953 + goto none; 5954 + 5955 + mas->last = mas->index; 5956 + mas->node = MAS_START; 5957 + } else if (mas_is_underflow(mas) || mas_is_overflow(mas)) { 5958 + if (mas->last <= min) { 5959 + mas->node = MAS_UNDERFLOW; 5960 + return true; 5961 + } 5962 + 5963 + mas->node = MAS_START; 5964 } 5965 5966 + if (mas_is_start(mas)) { 5967 /* First run or continue */ 5968 if (mas->index < min) 5969 return true; ··· 6004 */ 6005 void *mas_find_rev(struct ma_state *mas, unsigned long min) 6006 { 6007 + void *entry = NULL; 6008 6009 if (mas_find_rev_setup(mas, min, &entry)) 6010 return entry; 6011 6012 /* Retries on dead nodes handled by mas_prev_slot */ 6013 + return mas_prev_slot(mas, min, false, false); 6014 6015 } 6016 EXPORT_SYMBOL_GPL(mas_find_rev); ··· 6030 */ 6031 void *mas_find_range_rev(struct ma_state *mas, unsigned long min) 6032 { 6033 + void *entry = NULL; 6034 6035 if (mas_find_rev_setup(mas, min, &entry)) 6036 return entry; 6037 6038 /* Retries on dead nodes handled by mas_prev_slot */ 6039 + return mas_prev_slot(mas, min, true, false); 6040 } 6041 EXPORT_SYMBOL_GPL(mas_find_range_rev); 6042
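The net effect of the rework is a small protocol: running past a limit parks the state in MAS_OVERFLOW or MAS_UNDERFLOW instead of MAS_NONE, and a later call in the opposite direction (or a setup with a changed limit) recovers by rewalking from the parked range. A condensed sketch of the sequence the new test cases below assert, picking up with an iterator active on the trailing gap 0x3501..ULONG_MAX:

    entry = mas_next(&mas, ULONG_MAX);  /* NULL; state parks as MAS_OVERFLOW */
    entry = mas_next(&mas, ULONG_MAX);  /* NULL again: overflow is sticky */
    entry = mas_prev(&mas, 0);          /* recovers: returns the 0x3000..0x3500 entry */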
+72 -15
lib/test_maple_tree.c
··· 2166 MT_BUG_ON(mt, val != NULL); 2167 MT_BUG_ON(mt, mas.index != 0); 2168 MT_BUG_ON(mt, mas.last != 5); 2169 - MT_BUG_ON(mt, mas.node != MAS_NONE); 2170 2171 mas.index = 0; 2172 mas.last = 5; ··· 2917 * exists MAS_NONE active range 2918 * exists active active range 2919 * DNE active active set to last range 2920 * 2921 * Function ENTRY Start Result index & last 2922 * mas_prev() ··· 2946 * any MAS_ROOT MAS_NONE 0 2947 * exists active active range 2948 * DNE active active last range 2949 * 2950 * Function ENTRY Start Result index & last 2951 * mas_find() ··· 2957 * DNE MAS_START MAS_NONE 0 2958 * DNE MAS_PAUSE MAS_NONE 0 2959 * DNE MAS_ROOT MAS_NONE 0 2960 - * DNE MAS_NONE MAS_NONE 0 2961 * if index == 0 2962 * exists MAS_START MAS_ROOT 0 2963 * exists MAS_PAUSE MAS_ROOT 0 ··· 2969 * DNE MAS_START active set to max 2970 * exists MAS_PAUSE active range 2971 * DNE MAS_PAUSE active set to max 2972 - * exists MAS_NONE active range 2973 * exists active active range 2974 * DNE active active last range (max < last) 2975 * ··· 2994 * DNE MAS_START active set to min 2995 * exists MAS_PAUSE active range 2996 * DNE MAS_PAUSE active set to min 2997 - * exists MAS_NONE active range 2998 * exists active active range 2999 * DNE active active last range (min > index) 3000 * ··· 3041 mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL); 3042 3043 mas_lock(&mas); 3044 - /* prev: Start -> none */ 3045 entry = mas_prev(&mas, 0); 3046 MT_BUG_ON(mt, entry != NULL); 3047 - MT_BUG_ON(mt, mas.node != MAS_NONE); 3048 3049 /* prev: Start -> root */ 3050 mas_set(&mas, 10); ··· 3071 MT_BUG_ON(mt, entry != NULL); 3072 MT_BUG_ON(mt, mas.node != MAS_NONE); 3073 3074 - /* next: start -> none */ 3075 mas_set(&mas, 10); 3076 entry = mas_next(&mas, ULONG_MAX); 3077 MT_BUG_ON(mt, mas.index != 1); ··· 3270 MT_BUG_ON(mt, mas.last != 0x2500); 3271 MT_BUG_ON(mt, !mas_active(mas)); 3272 3273 - /* next:active -> active out of range*/ 3274 entry = mas_next(&mas, 0x2999); 3275 MT_BUG_ON(mt, entry != NULL); 3276 MT_BUG_ON(mt, mas.index != 0x2501); 3277 MT_BUG_ON(mt, mas.last != 0x2fff); 3278 MT_BUG_ON(mt, !mas_active(mas)); 3279 3280 - /* Continue after out of range*/ 3281 entry = mas_next(&mas, ULONG_MAX); 3282 MT_BUG_ON(mt, entry != ptr3); 3283 MT_BUG_ON(mt, mas.index != 0x3000); 3284 MT_BUG_ON(mt, mas.last != 0x3500); 3285 MT_BUG_ON(mt, !mas_active(mas)); 3286 3287 - /* next:active -> active out of range*/ 3288 entry = mas_next(&mas, ULONG_MAX); 3289 MT_BUG_ON(mt, entry != NULL); 3290 MT_BUG_ON(mt, mas.index != 0x3501); 3291 MT_BUG_ON(mt, mas.last != ULONG_MAX); 3292 MT_BUG_ON(mt, !mas_active(mas)); 3293 3294 /* next: none -> active, skip value at location */ ··· 3330 MT_BUG_ON(mt, mas.last != 0x1500); 3331 MT_BUG_ON(mt, !mas_active(mas)); 3332 3333 - /* prev:active -> active out of range*/ 3334 entry = mas_prev(&mas, 0); 3335 MT_BUG_ON(mt, entry != NULL); 3336 MT_BUG_ON(mt, mas.index != 0); 3337 MT_BUG_ON(mt, mas.last != 0x0FFF); 3338 MT_BUG_ON(mt, !mas_active(mas)); 3339 3340 /* prev: pause ->active */ ··· 3383 MT_BUG_ON(mt, mas.last != 0x2500); 3384 MT_BUG_ON(mt, !mas_active(mas)); 3385 3386 - /* prev:active -> active out of range*/ 3387 entry = mas_prev(&mas, 0x1600); 3388 MT_BUG_ON(mt, entry != NULL); 3389 MT_BUG_ON(mt, mas.index != 0x1501); 3390 MT_BUG_ON(mt, mas.last != 0x1FFF); 3391 MT_BUG_ON(mt, !mas_active(mas)); 3392 3393 - /* prev: active ->active, continue*/ 3394 entry = mas_prev(&mas, 0); 3395 MT_BUG_ON(mt, entry != ptr); 3396 MT_BUG_ON(mt, mas.index != 0x1000); ··· 3437 MT_BUG_ON(mt, mas.last != 0x2FFF); 3438 MT_BUG_ON(mt, !mas_active(mas)); 3439 3440 - /* find: none ->active */ 3441 entry = mas_find(&mas, 0x5000); 3442 MT_BUG_ON(mt, entry != ptr3); 3443 MT_BUG_ON(mt, mas.index != 0x3000); ··· 3835 mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); 3836 check_empty_area_fill(&tree); 3837 mtree_destroy(&tree); 3838 - 3839 3840 mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); 3841 check_state_handling(&tree);
··· 2166 MT_BUG_ON(mt, val != NULL); 2167 MT_BUG_ON(mt, mas.index != 0); 2168 MT_BUG_ON(mt, mas.last != 5); 2169 + MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); 2170 2171 mas.index = 0; 2172 mas.last = 5; ··· 2917 * exists MAS_NONE active range 2918 * exists active active range 2919 * DNE active active set to last range 2920 + * ERANGE active MAS_OVERFLOW last range 2921 * 2922 * Function ENTRY Start Result index & last 2923 * mas_prev() ··· 2945 * any MAS_ROOT MAS_NONE 0 2946 * exists active active range 2947 * DNE active active last range 2948 + * ERANGE active MAS_UNDERFLOW last range 2949 * 2950 * Function ENTRY Start Result index & last 2951 * mas_find() ··· 2955 * DNE MAS_START MAS_NONE 0 2956 * DNE MAS_PAUSE MAS_NONE 0 2957 * DNE MAS_ROOT MAS_NONE 0 2958 + * DNE MAS_NONE MAS_NONE 1 2959 * if index == 0 2960 * exists MAS_START MAS_ROOT 0 2961 * exists MAS_PAUSE MAS_ROOT 0 ··· 2967 * DNE MAS_START active set to max 2968 * exists MAS_PAUSE active range 2969 * DNE MAS_PAUSE active set to max 2970 + * exists MAS_NONE active range (start at last) 2971 * exists active active range 2972 * DNE active active last range (max < last) 2973 * ··· 2992 * DNE MAS_START active set to min 2993 * exists MAS_PAUSE active range 2994 * DNE MAS_PAUSE active set to min 2995 + * exists MAS_NONE active range (start at index) 2996 * exists active active range 2997 * DNE active active last range (min > index) 2998 * ··· 3039 mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL); 3040 3041 mas_lock(&mas); 3042 + /* prev: Start -> underflow*/ 3043 entry = mas_prev(&mas, 0); 3044 MT_BUG_ON(mt, entry != NULL); 3045 + MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); 3046 3047 /* prev: Start -> root */ 3048 mas_set(&mas, 10); ··· 3069 MT_BUG_ON(mt, entry != NULL); 3070 MT_BUG_ON(mt, mas.node != MAS_NONE); 3071 3072 + /* next: start -> none*/ 3073 mas_set(&mas, 10); 3074 entry = mas_next(&mas, ULONG_MAX); 3075 MT_BUG_ON(mt, mas.index != 1); ··· 3268 MT_BUG_ON(mt, mas.last != 0x2500); 3269 MT_BUG_ON(mt, !mas_active(mas)); 3270 3271 + /* next:active -> active beyond data */ 3272 entry = mas_next(&mas, 0x2999); 3273 MT_BUG_ON(mt, entry != NULL); 3274 MT_BUG_ON(mt, mas.index != 0x2501); 3275 MT_BUG_ON(mt, mas.last != 0x2fff); 3276 MT_BUG_ON(mt, !mas_active(mas)); 3277 3278 + /* Continue after last range ends after max */ 3279 entry = mas_next(&mas, ULONG_MAX); 3280 MT_BUG_ON(mt, entry != ptr3); 3281 MT_BUG_ON(mt, mas.index != 0x3000); 3282 MT_BUG_ON(mt, mas.last != 0x3500); 3283 MT_BUG_ON(mt, !mas_active(mas)); 3284 3285 + /* next:active -> active continued */ 3286 entry = mas_next(&mas, ULONG_MAX); 3287 MT_BUG_ON(mt, entry != NULL); 3288 MT_BUG_ON(mt, mas.index != 0x3501); 3289 MT_BUG_ON(mt, mas.last != ULONG_MAX); 3290 + MT_BUG_ON(mt, !mas_active(mas)); 3291 + 3292 + /* next:active -> overflow */ 3293 + entry = mas_next(&mas, ULONG_MAX); 3294 + MT_BUG_ON(mt, entry != NULL); 3295 + MT_BUG_ON(mt, mas.index != 0x3501); 3296 + MT_BUG_ON(mt, mas.last != ULONG_MAX); 3297 + MT_BUG_ON(mt, mas.node != MAS_OVERFLOW); 3298 + 3299 + /* next:overflow -> overflow */ 3300 + entry = mas_next(&mas, ULONG_MAX); 3301 + MT_BUG_ON(mt, entry != NULL); 3302 + MT_BUG_ON(mt, mas.index != 0x3501); 3303 + MT_BUG_ON(mt, mas.last != ULONG_MAX); 3304 + MT_BUG_ON(mt, mas.node != MAS_OVERFLOW); 3305 + 3306 + /* prev:overflow -> active */ 3307 + entry = mas_prev(&mas, 0); 3308 + MT_BUG_ON(mt, entry != ptr3); 3309 + MT_BUG_ON(mt, mas.index != 0x3000); 3310 + MT_BUG_ON(mt, mas.last != 0x3500); 3311 MT_BUG_ON(mt, !mas_active(mas)); 3312 3313 /* next: none -> active, skip value at location */ ··· 3307 MT_BUG_ON(mt, mas.last != 0x1500); 3308 MT_BUG_ON(mt, !mas_active(mas)); 3309 3310 + /* prev:active -> active spanning end range */ 3311 + entry = mas_prev(&mas, 0x0100); 3312 + MT_BUG_ON(mt, entry != NULL); 3313 + MT_BUG_ON(mt, mas.index != 0); 3314 + MT_BUG_ON(mt, mas.last != 0x0FFF); 3315 + MT_BUG_ON(mt, !mas_active(mas)); 3316 + 3317 + /* prev:active -> underflow */ 3318 entry = mas_prev(&mas, 0); 3319 MT_BUG_ON(mt, entry != NULL); 3320 MT_BUG_ON(mt, mas.index != 0); 3321 MT_BUG_ON(mt, mas.last != 0x0FFF); 3322 + MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); 3323 + 3324 + /* prev:underflow -> underflow */ 3325 + entry = mas_prev(&mas, 0); 3326 + MT_BUG_ON(mt, entry != NULL); 3327 + MT_BUG_ON(mt, mas.index != 0); 3328 + MT_BUG_ON(mt, mas.last != 0x0FFF); 3329 + MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); 3330 + 3331 + /* next:underflow -> active */ 3332 + entry = mas_next(&mas, ULONG_MAX); 3333 + MT_BUG_ON(mt, entry != ptr); 3334 + MT_BUG_ON(mt, mas.index != 0x1000); 3335 + MT_BUG_ON(mt, mas.last != 0x1500); 3336 + MT_BUG_ON(mt, !mas_active(mas)); 3337 + 3338 + /* prev:first value -> underflow */ 3339 + entry = mas_prev(&mas, 0x1000); 3340 + MT_BUG_ON(mt, entry != NULL); 3341 + MT_BUG_ON(mt, mas.index != 0x1000); 3342 + MT_BUG_ON(mt, mas.last != 0x1500); 3343 + MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW); 3344 + 3345 + /* find:underflow -> first value */ 3346 + entry = mas_find(&mas, ULONG_MAX); 3347 + MT_BUG_ON(mt, entry != ptr); 3348 + MT_BUG_ON(mt, mas.index != 0x1000); 3349 + MT_BUG_ON(mt, mas.last != 0x1500); 3350 MT_BUG_ON(mt, !mas_active(mas)); 3351 3352 /* prev: pause ->active */ ··· 3325 MT_BUG_ON(mt, mas.last != 0x2500); 3326 MT_BUG_ON(mt, !mas_active(mas)); 3327 3328 + /* prev:active -> active spanning min */ 3329 entry = mas_prev(&mas, 0x1600); 3330 MT_BUG_ON(mt, entry != NULL); 3331 MT_BUG_ON(mt, mas.index != 0x1501); 3332 MT_BUG_ON(mt, mas.last != 0x1FFF); 3333 MT_BUG_ON(mt, !mas_active(mas)); 3334 3335 + /* prev: active ->active, continue */ 3336 entry = mas_prev(&mas, 0); 3337 MT_BUG_ON(mt, entry != ptr); 3338 MT_BUG_ON(mt, mas.index != 0x1000); ··· 3379 MT_BUG_ON(mt, mas.last != 0x2FFF); 3380 MT_BUG_ON(mt, !mas_active(mas)); 3381 3382 + /* find: overflow ->active */ 3383 entry = mas_find(&mas, 0x5000); 3384 MT_BUG_ON(mt, entry != ptr3); 3385 MT_BUG_ON(mt, mas.index != 0x3000); ··· 3777 mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); 3778 check_empty_area_fill(&tree); 3779 mtree_destroy(&tree); 3780 3781 mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); 3782 check_state_handling(&tree);
+2
mm/damon/vaddr-test.h
··· 148 KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]); 149 KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]); 150 } 151 } 152 153 /*
··· 148 KUNIT_EXPECT_EQ(test, r->ar.start, expected[i * 2]); 149 KUNIT_EXPECT_EQ(test, r->ar.end, expected[i * 2 + 1]); 150 } 151 + 152 + damon_destroy_target(t); 153 } 154 155 /*
+2 -1
mm/damon/vaddr.c
··· 341 bool referenced = false; 342 pte_t entry = huge_ptep_get(pte); 343 struct folio *folio = pfn_folio(pte_pfn(entry)); 344 345 folio_get(folio); 346 347 if (pte_young(entry)) { 348 referenced = true; 349 entry = pte_mkold(entry); 350 - set_huge_pte_at(mm, addr, pte, entry); 351 } 352 353 #ifdef CONFIG_MMU_NOTIFIER
··· 341 bool referenced = false; 342 pte_t entry = huge_ptep_get(pte); 343 struct folio *folio = pfn_folio(pte_pfn(entry)); 344 + unsigned long psize = huge_page_size(hstate_vma(vma)); 345 346 folio_get(folio); 347 348 if (pte_young(entry)) { 349 referenced = true; 350 entry = pte_mkold(entry); 351 + set_huge_pte_at(mm, addr, pte, entry, psize); 352 } 353 354 #ifdef CONFIG_MMU_NOTIFIER
+2 -2
mm/filemap.c
··· 3503 if (count) { 3504 set_pte_range(vmf, folio, page, count, addr); 3505 folio_ref_add(folio, count); 3506 - if (in_range(vmf->address, addr, count)) 3507 ret = VM_FAULT_NOPAGE; 3508 } 3509 ··· 3517 if (count) { 3518 set_pte_range(vmf, folio, page, count, addr); 3519 folio_ref_add(folio, count); 3520 - if (in_range(vmf->address, addr, count)) 3521 ret = VM_FAULT_NOPAGE; 3522 } 3523
··· 3503 if (count) { 3504 set_pte_range(vmf, folio, page, count, addr); 3505 folio_ref_add(folio, count); 3506 + if (in_range(vmf->address, addr, count * PAGE_SIZE)) 3507 ret = VM_FAULT_NOPAGE; 3508 } 3509 ··· 3517 if (count) { 3518 set_pte_range(vmf, folio, page, count, addr); 3519 folio_ref_add(folio, count); 3520 + if (in_range(vmf->address, addr, count * PAGE_SIZE)) 3521 ret = VM_FAULT_NOPAGE; 3522 } 3523
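The filemap fix is a units bug: in_range(value, start, size) tests start <= value < start + size, and count here is a number of pages while vmf->address and addr are byte addresses, so the length must be scaled by PAGE_SIZE. A made-up illustration of the difference:

    /* addr = 0xa000, count = 3 pages just mapped, vmf->address = 0xb000 */
    in_range(vmf->address, addr, count);             /* old: 3-byte window, false */
    in_range(vmf->address, addr, count * PAGE_SIZE); /* fixed: 3-page window, true */

With the byte length the function can report VM_FAULT_NOPAGE whenever the faulting address was covered by the PTEs it just set, which is the "report success more often" of the commit title.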
+24 -19
mm/hugetlb.c
··· 4980 4981 static void 4982 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 4983 - struct folio *new_folio, pte_t old) 4984 { 4985 pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); ··· 4988 hugepage_add_new_anon_rmap(new_folio, vma, addr); 4989 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) 4990 newpte = huge_pte_mkuffd_wp(newpte); 4991 - set_huge_pte_at(vma->vm_mm, addr, ptep, newpte); 4992 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 4993 folio_set_hugetlb_migratable(new_folio); 4994 } ··· 5065 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { 5066 if (!userfaultfd_wp(dst_vma)) 5067 entry = huge_pte_clear_uffd_wp(entry); 5068 - set_huge_pte_at(dst, addr, dst_pte, entry); 5069 } else if (unlikely(is_hugetlb_entry_migration(entry))) { 5070 swp_entry_t swp_entry = pte_to_swp_entry(entry); 5071 bool uffd_wp = pte_swp_uffd_wp(entry); ··· 5080 entry = swp_entry_to_pte(swp_entry); 5081 if (userfaultfd_wp(src_vma) && uffd_wp) 5082 entry = pte_swp_mkuffd_wp(entry); 5083 - set_huge_pte_at(src, addr, src_pte, entry); 5084 } 5085 if (!userfaultfd_wp(dst_vma)) 5086 entry = huge_pte_clear_uffd_wp(entry); 5087 - set_huge_pte_at(dst, addr, dst_pte, entry); 5088 } else if (unlikely(is_pte_marker(entry))) { 5089 pte_marker marker = copy_pte_marker( 5090 pte_to_swp_entry(entry), dst_vma); 5091 5092 if (marker) 5093 set_huge_pte_at(dst, addr, dst_pte, 5094 - make_pte_marker(marker)); 5095 } else { 5096 entry = huge_ptep_get(src_pte); 5097 pte_folio = page_folio(pte_page(entry)); ··· 5145 goto again; 5146 } 5147 hugetlb_install_folio(dst_vma, dst_pte, addr, 5148 - new_folio, src_pte_old); 5149 spin_unlock(src_ptl); 5150 spin_unlock(dst_ptl); 5151 continue; ··· 5166 if (!userfaultfd_wp(dst_vma)) 5167 entry = huge_pte_clear_uffd_wp(entry); 5168 5169 - set_huge_pte_at(dst, addr, dst_pte, entry); 5170 hugetlb_count_add(npages, dst); 5171 } 5172 spin_unlock(src_ptl); ··· 5184 } 5185 5186 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, 5187 - unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte) 5188 { 5189 struct hstate *h = hstate_vma(vma); 5190 struct mm_struct *mm = vma->vm_mm; ··· 5203 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5204 5205 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); 5206 - set_huge_pte_at(mm, new_addr, dst_pte, pte); 5207 5208 if (src_ptl != dst_ptl) 5209 spin_unlock(src_ptl); ··· 5260 if (!dst_pte) 5261 break; 5262 5263 - move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte); 5264 } 5265 5266 if (shared_pmd) ··· 5338 if (pte_swp_uffd_wp_any(pte) && 5339 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5340 set_huge_pte_at(mm, address, ptep, 5341 - make_pte_marker(PTE_MARKER_UFFD_WP)); 5342 else 5343 huge_pte_clear(mm, address, ptep, sz); 5344 spin_unlock(ptl); ··· 5373 if (huge_pte_uffd_wp(pte) && 5374 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5375 set_huge_pte_at(mm, address, ptep, 5376 - make_pte_marker(PTE_MARKER_UFFD_WP)); 5377 hugetlb_count_sub(pages_per_huge_page(h), mm); 5378 page_remove_rmap(page, vma, true); ··· 5679 hugepage_add_new_anon_rmap(new_folio, vma, haddr); 5680 if (huge_pte_uffd_wp(pte)) 5681 newpte = huge_pte_mkuffd_wp(newpte); 5682 - set_huge_pte_at(mm, haddr, ptep, newpte); 5683 folio_set_hugetlb_migratable(new_folio); 5684 /* Make the old page be freed below */ 5685 new_folio = old_folio; ··· 5975 */ 5976 if (unlikely(pte_marker_uffd_wp(old_pte))) 5977 new_pte = huge_pte_mkuffd_wp(new_pte); 5978 - set_huge_pte_at(mm, haddr, ptep, new_pte); 5979 5980 hugetlb_count_add(pages_per_huge_page(h), mm); 5981 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { ··· 6264 } 6265 6266 _dst_pte = make_pte_marker(PTE_MARKER_POISONED); 6267 - set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 6268 6269 /* No need to invalidate - it was non-present before */ 6270 update_mmu_cache(dst_vma, dst_addr, dst_pte); ··· 6416 if (wp_enabled) 6417 _dst_pte = huge_pte_mkuffd_wp(_dst_pte); 6418 6419 - set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte); 6420 6421 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 6422 ··· 6602 else if (uffd_wp_resolve) 6603 newpte = pte_swp_clear_uffd_wp(newpte); 6604 if (!pte_same(pte, newpte)) 6605 - set_huge_pte_at(mm, address, ptep, newpte); 6606 } else if (unlikely(is_pte_marker(pte))) { 6607 /* No other markers apply for now. */ 6608 WARN_ON_ONCE(!pte_marker_uffd_wp(pte)); ··· 6627 if (unlikely(uffd_wp)) 6628 /* Safe to modify directly (none->non-present). */ 6629 set_huge_pte_at(mm, address, ptep, 6630 - make_pte_marker(PTE_MARKER_UFFD_WP)); 6631 } 6632 spin_unlock(ptl); 6633 }
··· 4980 4981 static void 4982 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr, 4983 + struct folio *new_folio, pte_t old, unsigned long sz) 4984 { 4985 pte_t newpte = make_huge_pte(vma, &new_folio->page, 1); 4986 ··· 4988 hugepage_add_new_anon_rmap(new_folio, vma, addr); 4989 if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old)) 4990 newpte = huge_pte_mkuffd_wp(newpte); 4991 + set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz); 4992 hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm); 4993 folio_set_hugetlb_migratable(new_folio); 4994 } ··· 5065 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) { 5066 if (!userfaultfd_wp(dst_vma)) 5067 entry = huge_pte_clear_uffd_wp(entry); 5068 + set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5069 } else if (unlikely(is_hugetlb_entry_migration(entry))) { 5070 swp_entry_t swp_entry = pte_to_swp_entry(entry); 5071 bool uffd_wp = pte_swp_uffd_wp(entry); ··· 5080 entry = swp_entry_to_pte(swp_entry); 5081 if (userfaultfd_wp(src_vma) && uffd_wp) 5082 entry = pte_swp_mkuffd_wp(entry); 5083 + set_huge_pte_at(src, addr, src_pte, entry, sz); 5084 } 5085 if (!userfaultfd_wp(dst_vma)) 5086 entry = huge_pte_clear_uffd_wp(entry); 5087 + set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5088 } else if (unlikely(is_pte_marker(entry))) { 5089 pte_marker marker = copy_pte_marker( 5090 pte_to_swp_entry(entry), dst_vma); 5091 5092 if (marker) 5093 set_huge_pte_at(dst, addr, dst_pte, 5094 + make_pte_marker(marker), sz); 5095 } else { 5096 entry = huge_ptep_get(src_pte); 5097 pte_folio = page_folio(pte_page(entry)); ··· 5145 goto again; 5146 } 5147 hugetlb_install_folio(dst_vma, dst_pte, addr, 5148 + new_folio, src_pte_old, sz); 5149 spin_unlock(src_ptl); 5150 spin_unlock(dst_ptl); 5151 continue; ··· 5166 if (!userfaultfd_wp(dst_vma)) 5167 entry = huge_pte_clear_uffd_wp(entry); 5168 5169 + set_huge_pte_at(dst, addr, dst_pte, entry, sz); 5170 hugetlb_count_add(npages, dst); 5171 } 5172 spin_unlock(src_ptl); ··· 5184 } 5185 5186 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr, 5187 + unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte, 5188 + unsigned long sz) 5189 { 5190 struct hstate *h = hstate_vma(vma); 5191 struct mm_struct *mm = vma->vm_mm; ··· 5202 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 5203 5204 pte = huge_ptep_get_and_clear(mm, old_addr, src_pte); 5205 + set_huge_pte_at(mm, new_addr, dst_pte, pte, sz); 5206 5207 if (src_ptl != dst_ptl) 5208 spin_unlock(src_ptl); ··· 5259 if (!dst_pte) 5260 break; 5261 5262 + move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz); 5263 } 5264 5265 if (shared_pmd) ··· 5337 if (pte_swp_uffd_wp_any(pte) && 5338 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5339 set_huge_pte_at(mm, address, ptep, 5340 + make_pte_marker(PTE_MARKER_UFFD_WP), 5341 + sz); 5342 else 5343 huge_pte_clear(mm, address, ptep, sz); 5344 spin_unlock(ptl); ··· 5371 if (huge_pte_uffd_wp(pte) && 5372 !(zap_flags & ZAP_FLAG_DROP_MARKER)) 5373 set_huge_pte_at(mm, address, ptep, 5374 + make_pte_marker(PTE_MARKER_UFFD_WP), 5375 + sz); 5376 hugetlb_count_sub(pages_per_huge_page(h), mm); 5377 page_remove_rmap(page, vma, true); 5378 ··· 5676 hugepage_add_new_anon_rmap(new_folio, vma, haddr); 5677 if (huge_pte_uffd_wp(pte)) 5678 newpte = huge_pte_mkuffd_wp(newpte); 5679 + set_huge_pte_at(mm, haddr, ptep, newpte, huge_page_size(h)); 5680 folio_set_hugetlb_migratable(new_folio); 5681 /* Make the old page be freed below */ 5682 new_folio = old_folio;
··· 5972 */ 5973 if (unlikely(pte_marker_uffd_wp(old_pte))) 5974 new_pte = huge_pte_mkuffd_wp(new_pte); 5975 + set_huge_pte_at(mm, haddr, ptep, new_pte, huge_page_size(h)); 5976 5977 hugetlb_count_add(pages_per_huge_page(h), mm); 5978 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { ··· 6261 } 6262 6263 _dst_pte = make_pte_marker(PTE_MARKER_POISONED); 6264 + set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, 6265 + huge_page_size(h)); 6266 6267 /* No need to invalidate - it was non-present before */ 6268 update_mmu_cache(dst_vma, dst_addr, dst_pte); ··· 6412 if (wp_enabled) 6413 _dst_pte = huge_pte_mkuffd_wp(_dst_pte); 6414 6415 + set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, huge_page_size(h)); 6416 6417 hugetlb_count_add(pages_per_huge_page(h), dst_mm); 6418 ··· 6598 else if (uffd_wp_resolve) 6599 newpte = pte_swp_clear_uffd_wp(newpte); 6600 if (!pte_same(pte, newpte)) 6601 + set_huge_pte_at(mm, address, ptep, newpte, psize); 6602 } else if (unlikely(is_pte_marker(pte))) { 6603 /* No other markers apply for now. */ 6604 WARN_ON_ONCE(!pte_marker_uffd_wp(pte)); ··· 6623 if (unlikely(uffd_wp)) 6624 /* Safe to modify directly (none->non-present). */ 6625 set_huge_pte_at(mm, address, ptep, 6626 + make_pte_marker(PTE_MARKER_UFFD_WP), 6627 + psize); 6628 } 6629 spin_unlock(ptl); 6630 }
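The common thread in these hugetlb hunks is that set_huge_pte_at() now takes the huge page size explicitly. The arm64 fix in this pull needs it because swap entries such as uffd-wp PTE markers do not reference a page, so the old folio lookup (removed in the arm64 hunk earlier) cannot recover the size for every non-present entry; the caller, which always knows the mapping size, supplies it instead. A minimal sketch of the resulting caller pattern, as a fragment in kernel context (the wrapper name is illustrative, not a real kernel function):

    /* Illustrative caller: derive the size once from the VMA's hstate
     * and pass it through; for non-present entries the PTE value
     * itself carries no usable size information. */
    static void example_install_huge_pte(struct vm_area_struct *vma,
                                         unsigned long addr,
                                         pte_t *ptep, pte_t pte)
    {
            unsigned long sz = huge_page_size(hstate_vma(vma));

            set_huge_pte_at(vma->vm_mm, addr, ptep, pte, sz);
    }

The same plumbing repeats below in mm/migrate.c, mm/rmap.c and mm/vmalloc.c.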
+13
mm/memcontrol.c
··· 3867 case _MEMSWAP: 3868 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3869 break; 3870 case _TCP: 3871 ret = memcg_update_tcp_max(memcg, nr_pages); 3872 break; ··· 5084 .seq_show = memcg_numa_stat_show, 5085 }, 5086 #endif 5087 { 5088 .name = "kmem.usage_in_bytes", 5089 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
··· 3867 case _MEMSWAP: 3868 ret = mem_cgroup_resize_max(memcg, nr_pages, true); 3869 break; 3870 + case _KMEM: 3871 + pr_warn_once("kmem.limit_in_bytes is deprecated and will be removed. " 3872 + "Writing any value to this file has no effect. " 3873 + "Please report your usecase to linux-mm@kvack.org if you " 3874 + "depend on this functionality.\n"); 3875 + ret = 0; 3876 + break; 3877 case _TCP: 3878 ret = memcg_update_tcp_max(memcg, nr_pages); 3879 break; ··· 5077 .seq_show = memcg_numa_stat_show, 5078 }, 5079 #endif 5080 + { 5081 + .name = "kmem.limit_in_bytes", 5082 + .private = MEMFILE_PRIVATE(_KMEM, RES_LIMIT), 5083 + .write = mem_cgroup_write, 5084 + .read_u64 = mem_cgroup_read_u64, 5085 + }, 5086 { 5087 .name = "kmem.usage_in_bytes", 5088 .private = MEMFILE_PRIVATE(_KMEM, RES_USAGE),
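The net effect for userspace: the v1 knob exists again and writes to it succeed, but the value is discarded (the same behavior as booting with nokmem, per the documentation hunk above) and a one-time warning lands in the kernel log. A hypothetical probe, assuming a cgroup v1 memory hierarchy mounted at /sys/fs/cgroup/memory with a group named "test":

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Path is an assumption; adjust to the local v1 mount. */
            int fd = open("/sys/fs/cgroup/memory/test/memory.kmem.limit_in_bytes",
                          O_WRONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* The write reports success, but the _KMEM case above
             * ignores the value; pr_warn_once() fires on the first
             * such write. */
            if (write(fd, "1048576", 7) != 7)
                    perror("write");
            close(fd);
            return 0;
    }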
+19 -20
mm/mempolicy.c
··· 426 unsigned long start; 427 unsigned long end; 428 struct vm_area_struct *first; 429 }; 430 431 /* ··· 447 /* 448 * queue_folios_pmd() has three possible return values: 449 * 0 - folios are placed on the right node or queued successfully, or 450 - * special page is met, i.e. huge zero page. 451 - * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 452 - * specified. 453 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an 454 * existing folio was already on a node that does not follow the 455 * policy. ··· 479 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 480 if (!vma_migratable(walk->vma) || 481 migrate_folio_add(folio, qp->pagelist, flags)) { 482 - ret = 1; 483 goto unlock; 484 } 485 } else ··· 495 * 496 * queue_folios_pte_range() has three possible return values: 497 * 0 - folios are placed on the right node or queued successfully, or 498 - * special page is met, i.e. zero page. 499 - * 1 - there is unmovable folio, and MPOL_MF_MOVE* & MPOL_MF_STRICT were 500 - * specified. 501 * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already 502 * on a node that does not follow the policy. 503 */ ··· 507 struct folio *folio; 508 struct queue_pages *qp = walk->private; 509 unsigned long flags = qp->flags; 510 - bool has_unmovable = false; 511 pte_t *pte, *mapped_pte; 512 pte_t ptent; 513 spinlock_t *ptl; ··· 536 if (!queue_folio_required(folio, qp)) 537 continue; 538 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 539 - /* MPOL_MF_STRICT must be specified if we get here */ 540 - if (!vma_migratable(vma)) { 541 - has_unmovable = true; 542 - break; 543 - } 544 545 /* 546 * Do not abort immediately since there may be ··· 549 * need migrate other LRU pages. 550 */ 551 if (migrate_folio_add(folio, qp->pagelist, flags)) 552 - has_unmovable = true; 553 } else 554 break; 555 } 556 pte_unmap_unlock(mapped_pte, ptl); 557 cond_resched(); 558 - 559 - if (has_unmovable) 560 - return 1; 561 562 return addr != end ? -EIO : 0; 563 } ··· 595 * Detecting misplaced folio but allow migrating folios which 596 * have been queued. 597 */ 598 - ret = 1; 599 goto unlock; 600 } 601 ··· 616 * Failed to isolate folio but allow migrating pages 617 * which have been queued. 618 */ 619 - ret = 1; 620 } 621 unlock: 622 spin_unlock(ptl); ··· 752 .start = start, 753 .end = end, 754 .first = NULL, 755 }; 756 const struct mm_walk_ops *ops = lock_vma ? 757 &queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops; 758 759 err = walk_page_range(mm, start, end, ops, &qp); 760 761 if (!qp.first) 762 /* whole range in hole */ 763 err = -EFAULT; ··· 1357 putback_movable_pages(&pagelist); 1358 } 1359 1360 - if ((ret > 0) || (nr_failed && (flags & MPOL_MF_STRICT))) 1361 err = -EIO; 1362 } else { 1363 up_out:
··· 426 unsigned long start; 427 unsigned long end; 428 struct vm_area_struct *first; 429 + bool has_unmovable; 430 }; 431 432 /* ··· 446 /* 447 * queue_folios_pmd() has three possible return values: 448 * 0 - folios are placed on the right node or queued successfully, or 449 + * special page is met, i.e. zero page, or unmovable page is found 450 + * but continue walking (indicated by queue_pages.has_unmovable). 451 * -EIO - is migration entry or only MPOL_MF_STRICT was specified and an 452 * existing folio was already on a node that does not follow the 453 * policy. ··· 479 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 480 if (!vma_migratable(walk->vma) || 481 migrate_folio_add(folio, qp->pagelist, flags)) { 482 + qp->has_unmovable = true; 483 goto unlock; 484 } 485 } else ··· 495 * 496 * queue_folios_pte_range() has three possible return values: 497 * 0 - folios are placed on the right node or queued successfully, or 498 + * special page is met, i.e. zero page, or unmovable page is found 499 + * but continue walking (indicated by queue_pages.has_unmovable). 500 * -EIO - only MPOL_MF_STRICT was specified and an existing folio was already 501 * on a node that does not follow the policy. 502 */ ··· 508 struct folio *folio; 509 struct queue_pages *qp = walk->private; 510 unsigned long flags = qp->flags; 511 pte_t *pte, *mapped_pte; 512 pte_t ptent; 513 spinlock_t *ptl; ··· 538 if (!queue_folio_required(folio, qp)) 539 continue; 540 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) { 541 + /* 542 + * MPOL_MF_STRICT must be specified if we get here. 543 + * Continue walking vmas due to MPOL_MF_MOVE* flags. 544 + */ 545 + if (!vma_migratable(vma)) 546 + qp->has_unmovable = true; 547 548 /* 549 * Do not abort immediately since there may be ··· 550 * need migrate other LRU pages. 551 */ 552 if (migrate_folio_add(folio, qp->pagelist, flags)) 553 + qp->has_unmovable = true; 554 } else 555 break; 556 } 557 pte_unmap_unlock(mapped_pte, ptl); 558 cond_resched(); 559 560 return addr != end ? -EIO : 0; 561 } ··· 599 * Detecting misplaced folio but allow migrating folios which 600 * have been queued. 601 */ 602 + qp->has_unmovable = true; 603 goto unlock; 604 } 605 ··· 620 * Failed to isolate folio but allow migrating pages 621 * which have been queued. 622 */ 623 + qp->has_unmovable = true; 624 } 625 unlock: 626 spin_unlock(ptl); ··· 756 .start = start, 757 .end = end, 758 .first = NULL, 759 + .has_unmovable = false, 760 }; 761 const struct mm_walk_ops *ops = lock_vma ? 762 &queue_pages_lock_vma_walk_ops : &queue_pages_walk_ops; 763 764 err = walk_page_range(mm, start, end, ops, &qp); 765 766 + if (qp.has_unmovable) 767 + err = 1; 768 if (!qp.first) 769 /* whole range in hole */ 770 err = -EFAULT; ··· 1358 putback_movable_pages(&pagelist); 1359 } 1360 1361 + if (((ret > 0) || nr_failed) && (flags & MPOL_MF_STRICT)) 1362 err = -EIO; 1363 } else { 1364 up_out:
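The shape of this fix: the walk callbacks used to return 1 on an unmovable folio, which made the page-range walk stop at the first offender even when MPOL_MF_MOVE* asked for a best-effort migration of the whole range. Latching the condition in struct queue_pages lets the walk run to completion and defers the verdict to the caller, which maps it back to 1 (and ultimately -EIO only under MPOL_MF_STRICT). A standalone sketch of that record-and-continue pattern, in plain C rather than kernel code:

    #include <stdbool.h>

    struct walk_state {
            bool has_unmovable;     /* latched; never aborts the walk */
    };

    /* Per-element callback: remember the condition and keep going. */
    static int visit(struct walk_state *st, bool movable)
    {
            if (!movable)
                    st->has_unmovable = true;
            return 0;               /* only hard errors return nonzero */
    }

    /* Driver: report the latched condition after a full pass. */
    static int walk_all(struct walk_state *st, const bool *movable, int n)
    {
            for (int i = 0; i < n; i++) {
                    int err = visit(st, movable[i]);

                    if (err)
                            return err;
            }
            return st->has_unmovable ? 1 : 0;
    }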
+5 -2
mm/migrate.c
··· 243 244 #ifdef CONFIG_HUGETLB_PAGE 245 if (folio_test_hugetlb(folio)) { 246 - unsigned int shift = huge_page_shift(hstate_vma(vma)); 247 248 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 249 if (folio_test_anon(folio)) ··· 253 rmap_flags); 254 else 255 page_dup_file_rmap(new, true); 256 - set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); 257 } else 258 #endif 259 {
··· 243 244 #ifdef CONFIG_HUGETLB_PAGE 245 if (folio_test_hugetlb(folio)) { 246 + struct hstate *h = hstate_vma(vma); 247 + unsigned int shift = huge_page_shift(h); 248 + unsigned long psize = huge_page_size(h); 249 250 pte = arch_make_huge_pte(pte, shift, vma->vm_flags); 251 if (folio_test_anon(folio)) ··· 251 rmap_flags); 252 else 253 page_dup_file_rmap(new, true); 254 + set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte, 255 + psize); 256 } else 257 #endif 258 {
+18 -5
mm/rmap.c
··· 1480 struct mmu_notifier_range range; 1481 enum ttu_flags flags = (enum ttu_flags)(long)arg; 1482 unsigned long pfn; 1483 1484 /* 1485 * When racing against e.g. zap_pte_range() on another cpu, ··· 1512 */ 1513 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1514 &range.end); 1515 } 1516 mmu_notifier_invalidate_range_start(&range); 1517 ··· 1632 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1633 if (folio_test_hugetlb(folio)) { 1634 hugetlb_count_sub(folio_nr_pages(folio), mm); 1635 - set_huge_pte_at(mm, address, pvmw.pte, pteval); 1636 } else { 1637 dec_mm_counter(mm, mm_counter(&folio->page)); 1638 set_pte_at(mm, address, pvmw.pte, pteval); ··· 1825 struct mmu_notifier_range range; 1826 enum ttu_flags flags = (enum ttu_flags)(long)arg; 1827 unsigned long pfn; 1828 1829 /* 1830 * When racing against e.g. zap_pte_range() on another cpu, ··· 1861 */ 1862 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1863 &range.end); 1864 } 1865 mmu_notifier_invalidate_range_start(&range); 1866 ··· 2029 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 2030 if (folio_test_hugetlb(folio)) { 2031 hugetlb_count_sub(folio_nr_pages(folio), mm); 2032 - set_huge_pte_at(mm, address, pvmw.pte, pteval); 2033 } else { 2034 dec_mm_counter(mm, mm_counter(&folio->page)); 2035 set_pte_at(mm, address, pvmw.pte, pteval); ··· 2054 2055 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 2056 if (folio_test_hugetlb(folio)) 2057 - set_huge_pte_at(mm, address, pvmw.pte, pteval); 2058 else 2059 set_pte_at(mm, address, pvmw.pte, pteval); 2060 ret = false; ··· 2069 if (anon_exclusive && 2070 page_try_share_anon_rmap(subpage)) { 2071 if (folio_test_hugetlb(folio)) 2072 - set_huge_pte_at(mm, address, pvmw.pte, pteval); 2073 else 2074 set_pte_at(mm, address, pvmw.pte, pteval); 2075 ret = false; ··· 2102 if (pte_uffd_wp(pteval)) 2103 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2104 if (folio_test_hugetlb(folio)) 2105 - set_huge_pte_at(mm, address, pvmw.pte, swp_pte); 2106 else 2107 set_pte_at(mm, address, pvmw.pte, swp_pte); 2108 trace_set_migration_pte(address, pte_val(swp_pte),
··· 1480 struct mmu_notifier_range range; 1481 enum ttu_flags flags = (enum ttu_flags)(long)arg; 1482 unsigned long pfn; 1483 + unsigned long hsz = 0; 1484 1485 /* 1486 * When racing against e.g. zap_pte_range() on another cpu, ··· 1511 */ 1512 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1513 &range.end); 1514 + 1515 + /* We need the huge page size for set_huge_pte_at() */ 1516 + hsz = huge_page_size(hstate_vma(vma)); 1517 } 1518 mmu_notifier_invalidate_range_start(&range); 1519 ··· 1628 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 1629 if (folio_test_hugetlb(folio)) { 1630 hugetlb_count_sub(folio_nr_pages(folio), mm); 1631 + set_huge_pte_at(mm, address, pvmw.pte, pteval, 1632 + hsz); 1633 } else { 1634 dec_mm_counter(mm, mm_counter(&folio->page)); 1635 set_pte_at(mm, address, pvmw.pte, pteval); ··· 1820 struct mmu_notifier_range range; 1821 enum ttu_flags flags = (enum ttu_flags)(long)arg; 1822 unsigned long pfn; 1823 + unsigned long hsz = 0; 1824 1825 /* 1826 * When racing against e.g. zap_pte_range() on another cpu, ··· 1855 */ 1856 adjust_range_if_pmd_sharing_possible(vma, &range.start, 1857 &range.end); 1858 + 1859 + /* We need the huge page size for set_huge_pte_at() */ 1860 + hsz = huge_page_size(hstate_vma(vma)); 1861 } 1862 mmu_notifier_invalidate_range_start(&range); 1863 ··· 2020 pteval = swp_entry_to_pte(make_hwpoison_entry(subpage)); 2021 if (folio_test_hugetlb(folio)) { 2022 hugetlb_count_sub(folio_nr_pages(folio), mm); 2023 + set_huge_pte_at(mm, address, pvmw.pte, pteval, 2024 + hsz); 2025 } else { 2026 dec_mm_counter(mm, mm_counter(&folio->page)); 2027 set_pte_at(mm, address, pvmw.pte, pteval); ··· 2044 2045 if (arch_unmap_one(mm, vma, address, pteval) < 0) { 2046 if (folio_test_hugetlb(folio)) 2047 + set_huge_pte_at(mm, address, pvmw.pte, 2048 + pteval, hsz); 2049 else 2050 set_pte_at(mm, address, pvmw.pte, pteval); 2051 ret = false; ··· 2058 if (anon_exclusive && 2059 page_try_share_anon_rmap(subpage)) { 2060 if (folio_test_hugetlb(folio)) 2061 + set_huge_pte_at(mm, address, pvmw.pte, 2062 + pteval, hsz); 2063 else 2064 set_pte_at(mm, address, pvmw.pte, pteval); 2065 ret = false; ··· 2090 if (pte_uffd_wp(pteval)) 2091 swp_pte = pte_swp_mkuffd_wp(swp_pte); 2092 if (folio_test_hugetlb(folio)) 2093 + set_huge_pte_at(mm, address, pvmw.pte, swp_pte, 2094 + hsz); 2095 else 2096 set_pte_at(mm, address, pvmw.pte, swp_pte); 2097 trace_set_migration_pte(address, pte_val(swp_pte),
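Worth noting in the rmap hunks: hsz is computed once, next to the mmu-notifier range setup, rather than at each set_huge_pte_at() call site. It is only assigned (and only meaningful) on the hugetlb path, and stays 0 for ordinary mappings, which take the set_pte_at() branch instead. The mm/migrate.c hunk above hoists the hstate lookup out of the remove-migration path for the same reason.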
+1 -1
mm/vmalloc.c
··· 111 pte_t entry = pfn_pte(pfn, prot); 112 113 entry = arch_make_huge_pte(entry, ilog2(size), 0); 114 - set_huge_pte_at(&init_mm, addr, pte, entry); 115 pfn += PFN_DOWN(size); 116 continue; 117 }
··· 111 pte_t entry = pfn_pte(pfn, prot); 112 113 entry = arch_make_huge_pte(entry, ilog2(size), 0); 114 + set_huge_pte_at(&init_mm, addr, pte, entry, size); 115 pfn += PFN_DOWN(size); 116 continue; 117 }
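In the vmalloc case there is no hstate to consult: the mapping size is the size the huge-mapping path is already branching on, so it is passed to set_huge_pte_at() directly, just as ilog2(size) already feeds arch_make_huge_pte().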
+20
mm/zswap.c
··· 1219 return false; 1220 1221 /* 1222 * XXX: zswap reclaim does not work with cgroups yet. Without a 1223 * cgroup-aware entry LRU, we will push out entries system-wide based on 1224 * local cgroup limits. ··· 1346 1347 /* map */ 1348 spin_lock(&tree->lock); 1349 while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) { 1350 zswap_duplicate_entry++; 1351 zswap_invalidate_entry(tree, dupentry); 1352 }
··· 1219 return false; 1220 1221 /* 1222 + * If this is a duplicate, it must be removed before attempting to store 1223 + * it, otherwise, if the store fails the old page won't be removed from 1224 + * the tree, and it might be written back overriding the new data. 1225 + */ 1226 + spin_lock(&tree->lock); 1227 + dupentry = zswap_rb_search(&tree->rbroot, offset); 1228 + if (dupentry) { 1229 + zswap_duplicate_entry++; 1230 + zswap_invalidate_entry(tree, dupentry); 1231 + } 1232 + spin_unlock(&tree->lock); 1233 + 1234 + /* 1235 * XXX: zswap reclaim does not work with cgroups yet. Without a 1236 * cgroup-aware entry LRU, we will push out entries system-wide based on 1237 * local cgroup limits. ··· 1333 1334 /* map */ 1335 spin_lock(&tree->lock); 1336 + /* 1337 + * A duplicate entry should have been removed at the beginning of this 1338 + * function. Since the swap entry should be pinned, if a duplicate is 1339 + * found again here it means that something went wrong in the swap 1340 + * cache. 1341 + */ 1342 while (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) { 1343 + WARN_ON(1); 1344 zswap_duplicate_entry++; 1345 zswap_invalidate_entry(tree, dupentry); 1346 }
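The ordering is the substance of this fix: duplicates for the offset are now invalidated before compression and allocation get a chance to fail, so a failed store can no longer leave a stale entry behind to be written back over the new data. By the time the entry is inserted, a surviving duplicate indicates a broken invariant (the swap entry should be pinned), hence the WARN_ON(1). A userspace analogue of the protocol, with a one-flag-per-offset array standing in for the rbtree (illustrative only, not the zswap API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NSLOTS 64

    static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool occupied[NSLOTS];           /* stand-in for the rbtree */

    static void store(unsigned int offset)
    {
            /* 1) Drop any duplicate up front, under the lock, so a
             *    failed store cannot leave stale data behind. */
            pthread_mutex_lock(&tree_lock);
            occupied[offset] = false;       /* "zswap_invalidate_entry" */
            pthread_mutex_unlock(&tree_lock);

            /* ... compression/allocation may fail here; the old entry
             *     is already gone, so failure is now harmless ... */

            /* 2) A duplicate at insert time is a bug upstream. */
            pthread_mutex_lock(&tree_lock);
            if (occupied[offset])
                    fprintf(stderr, "WARN: duplicate at %u\n", offset);
            occupied[offset] = true;
            pthread_mutex_unlock(&tree_lock);
    }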
+2 -2
tools/testing/selftests/mm/charge_reserved_hugetlb.sh
··· 25 fi 26 27 if [[ $cgroup2 ]]; then 28 - cgroup_path=$(mount -t cgroup2 | head -1 | awk -e '{print $3}') 29 if [[ -z "$cgroup_path" ]]; then 30 cgroup_path=/dev/cgroup/memory 31 mount -t cgroup2 none $cgroup_path ··· 33 fi 34 echo "+hugetlb" >$cgroup_path/cgroup.subtree_control 35 else 36 - cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}') 37 if [[ -z "$cgroup_path" ]]; then 38 cgroup_path=/dev/cgroup/memory 39 mount -t cgroup memory,hugetlb $cgroup_path
··· 25 fi 26 27 if [[ $cgroup2 ]]; then 28 + cgroup_path=$(mount -t cgroup2 | head -1 | awk '{print $3}') 29 if [[ -z "$cgroup_path" ]]; then 30 cgroup_path=/dev/cgroup/memory 31 mount -t cgroup2 none $cgroup_path ··· 33 fi 34 echo "+hugetlb" >$cgroup_path/cgroup.subtree_control 35 else 36 + cgroup_path=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}') 37 if [[ -z "$cgroup_path" ]]; then 38 cgroup_path=/dev/cgroup/memory 39 mount -t cgroup memory,hugetlb $cgroup_path
+2 -2
tools/testing/selftests/mm/hugetlb_reparenting_test.sh
··· 20 21 22 if [[ $cgroup2 ]]; then 23 - CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk -e '{print $3}') 24 if [[ -z "$CGROUP_ROOT" ]]; then 25 CGROUP_ROOT=/dev/cgroup/memory 26 mount -t cgroup2 none $CGROUP_ROOT ··· 28 fi 29 echo "+hugetlb +memory" >$CGROUP_ROOT/cgroup.subtree_control 30 else 31 - CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk -e '{print $3}') 32 if [[ -z "$CGROUP_ROOT" ]]; then 33 CGROUP_ROOT=/dev/cgroup/memory 34 mount -t cgroup memory,hugetlb $CGROUP_ROOT
··· 20 21 22 if [[ $cgroup2 ]]; then 23 + CGROUP_ROOT=$(mount -t cgroup2 | head -1 | awk '{print $3}') 24 if [[ -z "$CGROUP_ROOT" ]]; then 25 CGROUP_ROOT=/dev/cgroup/memory 26 mount -t cgroup2 none $CGROUP_ROOT ··· 28 fi 29 echo "+hugetlb +memory" >$CGROUP_ROOT/cgroup.subtree_control 30 else 31 + CGROUP_ROOT=$(mount -t cgroup | grep ",hugetlb" | awk '{print $3}') 32 if [[ -z "$CGROUP_ROOT" ]]; then 33 CGROUP_ROOT=/dev/cgroup/memory 34 mount -t cgroup memory,hugetlb $CGROUP_ROOT
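The two selftest hunks are the same portability fix: -e is a GNU awk extension for supplying program text, and implementations such as mawk (the default awk on some distributions) reject the flag, so the scripts failed before ever resolving the cgroup path. Passing the program as the first operand, as POSIX specifies, behaves identically under every awk.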