Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'mm-hotfixes-stable-2025-12-28-21-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull misc fixes from Andrew Morton:
"27 hotfixes. 12 are cc:stable, 18 are MM.

There's a patch series from Jiayuan Chen which fixes some
issues with KASAN and vmalloc. Apart from that it's the usual
shower of singletons - please see the respective changelogs
for details"

* tag 'mm-hotfixes-stable-2025-12-28-21-50' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (27 commits)
mm/ksm: fix pte_unmap_unlock of wrong address in break_ksm_pmd_entry
mm/page_owner: fix memory leak in page_owner_stack_fops->release()
mm/memremap: fix spurious large folio warning for FS-DAX
MAINTAINERS: notify the "Device Memory" community of memory hotplug changes
sparse: update MAINTAINERS info
mm/page_alloc: report 1 as zone_batchsize for !CONFIG_MMU
mm: consider non-anon swap cache folios in folio_expected_ref_count()
rust: maple_tree: rcu_read_lock() in destructor to silence lockdep
mm: memcg: fix unit conversion for K() macro in OOM log
mm: fixup pfnmap memory failure handling to use pgoff
tools/mm/page_owner_sort: fix timestamp comparison for stable sorting
selftests/mm: fix thread state check in uffd-unit-tests
kernel/kexec: fix IMA when allocation happens in CMA area
kernel/kexec: change the prototype of kimage_map_segment()
MAINTAINERS: add ABI headers to KHO and LIVE UPDATE
.mailmap: remove one of the entries for WangYuli
mm/damon/vaddr: fix missing pte_unmap_unlock in damos_va_migrate_pmd_entry()
MAINTAINERS: update one straggling entry for Bartosz Golaszewski
mm/page_alloc: change all pageblocks migrate type on coalescing
mm: leafops.h: correct kernel-doc function param. names
...

+163 -59
+2 -2
.mailmap
···
  Barry Song <baohua@kernel.org> <barry.song@analog.com>
  Bart Van Assche <bvanassche@acm.org> <bart.vanassche@sandisk.com>
  Bart Van Assche <bvanassche@acm.org> <bart.vanassche@wdc.com>
- Bartosz Golaszewski <brgl@bgdev.pl> <bgolaszewski@baylibre.com>
+ Bartosz Golaszewski <brgl@kernel.org> <bartosz.golaszewski@linaro.org>
+ Bartosz Golaszewski <brgl@kernel.org> <bgolaszewski@baylibre.com>
  Ben Dooks <ben-linux@fluff.org> <ben.dooks@simtec.co.uk>
  Ben Dooks <ben-linux@fluff.org> <ben.dooks@sifive.com>
  Ben Gardner <bgardner@wabtec.com>
···
  Vladimir Davydov <vdavydov.dev@gmail.com> <vdavydov@virtuozzo.com>
  WangYuli <wangyuli@aosc.io> <wangyl5933@chinaunicom.cn>
  WangYuli <wangyuli@aosc.io> <wangyuli@deepin.org>
- WangYuli <wangyuli@aosc.io> <wangyuli@uniontech.com>
  Weiwen Hu <huweiwen@linux.alibaba.com> <sehuww@mail.scut.edu.cn>
  WeiXiong Liao <gmpy.liaowx@gmail.com> <liaoweixiong@allwinnertech.com>
  Wen Gong <quic_wgong@quicinc.com> <wgong@codeaurora.org>
+6 -2
MAINTAINERS
···
  F: Documentation/admin-guide/mm/kho.rst
  F: Documentation/core-api/kho/*
  F: include/linux/kexec_handover.h
+ F: include/linux/kho/
  F: kernel/liveupdate/kexec_handover*
  F: lib/test_kho.c
  F: tools/testing/selftests/kho/
···
  F: Documentation/core-api/liveupdate.rst
  F: Documentation/mm/memfd_preservation.rst
  F: Documentation/userspace-api/liveupdate.rst
+ F: include/linux/kho/abi/
  F: include/linux/liveupdate.h
  F: include/linux/liveupdate/
  F: include/uapi/linux/liveupdate.h
···
  M: David Hildenbrand <david@kernel.org>
  M: Oscar Salvador <osalvador@suse.de>
  L: linux-mm@kvack.org
+ L: linux-cxl@vger.kernel.org
  S: Maintained
  F: Documentation/admin-guide/mm/memory-hotplug.rst
  F: Documentation/core-api/memory-hotplug.rst
···
 
  MEMORY MANAGEMENT - USERFAULTFD
  M: Andrew Morton <akpm@linux-foundation.org>
+ M: Mike Rapoport <rppt@kernel.org>
  R: Peter Xu <peterx@redhat.com>
  L: linux-mm@kvack.org
  S: Maintained
···
  F: drivers/net/wwan/qcom_bam_dmux.c
 
  QUALCOMM BLUETOOTH DRIVER
- M: Bartosz Golaszewski <brgl@bgdev.pl>
+ M: Bartosz Golaszewski <brgl@kernel.org>
  L: linux-arm-msm@vger.kernel.org
  S: Maintained
  F: drivers/bluetooth/btqca.[ch]
···
  F: include/linux/sunserialcore.h
 
  SPARSE CHECKER
- M: "Luc Van Oostenryck" <luc.vanoostenryck@gmail.com>
+ M: Chris Li <sparse@chrisli.org>
  L: linux-sparse@vger.kernel.org
  S: Maintained
  W: https://sparse.docs.kernel.org/
+1
include/linux/genalloc.h
···
   * @nr: The number of zeroed bits we're looking for
   * @data: optional additional data used by the callback
   * @pool: the pool being allocated from
+  * @start_addr: start address of memory chunk
   */
  typedef unsigned long (*genpool_algo_t)(unsigned long *map,
  			unsigned long size,
+16
include/linux/kasan.h
···
  #define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
  #define KASAN_VMALLOC_VM_ALLOC	((__force kasan_vmalloc_flags_t)0x02u)
  #define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)
+ #define KASAN_VMALLOC_KEEP_TAG	((__force kasan_vmalloc_flags_t)0x08u)
 
  #define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply exsiting page range */
  #define KASAN_VMALLOC_TLB_FLUSH  0x2 /* TLB flush */
···
  	__kasan_poison_vmalloc(start, size);
  }
 
+ void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+ 				 kasan_vmalloc_flags_t flags);
+ static __always_inline void
+ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+ 			  kasan_vmalloc_flags_t flags)
+ {
+ 	if (kasan_enabled())
+ 		__kasan_unpoison_vmap_areas(vms, nr_vms, flags);
+ }
+ 
  #else /* CONFIG_KASAN_VMALLOC */
 
  static inline void kasan_populate_early_vm_area_shadow(void *start,
···
  	return (void *)start;
  }
  static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
+ { }
+ 
+ static __always_inline void
+ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+ 			  kasan_vmalloc_flags_t flags)
  { }
 
  #endif /* CONFIG_KASAN_VMALLOC */
+2 -2
include/linux/kexec.h
···
  #define kexec_dprintk(fmt, arg...) \
  	do { if (kexec_file_dbg_print) pr_info(fmt, ##arg); } while (0)
 
- extern void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size);
+ extern void *kimage_map_segment(struct kimage *image, int idx);
  extern void kimage_unmap_segment(void *buffer);
  #else /* !CONFIG_KEXEC_CORE */
  struct pt_regs;
···
  static inline void crash_kexec(struct pt_regs *regs) { }
  static inline int kexec_should_crash(struct task_struct *p) { return 0; }
  static inline int kexec_crash_loaded(void) { return 0; }
- static inline void *kimage_map_segment(struct kimage *image, unsigned long addr, unsigned long size)
+ static inline void *kimage_map_segment(struct kimage *image, int idx)
  { return NULL; }
  static inline void kimage_unmap_segment(void *buffer) { }
  #define kexec_in_progress false
+2 -2
include/linux/leafops.h
···
 
  /**
   * softleaf_type() - Identify the type of leaf entry.
-  * @enntry: Leaf entry.
+  * @entry: Leaf entry.
   *
   * Returns: the leaf entry type associated with @entry.
   */
···
  /**
   * pte_is_uffd_marker() - Does this PTE entry encode a userfault-specific marker
   * leaf entry?
-  * @entry: Leaf entry.
+  * @pte: PTE entry.
   *
   * It's useful to be able to determine which leaf entries encode UFFD-specific
   * markers so we can handle these correctly.
+2
include/linux/memory-failure.h
···
  struct pfn_address_space {
  	struct interval_tree_node node;
  	struct address_space *mapping;
+ 	int (*pfn_to_vma_pgoff)(struct vm_area_struct *vma,
+ 				unsigned long pfn, pgoff_t *pgoff);
  };
 
  int register_pfn_address_space(struct pfn_address_space *pfn_space);
+4 -4
include/linux/mm.h
···
  	if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio)))
  		return 0;
 
- 	if (folio_test_anon(folio)) {
- 		/* One reference per page from the swapcache. */
- 		ref_count += folio_test_swapcache(folio) << order;
- 	} else {
+ 	/* One reference per page from the swapcache. */
+ 	ref_count += folio_test_swapcache(folio) << order;
+ 
+ 	if (!folio_test_anon(folio)) {
  		/* One reference per page from the pagecache. */
  		ref_count += !!folio->mapping << order;
  		/* One reference from PG_private. */
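Note on the hunk above: shmem/tmpfs folios are not anonymous but do sit in the swap cache once their contents have been written to swap, so the swap-cache reference term has to be counted for non-anon folios too; the fix hoists it out of the anon-only branch. A minimal userspace model of the fixed accounting (all names here are illustrative, not the kernel's):

	/* Sketch: one reference per page from the swap cache applies to anon
	 * AND shmem folios; the page-cache term stays non-anon only. */
	static long expected_refs(int order, int is_anon, int in_swapcache,
				  int has_mapping, long mapcount)
	{
		long refs = mapcount;				/* page-table mappings */
		refs += (long)in_swapcache << order;		/* swap cache */
		if (!is_anon)
			refs += (long)has_mapping << order;	/* page cache */
		return refs;
	}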
+12 -4
kernel/kexec_core.c
···
  	return result;
  }
 
- void *kimage_map_segment(struct kimage *image,
- 			 unsigned long addr, unsigned long size)
+ void *kimage_map_segment(struct kimage *image, int idx)
  {
+ 	unsigned long addr, size, eaddr;
  	unsigned long src_page_addr, dest_page_addr = 0;
- 	unsigned long eaddr = addr + size;
  	kimage_entry_t *ptr, entry;
  	struct page **src_pages;
  	unsigned int npages;
+ 	struct page *cma;
  	void *vaddr = NULL;
  	int i;
 
+ 	cma = image->segment_cma[idx];
+ 	if (cma)
+ 		return page_address(cma);
+ 
+ 	addr = image->segment[idx].mem;
+ 	size = image->segment[idx].memsz;
+ 	eaddr = addr + size;
  	/*
  	 * Collect the source pages and map them in a contiguous VA range.
  	 */
···
 
  void kimage_unmap_segment(void *segment_buffer)
  {
- 	vunmap(segment_buffer);
+ 	if (is_vmalloc_addr(segment_buffer))
+ 		vunmap(segment_buffer);
  }
 
  struct kexec_load_limit {
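With the new prototype, callers name a segment by index rather than by (addr, size). That lets kimage_map_segment() return a CMA-backed segment directly via page_address(), since it is already mapped, and fall back to vmap() of the collected source pages otherwise; kimage_unmap_segment() correspondingly only vunmap()s buffers that actually came from vmap(). A hedged sketch of the calling convention (touch_segment and update_buf are hypothetical):

	static int touch_segment(struct kimage *image, int idx)
	{
		void *buf = kimage_map_segment(image, idx); /* CMA: direct; else vmap */

		if (!buf)
			return -ENOMEM;
		update_buf(buf, image->segment[idx].memsz);
		kimage_unmap_segment(buf);	/* no-op for CMA-backed segments */
		return 0;
	}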
+2
lib/idr.c
···
 
  	if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
  		idr->idr_rt.xa_flags |= IDR_RT_MARKER;
+ 	if (max < base)
+ 		return -ENOSPC;
 
  	id = (id < base) ? 0 : id - base;
  	radix_tree_iter_init(&iter, id);
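This guard matters for IDRs created with a non-zero base: a request whose maximum lies below the base can never succeed, and without the check the id/base arithmetic proceeds instead of failing cleanly. The essential case, mirroring the new selftest added in tools/testing/radix-tree/idr-test.c below (ptr is illustrative):

	struct idr idr = IDR_INIT_BASE(idr, 1);	/* ids start at 1 */

	/* Range [0, 1) means max == 0 < base == 1: now fails with -ENOSPC. */
	int id = idr_alloc(&idr, ptr, 0, 1, GFP_KERNEL);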
+1 -1
mm/damon/vaddr.c
···
  		if (!folio)
  			continue;
  		if (damos_va_filter_out(s, folio, walk->vma, addr, pte, NULL))
- 			return 0;
+ 			continue;
  		damos_va_migrate_dests_add(folio, walk->vma, addr, dests,
  					   migration_lists);
  		nr = folio_nr_pages(folio);
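The `return 0` bailed out of damos_va_migrate_pmd_entry() from inside the PTE loop, before the function's pte_unmap_unlock() cleanup ran, leaving the PTE lock held; `continue` skips only the filtered folio and still reaches the unlock. (The mm/ksm hunk below fixes the same bug family from the other direction.)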
+32
mm/kasan/common.c
···
  #include <linux/string.h>
  #include <linux/types.h>
  #include <linux/bug.h>
+ #include <linux/vmalloc.h>
 
  #include "kasan.h"
  #include "../slab.h"
···
  	}
  	return true;
  }
+ 
+ #ifdef CONFIG_KASAN_VMALLOC
+ void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
+ 				 kasan_vmalloc_flags_t flags)
+ {
+ 	unsigned long size;
+ 	void *addr;
+ 	int area;
+ 	u8 tag;
+ 
+ 	/*
+ 	 * If KASAN_VMALLOC_KEEP_TAG was set at this point, all vms[] pointers
+ 	 * would be unpoisoned with the KASAN_TAG_KERNEL which would disable
+ 	 * KASAN checks down the line.
+ 	 */
+ 	if (WARN_ON_ONCE(flags & KASAN_VMALLOC_KEEP_TAG))
+ 		return;
+ 
+ 	size = vms[0]->size;
+ 	addr = vms[0]->addr;
+ 	vms[0]->addr = __kasan_unpoison_vmalloc(addr, size, flags);
+ 	tag = get_tag(vms[0]->addr);
+ 
+ 	for (area = 1 ; area < nr_vms ; area++) {
+ 		size = vms[area]->size;
+ 		addr = set_tag(vms[area]->addr, tag);
+ 		vms[area]->addr =
+ 			__kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG);
+ 	}
+ }
+ #endif
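Design note: an allocation can be spread across several vm areas (the mm/vmalloc.c hunk below switches the per-cpu get-vm-areas path to this helper), so a fresh tag is generated for vms[0] and propagated to the rest via KASAN_VMALLOC_KEEP_TAG, giving the whole allocation one consistent pointer tag. Callers passing KEEP_TAG themselves are rejected up front: then no fresh tag would ever be generated, the areas would keep the default KASAN_TAG_KERNEL, and checking would be silently disabled.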
+1 -1
mm/kasan/hw_tags.c
···
  		return (void *)start;
  	}
 
- 	tag = kasan_random_tag();
+ 	tag = (flags & KASAN_VMALLOC_KEEP_TAG) ? get_tag(start) : kasan_random_tag();
  	start = set_tag(start, tag);
 
  	/* Unpoison and initialize memory up to size. */
+3 -1
mm/kasan/shadow.c
···
  	    !(flags & KASAN_VMALLOC_PROT_NORMAL))
  		return (void *)start;
 
- 	start = set_tag(start, kasan_random_tag());
+ 	if (unlikely(!(flags & KASAN_VMALLOC_KEEP_TAG)))
+ 		start = set_tag(start, kasan_random_tag());
+ 
  	kasan_unpoison(start, size, false);
  	return (void *)start;
  }
+1 -1
mm/ksm.c
···
  		}
  	}
  out_unlock:
- 	pte_unmap_unlock(ptep, ptl);
+ 	pte_unmap_unlock(start_ptep, ptl);
  	return found;
  }
 
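The bug class is generic to PTE-table walks: pte_offset_map_lock() returns the pointer to the first mapped PTE, and pte_unmap_unlock() must be given that original pointer back, not an iterator that has been advanced across the loop. The correct pattern, as a minimal sketch (names illustrative):

	pte_t *start_ptep, *ptep;
	spinlock_t *ptl;

	start_ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!start_ptep)
		return 0;
	for (ptep = start_ptep; addr < end; ptep++, addr += PAGE_SIZE) {
		/* ... examine *ptep ... */
	}
	pte_unmap_unlock(start_ptep, ptl);	/* unmap what was mapped, not ptep */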
+2 -2
mm/memcontrol.c
···
  		memcg = root_mem_cgroup;
 
  	pr_warn("Memory cgroup min protection %lukB -- low protection %lukB",
- 		K(atomic_long_read(&memcg->memory.children_min_usage)*PAGE_SIZE),
- 		K(atomic_long_read(&memcg->memory.children_low_usage)*PAGE_SIZE));
+ 		K(atomic_long_read(&memcg->memory.children_min_usage)),
+ 		K(atomic_long_read(&memcg->memory.children_low_usage)));
  }
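The K() macro already converts a page count to kilobytes (it is defined in the kernel as ((x) << (PAGE_SHIFT - 10))), and children_min_usage/children_low_usage are page counts, so the extra *PAGE_SIZE inflated the printed values by a factor of PAGE_SIZE. Worked through for 4 KiB pages (PAGE_SHIFT == 12), assuming 100 protected pages:

	#define K(x) ((x) << (PAGE_SHIFT - 10))	/* pages -> kB */

	K(100);			/* 400 kB: correct */
	K(100 * PAGE_SIZE);	/* 1638400 "kB": PAGE_SIZE (4096x) too large */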
+18 -11
mm/memory-failure.c
···
  {
  	guard(mutex)(&pfn_space_lock);
 
+ 	if (!pfn_space->pfn_to_vma_pgoff)
+ 		return -EINVAL;
+ 
  	if (interval_tree_iter_first(&pfn_space_itree,
  				     pfn_space->node.start,
  				     pfn_space->node.last))
···
  }
  EXPORT_SYMBOL_GPL(unregister_pfn_address_space);
 
- static void add_to_kill_pfn(struct task_struct *tsk,
- 			    struct vm_area_struct *vma,
- 			    struct list_head *to_kill,
- 			    unsigned long pfn)
+ static void add_to_kill_pgoff(struct task_struct *tsk,
+ 			      struct vm_area_struct *vma,
+ 			      struct list_head *to_kill,
+ 			      pgoff_t pgoff)
  {
  	struct to_kill *tk;
 
···
  	}
 
  	/* Check for pgoff not backed by struct page */
- 	tk->addr = vma_address(vma, pfn, 1);
+ 	tk->addr = vma_address(vma, pgoff, 1);
  	tk->size_shift = PAGE_SHIFT;
 
  	if (tk->addr == -EFAULT)
  		pr_info("Unable to find address %lx in %s\n",
- 			pfn, tsk->comm);
+ 			pgoff, tsk->comm);
 
  	get_task_struct(tsk);
  	tk->tsk = tsk;
···
  /*
   * Collect processes when the error hit a PFN not backed by struct page.
   */
- static void collect_procs_pfn(struct address_space *mapping,
+ static void collect_procs_pfn(struct pfn_address_space *pfn_space,
  			      unsigned long pfn, struct list_head *to_kill)
  {
  	struct vm_area_struct *vma;
  	struct task_struct *tsk;
+ 	struct address_space *mapping = pfn_space->mapping;
 
  	i_mmap_lock_read(mapping);
  	rcu_read_lock();
···
  		t = task_early_kill(tsk, true);
  		if (!t)
  			continue;
- 		vma_interval_tree_foreach(vma, &mapping->i_mmap, pfn, pfn) {
- 			if (vma->vm_mm == t->mm)
- 				add_to_kill_pfn(t, vma, to_kill, pfn);
+ 		vma_interval_tree_foreach(vma, &mapping->i_mmap, 0, ULONG_MAX) {
+ 			pgoff_t pgoff;
+ 
+ 			if (vma->vm_mm == t->mm &&
+ 			    !pfn_space->pfn_to_vma_pgoff(vma, pfn, &pgoff))
+ 				add_to_kill_pgoff(t, vma, to_kill, pgoff);
  		}
  	}
  	rcu_read_unlock();
···
  		struct pfn_address_space *pfn_space =
  			container_of(node, struct pfn_address_space, node);
 
- 		collect_procs_pfn(pfn_space->mapping, pfn, &tokill);
+ 		collect_procs_pfn(pfn_space, pfn, &tokill);
 
  		mf_handled = true;
  	}
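The old lookup treated the raw pfn as a page offset into the mapping's interval tree, which only works if the two spaces happen to coincide. Collection now walks every VMA of the mapping and asks the registrant to translate the failing pfn into that VMA's pgoff, and register_pfn_address_space() rejects registrations that lack the callback with -EINVAL. A hedged sketch of what a registering driver might supply, assuming its pfn space equals its file-offset space (all mydev names hypothetical):

	static int mydev_pfn_to_vma_pgoff(struct vm_area_struct *vma,
					  unsigned long pfn, pgoff_t *pgoff)
	{
		if (pfn < vma->vm_pgoff || pfn >= vma->vm_pgoff + vma_pages(vma))
			return -ENOENT;	/* this VMA does not map the pfn */
		*pgoff = pfn;		/* identity translation, by assumption */
		return 0;
	}

	static struct pfn_address_space mydev_pfn_space = {
		.pfn_to_vma_pgoff = mydev_pfn_to_vma_pgoff,
		/* .node and .mapping filled in before registration */
	};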
-2
mm/memremap.c
···
  	if (folio_test_anon(folio)) {
  		for (i = 0; i < nr; i++)
  			__ClearPageAnonExclusive(folio_page(folio, i));
- 	} else {
- 		VM_WARN_ON_ONCE(folio_test_large(folio));
  	}
 
  	/*
+13 -13
mm/page_alloc.c
···
  				 NULL) != NULL;
  }
 
+ static void change_pageblock_range(struct page *pageblock_page,
+ 				   int start_order, int migratetype)
+ {
+ 	int nr_pageblocks = 1 << (start_order - pageblock_order);
+ 
+ 	while (nr_pageblocks--) {
+ 		set_pageblock_migratetype(pageblock_page, migratetype);
+ 		pageblock_page += pageblock_nr_pages;
+ 	}
+ }
+ 
  /*
   * Freeing function for a buddy system allocator.
   *
···
  			 * expand() down the line puts the sub-blocks
  			 * on the right freelists.
  			 */
- 			set_pageblock_migratetype(buddy, migratetype);
+ 			change_pageblock_range(buddy, order, migratetype);
 
  		combined_pfn = buddy_pfn & pfn;
···
  }
 
  #endif /* CONFIG_MEMORY_ISOLATION */
- 
- static void change_pageblock_range(struct page *pageblock_page,
- 				   int start_order, int migratetype)
- {
- 	int nr_pageblocks = 1 << (start_order - pageblock_order);
- 
- 	while (nr_pageblocks--) {
- 		set_pageblock_migratetype(pageblock_page, migratetype);
- 		pageblock_page += pageblock_nr_pages;
- 	}
- }
 
  static inline bool boost_watermark(struct zone *zone)
  {
···
  	 * recycled, this leads to the once large chunks of space being
  	 * fragmented and becoming unavailable for high-order allocations.
  	 */
- 	return 0;
+ 	return 1;
  #endif
  }
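Two separate fixes land in this file. Coalescing: when buddy merging produces a block above pageblock_order, the result spans 1 << (order - pageblock_order) pageblocks, and updating only the first one left stale migratetypes on the rest; change_pageblock_range() is therefore hoisted above the free path and used on the whole span. And for !CONFIG_MMU, zone_batchsize() now reports 1 instead of 0, which keeps batching effectively disabled (per the fragmentation comment in the hunk) while giving callers that derive per-cpu list sizing from this value a non-zero number to work with.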
+1 -1
mm/page_owner.c
···
  	.open		= page_owner_stack_open,
  	.read		= seq_read,
  	.llseek		= seq_lseek,
- 	.release	= seq_release,
+ 	.release	= seq_release_private,
  };
 
  static int page_owner_threshold_get(void *data, u64 *val)
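seq_release() frees only the seq_file itself; an open path that used seq_open_private() also allocated private iterator state, which must be returned through seq_release_private() or it leaks on every close. The standard pairing, as a sketch (the foo names are hypothetical):

	static int foo_open(struct inode *inode, struct file *file)
	{
		/* Allocates the seq_file plus sizeof(struct foo_iter) of private data. */
		return seq_open_private(file, &foo_seq_ops, sizeof(struct foo_iter));
	}

	static const struct file_operations foo_fops = {
		.open		= foo_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= seq_release_private,	/* frees the private data too */
	};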
+4 -4
mm/vmalloc.c
···
  	 */
  	if (size <= alloced_size) {
  		kasan_unpoison_vmalloc(p + old_size, size - old_size,
- 				       KASAN_VMALLOC_PROT_NORMAL);
+ 				       KASAN_VMALLOC_PROT_NORMAL |
+ 				       KASAN_VMALLOC_VM_ALLOC |
+ 				       KASAN_VMALLOC_KEEP_TAG);
  		/*
  		 * No need to zero memory here, as unused memory will have
  		 * already been zeroed at initial allocation time or during
···
  	 * With hardware tag-based KASAN, marking is skipped for
  	 * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
  	 */
- 	for (area = 0; area < nr_vms; area++)
- 		vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
- 				vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
+ 	kasan_unpoison_vmap_areas(vms, nr_vms, KASAN_VMALLOC_PROT_NORMAL);
 
  	kfree(vas);
  	return vms;
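Both hunks are about tag consistency. When vrealloc() grows an allocation in place, the caller already holds a tagged pointer, so the newly exposed tail is unpoisoned with KASAN_VMALLOC_KEEP_TAG (plus VM_ALLOC) rather than being assigned a fresh random tag that would no longer match the pointer in use. The per-cpu get-vm-areas path, meanwhile, now funnels through kasan_unpoison_vmap_areas() so all of its areas share a single tag, as implemented in mm/kasan/common.c above.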
+10 -1
rust/kernel/maple_tree.rs
···
  		loop {
  			// This uses the raw accessor because we're destroying pointers without removing them
  			// from the maple tree, which is only valid because this is the destructor.
- 			let ptr = ma_state.mas_find_raw(usize::MAX);
+ 			//
+ 			// Take the rcu lock because mas_find_raw() requires that you hold either the spinlock
+ 			// or the rcu read lock. This is only really required if memory reclaim might
+ 			// reallocate entries in the tree, as we otherwise have exclusive access. That feature
+ 			// doesn't exist yet, so for now, taking the rcu lock only serves the purpose of
+ 			// silencing lockdep.
+ 			let ptr = {
+ 				let _rcu = kernel::sync::rcu::Guard::new();
+ 				ma_state.mas_find_raw(usize::MAX)
+ 			};
  			if ptr.is_null() {
  				break;
  			}
+1 -3
security/integrity/ima/ima_kexec.c
···
  	if (!image->ima_buffer_addr)
  		return;
 
- 	ima_kexec_buffer = kimage_map_segment(image,
- 					      image->ima_buffer_addr,
- 					      image->ima_buffer_size);
+ 	ima_kexec_buffer = kimage_map_segment(image, image->ima_segment_index);
  	if (!ima_kexec_buffer) {
  		pr_err("Could not map measurements buffer.\n");
  		return;
+5 -1
tools/mm/page_owner_sort.c
···
  {
  	const struct block_list *l1 = p1, *l2 = p2;
 
- 	return l1->ts_nsec < l2->ts_nsec ? -1 : 1;
+ 	if (l1->ts_nsec < l2->ts_nsec)
+ 		return -1;
+ 	if (l1->ts_nsec > l2->ts_nsec)
+ 		return 1;
+ 	return 0;
  }
 
  static int compare_cull_condition(const void *p1, const void *p2)
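The old comparator never returned 0: for equal timestamps, both compare(a, b) and compare(b, a) returned 1, an inconsistent ordering that qsort(3) may turn into arbitrary results. The three-way form restores the contract (negative, zero, positive). For reference, an equivalent branch-free idiom for integer keys:

	return (l1->ts_nsec > l2->ts_nsec) - (l1->ts_nsec < l2->ts_nsec);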
+21
tools/testing/radix-tree/idr-test.c
···
  	idr_destroy(&idr);
  }
 
+ void idr_alloc2_test(void)
+ {
+ 	int id;
+ 	struct idr idr = IDR_INIT_BASE(idr, 1);
+ 
+ 	id = idr_alloc(&idr, idr_alloc2_test, 0, 1, GFP_KERNEL);
+ 	assert(id == -ENOSPC);
+ 
+ 	id = idr_alloc(&idr, idr_alloc2_test, 1, 2, GFP_KERNEL);
+ 	assert(id == 1);
+ 
+ 	id = idr_alloc(&idr, idr_alloc2_test, 0, 1, GFP_KERNEL);
+ 	assert(id == -ENOSPC);
+ 
+ 	id = idr_alloc(&idr, idr_alloc2_test, 0, 2, GFP_KERNEL);
+ 	assert(id == -ENOSPC);
+ 
+ 	idr_destroy(&idr);
+ }
+ 
  void idr_replace_test(void)
  {
  	DEFINE_IDR(idr);
···
 
  	idr_replace_test();
  	idr_alloc_test();
+ 	idr_alloc2_test();
  	idr_null_test();
  	idr_nowait_test();
  	idr_get_next_test(0);
+1 -1
tools/testing/selftests/mm/uffd-unit-tests.c
···
  	p = strstr(tmp, header);
  	if (p) {
  		/* For example, "State:\tD (disk sleep)" */
- 		c = *(p + sizeof(header) - 1);
+ 		c = *(p + strlen(header));
  		return c == 'D' ?
  			THR_STATE_UNINTERRUPTIBLE : THR_STATE_UNKNOWN;
  	}
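sizeof applied to a pointer yields the pointer's size, not the length of the string it points to, so sizeof(header) - 1 indexed the wrong offset past the match; strlen(header) is correct however header is declared. A self-contained illustration (the string shown is illustrative, not the test's actual header):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *h = "State:";	/* pointer, as in the test */
		char a[] = "State:";		/* array: sizeof - 1 would work */

		printf("%zu %zu\n", sizeof(h) - 1, strlen(h));	/* "7 6" on LP64 */
		printf("%zu\n", sizeof(a) - 1);			/* "6" */
		return 0;
	}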