Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'akpm' (patches from Andrew)

Merge more updates from Andrew Morton:

- MM remainders

- various misc things

- kcov updates

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (27 commits)
lib/test_printf.c: call wait_for_random_bytes() before plain %p tests
hexagon: drop the unused variable zero_page_mask
hexagon: fix printk format warning in setup.c
mm: fix oom_kill event handling
treewide: use PHYS_ADDR_MAX to avoid type casting ULLONG_MAX
mm: use octal not symbolic permissions
ipc: use new return type vm_fault_t
sysvipc/sem: mitigate semnum index against spectre v1
fault-injection: reorder config entries
arm: port KCOV to arm
sched/core / kcov: avoid kcov_area during task switch
kcov: prefault the kcov_area
kcov: ensure irq code sees a valid area
kernel/relay.c: change return type to vm_fault_t
exofs: avoid VLA in structures
coredump: fix spam with zero VMA process
fat: use fat_fs_error() instead of BUG_ON() in __fat_get_block()
proc: skip branch in /proc/*/* lookup
mremap: remove LATENCY_LIMIT from mremap to reduce the number of TLB shootdowns
mm/memblock: add missing include <linux/bootmem.h>
...

+396 -211
+2 -1
arch/arm/Kconfig
··· 8 8 select ARCH_HAS_DEVMEM_IS_ALLOWED 9 9 select ARCH_HAS_ELF_RANDOMIZE 10 10 select ARCH_HAS_FORTIFY_SOURCE 11 + select ARCH_HAS_KCOV 11 12 select ARCH_HAS_PTE_SPECIAL if ARM_LPAE 12 - select ARCH_HAS_SET_MEMORY 13 13 select ARCH_HAS_PHYS_TO_DMA 14 + select ARCH_HAS_SET_MEMORY 14 15 select ARCH_HAS_STRICT_KERNEL_RWX if MMU && !XIP_KERNEL 15 16 select ARCH_HAS_STRICT_MODULE_RWX if MMU 16 17 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+3
arch/arm/boot/compressed/Makefile
··· 25 25 26 26 GCOV_PROFILE := n 27 27 28 + # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 29 + KCOV_INSTRUMENT := n 30 + 28 31 # 29 32 # Architecture dependencies 30 33 #
+8
arch/arm/kvm/hyp/Makefile
··· 23 23 obj-$(CONFIG_KVM_ARM_HOST) += switch.o 24 24 CFLAGS_switch.o += $(CFLAGS_ARMV7VE) 25 25 obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o 26 + 27 + # KVM code is run at a different exception code with a different map, so 28 + # compiler instrumentation that inserts callbacks or checks into the code may 29 + # cause crashes. Just disable it. 30 + GCOV_PROFILE := n 31 + KASAN_SANITIZE := n 32 + UBSAN_SANITIZE := n 33 + KCOV_INSTRUMENT := n
+3
arch/arm/vdso/Makefile
··· 30 30 # Disable gcov profiling for VDSO code 31 31 GCOV_PROFILE := n 32 32 33 + # Prevents link failures: __sanitizer_cov_trace_pc() is not linked in. 34 + KCOV_INSTRUMENT := n 35 + 33 36 # Force dependency 34 37 $(obj)/vdso.o : $(obj)/vdso.so 35 38
+3 -3
arch/arm64/mm/init.c
··· 310 310 } 311 311 #endif 312 312 313 - static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX; 313 + static phys_addr_t memory_limit = PHYS_ADDR_MAX; 314 314 315 315 /* 316 316 * Limit the memory size that was specified via FDT. ··· 401 401 * high up in memory, add back the kernel region that must be accessible 402 402 * via the linear mapping. 403 403 */ 404 - if (memory_limit != (phys_addr_t)ULLONG_MAX) { 404 + if (memory_limit != PHYS_ADDR_MAX) { 405 405 memblock_mem_limit_remove_map(memory_limit); 406 406 memblock_add(__pa_symbol(_text), (u64)(_end - _text)); 407 407 } ··· 666 666 */ 667 667 static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p) 668 668 { 669 - if (memory_limit != (phys_addr_t)ULLONG_MAX) { 669 + if (memory_limit != PHYS_ADDR_MAX) { 670 670 pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20); 671 671 } else { 672 672 pr_emerg("Memory Limit: none\n");
-1
arch/hexagon/include/asm/pgtable.h
··· 30 30 31 31 /* A handy thing to have if one has the RAM. Declared in head.S */ 32 32 extern unsigned long empty_zero_page; 33 - extern unsigned long zero_page_mask; 34 33 35 34 /* 36 35 * The PTE model described here is that of the Hexagon Virtual Machine,
+1 -1
arch/hexagon/kernel/setup.c
··· 66 66 */ 67 67 __vmsetvec(_K_VM_event_vector); 68 68 69 - printk(KERN_INFO "PHYS_OFFSET=0x%08x\n", PHYS_OFFSET); 69 + printk(KERN_INFO "PHYS_OFFSET=0x%08lx\n", PHYS_OFFSET); 70 70 71 71 /* 72 72 * Simulator has a few differences from the hardware.
-3
arch/hexagon/mm/init.c
··· 39 39 /* Set as variable to limit PMD copies */ 40 40 int max_kernel_seg = 0x303; 41 41 42 - /* think this should be (page_size-1) the way it's used...*/ 43 - unsigned long zero_page_mask; 44 - 45 42 /* indicate pfn's of high memory */ 46 43 unsigned long highstart_pfn, highend_pfn; 47 44
+2 -2
arch/mips/kernel/setup.c
··· 93 93 * If the region reaches the top of the physical address space, adjust 94 94 * the size slightly so that (start + size) doesn't overflow 95 95 */ 96 - if (start + size - 1 == (phys_addr_t)ULLONG_MAX) 96 + if (start + size - 1 == PHYS_ADDR_MAX) 97 97 --size; 98 98 99 99 /* Sanity check */ ··· 376 376 unsigned long reserved_end; 377 377 unsigned long mapstart = ~0UL; 378 378 unsigned long bootmap_size; 379 - phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX; 379 + phys_addr_t ramstart = PHYS_ADDR_MAX; 380 380 bool bootmap_valid = false; 381 381 int i; 382 382
+1 -1
arch/powerpc/mm/mem.c
··· 215 215 /* Place all memblock_regions in the same node and merge contiguous 216 216 * memblock_regions 217 217 */ 218 - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); 218 + memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); 219 219 } 220 220 221 221 void __init initmem_init(void)
+1 -1
arch/sparc/mm/init_64.c
··· 1620 1620 (top_of_ram - total_ram) >> 20); 1621 1621 1622 1622 init_node_masks_nonnuma(); 1623 - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); 1623 + memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); 1624 1624 allocate_node_data(0); 1625 1625 node_set_online(0); 1626 1626 }
+3 -1
arch/x86/mm/init.c
··· 706 706 */ 707 707 int devmem_is_allowed(unsigned long pagenr) 708 708 { 709 - if (page_is_ram(pagenr)) { 709 + if (region_intersects(PFN_PHYS(pagenr), PAGE_SIZE, 710 + IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE) 711 + != REGION_DISJOINT) { 710 712 /* 711 713 * For disallowed memory regions in the low 1MB range, 712 714 * request that the page be shown as all zeros.
+1 -1
arch/x86/mm/init_32.c
··· 692 692 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; 693 693 #endif 694 694 695 - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); 695 + memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); 696 696 sparse_memory_present_with_active_regions(0); 697 697 698 698 #ifdef CONFIG_FLATMEM
+1 -1
arch/x86/mm/init_64.c
··· 742 742 #ifndef CONFIG_NUMA 743 743 void __init initmem_init(void) 744 744 { 745 - memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); 745 + memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); 746 746 } 747 747 #endif 748 748
+1 -1
drivers/firmware/efi/arm-init.c
··· 193 193 * uses its own memory map instead. 194 194 */ 195 195 memblock_dump_all(); 196 - memblock_remove(0, (phys_addr_t)ULLONG_MAX); 196 + memblock_remove(0, PHYS_ADDR_MAX); 197 197 198 198 for_each_efi_memory_desc(md) { 199 199 paddr = md->phys_addr;
+1 -1
drivers/remoteproc/qcom_q6v5_pil.c
··· 686 686 struct elf32_hdr *ehdr; 687 687 phys_addr_t mpss_reloc; 688 688 phys_addr_t boot_addr; 689 - phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX; 689 + phys_addr_t min_addr = PHYS_ADDR_MAX; 690 690 phys_addr_t max_addr = 0; 691 691 bool relocate = false; 692 692 char seg_name[10];
+2 -2
drivers/soc/qcom/mdt_loader.c
··· 50 50 const struct elf32_phdr *phdrs; 51 51 const struct elf32_phdr *phdr; 52 52 const struct elf32_hdr *ehdr; 53 - phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX; 53 + phys_addr_t min_addr = PHYS_ADDR_MAX; 54 54 phys_addr_t max_addr = 0; 55 55 int i; 56 56 ··· 97 97 const struct elf32_hdr *ehdr; 98 98 const struct firmware *seg_fw; 99 99 phys_addr_t mem_reloc; 100 - phys_addr_t min_addr = (phys_addr_t)ULLONG_MAX; 100 + phys_addr_t min_addr = PHYS_ADDR_MAX; 101 101 phys_addr_t max_addr = 0; 102 102 size_t fw_name_len; 103 103 ssize_t offset;
+9 -8
fs/binfmt_elf.c
··· 1621 1621 if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */ 1622 1622 return -EINVAL; 1623 1623 size = round_up(size, PAGE_SIZE); 1624 - data = vmalloc(size); 1625 - if (!data) 1624 + data = kvmalloc(size, GFP_KERNEL); 1625 + if (ZERO_OR_NULL_PTR(data)) 1626 1626 return -ENOMEM; 1627 1627 1628 1628 start_end_ofs = data + 2; ··· 1639 1639 filename = file_path(file, name_curpos, remaining); 1640 1640 if (IS_ERR(filename)) { 1641 1641 if (PTR_ERR(filename) == -ENAMETOOLONG) { 1642 - vfree(data); 1642 + kvfree(data); 1643 1643 size = size * 5 / 4; 1644 1644 goto alloc; 1645 1645 } ··· 1932 1932 kfree(t); 1933 1933 } 1934 1934 kfree(info->psinfo.data); 1935 - vfree(info->files.data); 1935 + kvfree(info->files.data); 1936 1936 } 1937 1937 1938 1938 #else ··· 2148 2148 2149 2149 /* Free data possibly allocated by fill_files_note(): */ 2150 2150 if (info->notes_files) 2151 - vfree(info->notes_files->data); 2151 + kvfree(info->notes_files->data); 2152 2152 2153 2153 kfree(info->prstatus); 2154 2154 kfree(info->psinfo); ··· 2294 2294 2295 2295 if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz)) 2296 2296 goto end_coredump; 2297 - vma_filesz = vmalloc(array_size(sizeof(*vma_filesz), (segs - 1))); 2298 - if (!vma_filesz) 2297 + vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)), 2298 + GFP_KERNEL); 2299 + if (ZERO_OR_NULL_PTR(vma_filesz)) 2299 2300 goto end_coredump; 2300 2301 2301 2302 for (i = 0, vma = first_vma(current, gate_vma); vma != NULL; ··· 2403 2402 cleanup: 2404 2403 free_note_info(&info); 2405 2404 kfree(shdr4extnum); 2406 - vfree(vma_filesz); 2405 + kvfree(vma_filesz); 2407 2406 kfree(phdr4note); 2408 2407 kfree(elf); 2409 2408 out:
+48 -34
fs/exofs/ore.c
··· 146 146 struct ore_io_state **pios) 147 147 { 148 148 struct ore_io_state *ios; 149 - struct page **pages; 150 - struct osd_sg_entry *sgilist; 149 + size_t size_ios, size_extra, size_total; 150 + void *ios_extra; 151 + 152 + /* 153 + * The desired layout looks like this, with the extra_allocation 154 + * items pointed at from fields within ios or per_dev: 155 + 151 156 struct __alloc_all_io_state { 152 157 struct ore_io_state ios; 153 158 struct ore_per_dev_state per_dev[numdevs]; 154 159 union { 155 160 struct osd_sg_entry sglist[sgs_per_dev * numdevs]; 156 161 struct page *pages[num_par_pages]; 157 - }; 158 - } *_aios; 162 + } extra_allocation; 163 + } whole_allocation; 159 164 160 - if (likely(sizeof(*_aios) <= PAGE_SIZE)) { 161 - _aios = kzalloc(sizeof(*_aios), GFP_KERNEL); 162 - if (unlikely(!_aios)) { 163 - ORE_DBGMSG("Failed kzalloc bytes=%zd\n", 164 - sizeof(*_aios)); 165 + */ 166 + 167 + /* This should never happen, so abort early if it ever does. */ 168 + if (sgs_per_dev && num_par_pages) { 169 + ORE_DBGMSG("Tried to use both pages and sglist\n"); 170 + *pios = NULL; 171 + return -EINVAL; 172 + } 173 + 174 + if (numdevs > (INT_MAX - sizeof(*ios)) / 175 + sizeof(struct ore_per_dev_state)) 176 + return -ENOMEM; 177 + size_ios = sizeof(*ios) + sizeof(struct ore_per_dev_state) * numdevs; 178 + 179 + if (sgs_per_dev * numdevs > INT_MAX / sizeof(struct osd_sg_entry)) 180 + return -ENOMEM; 181 + if (num_par_pages > INT_MAX / sizeof(struct page *)) 182 + return -ENOMEM; 183 + size_extra = max(sizeof(struct osd_sg_entry) * (sgs_per_dev * numdevs), 184 + sizeof(struct page *) * num_par_pages); 185 + 186 + size_total = size_ios + size_extra; 187 + 188 + if (likely(size_total <= PAGE_SIZE)) { 189 + ios = kzalloc(size_total, GFP_KERNEL); 190 + if (unlikely(!ios)) { 191 + ORE_DBGMSG("Failed kzalloc bytes=%zd\n", size_total); 165 192 *pios = NULL; 166 193 return -ENOMEM; 167 194 } 168 - pages = num_par_pages ? _aios->pages : NULL; 169 - sgilist = sgs_per_dev ? 
_aios->sglist : NULL; 170 - ios = &_aios->ios; 195 + ios_extra = (char *)ios + size_ios; 171 196 } else { 172 - struct __alloc_small_io_state { 173 - struct ore_io_state ios; 174 - struct ore_per_dev_state per_dev[numdevs]; 175 - } *_aio_small; 176 - union __extra_part { 177 - struct osd_sg_entry sglist[sgs_per_dev * numdevs]; 178 - struct page *pages[num_par_pages]; 179 - } *extra_part; 180 - 181 - _aio_small = kzalloc(sizeof(*_aio_small), GFP_KERNEL); 182 - if (unlikely(!_aio_small)) { 197 + ios = kzalloc(size_ios, GFP_KERNEL); 198 + if (unlikely(!ios)) { 183 199 ORE_DBGMSG("Failed alloc first part bytes=%zd\n", 184 - sizeof(*_aio_small)); 200 + size_ios); 185 201 *pios = NULL; 186 202 return -ENOMEM; 187 203 } 188 - extra_part = kzalloc(sizeof(*extra_part), GFP_KERNEL); 189 - if (unlikely(!extra_part)) { 204 + ios_extra = kzalloc(size_extra, GFP_KERNEL); 205 + if (unlikely(!ios_extra)) { 190 206 ORE_DBGMSG("Failed alloc second part bytes=%zd\n", 191 - sizeof(*extra_part)); 192 - kfree(_aio_small); 207 + size_extra); 208 + kfree(ios); 193 209 *pios = NULL; 194 210 return -ENOMEM; 195 211 } 196 212 197 - pages = num_par_pages ? extra_part->pages : NULL; 198 - sgilist = sgs_per_dev ? extra_part->sglist : NULL; 199 213 /* In this case the per_dev[0].sgilist holds the pointer to 200 214 * be freed 201 215 */ 202 - ios = &_aio_small->ios; 203 216 ios->extra_part_alloc = true; 204 217 } 205 218 206 - if (pages) { 207 - ios->parity_pages = pages; 219 + if (num_par_pages) { 220 + ios->parity_pages = ios_extra; 208 221 ios->max_par_pages = num_par_pages; 209 222 } 210 - if (sgilist) { 223 + if (sgs_per_dev) { 224 + struct osd_sg_entry *sgilist = ios_extra; 211 225 unsigned d; 212 226 213 227 for (d = 0; d < numdevs; ++d) {
+55 -20
fs/exofs/ore_raid.c
··· 71 71 { 72 72 struct __stripe_pages_2d *sp2d; 73 73 unsigned data_devs = group_width - parity; 74 + 75 + /* 76 + * Desired allocation layout is, though when larger than PAGE_SIZE, 77 + * each struct __alloc_1p_arrays is separately allocated: 78 + 74 79 struct _alloc_all_bytes { 75 80 struct __alloc_stripe_pages_2d { 76 81 struct __stripe_pages_2d sp2d; ··· 87 82 char page_is_read[data_devs]; 88 83 } __a1pa[pages_in_unit]; 89 84 } *_aab; 85 + 90 86 struct __alloc_1p_arrays *__a1pa; 91 87 struct __alloc_1p_arrays *__a1pa_end; 92 - const unsigned sizeof__a1pa = sizeof(_aab->__a1pa[0]); 88 + 89 + */ 90 + 91 + char *__a1pa; 92 + char *__a1pa_end; 93 + 94 + const size_t sizeof_stripe_pages_2d = 95 + sizeof(struct __stripe_pages_2d) + 96 + sizeof(struct __1_page_stripe) * pages_in_unit; 97 + const size_t sizeof__a1pa = 98 + ALIGN(sizeof(struct page *) * (2 * group_width) + data_devs, 99 + sizeof(void *)); 100 + const size_t sizeof__a1pa_arrays = sizeof__a1pa * pages_in_unit; 101 + const size_t alloc_total = sizeof_stripe_pages_2d + 102 + sizeof__a1pa_arrays; 103 + 93 104 unsigned num_a1pa, alloc_size, i; 94 105 95 106 /* FIXME: check these numbers in ore_verify_layout */ 96 - BUG_ON(sizeof(_aab->__asp2d) > PAGE_SIZE); 107 + BUG_ON(sizeof_stripe_pages_2d > PAGE_SIZE); 97 108 BUG_ON(sizeof__a1pa > PAGE_SIZE); 98 109 99 - if (sizeof(*_aab) > PAGE_SIZE) { 100 - num_a1pa = (PAGE_SIZE - sizeof(_aab->__asp2d)) / sizeof__a1pa; 101 - alloc_size = sizeof(_aab->__asp2d) + sizeof__a1pa * num_a1pa; 110 + /* 111 + * If alloc_total would be larger than PAGE_SIZE, only allocate 112 + * as many a1pa items as would fill the rest of the page, instead 113 + * of the full pages_in_unit count. 
114 + */ 115 + if (alloc_total > PAGE_SIZE) { 116 + num_a1pa = (PAGE_SIZE - sizeof_stripe_pages_2d) / sizeof__a1pa; 117 + alloc_size = sizeof_stripe_pages_2d + sizeof__a1pa * num_a1pa; 102 118 } else { 103 119 num_a1pa = pages_in_unit; 104 - alloc_size = sizeof(*_aab); 120 + alloc_size = alloc_total; 105 121 } 106 122 107 - _aab = kzalloc(alloc_size, GFP_KERNEL); 108 - if (unlikely(!_aab)) { 123 + *psp2d = sp2d = kzalloc(alloc_size, GFP_KERNEL); 124 + if (unlikely(!sp2d)) { 109 125 ORE_DBGMSG("!! Failed to alloc sp2d size=%d\n", alloc_size); 110 126 return -ENOMEM; 111 127 } 128 + /* From here Just call _sp2d_free */ 112 129 113 - sp2d = &_aab->__asp2d.sp2d; 114 - *psp2d = sp2d; /* From here Just call _sp2d_free */ 130 + /* Find start of a1pa area. */ 131 + __a1pa = (char *)sp2d + sizeof_stripe_pages_2d; 132 + /* Find end of the _allocated_ a1pa area. */ 133 + __a1pa_end = __a1pa + alloc_size; 115 134 116 - __a1pa = _aab->__a1pa; 117 - __a1pa_end = __a1pa + num_a1pa; 118 - 135 + /* Allocate additionally needed a1pa items in PAGE_SIZE chunks. */ 119 136 for (i = 0; i < pages_in_unit; ++i) { 137 + struct __1_page_stripe *stripe = &sp2d->_1p_stripes[i]; 138 + 120 139 if (unlikely(__a1pa >= __a1pa_end)) { 121 140 num_a1pa = min_t(unsigned, PAGE_SIZE / sizeof__a1pa, 122 141 pages_in_unit - i); 142 + alloc_size = sizeof__a1pa * num_a1pa; 123 143 124 - __a1pa = kcalloc(num_a1pa, sizeof__a1pa, GFP_KERNEL); 144 + __a1pa = kzalloc(alloc_size, GFP_KERNEL); 125 145 if (unlikely(!__a1pa)) { 126 146 ORE_DBGMSG("!! 
Failed to _alloc_1p_arrays=%d\n", 127 147 num_a1pa); 128 148 return -ENOMEM; 129 149 } 130 - __a1pa_end = __a1pa + num_a1pa; 150 + __a1pa_end = __a1pa + alloc_size; 131 151 /* First *pages is marked for kfree of the buffer */ 132 - sp2d->_1p_stripes[i].alloc = true; 152 + stripe->alloc = true; 133 153 } 134 154 135 - sp2d->_1p_stripes[i].pages = __a1pa->pages; 136 - sp2d->_1p_stripes[i].scribble = __a1pa->scribble ; 137 - sp2d->_1p_stripes[i].page_is_read = __a1pa->page_is_read; 138 - ++__a1pa; 155 + /* 156 + * Attach all _1p_stripes pointers to the allocation for 157 + * it which was either part of the original PAGE_SIZE 158 + * allocation or the subsequent allocation in this loop. 159 + */ 160 + stripe->pages = (void *)__a1pa; 161 + stripe->scribble = stripe->pages + group_width; 162 + stripe->page_is_read = (char *)stripe->scribble + group_width; 163 + __a1pa += sizeof__a1pa; 139 164 140 165 sp2d->parity = parity;
+11 -12
fs/exofs/super.c
··· 549 549 static int __alloc_dev_table(struct exofs_sb_info *sbi, unsigned numdevs, 550 550 struct exofs_dev **peds) 551 551 { 552 - struct __alloc_ore_devs_and_exofs_devs { 553 - /* Twice bigger table: See exofs_init_comps() and comment at 554 - * exofs_read_lookup_dev_table() 555 - */ 556 - struct ore_dev *oreds[numdevs * 2 - 1]; 557 - struct exofs_dev eds[numdevs]; 558 - } *aoded; 552 + /* Twice bigger table: See exofs_init_comps() and comment at 553 + * exofs_read_lookup_dev_table() 554 + */ 555 + const size_t numores = numdevs * 2 - 1; 559 556 struct exofs_dev *eds; 560 557 unsigned i; 561 558 562 - aoded = kzalloc(sizeof(*aoded), GFP_KERNEL); 563 - if (unlikely(!aoded)) { 559 + sbi->oc.ods = kzalloc(numores * sizeof(struct ore_dev *) + 560 + numdevs * sizeof(struct exofs_dev), GFP_KERNEL); 561 + if (unlikely(!sbi->oc.ods)) { 564 562 EXOFS_ERR("ERROR: failed allocating Device array[%d]\n", 565 563 numdevs); 566 564 return -ENOMEM; 567 565 } 568 566 569 - sbi->oc.ods = aoded->oreds; 570 - *peds = eds = aoded->eds; 567 + /* Start of allocated struct exofs_dev entries */ 568 + *peds = eds = (void *)sbi->oc.ods[numores]; 569 + /* Initialize pointers into struct exofs_dev */ 571 570 for (i = 0; i < numdevs; ++i) 572 - aoded->oreds[i] = &eds[i].ored; 571 + sbi->oc.ods[i] = &eds[i].ored; 573 572 return 0; 574 573 } 575 574
+7 -1
fs/fat/inode.c
··· 158 158 err = fat_bmap(inode, iblock, &phys, &mapped_blocks, create, false); 159 159 if (err) 160 160 return err; 161 + if (!phys) { 162 + fat_fs_error(sb, 163 + "invalid FAT chain (i_pos %lld, last_block %llu)", 164 + MSDOS_I(inode)->i_pos, 165 + (unsigned long long)last_block); 166 + return -EIO; 167 + } 161 168 162 - BUG_ON(!phys); 163 169 BUG_ON(*max_blocks != mapped_blocks); 164 170 set_buffer_new(bh_result); 165 171 map_bh(bh_result, sb, phys);
+3 -6
fs/proc/base.c
··· 2439 2439 for (p = ents; p < last; p++) { 2440 2440 if (p->len != dentry->d_name.len) 2441 2441 continue; 2442 - if (!memcmp(dentry->d_name.name, p->name, p->len)) 2442 + if (!memcmp(dentry->d_name.name, p->name, p->len)) { 2443 + res = proc_pident_instantiate(dentry, task, p); 2443 2444 break; 2445 + } 2444 2446 } 2445 - if (p >= last) 2446 - goto out; 2447 - 2448 - res = proc_pident_instantiate(dentry, task, p); 2449 - out: 2450 2447 put_task_struct(task); 2451 2448 out_no_task: 2452 2449 return res;
+14
include/linux/kcov.h
··· 22 22 KCOV_MODE_TRACE_CMP = 3, 23 23 }; 24 24 25 + #define KCOV_IN_CTXSW (1 << 30) 26 + 25 27 void kcov_task_init(struct task_struct *t); 26 28 void kcov_task_exit(struct task_struct *t); 29 + 30 + #define kcov_prepare_switch(t) \ 31 + do { \ 32 + (t)->kcov_mode |= KCOV_IN_CTXSW; \ 33 + } while (0) 34 + 35 + #define kcov_finish_switch(t) \ 36 + do { \ 37 + (t)->kcov_mode &= ~KCOV_IN_CTXSW; \ 38 + } while (0) 27 39 28 40 #else 29 41 30 42 static inline void kcov_task_init(struct task_struct *t) {} 31 43 static inline void kcov_task_exit(struct task_struct *t) {} 44 + static inline void kcov_prepare_switch(struct task_struct *t) {} 45 + static inline void kcov_finish_switch(struct task_struct *t) {} 32 46 33 47 #endif /* CONFIG_KCOV */ 34 48 #endif /* _LINUX_KCOV_H */
+22 -4
include/linux/memcontrol.h
··· 53 53 MEMCG_HIGH, 54 54 MEMCG_MAX, 55 55 MEMCG_OOM, 56 + MEMCG_OOM_KILL, 56 57 MEMCG_SWAP_MAX, 57 58 MEMCG_SWAP_FAIL, 58 59 MEMCG_NR_MEMORY_EVENTS, ··· 721 720 722 721 rcu_read_lock(); 723 722 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 724 - if (likely(memcg)) { 723 + if (likely(memcg)) 725 724 count_memcg_events(memcg, idx, 1); 726 - if (idx == OOM_KILL) 727 - cgroup_file_notify(&memcg->events_file); 728 - } 729 725 rcu_read_unlock(); 730 726 } 731 727 ··· 731 733 { 732 734 atomic_long_inc(&memcg->memory_events[event]); 733 735 cgroup_file_notify(&memcg->events_file); 736 + } 737 + 738 + static inline void memcg_memory_event_mm(struct mm_struct *mm, 739 + enum memcg_memory_event event) 740 + { 741 + struct mem_cgroup *memcg; 742 + 743 + if (mem_cgroup_disabled()) 744 + return; 745 + 746 + rcu_read_lock(); 747 + memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 748 + if (likely(memcg)) 749 + memcg_memory_event(memcg, event); 750 + rcu_read_unlock(); 734 751 } 735 752 736 753 #ifdef CONFIG_TRANSPARENT_HUGEPAGE ··· 766 753 767 754 static inline void memcg_memory_event(struct mem_cgroup *memcg, 768 755 enum memcg_memory_event event) 756 + { 757 + } 758 + 759 + static inline void memcg_memory_event_mm(struct mm_struct *mm, 760 + enum memcg_memory_event event) 769 761 { 770 762 } 771 763
+1 -1
include/linux/sched.h
··· 1130 1130 1131 1131 #ifdef CONFIG_KCOV 1132 1132 /* Coverage collection mode enabled for this task (0 if disabled): */ 1133 - enum kcov_mode kcov_mode; 1133 + unsigned int kcov_mode; 1134 1134 1135 1135 /* Size of the kcov_area: */ 1136 1136 unsigned int kcov_size;
+1
include/linux/slab.h
··· 600 600 struct memcg_cache_array __rcu *memcg_caches; 601 601 struct list_head __root_caches_node; 602 602 struct list_head children; 603 + bool dying; 603 604 }; 604 605 struct { 605 606 struct mem_cgroup *memcg;
+14 -4
ipc/sem.c
··· 85 85 #include <linux/nsproxy.h> 86 86 #include <linux/ipc_namespace.h> 87 87 #include <linux/sched/wake_q.h> 88 + #include <linux/nospec.h> 88 89 89 90 #include <linux/uaccess.h> 90 91 #include "util.h" ··· 369 368 int nsops) 370 369 { 371 370 struct sem *sem; 371 + int idx; 372 372 373 373 if (nsops != 1) { 374 374 /* Complex operation - acquire a full lock */ ··· 387 385 * 388 386 * Both facts are tracked by use_global_mode. 389 387 */ 390 - sem = &sma->sems[sops->sem_num]; 388 + idx = array_index_nospec(sops->sem_num, sma->sem_nsems); 389 + sem = &sma->sems[idx]; 391 390 392 391 /* 393 392 * Initial check for use_global_lock. Just an optimization, ··· 641 638 un = q->undo; 642 639 643 640 for (sop = sops; sop < sops + nsops; sop++) { 644 - curr = &sma->sems[sop->sem_num]; 641 + int idx = array_index_nospec(sop->sem_num, sma->sem_nsems); 642 + curr = &sma->sems[idx]; 645 643 sem_op = sop->sem_op; 646 644 result = curr->semval; 647 645 ··· 722 718 * until the operations can go through. 723 719 */ 724 720 for (sop = sops; sop < sops + nsops; sop++) { 725 - curr = &sma->sems[sop->sem_num]; 721 + int idx = array_index_nospec(sop->sem_num, sma->sem_nsems); 722 + 723 + curr = &sma->sems[idx]; 726 724 sem_op = sop->sem_op; 727 725 result = curr->semval; 728 726 ··· 1362 1356 return -EIDRM; 1363 1357 } 1364 1358 1359 + semnum = array_index_nospec(semnum, sma->sem_nsems); 1365 1360 curr = &sma->sems[semnum]; 1366 1361 1367 1362 ipc_assert_locked_object(&sma->sem_perm); ··· 1516 1509 err = -EIDRM; 1517 1510 goto out_unlock; 1518 1511 } 1512 + 1513 + semnum = array_index_nospec(semnum, nsems); 1519 1514 curr = &sma->sems[semnum]; 1520 1515 1521 1516 switch (cmd) { ··· 2090 2081 */ 2091 2082 if (nsops == 1) { 2092 2083 struct sem *curr; 2093 - curr = &sma->sems[sops->sem_num]; 2084 + int idx = array_index_nospec(sops->sem_num, sma->sem_nsems); 2085 + curr = &sma->sems[idx]; 2094 2086 2095 2087 if (alter) { 2096 2088 if (sma->complex_count) {
+1 -1
ipc/shm.c
··· 408 408 up_write(&shm_ids(ns).rwsem); 409 409 } 410 410 411 - static int shm_fault(struct vm_fault *vmf) 411 + static vm_fault_t shm_fault(struct vm_fault *vmf) 412 412 { 413 413 struct file *file = vmf->vma->vm_file; 414 414 struct shm_file_data *sfd = shm_file_data(file);
+8
kernel/fork.c
··· 440 440 continue; 441 441 } 442 442 charge = 0; 443 + /* 444 + * Don't duplicate many vmas if we've been oom-killed (for 445 + * example) 446 + */ 447 + if (fatal_signal_pending(current)) { 448 + retval = -EINTR; 449 + goto out; 450 + } 443 451 if (mpnt->vm_flags & VM_ACCOUNT) { 444 452 unsigned long len = vma_pages(mpnt); 445 453
+19 -2
kernel/kcov.c
··· 58 58 59 59 static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t) 60 60 { 61 - enum kcov_mode mode; 61 + unsigned int mode; 62 62 63 63 /* 64 64 * We are interested in code coverage as a function of a syscall inputs, ··· 241 241 242 242 void kcov_task_init(struct task_struct *t) 243 243 { 244 - t->kcov_mode = KCOV_MODE_DISABLED; 244 + WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED); 245 + barrier(); 245 246 t->kcov_size = 0; 246 247 t->kcov_area = NULL; 247 248 t->kcov = NULL; ··· 324 323 return 0; 325 324 } 326 325 326 + /* 327 + * Fault in a lazily-faulted vmalloc area before it can be used by 328 + * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the 329 + * vmalloc fault handling path is instrumented. 330 + */ 331 + static void kcov_fault_in_area(struct kcov *kcov) 332 + { 333 + unsigned long stride = PAGE_SIZE / sizeof(unsigned long); 334 + unsigned long *area = kcov->area; 335 + unsigned long offset; 336 + 337 + for (offset = 0; offset < kcov->size; offset += stride) 338 + READ_ONCE(area[offset]); 339 + } 340 + 327 341 static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd, 328 342 unsigned long arg) 329 343 { ··· 387 371 #endif 388 372 else 389 373 return -EINVAL; 374 + kcov_fault_in_area(kcov); 390 375 /* Cache in task struct for performance. */ 391 376 t->kcov_size = kcov->size; 392 377 t->kcov_area = kcov->area;
+4
kernel/kexec_core.c
··· 829 829 else 830 830 buf += mchunk; 831 831 mbytes -= mchunk; 832 + 833 + cond_resched(); 832 834 } 833 835 out: 834 836 return result; ··· 895 893 else 896 894 buf += mchunk; 897 895 mbytes -= mchunk; 896 + 897 + cond_resched(); 898 898 } 899 899 out: 900 900 return result;
+1 -1
kernel/relay.c
··· 39 39 /* 40 40 * fault() vm_op implementation for relay file mapping. 41 41 */ 42 - static int relay_buf_fault(struct vm_fault *vmf) 42 + static vm_fault_t relay_buf_fault(struct vm_fault *vmf) 43 43 { 44 44 struct page *page; 45 45 struct rchan_buf *buf = vmf->vma->vm_private_data;
+4
kernel/sched/core.c
··· 10 10 #include <linux/kthread.h> 11 11 #include <linux/nospec.h> 12 12 13 + #include <linux/kcov.h> 14 + 13 15 #include <asm/switch_to.h> 14 16 #include <asm/tlb.h> 15 17 ··· 2635 2633 prepare_task_switch(struct rq *rq, struct task_struct *prev, 2636 2634 struct task_struct *next) 2637 2635 { 2636 + kcov_prepare_switch(prev); 2638 2637 sched_info_switch(rq, prev, next); 2639 2638 perf_event_task_sched_out(prev, next); 2640 2639 rseq_preempt(prev); ··· 2705 2702 finish_task(prev); 2706 2703 finish_lock_switch(rq); 2707 2704 finish_arch_post_lock_switch(); 2705 + kcov_finish_switch(current); 2708 2706 2709 2707 fire_sched_in_preempt_notifiers(current); 2710 2708 /*
+18 -18
lib/Kconfig.debug
··· 1506 1506 1507 1507 If unsure, say N. 1508 1508 1509 + config FUNCTION_ERROR_INJECTION 1510 + def_bool y 1511 + depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES 1512 + 1509 1513 config FAULT_INJECTION 1510 1514 bool "Fault-injection framework" 1511 1515 depends on DEBUG_KERNEL 1512 1516 help 1513 1517 Provide fault-injection framework. 1514 1518 For more details, see Documentation/fault-injection/. 1515 - 1516 - config FUNCTION_ERROR_INJECTION 1517 - def_bool y 1518 - depends on HAVE_FUNCTION_ERROR_INJECTION && KPROBES 1519 1519 1520 1520 config FAILSLAB 1521 1521 bool "Fault-injection capability for kmalloc" ··· 1547 1547 Only works with drivers that use the generic timeout handling, 1548 1548 for others it wont do anything. 1549 1549 1550 - config FAIL_MMC_REQUEST 1551 - bool "Fault-injection capability for MMC IO" 1552 - depends on FAULT_INJECTION_DEBUG_FS && MMC 1553 - help 1554 - Provide fault-injection capability for MMC IO. 1555 - This will make the mmc core return data errors. This is 1556 - useful to test the error handling in the mmc block device 1557 - and to test how the mmc host driver handles retries from 1558 - the block device. 1559 - 1560 1550 config FAIL_FUTEX 1561 1551 bool "Fault-injection capability for futexes" 1562 1552 select DEBUG_FS 1563 1553 depends on FAULT_INJECTION && FUTEX 1564 1554 help 1565 1555 Provide fault-injection capability for futexes. 1556 + 1557 + config FAULT_INJECTION_DEBUG_FS 1558 + bool "Debugfs entries for fault-injection capabilities" 1559 + depends on FAULT_INJECTION && SYSFS && DEBUG_FS 1560 + help 1561 + Enable configuration of fault-injection capabilities via debugfs. 1566 1562 1567 1563 config FAIL_FUNCTION 1568 1564 bool "Fault-injection capability for functions" ··· 1570 1574 an error value and have to handle it. This is useful to test the 1571 1575 error handling in various subsystems. 
1572 1576 1573 - config FAULT_INJECTION_DEBUG_FS 1574 - bool "Debugfs entries for fault-injection capabilities" 1575 - depends on FAULT_INJECTION && SYSFS && DEBUG_FS 1577 + config FAIL_MMC_REQUEST 1578 + bool "Fault-injection capability for MMC IO" 1579 + depends on FAULT_INJECTION_DEBUG_FS && MMC 1576 1580 help 1577 - Enable configuration of fault-injection capabilities via debugfs. 1581 + Provide fault-injection capability for MMC IO. 1582 + This will make the mmc core return data errors. This is 1583 + useful to test the error handling in the mmc block device 1584 + and to test how the mmc host driver handles retries from 1585 + the block device. 1578 1586 1579 1587 config FAULT_INJECTION_STACKTRACE_FILTER 1580 1588 bool "stacktrace filter for fault-injection capabilities"
+7
lib/test_printf.c
··· 260 260 { 261 261 int err; 262 262 263 + /* 264 + * Make sure crng is ready. Otherwise we get "(ptrval)" instead 265 + * of a hashed address when printing '%p' in plain_hash() and 266 + * plain_format(). 267 + */ 268 + wait_for_random_bytes(); 269 + 263 270 err = plain_hash(); 264 271 if (err) { 265 272 pr_warn("plain 'p' does not appear to be hashed\n");
+4 -6
mm/cleancache.c
··· 307 307 struct dentry *root = debugfs_create_dir("cleancache", NULL); 308 308 if (root == NULL) 309 309 return -ENXIO; 310 - debugfs_create_u64("succ_gets", S_IRUGO, root, &cleancache_succ_gets); 311 - debugfs_create_u64("failed_gets", S_IRUGO, 312 - root, &cleancache_failed_gets); 313 - debugfs_create_u64("puts", S_IRUGO, root, &cleancache_puts); 314 - debugfs_create_u64("invalidates", S_IRUGO, 315 - root, &cleancache_invalidates); 310 + debugfs_create_u64("succ_gets", 0444, root, &cleancache_succ_gets); 311 + debugfs_create_u64("failed_gets", 0444, root, &cleancache_failed_gets); 312 + debugfs_create_u64("puts", 0444, root, &cleancache_puts); 313 + debugfs_create_u64("invalidates", 0444, root, &cleancache_invalidates); 316 314 #endif 317 315 return 0; 318 316 }
+10 -15
mm/cma_debug.c
··· 172 172 173 173 tmp = debugfs_create_dir(name, cma_debugfs_root); 174 174 175 - debugfs_create_file("alloc", S_IWUSR, tmp, cma, 176 - &cma_alloc_fops); 177 - 178 - debugfs_create_file("free", S_IWUSR, tmp, cma, 179 - &cma_free_fops); 180 - 181 - debugfs_create_file("base_pfn", S_IRUGO, tmp, 182 - &cma->base_pfn, &cma_debugfs_fops); 183 - debugfs_create_file("count", S_IRUGO, tmp, 184 - &cma->count, &cma_debugfs_fops); 185 - debugfs_create_file("order_per_bit", S_IRUGO, tmp, 186 - &cma->order_per_bit, &cma_debugfs_fops); 187 - debugfs_create_file("used", S_IRUGO, tmp, cma, &cma_used_fops); 188 - debugfs_create_file("maxchunk", S_IRUGO, tmp, cma, &cma_maxchunk_fops); 175 + debugfs_create_file("alloc", 0200, tmp, cma, &cma_alloc_fops); 176 + debugfs_create_file("free", 0200, tmp, cma, &cma_free_fops); 177 + debugfs_create_file("base_pfn", 0444, tmp, 178 + &cma->base_pfn, &cma_debugfs_fops); 179 + debugfs_create_file("count", 0444, tmp, &cma->count, &cma_debugfs_fops); 180 + debugfs_create_file("order_per_bit", 0444, tmp, 181 + &cma->order_per_bit, &cma_debugfs_fops); 182 + debugfs_create_file("used", 0444, tmp, cma, &cma_used_fops); 183 + debugfs_create_file("maxchunk", 0444, tmp, cma, &cma_maxchunk_fops); 189 184 190 185 u32s = DIV_ROUND_UP(cma_bitmap_maxno(cma), BITS_PER_BYTE * sizeof(u32)); 191 - debugfs_create_u32_array("bitmap", S_IRUGO, tmp, (u32*)cma->bitmap, u32s); 186 + debugfs_create_u32_array("bitmap", 0444, tmp, (u32 *)cma->bitmap, u32s); 192 187 } 193 188 194 189 static int __init cma_debugfs_init(void)
+1 -1
mm/compaction.c
··· 1899 1899 1900 1900 return count; 1901 1901 } 1902 - static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node); 1902 + static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node); 1903 1903 1904 1904 int compaction_register_node(struct node *node) 1905 1905 {
+1 -1
mm/dmapool.c
··· 105 105 return PAGE_SIZE - size; 106 106 } 107 107 108 - static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL); 108 + static DEVICE_ATTR(pools, 0444, show_pools, NULL); 109 109 110 110 /** 111 111 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
+1 -1
mm/failslab.c
··· 42 42 static int __init failslab_debugfs_init(void) 43 43 { 44 44 struct dentry *dir; 45 - umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 45 + umode_t mode = S_IFREG | 0600; 46 46 47 47 dir = fault_create_debugfs_attr("failslab", NULL, &failslab.attr); 48 48 if (IS_ERR(dir))
+5 -6
mm/frontswap.c
··· 486 486 struct dentry *root = debugfs_create_dir("frontswap", NULL); 487 487 if (root == NULL) 488 488 return -ENXIO; 489 - debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads); 490 - debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores); 491 - debugfs_create_u64("failed_stores", S_IRUGO, root, 492 - &frontswap_failed_stores); 493 - debugfs_create_u64("invalidates", S_IRUGO, 494 - root, &frontswap_invalidates); 489 + debugfs_create_u64("loads", 0444, root, &frontswap_loads); 490 + debugfs_create_u64("succ_stores", 0444, root, &frontswap_succ_stores); 491 + debugfs_create_u64("failed_stores", 0444, root, 492 + &frontswap_failed_stores); 493 + debugfs_create_u64("invalidates", 0444, root, &frontswap_invalidates); 495 494 #endif 496 495 return 0; 497 496 }
+10 -4
mm/ksm.c
··· 216 216 #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ 217 217 #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ 218 218 #define STABLE_FLAG 0x200 /* is listed from the stable tree */ 219 + #define KSM_FLAG_MASK (SEQNR_MASK|UNSTABLE_FLAG|STABLE_FLAG) 220 + /* to mask all the flags */ 219 221 220 222 /* The stable and unstable tree heads */ 221 223 static struct rb_root one_stable_tree[1] = { RB_ROOT }; ··· 2600 2598 anon_vma_lock_read(anon_vma); 2601 2599 anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, 2602 2600 0, ULONG_MAX) { 2601 + unsigned long addr; 2602 + 2603 2603 cond_resched(); 2604 2604 vma = vmac->vma; 2605 - if (rmap_item->address < vma->vm_start || 2606 - rmap_item->address >= vma->vm_end) 2605 + 2606 + /* Ignore the stable/unstable/sqnr flags */ 2607 + addr = rmap_item->address & ~KSM_FLAG_MASK; 2608 + 2609 + if (addr < vma->vm_start || addr >= vma->vm_end) 2607 2610 continue; 2608 2611 /* 2609 2612 * Initially we examine only the vma which covers this ··· 2622 2615 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 2623 2616 continue; 2624 2617 2625 - if (!rwc->rmap_one(page, vma, 2626 - rmap_item->address, rwc->arg)) { 2618 + if (!rwc->rmap_one(page, vma, addr, rwc->arg)) { 2627 2619 anon_vma_unlock_read(anon_vma); 2628 2620 return; 2629 2621 }
+7 -3
mm/memblock.c
··· 20 20 #include <linux/kmemleak.h> 21 21 #include <linux/seq_file.h> 22 22 #include <linux/memblock.h> 23 + #include <linux/bootmem.h> 23 24 24 25 #include <asm/sections.h> 25 26 #include <linux/io.h> ··· 1809 1808 struct dentry *root = debugfs_create_dir("memblock", NULL); 1810 1809 if (!root) 1811 1810 return -ENXIO; 1812 - debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops); 1813 - debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops); 1811 + debugfs_create_file("memory", 0444, root, 1812 + &memblock.memory, &memblock_debug_fops); 1813 + debugfs_create_file("reserved", 0444, root, 1814 + &memblock.reserved, &memblock_debug_fops); 1814 1815 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP 1815 - debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops); 1816 + debugfs_create_file("physmem", 0444, root, 1817 + &memblock.physmem, &memblock_debug_fops); 1816 1818 #endif 1817 1819 1818 1820 return 0;
+8 -2
mm/memcontrol.c
··· 3550 3550 3551 3551 seq_printf(sf, "oom_kill_disable %d\n", memcg->oom_kill_disable); 3552 3552 seq_printf(sf, "under_oom %d\n", (bool)memcg->under_oom); 3553 - seq_printf(sf, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL)); 3553 + seq_printf(sf, "oom_kill %lu\n", 3554 + atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 3554 3555 return 0; 3555 3556 } 3556 3557 ··· 5240 5239 atomic_long_read(&memcg->memory_events[MEMCG_MAX])); 5241 5240 seq_printf(m, "oom %lu\n", 5242 5241 atomic_long_read(&memcg->memory_events[MEMCG_OOM])); 5243 - seq_printf(m, "oom_kill %lu\n", memcg_sum_events(memcg, OOM_KILL)); 5242 + seq_printf(m, "oom_kill %lu\n", 5243 + atomic_long_read(&memcg->memory_events[MEMCG_OOM_KILL])); 5244 5244 5245 5245 return 0; 5246 5246 } ··· 5482 5480 elow = memcg->memory.low; 5483 5481 5484 5482 parent = parent_mem_cgroup(memcg); 5483 + /* No parent means a non-hierarchical mode on v1 memcg */ 5484 + if (!parent) 5485 + return MEMCG_PROT_NONE; 5486 + 5485 5487 if (parent == root) 5486 5488 goto exit; 5487 5489
-4
mm/mremap.c
··· 191 191 drop_rmap_locks(vma); 192 192 } 193 193 194 - #define LATENCY_LIMIT (64 * PAGE_SIZE) 195 - 196 194 unsigned long move_page_tables(struct vm_area_struct *vma, 197 195 unsigned long old_addr, struct vm_area_struct *new_vma, 198 196 unsigned long new_addr, unsigned long len, ··· 245 247 next = (new_addr + PMD_SIZE) & PMD_MASK; 246 248 if (extent > next - new_addr) 247 249 extent = next - new_addr; 248 - if (extent > LATENCY_LIMIT) 249 - extent = LATENCY_LIMIT; 250 250 move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, 251 251 new_pmd, new_addr, need_rmap_locks, &need_flush); 252 252 }
+1 -1
mm/oom_kill.c
··· 913 913 914 914 /* Raise event before sending signal: task reaper must see this */ 915 915 count_vm_event(OOM_KILL); 916 - count_memcg_event_mm(mm, OOM_KILL); 916 + memcg_memory_event_mm(mm, MEMCG_OOM_KILL); 917 917 918 918 /* 919 919 * We should send SIGKILL before granting access to memory reserves
+1 -1
mm/page_alloc.c
··· 3061 3061 3062 3062 static int __init fail_page_alloc_debugfs(void) 3063 3063 { 3064 - umode_t mode = S_IFREG | S_IRUSR | S_IWUSR; 3064 + umode_t mode = S_IFREG | 0600; 3065 3065 struct dentry *dir; 3066 3066 3067 3067 dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
+1 -1
mm/page_idle.c
··· 201 201 } 202 202 203 203 static struct bin_attribute page_idle_bitmap_attr = 204 - __BIN_ATTR(bitmap, S_IRUSR | S_IWUSR, 204 + __BIN_ATTR(bitmap, 0600, 205 205 page_idle_bitmap_read, page_idle_bitmap_write, 0); 206 206 207 207 static struct bin_attribute *page_idle_bin_attrs[] = {
+2 -2
mm/page_owner.c
··· 631 631 return 0; 632 632 } 633 633 634 - dentry = debugfs_create_file("page_owner", S_IRUSR, NULL, 635 - NULL, &proc_page_owner_operations); 634 + dentry = debugfs_create_file("page_owner", 0400, NULL, 635 + NULL, &proc_page_owner_operations); 636 636 637 637 return PTR_ERR_OR_ZERO(dentry); 638 638 }
+5 -4
mm/shmem.c
··· 3013 3013 if (len > PAGE_SIZE) 3014 3014 return -ENAMETOOLONG; 3015 3015 3016 - inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE); 3016 + inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK | 0777, 0, 3017 + VM_NORESERVE); 3017 3018 if (!inode) 3018 3019 return -ENOSPC; 3019 3020 ··· 3446 3445 sbinfo->max_blocks << (PAGE_SHIFT - 10)); 3447 3446 if (sbinfo->max_inodes != shmem_default_max_inodes()) 3448 3447 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes); 3449 - if (sbinfo->mode != (S_IRWXUGO | S_ISVTX)) 3448 + if (sbinfo->mode != (0777 | S_ISVTX)) 3450 3449 seq_printf(seq, ",mode=%03ho", sbinfo->mode); 3451 3450 if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID)) 3452 3451 seq_printf(seq, ",uid=%u", ··· 3487 3486 if (!sbinfo) 3488 3487 return -ENOMEM; 3489 3488 3490 - sbinfo->mode = S_IRWXUGO | S_ISVTX; 3489 + sbinfo->mode = 0777 | S_ISVTX; 3491 3490 sbinfo->uid = current_fsuid(); 3492 3491 sbinfo->gid = current_fsgid(); 3493 3492 sb->s_fs_info = sbinfo; ··· 3930 3929 d_set_d_op(path.dentry, &anon_ops); 3931 3930 3932 3931 res = ERR_PTR(-ENOSPC); 3933 - inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags); 3932 + inode = shmem_get_inode(sb, NULL, S_IFREG | 0777, 0, flags); 3934 3933 if (!inode) 3935 3934 goto put_memory; 3936 3935
+34 -3
mm/slab_common.c
··· 136 136 s->memcg_params.root_cache = NULL; 137 137 RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL); 138 138 INIT_LIST_HEAD(&s->memcg_params.children); 139 + s->memcg_params.dying = false; 139 140 } 140 141 141 142 static int init_memcg_params(struct kmem_cache *s, ··· 609 608 * The memory cgroup could have been offlined while the cache 610 609 * creation work was pending. 611 610 */ 612 - if (memcg->kmem_state != KMEM_ONLINE) 611 + if (memcg->kmem_state != KMEM_ONLINE || root_cache->memcg_params.dying) 613 612 goto out_unlock; 614 613 615 614 idx = memcg_cache_id(memcg); ··· 711 710 { 712 711 if (WARN_ON_ONCE(is_root_cache(s)) || 713 712 WARN_ON_ONCE(s->memcg_params.deact_fn)) 713 + return; 714 + 715 + if (s->memcg_params.root_cache->memcg_params.dying) 714 716 return; 715 717 716 718 /* pin memcg so that @s doesn't get destroyed in the middle */ ··· 827 823 return -EBUSY; 828 824 return 0; 829 825 } 826 + 827 + static void flush_memcg_workqueue(struct kmem_cache *s) 828 + { 829 + mutex_lock(&slab_mutex); 830 + s->memcg_params.dying = true; 831 + mutex_unlock(&slab_mutex); 832 + 833 + /* 834 + * SLUB deactivates the kmem_caches through call_rcu_sched. Make 835 + * sure all registered rcu callbacks have been invoked. 836 + */ 837 + if (IS_ENABLED(CONFIG_SLUB)) 838 + rcu_barrier_sched(); 839 + 840 + /* 841 + * SLAB and SLUB create memcg kmem_caches through workqueue and SLUB 842 + * deactivates the memcg kmem_caches through workqueue. Make sure all 843 + * previous workitems on workqueue are processed. 
844 + */ 845 + flush_workqueue(memcg_kmem_cache_wq); 846 + } 830 847 #else 831 848 static inline int shutdown_memcg_caches(struct kmem_cache *s) 832 849 { 833 850 return 0; 851 + } 852 + 853 + static inline void flush_memcg_workqueue(struct kmem_cache *s) 854 + { 834 855 } 835 856 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */ 836 857 ··· 873 844 874 845 if (unlikely(!s)) 875 846 return; 847 + 848 + flush_memcg_workqueue(s); 876 849 877 850 get_online_cpus(); 878 851 get_online_mems(); ··· 1243 1212 1244 1213 #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG) 1245 1214 #ifdef CONFIG_SLAB 1246 - #define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR) 1215 + #define SLABINFO_RIGHTS (0600) 1247 1216 #else 1248 - #define SLABINFO_RIGHTS S_IRUSR 1217 + #define SLABINFO_RIGHTS (0400) 1249 1218 #endif 1250 1219 1251 1220 static void print_slabinfo_header(struct seq_file *m)
+1 -1
mm/swapfile.c
··· 100 100 101 101 static inline unsigned char swap_count(unsigned char ent) 102 102 { 103 - return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */ 103 + return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */ 104 104 } 105 105 106 106 /* returns 1 if swap entry is freed */
+2 -2
mm/vmalloc.c
··· 2741 2741 static int __init proc_vmalloc_init(void) 2742 2742 { 2743 2743 if (IS_ENABLED(CONFIG_NUMA)) 2744 - proc_create_seq_private("vmallocinfo", S_IRUSR, NULL, 2744 + proc_create_seq_private("vmallocinfo", 0400, NULL, 2745 2745 &vmalloc_op, 2746 2746 nr_node_ids * sizeof(unsigned int), NULL); 2747 2747 else 2748 - proc_create_seq("vmallocinfo", S_IRUSR, NULL, &vmalloc_op); 2748 + proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op); 2749 2749 return 0; 2750 2750 } 2751 2751 module_init(proc_vmalloc_init);
+3 -2
mm/zsmalloc.c
··· 661 661 } 662 662 pool->stat_dentry = entry; 663 663 664 - entry = debugfs_create_file("classes", S_IFREG | S_IRUGO, 665 - pool->stat_dentry, pool, &zs_stats_size_fops); 664 + entry = debugfs_create_file("classes", S_IFREG | 0444, 665 + pool->stat_dentry, pool, 666 + &zs_stats_size_fops); 666 667 if (!entry) { 667 668 pr_warn("%s: debugfs file entry <%s> creation failed\n", 668 669 name, "classes");
+19 -19
mm/zswap.c
··· 1256 1256 if (!zswap_debugfs_root) 1257 1257 return -ENOMEM; 1258 1258 1259 - debugfs_create_u64("pool_limit_hit", S_IRUGO, 1260 - zswap_debugfs_root, &zswap_pool_limit_hit); 1261 - debugfs_create_u64("reject_reclaim_fail", S_IRUGO, 1262 - zswap_debugfs_root, &zswap_reject_reclaim_fail); 1263 - debugfs_create_u64("reject_alloc_fail", S_IRUGO, 1264 - zswap_debugfs_root, &zswap_reject_alloc_fail); 1265 - debugfs_create_u64("reject_kmemcache_fail", S_IRUGO, 1266 - zswap_debugfs_root, &zswap_reject_kmemcache_fail); 1267 - debugfs_create_u64("reject_compress_poor", S_IRUGO, 1268 - zswap_debugfs_root, &zswap_reject_compress_poor); 1269 - debugfs_create_u64("written_back_pages", S_IRUGO, 1270 - zswap_debugfs_root, &zswap_written_back_pages); 1271 - debugfs_create_u64("duplicate_entry", S_IRUGO, 1272 - zswap_debugfs_root, &zswap_duplicate_entry); 1273 - debugfs_create_u64("pool_total_size", S_IRUGO, 1274 - zswap_debugfs_root, &zswap_pool_total_size); 1275 - debugfs_create_atomic_t("stored_pages", S_IRUGO, 1276 - zswap_debugfs_root, &zswap_stored_pages); 1259 + debugfs_create_u64("pool_limit_hit", 0444, 1260 + zswap_debugfs_root, &zswap_pool_limit_hit); 1261 + debugfs_create_u64("reject_reclaim_fail", 0444, 1262 + zswap_debugfs_root, &zswap_reject_reclaim_fail); 1263 + debugfs_create_u64("reject_alloc_fail", 0444, 1264 + zswap_debugfs_root, &zswap_reject_alloc_fail); 1265 + debugfs_create_u64("reject_kmemcache_fail", 0444, 1266 + zswap_debugfs_root, &zswap_reject_kmemcache_fail); 1267 + debugfs_create_u64("reject_compress_poor", 0444, 1268 + zswap_debugfs_root, &zswap_reject_compress_poor); 1269 + debugfs_create_u64("written_back_pages", 0444, 1270 + zswap_debugfs_root, &zswap_written_back_pages); 1271 + debugfs_create_u64("duplicate_entry", 0444, 1272 + zswap_debugfs_root, &zswap_duplicate_entry); 1273 + debugfs_create_u64("pool_total_size", 0444, 1274 + zswap_debugfs_root, &zswap_pool_total_size); 1275 + debugfs_create_atomic_t("stored_pages", 0444, 1276 + 
zswap_debugfs_root, &zswap_stored_pages); 1277 1277 debugfs_create_atomic_t("same_filled_pages", 0444, 1278 - zswap_debugfs_root, &zswap_same_filled_pages); 1278 + zswap_debugfs_root, &zswap_same_filled_pages); 1279 1279 1280 1280 return 0; 1281 1281 }