Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Three sets of overlapping changes. Nothing serious.

Signed-off-by: David S. Miller <davem@davemloft.net>

+899 -605
+1
.mailmap
··· 69 69 James Bottomley <jejb@titanic.il.steeleye.com> 70 70 James E Wilson <wilson@specifix.com> 71 71 James Ketrenos <jketreno@io.(none)> 72 + Javi Merino <javi.merino@kernel.org> <javi.merino@arm.com> 72 73 <javier@osg.samsung.com> <javier.martinez@collabora.co.uk> 73 74 Jean Tourrilhes <jt@hpl.hp.com> 74 75 Jeff Garzik <jgarzik@pretzel.yyz.us>
+1
Documentation/devicetree/bindings/input/touchscreen/silead_gsl1680.txt
··· 13 13 - touchscreen-size-y : See touchscreen.txt 14 14 15 15 Optional properties: 16 + - firmware-name : File basename (string) for board specific firmware 16 17 - touchscreen-inverted-x : See touchscreen.txt 17 18 - touchscreen-inverted-y : See touchscreen.txt 18 19 - touchscreen-swapped-x-y : See touchscreen.txt
+2 -2
MAINTAINERS
··· 8753 8753 F: include/linux/oprofile.h 8754 8754 8755 8755 ORACLE CLUSTER FILESYSTEM 2 (OCFS2) 8756 - M: Mark Fasheh <mfasheh@suse.com> 8756 + M: Mark Fasheh <mfasheh@versity.com> 8757 8757 M: Joel Becker <jlbec@evilplan.org> 8758 8758 L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers) 8759 8759 W: http://ocfs2.wiki.kernel.org ··· 11641 11641 THERMAL/CPU_COOLING 11642 11642 M: Amit Daniel Kachhap <amit.kachhap@gmail.com> 11643 11643 M: Viresh Kumar <viresh.kumar@linaro.org> 11644 - M: Javi Merino <javi.merino@arm.com> 11644 + M: Javi Merino <javi.merino@kernel.org> 11645 11645 L: linux-pm@vger.kernel.org 11646 11646 S: Supported 11647 11647 F: Documentation/thermal/cpu-cooling-api.txt
+1 -1
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 8 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc7 4 + EXTRAVERSION = 5 5 NAME = Psychotic Stoned Sheep 6 6 7 7 # *DOCUMENTATION*
+1 -1
arch/arm/boot/compressed/head.S
··· 779 779 orrne r0, r0, #1 @ MMU enabled 780 780 movne r1, #0xfffffffd @ domain 0 = client 781 781 bic r6, r6, #1 << 31 @ 32-bit translation system 782 - bic r6, r6, #3 << 0 @ use only ttbr0 782 + bic r6, r6, #(7 << 0) | (1 << 4) @ use only ttbr0 783 783 mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer 784 784 mcrne p15, 0, r1, c3, c0, 0 @ load domain access control 785 785 mcrne p15, 0, r6, c2, c0, 2 @ load ttb control
+1 -1
arch/arm/include/asm/dma-mapping.h
··· 111 111 /* The ARM override for dma_max_pfn() */ 112 112 static inline unsigned long dma_max_pfn(struct device *dev) 113 113 { 114 - return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask); 114 + return dma_to_pfn(dev, *dev->dma_mask); 115 115 } 116 116 #define dma_max_pfn(dev) dma_max_pfn(dev) 117 117
+11 -3
arch/arm/kernel/devtree.c
··· 88 88 return; 89 89 90 90 for_each_child_of_node(cpus, cpu) { 91 + const __be32 *cell; 92 + int prop_bytes; 91 93 u32 hwid; 92 94 93 95 if (of_node_cmp(cpu->type, "cpu")) ··· 101 99 * properties is considered invalid to build the 102 100 * cpu_logical_map. 103 101 */ 104 - if (of_property_read_u32(cpu, "reg", &hwid)) { 102 + cell = of_get_property(cpu, "reg", &prop_bytes); 103 + if (!cell || prop_bytes < sizeof(*cell)) { 105 104 pr_debug(" * %s missing reg property\n", 106 105 cpu->full_name); 107 106 of_node_put(cpu); ··· 110 107 } 111 108 112 109 /* 113 - * 8 MSBs must be set to 0 in the DT since the reg property 110 + * Bits n:24 must be set to 0 in the DT since the reg property 114 111 * defines the MPIDR[23:0]. 115 112 */ 116 - if (hwid & ~MPIDR_HWID_BITMASK) { 113 + do { 114 + hwid = be32_to_cpu(*cell++); 115 + prop_bytes -= sizeof(*cell); 116 + } while (!hwid && prop_bytes > 0); 117 + 118 + if (prop_bytes || (hwid & ~MPIDR_HWID_BITMASK)) { 117 119 of_node_put(cpu); 118 120 return; 119 121 }
-2
arch/arm64/include/asm/debug-monitors.h
··· 61 61 62 62 #define AARCH64_BREAK_KGDB_DYN_DBG \ 63 63 (AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5)) 64 - #define KGDB_DYN_BRK_INS_BYTE(x) \ 65 - ((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff) 66 64 67 65 #define CACHE_FLUSH_IS_SAFE 1 68 66
+24 -12
arch/arm64/kernel/kgdb.c
··· 19 19 * along with this program. If not, see <http://www.gnu.org/licenses/>. 20 20 */ 21 21 22 + #include <linux/bug.h> 22 23 #include <linux/irq.h> 23 24 #include <linux/kdebug.h> 24 25 #include <linux/kgdb.h> 25 26 #include <linux/kprobes.h> 27 + #include <asm/debug-monitors.h> 28 + #include <asm/insn.h> 26 29 #include <asm/traps.h> 27 30 28 31 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { ··· 341 338 unregister_die_notifier(&kgdb_notifier); 342 339 } 343 340 344 - /* 345 - * ARM instructions are always in LE. 346 - * Break instruction is encoded in LE format 347 - */ 348 - struct kgdb_arch arch_kgdb_ops = { 349 - .gdb_bpt_instr = { 350 - KGDB_DYN_BRK_INS_BYTE(0), 351 - KGDB_DYN_BRK_INS_BYTE(1), 352 - KGDB_DYN_BRK_INS_BYTE(2), 353 - KGDB_DYN_BRK_INS_BYTE(3), 354 - } 355 - }; 341 + struct kgdb_arch arch_kgdb_ops; 342 + 343 + int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) 344 + { 345 + int err; 346 + 347 + BUILD_BUG_ON(AARCH64_INSN_SIZE != BREAK_INSTR_SIZE); 348 + 349 + err = aarch64_insn_read((void *)bpt->bpt_addr, (u32 *)bpt->saved_instr); 350 + if (err) 351 + return err; 352 + 353 + return aarch64_insn_write((void *)bpt->bpt_addr, 354 + (u32)AARCH64_BREAK_KGDB_DYN_DBG); 355 + } 356 + 357 + int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) 358 + { 359 + return aarch64_insn_write((void *)bpt->bpt_addr, 360 + *(u32 *)bpt->saved_instr); 361 + }
+6 -8
arch/arm64/kernel/smp.c
··· 201 201 return ret; 202 202 } 203 203 204 - static void smp_store_cpu_info(unsigned int cpuid) 205 - { 206 - store_cpu_topology(cpuid); 207 - numa_store_cpu_info(cpuid); 208 - } 209 - 210 204 /* 211 205 * This is the secondary CPU boot entry. We're using this CPUs 212 206 * idle thread stack, but a set of temporary page tables. ··· 248 254 */ 249 255 notify_cpu_starting(cpu); 250 256 251 - smp_store_cpu_info(cpu); 257 + store_cpu_topology(cpu); 252 258 253 259 /* 254 260 * OK, now it's safe to let the boot CPU continue. Wait for ··· 683 689 { 684 690 int err; 685 691 unsigned int cpu; 692 + unsigned int this_cpu; 686 693 687 694 init_cpu_topology(); 688 695 689 - smp_store_cpu_info(smp_processor_id()); 696 + this_cpu = smp_processor_id(); 697 + store_cpu_topology(this_cpu); 698 + numa_store_cpu_info(this_cpu); 690 699 691 700 /* 692 701 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set ··· 716 719 continue; 717 720 718 721 set_cpu_present(cpu, true); 722 + numa_store_cpu_info(cpu); 719 723 } 720 724 } 721 725
+1
arch/mips/Kconfig
··· 65 65 select ARCH_CLOCKSOURCE_DATA 66 66 select HANDLE_DOMAIN_IRQ 67 67 select HAVE_EXIT_THREAD 68 + select HAVE_REGS_AND_STACK_ACCESS_API 68 69 69 70 menu "Machine selection" 70 71
-36
arch/mips/Kconfig.debug
··· 113 113 help 114 114 Add several files to the debugfs to test spinlock speed. 115 115 116 - if CPU_MIPSR6 117 - 118 - choice 119 - prompt "Compact branch policy" 120 - default MIPS_COMPACT_BRANCHES_OPTIMAL 121 - 122 - config MIPS_COMPACT_BRANCHES_NEVER 123 - bool "Never (force delay slot branches)" 124 - help 125 - Pass the -mcompact-branches=never flag to the compiler in order to 126 - force it to always emit branches with delay slots, and make no use 127 - of the compact branch instructions introduced by MIPSr6. This is 128 - useful if you suspect there may be an issue with compact branches in 129 - either the compiler or the CPU. 130 - 131 - config MIPS_COMPACT_BRANCHES_OPTIMAL 132 - bool "Optimal (use where beneficial)" 133 - help 134 - Pass the -mcompact-branches=optimal flag to the compiler in order for 135 - it to make use of compact branch instructions where it deems them 136 - beneficial, and use branches with delay slots elsewhere. This is the 137 - default compiler behaviour, and should be used unless you have a 138 - reason to choose otherwise. 139 - 140 - config MIPS_COMPACT_BRANCHES_ALWAYS 141 - bool "Always (force compact branches)" 142 - help 143 - Pass the -mcompact-branches=always flag to the compiler in order to 144 - force it to always emit compact branches, making no use of branch 145 - instructions with delay slots. This can result in more compact code 146 - which may be beneficial in some scenarios. 147 - 148 - endchoice 149 - 150 - endif # CPU_MIPSR6 151 - 152 116 config SCACHE_DEBUGFS 153 117 bool "L2 cache debugfs entries" 154 118 depends on DEBUG_FS
-4
arch/mips/Makefile
··· 203 203 toolchain-virt := $(call cc-option-yn,$(mips-cflags) -mvirt) 204 204 cflags-$(toolchain-virt) += -DTOOLCHAIN_SUPPORTS_VIRT 205 205 206 - cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never 207 - cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal 208 - cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS) += -mcompact-branches=always 209 - 210 206 # 211 207 # Firmware support 212 208 #
+1 -1
arch/mips/ath79/clock.c
··· 96 96 struct clk *clk; 97 97 98 98 clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div); 99 - if (!clk) 99 + if (IS_ERR(clk)) 100 100 panic("failed to allocate %s clock structure", name); 101 101 102 102 return clk;
+6
arch/mips/cavium-octeon/octeon-irq.c
··· 1619 1619 return -ENOMEM; 1620 1620 } 1621 1621 1622 + /* 1623 + * Clear the OF_POPULATED flag that was set by of_irq_init() 1624 + * so that all GPIO devices will be probed. 1625 + */ 1626 + of_node_clear_flag(gpio_node, OF_POPULATED); 1627 + 1622 1628 return 0; 1623 1629 } 1624 1630 /*
+1 -1
arch/mips/cavium-octeon/octeon-platform.c
··· 1059 1059 { 1060 1060 return of_platform_bus_probe(NULL, octeon_ids, NULL); 1061 1061 } 1062 - device_initcall(octeon_publish_devices); 1062 + arch_initcall(octeon_publish_devices); 1063 1063 1064 1064 MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>"); 1065 1065 MODULE_LICENSE("GPL");
+38 -2
arch/mips/dec/int-handler.S
··· 146 146 /* 147 147 * Find irq with highest priority 148 148 */ 149 - PTR_LA t1,cpu_mask_nr_tbl 149 + # open coded PTR_LA t1, cpu_mask_nr_tbl 150 + #if (_MIPS_SZPTR == 32) 151 + # open coded la t1, cpu_mask_nr_tbl 152 + lui t1, %hi(cpu_mask_nr_tbl) 153 + addiu t1, %lo(cpu_mask_nr_tbl) 154 + 155 + #endif 156 + #if (_MIPS_SZPTR == 64) 157 + # open coded dla t1, cpu_mask_nr_tbl 158 + .set push 159 + .set noat 160 + lui t1, %highest(cpu_mask_nr_tbl) 161 + lui AT, %hi(cpu_mask_nr_tbl) 162 + daddiu t1, t1, %higher(cpu_mask_nr_tbl) 163 + daddiu AT, AT, %lo(cpu_mask_nr_tbl) 164 + dsll t1, 32 165 + daddu t1, t1, AT 166 + .set pop 167 + #endif 150 168 1: lw t2,(t1) 151 169 nop 152 170 and t2,t0 ··· 213 195 /* 214 196 * Find irq with highest priority 215 197 */ 216 - PTR_LA t1,asic_mask_nr_tbl 198 + # open coded PTR_LA t1,asic_mask_nr_tbl 199 + #if (_MIPS_SZPTR == 32) 200 + # open coded la t1, asic_mask_nr_tbl 201 + lui t1, %hi(asic_mask_nr_tbl) 202 + addiu t1, %lo(asic_mask_nr_tbl) 203 + 204 + #endif 205 + #if (_MIPS_SZPTR == 64) 206 + # open coded dla t1, asic_mask_nr_tbl 207 + .set push 208 + .set noat 209 + lui t1, %highest(asic_mask_nr_tbl) 210 + lui AT, %hi(asic_mask_nr_tbl) 211 + daddiu t1, t1, %higher(asic_mask_nr_tbl) 212 + daddiu AT, AT, %lo(asic_mask_nr_tbl) 213 + dsll t1, 32 214 + daddu t1, t1, AT 215 + .set pop 216 + #endif 217 217 2: lw t2,(t1) 218 218 nop 219 219 and t2,t0
+1
arch/mips/include/asm/asmmacro.h
··· 157 157 ldc1 $f28, THREAD_FPR28(\thread) 158 158 ldc1 $f30, THREAD_FPR30(\thread) 159 159 ctc1 \tmp, fcr31 160 + .set pop 160 161 .endm 161 162 162 163 .macro fpu_restore_16odd thread
+2 -2
arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
··· 15 15 static inline bool __should_swizzle_bits(volatile void *a) 16 16 { 17 17 extern const bool octeon_should_swizzle_table[]; 18 + u64 did = ((u64)(uintptr_t)a >> 40) & 0xff; 18 19 19 - unsigned long did = ((unsigned long)a >> 40) & 0xff; 20 20 return octeon_should_swizzle_table[did]; 21 21 } 22 22 ··· 29 29 30 30 #define __should_swizzle_bits(a) false 31 31 32 - static inline bool __should_swizzle_addr(unsigned long p) 32 + static inline bool __should_swizzle_addr(u64 p) 33 33 { 34 34 /* boot bus? */ 35 35 return ((p >> 40) & 0xff) == 0;
+2
arch/mips/include/asm/mach-paravirt/kernel-entry-init.h
··· 11 11 #define CP0_EBASE $15, 1 12 12 13 13 .macro kernel_entry_setup 14 + #ifdef CONFIG_SMP 14 15 mfc0 t0, CP0_EBASE 15 16 andi t0, t0, 0x3ff # CPUNum 16 17 beqz t0, 1f 17 18 # CPUs other than zero goto smp_bootstrap 18 19 j smp_bootstrap 20 + #endif /* CONFIG_SMP */ 19 21 20 22 1: 21 23 .endm
+11
arch/mips/include/asm/mips-cm.h
··· 458 458 static inline unsigned int mips_cm_max_vp_width(void) 459 459 { 460 460 extern int smp_num_siblings; 461 + uint32_t cfg; 461 462 462 463 if (mips_cm_revision() >= CM_REV_CM3) 463 464 return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK; 465 + 466 + if (mips_cm_present()) { 467 + /* 468 + * We presume that all cores in the system will have the same 469 + * number of VP(E)s, and if that ever changes then this will 470 + * need revisiting. 471 + */ 472 + cfg = read_gcr_cl_config() & CM_GCR_Cx_CONFIG_PVPE_MSK; 473 + return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; 474 + } 464 475 465 476 if (IS_ENABLED(CONFIG_SMP)) 466 477 return smp_num_siblings;
-2
arch/mips/include/asm/mipsregs.h
··· 660 660 661 661 #define MIPS_CONF7_IAR (_ULCAST_(1) << 10) 662 662 #define MIPS_CONF7_AR (_ULCAST_(1) << 16) 663 - /* FTLB probability bits for R6 */ 664 - #define MIPS_CONF7_FTLBP_SHIFT (18) 665 663 666 664 /* WatchLo* register definitions */ 667 665 #define MIPS_WATCHLO_IRW (_ULCAST_(0x7) << 0)
-1
arch/mips/include/asm/uprobes.h
··· 36 36 unsigned long resume_epc; 37 37 u32 insn[2]; 38 38 u32 ixol[2]; 39 - union mips_instruction orig_inst[MAX_UINSN_BYTES / 4]; 40 39 }; 41 40 42 41 struct arch_uprobe_task {
+30 -23
arch/mips/kernel/cpu-probe.c
··· 352 352 static int mips_ftlb_disabled; 353 353 static int mips_has_ftlb_configured; 354 354 355 - static int set_ftlb_enable(struct cpuinfo_mips *c, int enable); 355 + enum ftlb_flags { 356 + FTLB_EN = 1 << 0, 357 + FTLB_SET_PROB = 1 << 1, 358 + }; 359 + 360 + static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags); 356 361 357 362 static int __init ftlb_disable(char *s) 358 363 { ··· 375 370 pr_warn("Can't turn FTLB off\n"); 376 371 return 1; 377 372 } 378 - 379 - back_to_back_c0_hazard(); 380 373 381 374 config4 = read_c0_config4(); 382 375 ··· 534 531 return 3; 535 532 } 536 533 537 - static int set_ftlb_enable(struct cpuinfo_mips *c, int enable) 534 + static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags) 538 535 { 539 536 unsigned int config; 540 537 ··· 545 542 case CPU_P6600: 546 543 /* proAptiv & related cores use Config6 to enable the FTLB */ 547 544 config = read_c0_config6(); 548 - /* Clear the old probability value */ 549 - config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT); 550 - if (enable) 551 - /* Enable FTLB */ 552 - write_c0_config6(config | 553 - (calculate_ftlb_probability(c) 554 - << MIPS_CONF6_FTLBP_SHIFT) 555 - | MIPS_CONF6_FTLBEN); 545 + 546 + if (flags & FTLB_EN) 547 + config |= MIPS_CONF6_FTLBEN; 556 548 else 557 - /* Disable FTLB */ 558 - write_c0_config6(config & ~MIPS_CONF6_FTLBEN); 549 + config &= ~MIPS_CONF6_FTLBEN; 550 + 551 + if (flags & FTLB_SET_PROB) { 552 + config &= ~(3 << MIPS_CONF6_FTLBP_SHIFT); 553 + config |= calculate_ftlb_probability(c) 554 + << MIPS_CONF6_FTLBP_SHIFT; 555 + } 556 + 557 + write_c0_config6(config); 558 + back_to_back_c0_hazard(); 559 559 break; 560 560 case CPU_I6400: 561 - /* I6400 & related cores use Config7 to configure FTLB */ 562 - config = read_c0_config7(); 563 - /* Clear the old probability value */ 564 - config &= ~(3 << MIPS_CONF7_FTLBP_SHIFT); 565 - write_c0_config7(config | (calculate_ftlb_probability(c) 566 - << MIPS_CONF7_FTLBP_SHIFT)); 567 - break; 561 + /* 
There's no way to disable the FTLB */ 562 + if (!(flags & FTLB_EN)) 563 + return 1; 564 + return 0; 568 565 case CPU_LOONGSON3: 569 566 /* Flush ITLB, DTLB, VTLB and FTLB */ 570 567 write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB | 571 568 LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB); 572 569 /* Loongson-3 cores use Config6 to enable the FTLB */ 573 570 config = read_c0_config6(); 574 - if (enable) 571 + if (flags & FTLB_EN) 575 572 /* Enable FTLB */ 576 573 write_c0_config6(config & ~MIPS_CONF6_FTLBDIS); 577 574 else ··· 791 788 PAGE_SIZE, config4); 792 789 /* Switch FTLB off */ 793 790 set_ftlb_enable(c, 0); 791 + mips_ftlb_disabled = 1; 794 792 break; 795 793 } 796 794 c->tlbsizeftlbsets = 1 << ··· 856 852 c->scache.flags = MIPS_CACHE_NOT_PRESENT; 857 853 858 854 /* Enable FTLB if present and not disabled */ 859 - set_ftlb_enable(c, !mips_ftlb_disabled); 855 + set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN); 860 856 861 857 ok = decode_config0(c); /* Read Config registers. */ 862 858 BUG_ON(!ok); /* Arch spec violation! */ ··· 905 901 } 906 902 } 907 903 } 904 + 905 + /* configure the FTLB write probability */ 906 + set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB); 908 907 909 908 mips_probe_watch_registers(c); 910 909
+1 -2
arch/mips/kernel/genex.S
··· 142 142 PTR_LA k1, __r4k_wait 143 143 ori k0, 0x1f /* 32 byte rollback region */ 144 144 xori k0, 0x1f 145 - bne k0, k1, 9f 145 + bne k0, k1, \handler 146 146 MTC0 k0, CP0_EPC 147 - 9: 148 147 .set pop 149 148 .endm 150 149
+2
arch/mips/kernel/mips-r2-to-r6-emul.c
··· 1164 1164 regs->regs[31] = r31; 1165 1165 regs->cp0_epc = epc; 1166 1166 if (!used_math()) { /* First time FPU user. */ 1167 + preempt_disable(); 1167 1168 err = init_fpu(); 1169 + preempt_enable(); 1168 1170 set_used_math(); 1169 1171 } 1170 1172 lose_fpu(1); /* Save FPU state for the emulator. */
+4 -4
arch/mips/kernel/process.c
··· 605 605 return -EOPNOTSUPP; 606 606 607 607 /* Avoid inadvertently triggering emulation */ 608 - if ((value & PR_FP_MODE_FR) && cpu_has_fpu && 609 - !(current_cpu_data.fpu_id & MIPS_FPIR_F64)) 608 + if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu && 609 + !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64)) 610 610 return -EOPNOTSUPP; 611 - if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) 611 + if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre) 612 612 return -EOPNOTSUPP; 613 613 614 614 /* FR = 0 not supported in MIPS R6 */ 615 - if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6) 615 + if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6) 616 616 return -EOPNOTSUPP; 617 617 618 618 /* Proceed with the mode switch */
+8 -1
arch/mips/kernel/setup.c
··· 87 87 int x = boot_mem_map.nr_map; 88 88 int i; 89 89 90 + /* 91 + * If the region reaches the top of the physical address space, adjust 92 + * the size slightly so that (start + size) doesn't overflow 93 + */ 94 + if (start + size - 1 == (phys_addr_t)ULLONG_MAX) 95 + --size; 96 + 90 97 /* Sanity check */ 91 98 if (start + size < start) { 92 99 pr_warn("Trying to add an invalid memory region, skipped\n"); ··· 764 757 device_tree_init(); 765 758 sparse_init(); 766 759 plat_swiotlb_setup(); 767 - paging_init(); 768 760 769 761 dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); 770 762 /* Tell bootmem about cma reserved memblock section */ ··· 876 870 prefill_possible_map(); 877 871 878 872 cpu_cache_init(); 873 + paging_init(); 879 874 } 880 875 881 876 unsigned long kernelsp[NR_CPUS];
+1 -1
arch/mips/kernel/smp-cps.c
··· 513 513 * in which case the CPC will refuse to power down the core. 514 514 */ 515 515 do { 516 - mips_cm_lock_other(core, vpe_id); 516 + mips_cm_lock_other(core, 0); 517 517 mips_cpc_lock_other(core); 518 518 stat = read_cpc_co_stat_conf(); 519 519 stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
+3 -4
arch/mips/kernel/smp.c
··· 322 322 cpumask_set_cpu(cpu, &cpu_coherent_mask); 323 323 notify_cpu_starting(cpu); 324 324 325 + cpumask_set_cpu(cpu, &cpu_callin_map); 326 + synchronise_count_slave(cpu); 327 + 325 328 set_cpu_online(cpu, true); 326 329 327 330 set_cpu_sibling_map(cpu); 328 331 set_cpu_core_map(cpu); 329 332 330 333 calculate_cpu_foreign_map(); 331 - 332 - cpumask_set_cpu(cpu, &cpu_callin_map); 333 - 334 - synchronise_count_slave(cpu); 335 334 336 335 /* 337 336 * irq will be enabled in ->smp_finish(), enabling it too early
+4 -23
arch/mips/kernel/uprobes.c
··· 157 157 int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) 158 158 { 159 159 struct uprobe_task *utask = current->utask; 160 - union mips_instruction insn; 161 160 162 161 /* 163 162 * Now find the EPC where to resume after the breakpoint has been ··· 167 168 unsigned long epc; 168 169 169 170 epc = regs->cp0_epc; 170 - __compute_return_epc_for_insn(regs, insn); 171 + __compute_return_epc_for_insn(regs, 172 + (union mips_instruction) aup->insn[0]); 171 173 aup->resume_epc = regs->cp0_epc; 172 174 } 173 - 174 175 utask->autask.saved_trap_nr = current->thread.trap_nr; 175 176 current->thread.trap_nr = UPROBE_TRAP_NR; 176 177 regs->cp0_epc = current->utask->xol_vaddr; ··· 221 222 return NOTIFY_DONE; 222 223 223 224 switch (val) { 224 - case DIE_BREAK: 225 + case DIE_UPROBE: 225 226 if (uprobe_pre_sstep_notifier(regs)) 226 227 return NOTIFY_STOP; 227 228 break; ··· 256 257 ra = regs->regs[31]; 257 258 258 259 /* Replace the return address with the trampoline address */ 259 - regs->regs[31] = ra; 260 + regs->regs[31] = trampoline_vaddr; 260 261 261 262 return ra; 262 263 } ··· 277 278 unsigned long vaddr) 278 279 { 279 280 return uprobe_write_opcode(mm, vaddr, UPROBE_SWBP_INSN); 280 - } 281 - 282 - /** 283 - * set_orig_insn - Restore the original instruction. 284 - * @mm: the probed process address space. 285 - * @auprobe: arch specific probepoint information. 286 - * @vaddr: the virtual address to insert the opcode. 287 - * 288 - * For mm @mm, restore the original opcode (opcode) at @vaddr. 289 - * Return 0 (success) or a negative errno. 290 - * 291 - * This overrides the weak version in kernel/events/uprobes.c. 292 - */ 293 - int set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, 294 - unsigned long vaddr) 295 - { 296 - return uprobe_write_opcode(mm, vaddr, 297 - *(uprobe_opcode_t *)&auprobe->orig_inst[0].word); 298 281 } 299 282 300 283 void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+4 -4
arch/mips/kernel/vdso.c
··· 39 39 static void __init init_vdso_image(struct mips_vdso_image *image) 40 40 { 41 41 unsigned long num_pages, i; 42 + unsigned long data_pfn; 42 43 43 44 BUG_ON(!PAGE_ALIGNED(image->data)); 44 45 BUG_ON(!PAGE_ALIGNED(image->size)); 45 46 46 47 num_pages = image->size / PAGE_SIZE; 47 48 48 - for (i = 0; i < num_pages; i++) { 49 - image->mapping.pages[i] = 50 - virt_to_page(image->data + (i * PAGE_SIZE)); 51 - } 49 + data_pfn = __phys_to_pfn(__pa_symbol(image->data)); 50 + for (i = 0; i < num_pages; i++) 51 + image->mapping.pages[i] = pfn_to_page(data_pfn + i); 52 52 } 53 53 54 54 static int __init init_vdso(void)
+1
arch/mips/math-emu/dsemul.c
··· 298 298 /* Set EPC to return to post-branch instruction */ 299 299 xcp->cp0_epc = current->thread.bd_emu_cont_pc; 300 300 pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc); 301 + MIPS_FPU_EMU_INC_STATS(ds_emul); 301 302 return true; 302 303 }
+1 -1
arch/mips/mm/c-r4k.c
··· 800 800 * If address-based cache ops don't require an SMP call, then 801 801 * use them exclusively for small flushes. 802 802 */ 803 - size = start - end; 803 + size = end - start; 804 804 cache_size = icache_size; 805 805 if (!cpu_has_ic_fills_f_dc) { 806 806 size *= 2;
+9 -7
arch/mips/mm/init.c
··· 261 261 { 262 262 struct maar_config cfg[BOOT_MEM_MAP_MAX]; 263 263 unsigned i, num_configured, num_cfg = 0; 264 - phys_addr_t skip; 265 264 266 265 for (i = 0; i < boot_mem_map.nr_map; i++) { 267 266 switch (boot_mem_map.map[i].type) { ··· 271 272 continue; 272 273 } 273 274 274 - skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff); 275 - 275 + /* Round lower up */ 276 276 cfg[num_cfg].lower = boot_mem_map.map[i].addr; 277 - cfg[num_cfg].lower += skip; 277 + cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff; 278 278 279 - cfg[num_cfg].upper = cfg[num_cfg].lower; 280 - cfg[num_cfg].upper += boot_mem_map.map[i].size - 1; 281 - cfg[num_cfg].upper -= skip; 279 + /* Round upper down */ 280 + cfg[num_cfg].upper = boot_mem_map.map[i].addr + 281 + boot_mem_map.map[i].size; 282 + cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1; 282 283 283 284 cfg[num_cfg].attrs = MIPS_MAAR_S; 284 285 num_cfg++; ··· 439 440 { 440 441 #ifdef CONFIG_HIGHMEM 441 442 unsigned long tmp; 443 + 444 + if (cpu_has_dc_aliases) 445 + return; 442 446 443 447 for (tmp = highstart_pfn; tmp < highend_pfn; tmp++) { 444 448 struct page *page = pfn_to_page(tmp);
+7 -1
arch/mips/mti-malta/malta-setup.c
··· 39 39 #include <linux/console.h> 40 40 #endif 41 41 42 + #define ROCIT_CONFIG_GEN0 0x1f403000 43 + #define ROCIT_CONFIG_GEN0_PCI_IOCU BIT(7) 44 + 42 45 extern void malta_be_init(void); 43 46 extern int malta_be_handler(struct pt_regs *regs, int is_fixup); 44 47 ··· 110 107 static int __init plat_enable_iocoherency(void) 111 108 { 112 109 int supported = 0; 110 + u32 cfg; 111 + 113 112 if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) { 114 113 if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { 115 114 BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; ··· 134 129 } else if (mips_cm_numiocu() != 0) { 135 130 /* Nothing special needs to be done to enable coherency */ 136 131 pr_info("CMP IOCU detected\n"); 137 - if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) { 132 + cfg = __raw_readl((u32 *)CKSEG1ADDR(ROCIT_CONFIG_GEN0)); 133 + if (!(cfg & ROCIT_CONFIG_GEN0_PCI_IOCU)) { 138 134 pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n"); 139 135 return 0; 140 136 }
+9 -2
arch/powerpc/platforms/powernv/pci-ioda.c
··· 124 124 r->start < (phb->ioda.m64_base + phb->ioda.m64_size)); 125 125 } 126 126 127 + static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags) 128 + { 129 + unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH); 130 + 131 + return (resource_flags & flags) == flags; 132 + } 133 + 127 134 static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no) 128 135 { 129 136 phb->ioda.pe_array[pe_no].phb = phb; ··· 2878 2871 res = &pdev->resource[i + PCI_IOV_RESOURCES]; 2879 2872 if (!res->flags || res->parent) 2880 2873 continue; 2881 - if (!pnv_pci_is_m64(phb, res)) { 2874 + if (!pnv_pci_is_m64_flags(res->flags)) { 2882 2875 dev_warn(&pdev->dev, "Don't support SR-IOV with" 2883 2876 " non M64 VF BAR%d: %pR. \n", 2884 2877 i, res); ··· 3103 3096 * alignment for any 64-bit resource, PCIe doesn't care and 3104 3097 * bridges only do 64-bit prefetchable anyway. 3105 3098 */ 3106 - if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64)) 3099 + if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type)) 3107 3100 return phb->ioda.m64_segsize; 3108 3101 if (type & IORESOURCE_MEM) 3109 3102 return phb->ioda.m32_segsize;
+1 -1
arch/sh/include/asm/atomic-llsc.h
··· 60 60 " movco.l %0, @%3 \n" \ 61 61 " bf 1b \n" \ 62 62 " synco \n" \ 63 - : "=&z" (temp), "=&z" (res) \ 63 + : "=&z" (temp), "=&r" (res) \ 64 64 : "r" (i), "r" (&v->counter) \ 65 65 : "t"); \ 66 66 \
+1
arch/sparc/include/asm/page_64.h
··· 25 25 #define HPAGE_MASK (~(HPAGE_SIZE - 1UL)) 26 26 #define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 27 27 #define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 28 + #define REAL_HPAGE_PER_HPAGE (_AC(1,UL) << (HPAGE_SHIFT - REAL_HPAGE_SHIFT)) 28 29 #endif 29 30 30 31 #ifndef __ASSEMBLY__
+2
arch/sparc/include/asm/smp_64.h
··· 43 43 int hard_smp_processor_id(void); 44 44 #define raw_smp_processor_id() (current_thread_info()->cpu) 45 45 46 + void smp_fill_in_cpu_possible_map(void); 46 47 void smp_fill_in_sib_core_maps(void); 47 48 void cpu_play_dead(void); 48 49 ··· 73 72 #define smp_fill_in_sib_core_maps() do { } while (0) 74 73 #define smp_fetch_global_regs() do { } while (0) 75 74 #define smp_fetch_global_pmu() do { } while (0) 75 + #define smp_fill_in_cpu_possible_map() do { } while (0) 76 76 77 77 #endif /* !(CONFIG_SMP) */ 78 78
+26
arch/sparc/kernel/setup_64.c
··· 31 31 #include <linux/initrd.h> 32 32 #include <linux/module.h> 33 33 #include <linux/start_kernel.h> 34 + #include <linux/bootmem.h> 34 35 35 36 #include <asm/io.h> 36 37 #include <asm/processor.h> ··· 51 50 #include <asm/elf.h> 52 51 #include <asm/mdesc.h> 53 52 #include <asm/cacheflush.h> 53 + #include <asm/dma.h> 54 + #include <asm/irq.h> 54 55 55 56 #ifdef CONFIG_IP_PNP 56 57 #include <net/ipconfig.h> ··· 593 590 pause_patch(); 594 591 } 595 592 593 + void __init alloc_irqstack_bootmem(void) 594 + { 595 + unsigned int i, node; 596 + 597 + for_each_possible_cpu(i) { 598 + node = cpu_to_node(i); 599 + 600 + softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node), 601 + THREAD_SIZE, 602 + THREAD_SIZE, 0); 603 + hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node), 604 + THREAD_SIZE, 605 + THREAD_SIZE, 0); 606 + } 607 + } 608 + 596 609 void __init setup_arch(char **cmdline_p) 597 610 { 598 611 /* Initialize PROM console and command line. */ ··· 669 650 670 651 paging_init(); 671 652 init_sparc64_elf_hwcap(); 653 + smp_fill_in_cpu_possible_map(); 654 + /* 655 + * Once the OF device tree and MDESC have been setup and nr_cpus has 656 + * been parsed, we know the list of possible cpus. Therefore we can 657 + * allocate the IRQ stacks. 658 + */ 659 + alloc_irqstack_bootmem(); 672 660 } 673 661 674 662 extern int stop_a_enabled;
+14
arch/sparc/kernel/smp_64.c
··· 1227 1227 xcall_deliver_impl = hypervisor_xcall_deliver; 1228 1228 } 1229 1229 1230 + void __init smp_fill_in_cpu_possible_map(void) 1231 + { 1232 + int possible_cpus = num_possible_cpus(); 1233 + int i; 1234 + 1235 + if (possible_cpus > nr_cpu_ids) 1236 + possible_cpus = nr_cpu_ids; 1237 + 1238 + for (i = 0; i < possible_cpus; i++) 1239 + set_cpu_possible(i, true); 1240 + for (; i < NR_CPUS; i++) 1241 + set_cpu_possible(i, false); 1242 + } 1243 + 1230 1244 void smp_fill_in_sib_core_maps(void) 1231 1245 { 1232 1246 unsigned int i;
+1
arch/sparc/mm/fault_64.c
··· 484 484 tsb_grow(mm, MM_TSB_BASE, mm_rss); 485 485 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 486 486 mm_rss = mm->context.hugetlb_pte_count + mm->context.thp_pte_count; 487 + mm_rss *= REAL_HPAGE_PER_HPAGE; 487 488 if (unlikely(mm_rss > 488 489 mm->context.tsb_block[MM_TSB_HUGE].tsb_rss_limit)) { 489 490 if (mm->context.tsb_block[MM_TSB_HUGE].tsb)
+3 -19
arch/sparc/mm/init_64.c
··· 1160 1160 return numa_latency[from][to]; 1161 1161 } 1162 1162 1163 - static int find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) 1163 + static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) 1164 1164 { 1165 1165 int i; 1166 1166 ··· 1173 1173 return i; 1174 1174 } 1175 1175 1176 - static void find_numa_latencies_for_group(struct mdesc_handle *md, u64 grp, 1177 - int index) 1176 + static void __init find_numa_latencies_for_group(struct mdesc_handle *md, 1177 + u64 grp, int index) 1178 1178 { 1179 1179 u64 arc; 1180 1180 ··· 2081 2081 { 2082 2082 unsigned long end_pfn, shift, phys_base; 2083 2083 unsigned long real_end, i; 2084 - int node; 2085 2084 2086 2085 setup_page_offset(); 2087 2086 ··· 2248 2249 2249 2250 /* Setup bootmem... */ 2250 2251 last_valid_pfn = end_pfn = bootmem_init(phys_base); 2251 - 2252 - /* Once the OF device tree and MDESC have been setup, we know 2253 - * the list of possible cpus. Therefore we can allocate the 2254 - * IRQ stacks. 2255 - */ 2256 - for_each_possible_cpu(i) { 2257 - node = cpu_to_node(i); 2258 - 2259 - softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node), 2260 - THREAD_SIZE, 2261 - THREAD_SIZE, 0); 2262 - hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node), 2263 - THREAD_SIZE, 2264 - THREAD_SIZE, 0); 2265 - } 2266 2252 2267 2253 kernel_physical_mapping_init(); 2268 2254
+31 -4
arch/sparc/mm/tlb.c
··· 174 174 return; 175 175 176 176 if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) { 177 - if (pmd_val(pmd) & _PAGE_PMD_HUGE) 178 - mm->context.thp_pte_count++; 179 - else 180 - mm->context.thp_pte_count--; 177 + /* 178 + * Note that this routine only sets pmds for THP pages. 179 + * Hugetlb pages are handled elsewhere. We need to check 180 + * for huge zero page. Huge zero pages are like hugetlb 181 + * pages in that there is no RSS, but there is the need 182 + * for TSB entries. So, huge zero page counts go into 183 + * hugetlb_pte_count. 184 + */ 185 + if (pmd_val(pmd) & _PAGE_PMD_HUGE) { 186 + if (is_huge_zero_page(pmd_page(pmd))) 187 + mm->context.hugetlb_pte_count++; 188 + else 189 + mm->context.thp_pte_count++; 190 + } else { 191 + if (is_huge_zero_page(pmd_page(orig))) 192 + mm->context.hugetlb_pte_count--; 193 + else 194 + mm->context.thp_pte_count--; 195 + } 181 196 182 197 /* Do not try to allocate the TSB hash table if we 183 198 * don't have one already. We have various locks held ··· 219 204 } 220 205 } 221 206 207 + /* 208 + * This routine is only called when splitting a THP 209 + */ 222 210 void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address, 223 211 pmd_t *pmdp) 224 212 { ··· 231 213 232 214 set_pmd_at(vma->vm_mm, address, pmdp, entry); 233 215 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); 216 + 217 + /* 218 + * set_pmd_at() will not be called in a way to decrement 219 + * thp_pte_count when splitting a THP, so do it now. 220 + * Sanity check pmd before doing the actual decrement. 221 + */ 222 + if ((pmd_val(entry) & _PAGE_PMD_HUGE) && 223 + !is_huge_zero_page(pmd_page(entry))) 224 + (vma->vm_mm)->context.thp_pte_count--; 234 225 } 235 226 236 227 void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+12 -6
arch/sparc/mm/tsb.c
··· 469 469 470 470 int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 471 471 { 472 + unsigned long mm_rss = get_mm_rss(mm); 472 473 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 473 - unsigned long total_huge_pte_count; 474 + unsigned long saved_hugetlb_pte_count; 475 + unsigned long saved_thp_pte_count; 474 476 #endif 475 477 unsigned int i; 476 478 ··· 485 483 * will re-increment the counters as the parent PTEs are 486 484 * copied into the child address space. 487 485 */ 488 - total_huge_pte_count = mm->context.hugetlb_pte_count + 489 - mm->context.thp_pte_count; 486 + saved_hugetlb_pte_count = mm->context.hugetlb_pte_count; 487 + saved_thp_pte_count = mm->context.thp_pte_count; 490 488 mm->context.hugetlb_pte_count = 0; 491 489 mm->context.thp_pte_count = 0; 490 + 491 + mm_rss -= saved_thp_pte_count * (HPAGE_SIZE / PAGE_SIZE); 492 492 #endif 493 493 494 494 /* copy_mm() copies over the parent's mm_struct before calling ··· 503 499 /* If this is fork, inherit the parent's TSB size. We would 504 500 * grow it to that size on the first page fault anyways. 505 501 */ 506 - tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm)); 502 + tsb_grow(mm, MM_TSB_BASE, mm_rss); 507 503 508 504 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 509 - if (unlikely(total_huge_pte_count)) 510 - tsb_grow(mm, MM_TSB_HUGE, total_huge_pte_count); 505 + if (unlikely(saved_hugetlb_pte_count + saved_thp_pte_count)) 506 + tsb_grow(mm, MM_TSB_HUGE, 507 + (saved_hugetlb_pte_count + saved_thp_pte_count) * 508 + REAL_HPAGE_PER_HPAGE); 511 509 #endif 512 510 513 511 if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
+2 -2
arch/x86/entry/entry_64.S
··· 1002 1002 testb $3, CS+8(%rsp) 1003 1003 jz .Lerror_kernelspace 1004 1004 1005 - .Lerror_entry_from_usermode_swapgs: 1006 1005 /* 1007 1006 * We entered from user mode or we're pretending to have entered 1008 1007 * from user mode due to an IRET fault. ··· 1044 1045 * gsbase and proceed. We'll fix up the exception and land in 1045 1046 * .Lgs_change's error handler with kernel gsbase. 1046 1047 */ 1047 - jmp .Lerror_entry_from_usermode_swapgs 1048 + SWAPGS 1049 + jmp .Lerror_entry_done 1048 1050 1049 1051 .Lbstep_iret: 1050 1052 /* Fix truncated RIP */
+1 -1
arch/x86/entry/vdso/vdso2c.h
··· 22 22 23 23 ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff)); 24 24 25 - if (hdr->e_type != ET_DYN) 25 + if (GET_LE(&hdr->e_type) != ET_DYN) 26 26 fail("input is not a shared object\n"); 27 27 28 28 /* Walk the segment table. */
+3 -2
arch/x86/events/intel/bts.c
··· 455 455 * The only surefire way of knowing if this NMI is ours is by checking 456 456 * the write ptr against the PMI threshold. 457 457 */ 458 - if (ds->bts_index >= ds->bts_interrupt_threshold) 458 + if (ds && (ds->bts_index >= ds->bts_interrupt_threshold)) 459 459 handled = 1; 460 460 461 461 /* ··· 584 584 if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts) 585 585 return -ENODEV; 586 586 587 - bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE; 587 + bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE | 588 + PERF_PMU_CAP_EXCLUSIVE; 588 589 bts_pmu.task_ctx_nr = perf_sw_context; 589 590 bts_pmu.event_init = bts_event_init; 590 591 bts_pmu.add = bts_event_add;
+1 -1
arch/x86/include/asm/tlbflush.h
··· 81 81 /* Initialize cr4 shadow for this CPU. */ 82 82 static inline void cr4_init_shadow(void) 83 83 { 84 - this_cpu_write(cpu_tlbstate.cr4, __read_cr4()); 84 + this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe()); 85 85 } 86 86 87 87 /* Set in this cpu's CR4. */
+11 -12
arch/x86/kernel/cpu/common.c
··· 804 804 identify_cpu_without_cpuid(c); 805 805 806 806 /* cyrix could have cpuid enabled via c_identify()*/ 807 - if (!have_cpuid_p()) 808 - return; 807 + if (have_cpuid_p()) { 808 + cpu_detect(c); 809 + get_cpu_vendor(c); 810 + get_cpu_cap(c); 809 811 810 - cpu_detect(c); 811 - get_cpu_vendor(c); 812 - get_cpu_cap(c); 812 + if (this_cpu->c_early_init) 813 + this_cpu->c_early_init(c); 813 814 814 - if (this_cpu->c_early_init) 815 - this_cpu->c_early_init(c); 815 + c->cpu_index = 0; 816 + filter_cpuid_features(c, false); 816 817 817 - c->cpu_index = 0; 818 - filter_cpuid_features(c, false); 819 - 820 - if (this_cpu->c_bsp_init) 821 - this_cpu->c_bsp_init(c); 818 + if (this_cpu->c_bsp_init) 819 + this_cpu->c_bsp_init(c); 820 + } 822 821 823 822 setup_force_cpu_cap(X86_FEATURE_ALWAYS); 824 823 fpu__init_system(c);
+1 -3
arch/x86/kernel/setup.c
··· 1137 1137 * auditing all the early-boot CR4 manipulation would be needed to 1138 1138 * rule it out. 1139 1139 */ 1140 - if (boot_cpu_data.cpuid_level >= 0) 1141 - /* A CPU has %cr4 if and only if it has CPUID. */ 1142 - mmu_cr4_features = __read_cr4(); 1140 + mmu_cr4_features = __read_cr4_safe(); 1143 1141 1144 1142 memblock_set_current_limit(get_max_mapped()); 1145 1143
+11 -10
arch/x86/mm/pageattr.c
··· 917 917 } 918 918 } 919 919 920 - static int populate_pmd(struct cpa_data *cpa, 921 - unsigned long start, unsigned long end, 922 - unsigned num_pages, pud_t *pud, pgprot_t pgprot) 920 + static long populate_pmd(struct cpa_data *cpa, 921 + unsigned long start, unsigned long end, 922 + unsigned num_pages, pud_t *pud, pgprot_t pgprot) 923 923 { 924 - unsigned int cur_pages = 0; 924 + long cur_pages = 0; 925 925 pmd_t *pmd; 926 926 pgprot_t pmd_pgprot; 927 927 ··· 991 991 return num_pages; 992 992 } 993 993 994 - static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, 995 - pgprot_t pgprot) 994 + static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd, 995 + pgprot_t pgprot) 996 996 { 997 997 pud_t *pud; 998 998 unsigned long end; 999 - int cur_pages = 0; 999 + long cur_pages = 0; 1000 1000 pgprot_t pud_pgprot; 1001 1001 1002 1002 end = start + (cpa->numpages << PAGE_SHIFT); ··· 1052 1052 1053 1053 /* Map trailing leftover */ 1054 1054 if (start < end) { 1055 - int tmp; 1055 + long tmp; 1056 1056 1057 1057 pud = pud_offset(pgd, start); 1058 1058 if (pud_none(*pud)) ··· 1078 1078 pgprot_t pgprot = __pgprot(_KERNPG_TABLE); 1079 1079 pud_t *pud = NULL; /* shut up gcc */ 1080 1080 pgd_t *pgd_entry; 1081 - int ret; 1081 + long ret; 1082 1082 1083 1083 pgd_entry = cpa->pgd + pgd_index(addr); 1084 1084 ··· 1327 1327 1328 1328 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) 1329 1329 { 1330 - int ret, numpages = cpa->numpages; 1330 + unsigned long numpages = cpa->numpages; 1331 + int ret; 1331 1332 1332 1333 while (numpages) { 1333 1334 /*
+1 -1
arch/x86/platform/efi/efi_64.c
··· 245 245 * text and allocate a new stack because we can't rely on the 246 246 * stack pointer being < 4GB. 247 247 */ 248 - if (!IS_ENABLED(CONFIG_EFI_MIXED)) 248 + if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native()) 249 249 return 0; 250 250 251 251 /*
+14 -2
block/blk-mq.c
··· 296 296 if (ret) 297 297 return ERR_PTR(ret); 298 298 299 + /* 300 + * Check if the hardware context is actually mapped to anything. 301 + * If not tell the caller that it should skip this queue. 302 + */ 299 303 hctx = q->queue_hw_ctx[hctx_idx]; 304 + if (!blk_mq_hw_queue_mapped(hctx)) { 305 + ret = -EXDEV; 306 + goto out_queue_exit; 307 + } 300 308 ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask)); 301 309 302 310 blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx); 303 311 rq = __blk_mq_alloc_request(&alloc_data, rw, 0); 304 312 if (!rq) { 305 - blk_queue_exit(q); 306 - return ERR_PTR(-EWOULDBLOCK); 313 + ret = -EWOULDBLOCK; 314 + goto out_queue_exit; 307 315 } 308 316 309 317 return rq; 318 + 319 + out_queue_exit: 320 + blk_queue_exit(q); 321 + return ERR_PTR(ret); 310 322 } 311 323 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx); 312 324
+4 -2
block/blk-throttle.c
··· 780 780 /* 781 781 * If previous slice expired, start a new one otherwise renew/extend 782 782 * existing slice to make sure it is at least throtl_slice interval 783 - * long since now. 783 + * long since now. New slice is started only for empty throttle group. 784 + * If there is queued bio, that means there should be an active 785 + * slice and it should be extended instead. 784 786 */ 785 - if (throtl_slice_used(tg, rw)) 787 + if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw])) 786 788 throtl_start_new_slice(tg, rw); 787 789 else { 788 790 if (time_before(tg->slice_end[rw], jiffies + throtl_slice))
+24 -17
crypto/rsa-pkcs1pad.c
··· 298 298 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); 299 299 struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm); 300 300 struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req); 301 + unsigned int dst_len; 301 302 unsigned int pos; 302 - 303 - if (err == -EOVERFLOW) 304 - /* Decrypted value had no leading 0 byte */ 305 - err = -EINVAL; 303 + u8 *out_buf; 306 304 307 305 if (err) 308 306 goto done; 309 307 310 - if (req_ctx->child_req.dst_len != ctx->key_size - 1) { 311 - err = -EINVAL; 308 + err = -EINVAL; 309 + dst_len = req_ctx->child_req.dst_len; 310 + if (dst_len < ctx->key_size - 1) 312 311 goto done; 312 + 313 + out_buf = req_ctx->out_buf; 314 + if (dst_len == ctx->key_size) { 315 + if (out_buf[0] != 0x00) 316 + /* Decrypted value had no leading 0 byte */ 317 + goto done; 318 + 319 + dst_len--; 320 + out_buf++; 313 321 } 314 322 315 - if (req_ctx->out_buf[0] != 0x02) { 316 - err = -EINVAL; 323 + if (out_buf[0] != 0x02) 317 324 goto done; 318 - } 319 - for (pos = 1; pos < req_ctx->child_req.dst_len; pos++) 320 - if (req_ctx->out_buf[pos] == 0x00) 325 + 326 + for (pos = 1; pos < dst_len; pos++) 327 + if (out_buf[pos] == 0x00) 321 328 break; 322 - if (pos < 9 || pos == req_ctx->child_req.dst_len) { 323 - err = -EINVAL; 329 + if (pos < 9 || pos == dst_len) 324 330 goto done; 325 - } 326 331 pos++; 327 332 328 - if (req->dst_len < req_ctx->child_req.dst_len - pos) 333 + err = 0; 334 + 335 + if (req->dst_len < dst_len - pos) 329 336 err = -EOVERFLOW; 330 - req->dst_len = req_ctx->child_req.dst_len - pos; 337 + req->dst_len = dst_len - pos; 331 338 332 339 if (!err) 333 340 sg_copy_from_buffer(req->dst, 334 341 sg_nents_for_len(req->dst, req->dst_len), 335 - req_ctx->out_buf + pos, req->dst_len); 342 + out_buf + pos, req->dst_len); 336 343 337 344 done: 338 345 kzfree(req_ctx->out_buf);
+28 -20
drivers/acpi/nfit/core.c
··· 94 94 return to_acpi_device(acpi_desc->dev); 95 95 } 96 96 97 - static int xlat_status(void *buf, unsigned int cmd) 97 + static int xlat_status(void *buf, unsigned int cmd, u32 status) 98 98 { 99 99 struct nd_cmd_clear_error *clear_err; 100 100 struct nd_cmd_ars_status *ars_status; 101 - struct nd_cmd_ars_start *ars_start; 102 - struct nd_cmd_ars_cap *ars_cap; 103 101 u16 flags; 104 102 105 103 switch (cmd) { 106 104 case ND_CMD_ARS_CAP: 107 - ars_cap = buf; 108 - if ((ars_cap->status & 0xffff) == NFIT_ARS_CAP_NONE) 105 + if ((status & 0xffff) == NFIT_ARS_CAP_NONE) 109 106 return -ENOTTY; 110 107 111 108 /* Command failed */ 112 - if (ars_cap->status & 0xffff) 109 + if (status & 0xffff) 113 110 return -EIO; 114 111 115 112 /* No supported scan types for this range */ 116 113 flags = ND_ARS_PERSISTENT | ND_ARS_VOLATILE; 117 - if ((ars_cap->status >> 16 & flags) == 0) 114 + if ((status >> 16 & flags) == 0) 118 115 return -ENOTTY; 119 116 break; 120 117 case ND_CMD_ARS_START: 121 - ars_start = buf; 122 118 /* ARS is in progress */ 123 - if ((ars_start->status & 0xffff) == NFIT_ARS_START_BUSY) 119 + if ((status & 0xffff) == NFIT_ARS_START_BUSY) 124 120 return -EBUSY; 125 121 126 122 /* Command failed */ 127 - if (ars_start->status & 0xffff) 123 + if (status & 0xffff) 128 124 return -EIO; 129 125 break; 130 126 case ND_CMD_ARS_STATUS: 131 127 ars_status = buf; 132 128 /* Command failed */ 133 - if (ars_status->status & 0xffff) 129 + if (status & 0xffff) 134 130 return -EIO; 135 131 /* Check extended status (Upper two bytes) */ 136 - if (ars_status->status == NFIT_ARS_STATUS_DONE) 132 + if (status == NFIT_ARS_STATUS_DONE) 137 133 return 0; 138 134 139 135 /* ARS is in progress */ 140 - if (ars_status->status == NFIT_ARS_STATUS_BUSY) 136 + if (status == NFIT_ARS_STATUS_BUSY) 141 137 return -EBUSY; 142 138 143 139 /* No ARS performed for the current boot */ 144 - if (ars_status->status == NFIT_ARS_STATUS_NONE) 140 + if (status == NFIT_ARS_STATUS_NONE) 145 141 return 
-EAGAIN; 146 142 147 143 /* ··· 145 149 * agent wants the scan to stop. If we didn't overflow 146 150 * then just continue with the returned results. 147 151 */ 148 - if (ars_status->status == NFIT_ARS_STATUS_INTR) { 152 + if (status == NFIT_ARS_STATUS_INTR) { 149 153 if (ars_status->flags & NFIT_ARS_F_OVERFLOW) 150 154 return -ENOSPC; 151 155 return 0; 152 156 } 153 157 154 158 /* Unknown status */ 155 - if (ars_status->status >> 16) 159 + if (status >> 16) 156 160 return -EIO; 157 161 break; 158 162 case ND_CMD_CLEAR_ERROR: 159 163 clear_err = buf; 160 - if (clear_err->status & 0xffff) 164 + if (status & 0xffff) 161 165 return -EIO; 162 166 if (!clear_err->cleared) 163 167 return -EIO; ··· 168 172 break; 169 173 } 170 174 175 + /* all other non-zero status results in an error */ 176 + if (status) 177 + return -EIO; 171 178 return 0; 172 179 } 173 180 ··· 185 186 struct nd_cmd_pkg *call_pkg = NULL; 186 187 const char *cmd_name, *dimm_name; 187 188 unsigned long cmd_mask, dsm_mask; 189 + u32 offset, fw_status = 0; 188 190 acpi_handle handle; 189 191 unsigned int func; 190 192 const u8 *uuid; 191 - u32 offset; 192 193 int rc, i; 193 194 194 195 func = cmd; ··· 316 317 out_obj->buffer.pointer + offset, out_size); 317 318 offset += out_size; 318 319 } 320 + 321 + /* 322 + * Set fw_status for all the commands with a known format to be 323 + * later interpreted by xlat_status(). 
324 + */ 325 + if (i >= 1 && ((cmd >= ND_CMD_ARS_CAP && cmd <= ND_CMD_CLEAR_ERROR) 326 + || (cmd >= ND_CMD_SMART && cmd <= ND_CMD_VENDOR))) 327 + fw_status = *(u32 *) out_obj->buffer.pointer; 328 + 319 329 if (offset + in_buf.buffer.length < buf_len) { 320 330 if (i >= 1) { 321 331 /* ··· 333 325 */ 334 326 rc = buf_len - offset - in_buf.buffer.length; 335 327 if (cmd_rc) 336 - *cmd_rc = xlat_status(buf, cmd); 328 + *cmd_rc = xlat_status(buf, cmd, fw_status); 337 329 } else { 338 330 dev_err(dev, "%s:%s underrun cmd: %s buf_len: %d out_len: %d\n", 339 331 __func__, dimm_name, cmd_name, buf_len, ··· 343 335 } else { 344 336 rc = 0; 345 337 if (cmd_rc) 346 - *cmd_rc = xlat_status(buf, cmd); 338 + *cmd_rc = xlat_status(buf, cmd, fw_status); 347 339 } 348 340 349 341 out:
+5 -1
drivers/base/regmap/regmap.c
··· 1475 1475 1476 1476 kfree(buf); 1477 1477 } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) { 1478 - regcache_drop_region(map, reg, reg + 1); 1478 + /* regcache_drop_region() takes lock that we already have, 1479 + * thus call map->cache_ops->drop() directly 1480 + */ 1481 + if (map->cache_ops && map->cache_ops->drop) 1482 + map->cache_ops->drop(map, reg, reg + 1); 1479 1483 } 1480 1484 1481 1485 trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
··· 1708 1708 1709 1709 DRM_INFO("amdgpu: finishing device.\n"); 1710 1710 adev->shutdown = true; 1711 + drm_crtc_force_disable_all(adev->ddev); 1711 1712 /* evict vram memory */ 1712 1713 amdgpu_bo_evict_vram(adev); 1713 1714 amdgpu_ib_pool_fini(adev); 1714 1715 amdgpu_fence_driver_fini(adev); 1715 - drm_crtc_force_disable_all(adev->ddev); 1716 1716 amdgpu_fbdev_fini(adev); 1717 1717 r = amdgpu_fini(adev); 1718 1718 kfree(adev->ip_block_status);
+1
drivers/gpu/drm/nouveau/include/nvkm/core/device.h
··· 175 175 void (*fini)(struct nvkm_device *, bool suspend); 176 176 resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar); 177 177 resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar); 178 + bool cpu_coherent; 178 179 }; 179 180 180 181 struct nvkm_device_quirk {
+2 -1
drivers/gpu/drm/nouveau/nouveau_bo.c
··· 209 209 nvbo->tile_flags = tile_flags; 210 210 nvbo->bo.bdev = &drm->ttm.bdev; 211 211 212 - nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; 212 + if (!nvxx_device(&drm->device)->func->cpu_coherent) 213 + nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; 213 214 214 215 nvbo->page_shift = 12; 215 216 if (drm->client.vm) {
+1
drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c
··· 1614 1614 .fini = nvkm_device_pci_fini, 1615 1615 .resource_addr = nvkm_device_pci_resource_addr, 1616 1616 .resource_size = nvkm_device_pci_resource_size, 1617 + .cpu_coherent = !IS_ENABLED(CONFIG_ARM), 1617 1618 }; 1618 1619 1619 1620 int
+1
drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
··· 245 245 .fini = nvkm_device_tegra_fini, 246 246 .resource_addr = nvkm_device_tegra_resource_addr, 247 247 .resource_size = nvkm_device_tegra_resource_size, 248 + .cpu_coherent = false, 248 249 }; 249 250 250 251 int
+3
drivers/gpu/drm/nouveau/nvkm/engine/fifo/dmanv04.c
··· 37 37 { 38 38 struct nv04_fifo_chan *chan = nv04_fifo_chan(base); 39 39 struct nvkm_instmem *imem = chan->fifo->base.engine.subdev.device->imem; 40 + 41 + mutex_lock(&chan->fifo->base.engine.subdev.mutex); 40 42 nvkm_ramht_remove(imem->ramht, cookie); 43 + mutex_unlock(&chan->fifo->base.engine.subdev.mutex); 41 44 } 42 45 43 46 static int
+6
drivers/gpu/drm/radeon/si_dpm.c
··· 3015 3015 if (rdev->pdev->device == 0x6811 && 3016 3016 rdev->pdev->revision == 0x81) 3017 3017 max_mclk = 120000; 3018 + /* limit sclk/mclk on Jet parts for stability */ 3019 + if (rdev->pdev->device == 0x6665 && 3020 + rdev->pdev->revision == 0xc3) { 3021 + max_sclk = 75000; 3022 + max_mclk = 80000; 3023 + } 3018 3024 3019 3025 if (rps->vce_active) { 3020 3026 rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
+1 -1
drivers/gpu/drm/udl/udl_fb.c
··· 122 122 return 0; 123 123 cmd = urb->transfer_buffer; 124 124 125 - for (i = y; i < height ; i++) { 125 + for (i = y; i < y + height ; i++) { 126 126 const int line_offset = fb->base.pitches[0] * i; 127 127 const int byte_offset = line_offset + (x * bpp); 128 128 const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
+11 -7
drivers/i2c/busses/i2c-eg20t.c
··· 773 773 /* Set the number of I2C channel instance */ 774 774 adap_info->ch_num = id->driver_data; 775 775 776 - ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, 777 - KBUILD_MODNAME, adap_info); 778 - if (ret) { 779 - pch_pci_err(pdev, "request_irq FAILED\n"); 780 - goto err_request_irq; 781 - } 782 - 783 776 for (i = 0; i < adap_info->ch_num; i++) { 784 777 pch_adap = &adap_info->pch_data[i].pch_adapter; 785 778 adap_info->pch_i2c_suspended = false; ··· 790 797 791 798 pch_adap->dev.of_node = pdev->dev.of_node; 792 799 pch_adap->dev.parent = &pdev->dev; 800 + } 801 + 802 + ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED, 803 + KBUILD_MODNAME, adap_info); 804 + if (ret) { 805 + pch_pci_err(pdev, "request_irq FAILED\n"); 806 + goto err_request_irq; 807 + } 808 + 809 + for (i = 0; i < adap_info->ch_num; i++) { 810 + pch_adap = &adap_info->pch_data[i].pch_adapter; 793 811 794 812 pch_i2c_init(&adap_info->pch_data[i]); 795 813
+2 -1
drivers/i2c/busses/i2c-qup.c
··· 1599 1599 #ifdef CONFIG_PM_SLEEP 1600 1600 static int qup_i2c_suspend(struct device *device) 1601 1601 { 1602 - qup_i2c_pm_suspend_runtime(device); 1602 + if (!pm_runtime_suspended(device)) 1603 + return qup_i2c_pm_suspend_runtime(device); 1603 1604 return 0; 1604 1605 } 1605 1606
+1 -1
drivers/i2c/muxes/i2c-mux-pca954x.c
··· 164 164 /* Only select the channel if its different from the last channel */ 165 165 if (data->last_chan != regval) { 166 166 ret = pca954x_reg_write(muxc->parent, client, regval); 167 - data->last_chan = regval; 167 + data->last_chan = ret ? 0 : regval; 168 168 } 169 169 170 170 return ret;
+6
drivers/input/joydev.c
··· 950 950 .flags = INPUT_DEVICE_ID_MATCH_EVBIT | 951 951 INPUT_DEVICE_ID_MATCH_ABSBIT, 952 952 .evbit = { BIT_MASK(EV_ABS) }, 953 + .absbit = { BIT_MASK(ABS_Z) }, 954 + }, 955 + { 956 + .flags = INPUT_DEVICE_ID_MATCH_EVBIT | 957 + INPUT_DEVICE_ID_MATCH_ABSBIT, 958 + .evbit = { BIT_MASK(EV_ABS) }, 953 959 .absbit = { BIT_MASK(ABS_WHEEL) }, 954 960 }, 955 961 {
+9 -7
drivers/input/touchscreen/silead.c
··· 390 390 data->max_fingers = 5; /* Most devices handle up-to 5 fingers */ 391 391 } 392 392 393 - error = device_property_read_string(dev, "touchscreen-fw-name", &str); 393 + error = device_property_read_string(dev, "firmware-name", &str); 394 394 if (!error) 395 - snprintf(data->fw_name, sizeof(data->fw_name), "%s", str); 395 + snprintf(data->fw_name, sizeof(data->fw_name), 396 + "silead/%s", str); 396 397 else 397 398 dev_dbg(dev, "Firmware file name read error. Using default."); 398 399 } ··· 411 410 if (!acpi_id) 412 411 return -ENODEV; 413 412 414 - snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", 415 - acpi_id->id); 413 + snprintf(data->fw_name, sizeof(data->fw_name), 414 + "silead/%s.fw", acpi_id->id); 416 415 417 416 for (i = 0; i < strlen(data->fw_name); i++) 418 417 data->fw_name[i] = tolower(data->fw_name[i]); 419 418 } else { 420 - snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", 421 - id->name); 419 + snprintf(data->fw_name, sizeof(data->fw_name), 420 + "silead/%s.fw", id->name); 422 421 } 423 422 424 423 return 0; ··· 427 426 static int silead_ts_set_default_fw_name(struct silead_ts_data *data, 428 427 const struct i2c_device_id *id) 429 428 { 430 - snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", id->name); 429 + snprintf(data->fw_name, sizeof(data->fw_name), 430 + "silead/%s.fw", id->name); 431 431 return 0; 432 432 } 433 433 #endif
+4 -3
drivers/irqchip/irq-gic-v3.c
··· 548 548 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, 549 549 unsigned long cluster_id) 550 550 { 551 - int cpu = *base_cpu; 551 + int next_cpu, cpu = *base_cpu; 552 552 unsigned long mpidr = cpu_logical_map(cpu); 553 553 u16 tlist = 0; 554 554 ··· 562 562 563 563 tlist |= 1 << (mpidr & 0xf); 564 564 565 - cpu = cpumask_next(cpu, mask); 566 - if (cpu >= nr_cpu_ids) 565 + next_cpu = cpumask_next(cpu, mask); 566 + if (next_cpu >= nr_cpu_ids) 567 567 goto out; 568 + cpu = next_cpu; 568 569 569 570 mpidr = cpu_logical_map(cpu); 570 571
+50 -55
drivers/irqchip/irq-mips-gic.c
··· 638 638 if (!gic_local_irq_is_routable(intr)) 639 639 return -EPERM; 640 640 641 - /* 642 - * HACK: These are all really percpu interrupts, but the rest 643 - * of the MIPS kernel code does not use the percpu IRQ API for 644 - * the CP0 timer and performance counter interrupts. 645 - */ 646 - switch (intr) { 647 - case GIC_LOCAL_INT_TIMER: 648 - case GIC_LOCAL_INT_PERFCTR: 649 - case GIC_LOCAL_INT_FDC: 650 - irq_set_chip_and_handler(virq, 651 - &gic_all_vpes_local_irq_controller, 652 - handle_percpu_irq); 653 - break; 654 - default: 655 - irq_set_chip_and_handler(virq, 656 - &gic_local_irq_controller, 657 - handle_percpu_devid_irq); 658 - irq_set_percpu_devid(virq); 659 - break; 660 - } 661 - 662 641 spin_lock_irqsave(&gic_lock, flags); 663 642 for (i = 0; i < gic_vpes; i++) { 664 643 u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin; ··· 703 724 return 0; 704 725 } 705 726 706 - static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, 707 - irq_hw_number_t hw) 727 + static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq, 728 + unsigned int hwirq) 708 729 { 709 - if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS) 710 - return gic_local_irq_domain_map(d, virq, hw); 730 + struct irq_chip *chip; 731 + int err; 711 732 712 - irq_set_chip_and_handler(virq, &gic_level_irq_controller, 713 - handle_level_irq); 733 + if (hwirq >= GIC_SHARED_HWIRQ_BASE) { 734 + err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, 735 + &gic_level_irq_controller, 736 + NULL); 737 + } else { 738 + switch (GIC_HWIRQ_TO_LOCAL(hwirq)) { 739 + case GIC_LOCAL_INT_TIMER: 740 + case GIC_LOCAL_INT_PERFCTR: 741 + case GIC_LOCAL_INT_FDC: 742 + /* 743 + * HACK: These are all really percpu interrupts, but 744 + * the rest of the MIPS kernel code does not use the 745 + * percpu IRQ API for them. 
746 + */ 747 + chip = &gic_all_vpes_local_irq_controller; 748 + irq_set_handler(virq, handle_percpu_irq); 749 + break; 714 750 715 - return gic_shared_irq_domain_map(d, virq, hw, 0); 751 + default: 752 + chip = &gic_local_irq_controller; 753 + irq_set_handler(virq, handle_percpu_devid_irq); 754 + irq_set_percpu_devid(virq); 755 + break; 756 + } 757 + 758 + err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, 759 + chip, NULL); 760 + } 761 + 762 + return err; 716 763 } 717 764 718 765 static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, ··· 749 744 int cpu, ret, i; 750 745 751 746 if (spec->type == GIC_DEVICE) { 752 - /* verify that it doesn't conflict with an IPI irq */ 753 - if (test_bit(spec->hwirq, ipi_resrv)) 747 + /* verify that shared irqs don't conflict with an IPI irq */ 748 + if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) && 749 + test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv)) 754 750 return -EBUSY; 755 751 756 - hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq); 757 - 758 - return irq_domain_set_hwirq_and_chip(d, virq, hwirq, 759 - &gic_level_irq_controller, 760 - NULL); 752 + return gic_setup_dev_chip(d, virq, spec->hwirq); 761 753 } else { 762 754 base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs); 763 755 if (base_hwirq == gic_shared_intrs) { ··· 823 821 } 824 822 825 823 static const struct irq_domain_ops gic_irq_domain_ops = { 826 - .map = gic_irq_domain_map, 827 824 .alloc = gic_irq_domain_alloc, 828 825 .free = gic_irq_domain_free, 829 826 .match = gic_irq_domain_match, ··· 853 852 struct irq_fwspec *fwspec = arg; 854 853 struct gic_irq_spec spec = { 855 854 .type = GIC_DEVICE, 856 - .hwirq = fwspec->param[1], 857 855 }; 858 856 int i, ret; 859 - bool is_shared = fwspec->param[0] == GIC_SHARED; 860 857 861 - if (is_shared) { 862 - ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec); 863 - if (ret) 864 - return ret; 865 - } 858 + if (fwspec->param[0] == GIC_SHARED) 859 + spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]); 
860 + else 861 + spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]); 862 + 863 + ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec); 864 + if (ret) 865 + return ret; 866 866 867 867 for (i = 0; i < nr_irqs; i++) { 868 - irq_hw_number_t hwirq; 869 - 870 - if (is_shared) 871 - hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i); 872 - else 873 - hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i); 874 - 875 - ret = irq_domain_set_hwirq_and_chip(d, virq + i, 876 - hwirq, 877 - &gic_level_irq_controller, 878 - NULL); 868 + ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i); 879 869 if (ret) 880 870 goto error; 881 871 } ··· 888 896 static void gic_dev_domain_activate(struct irq_domain *domain, 889 897 struct irq_data *d) 890 898 { 891 - gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0); 899 + if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS) 900 + gic_local_irq_domain_map(domain, d->irq, d->hwirq); 901 + else 902 + gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0); 892 903 } 893 904 894 905 static struct irq_domain_ops gic_dev_domain_ops = {
+9 -5
drivers/mmc/host/dw_mmc.c
··· 1112 1112 1113 1113 div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; 1114 1114 1115 - dev_info(&slot->mmc->class_dev, 1116 - "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", 1117 - slot->id, host->bus_hz, clock, 1118 - div ? ((host->bus_hz / div) >> 1) : 1119 - host->bus_hz, div); 1115 + if (clock != slot->__clk_old || force_clkinit) 1116 + dev_info(&slot->mmc->class_dev, 1117 + "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", 1118 + slot->id, host->bus_hz, clock, 1119 + div ? ((host->bus_hz / div) >> 1) : 1120 + host->bus_hz, div); 1120 1121 1121 1122 /* disable clock */ 1122 1123 mci_writel(host, CLKENA, 0); ··· 1140 1139 1141 1140 /* inform CIU */ 1142 1141 mci_send_cmd(slot, sdmmc_cmd_bits, 0); 1142 + 1143 + /* keep the last clock value that was requested from core */ 1144 + slot->__clk_old = clock; 1143 1145 } 1144 1146 1145 1147 host->current_speed = clock;
+3
drivers/mmc/host/dw_mmc.h
··· 249 249 * @queue_node: List node for placing this node in the @queue list of 250 250 * &struct dw_mci. 251 251 * @clock: Clock rate configured by set_ios(). Protected by host->lock. 252 + * @__clk_old: The last clock value that was requested from core. 253 + * Keeping track of this helps us to avoid spamming the console. 252 254 * @flags: Random state bits associated with the slot. 253 255 * @id: Number of this slot. 254 256 * @sdio_id: Number of this slot in the SDIO interrupt registers. ··· 265 263 struct list_head queue_node; 266 264 267 265 unsigned int clock; 266 + unsigned int __clk_old; 268 267 269 268 unsigned long flags; 270 269 #define DW_MMC_CARD_PRESENT 0
+3
drivers/mtd/nand/davinci_nand.c
··· 240 240 unsigned long flags; 241 241 u32 val; 242 242 243 + /* Reset ECC hardware */ 244 + davinci_nand_readl(info, NAND_4BIT_ECC1_OFFSET); 245 + 243 246 spin_lock_irqsave(&davinci_nand_lock, flags); 244 247 245 248 /* Start 4-bit ECC calculation for read/write */
+8 -4
drivers/mtd/nand/mtk_ecc.c
··· 366 366 u8 *data, u32 bytes) 367 367 { 368 368 dma_addr_t addr; 369 - u32 *p, len, i; 369 + u8 *p; 370 + u32 len, i, val; 370 371 int ret = 0; 371 372 372 373 addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); ··· 393 392 394 393 /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ 395 394 len = (config->strength * ECC_PARITY_BITS + 7) >> 3; 396 - p = (u32 *)(data + bytes); 395 + p = data + bytes; 397 396 398 397 /* write the parity bytes generated by the ECC back to the OOB region */ 399 - for (i = 0; i < len; i++) 400 - p[i] = readl(ecc->regs + ECC_ENCPAR(i)); 398 + for (i = 0; i < len; i++) { 399 + if ((i % 4) == 0) 400 + val = readl(ecc->regs + ECC_ENCPAR(i / 4)); 401 + p[i] = (val >> ((i % 4) * 8)) & 0xff; 402 + } 401 403 timeout: 402 404 403 405 dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
+5 -2
drivers/mtd/nand/mtk_nand.c
··· 93 93 #define NFI_FSM_MASK (0xf << 16) 94 94 #define NFI_ADDRCNTR (0x70) 95 95 #define CNTR_MASK GENMASK(16, 12) 96 + #define ADDRCNTR_SEC_SHIFT (12) 97 + #define ADDRCNTR_SEC(val) \ 98 + (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT) 96 99 #define NFI_STRADDR (0x80) 97 100 #define NFI_BYTELEN (0x84) 98 101 #define NFI_CSEL (0x90) ··· 702 699 } 703 700 704 701 ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg, 705 - (reg & CNTR_MASK) >= chip->ecc.steps, 702 + ADDRCNTR_SEC(reg) >= chip->ecc.steps, 706 703 10, MTK_TIMEOUT); 707 704 if (ret) 708 705 dev_err(dev, "hwecc write timeout\n"); ··· 905 902 dev_warn(nfc->dev, "read ahb/dma done timeout\n"); 906 903 907 904 rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg, 908 - (reg & CNTR_MASK) >= sectors, 10, 905 + ADDRCNTR_SEC(reg) >= sectors, 10, 909 906 MTK_TIMEOUT); 910 907 if (rc < 0) { 911 908 dev_err(nfc->dev, "subpage done timeout\n");
+1 -1
drivers/mtd/nand/mxc_nand.c
··· 943 943 struct nand_chip *nand_chip = mtd_to_nand(mtd); 944 944 int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26; 945 945 946 - if (section > nand_chip->ecc.steps) 946 + if (section >= nand_chip->ecc.steps) 947 947 return -ERANGE; 948 948 949 949 if (!section) {
+1 -1
drivers/mtd/nand/omap2.c
··· 2169 2169 return 0; 2170 2170 2171 2171 return_error: 2172 - if (info->dma) 2172 + if (!IS_ERR_OR_NULL(info->dma)) 2173 2173 dma_release_channel(info->dma); 2174 2174 if (nand_chip->ecc.priv) { 2175 2175 nand_bch_free(nand_chip->ecc.priv);
+17 -10
drivers/net/can/dev.c
··· 21 21 #include <linux/slab.h> 22 22 #include <linux/netdevice.h> 23 23 #include <linux/if_arp.h> 24 + #include <linux/workqueue.h> 24 25 #include <linux/can.h> 25 26 #include <linux/can/dev.h> 26 27 #include <linux/can/skb.h> ··· 502 501 /* 503 502 * CAN device restart for bus-off recovery 504 503 */ 505 - static void can_restart(unsigned long data) 504 + static void can_restart(struct net_device *dev) 506 505 { 507 - struct net_device *dev = (struct net_device *)data; 508 506 struct can_priv *priv = netdev_priv(dev); 509 507 struct net_device_stats *stats = &dev->stats; 510 508 struct sk_buff *skb; ··· 543 543 netdev_err(dev, "Error %d during restart", err); 544 544 } 545 545 546 + static void can_restart_work(struct work_struct *work) 547 + { 548 + struct delayed_work *dwork = to_delayed_work(work); 549 + struct can_priv *priv = container_of(dwork, struct can_priv, restart_work); 550 + 551 + can_restart(priv->dev); 552 + } 553 + 546 554 int can_restart_now(struct net_device *dev) 547 555 { 548 556 struct can_priv *priv = netdev_priv(dev); ··· 564 556 if (priv->state != CAN_STATE_BUS_OFF) 565 557 return -EBUSY; 566 558 567 - /* Runs as soon as possible in the timer context */ 568 - mod_timer(&priv->restart_timer, jiffies); 559 + cancel_delayed_work_sync(&priv->restart_work); 560 + can_restart(dev); 569 561 570 562 return 0; 571 563 } ··· 586 578 netif_carrier_off(dev); 587 579 588 580 if (priv->restart_ms) 589 - mod_timer(&priv->restart_timer, 590 - jiffies + (priv->restart_ms * HZ) / 1000); 581 + schedule_delayed_work(&priv->restart_work, 582 + msecs_to_jiffies(priv->restart_ms)); 591 583 } 592 584 EXPORT_SYMBOL_GPL(can_bus_off); 593 585 ··· 696 688 return NULL; 697 689 698 690 priv = netdev_priv(dev); 691 + priv->dev = dev; 699 692 700 693 if (echo_skb_max) { 701 694 priv->echo_skb_max = echo_skb_max; ··· 706 697 707 698 priv->state = CAN_STATE_STOPPED; 708 699 709 - init_timer(&priv->restart_timer); 700 + INIT_DELAYED_WORK(&priv->restart_work, 
can_restart_work); 710 701 711 702 return dev; 712 703 } ··· 787 778 if (!netif_carrier_ok(dev)) 788 779 netif_carrier_on(dev); 789 780 790 - setup_timer(&priv->restart_timer, can_restart, (unsigned long)dev); 791 - 792 781 return 0; 793 782 } 794 783 EXPORT_SYMBOL_GPL(open_candev); ··· 801 794 { 802 795 struct can_priv *priv = netdev_priv(dev); 803 796 804 - del_timer_sync(&priv->restart_timer); 797 + cancel_delayed_work_sync(&priv->restart_work); 805 798 can_flush_echo_skb(dev); 806 799 } 807 800 EXPORT_SYMBOL_GPL(close_candev);
+5 -5
drivers/net/ethernet/broadcom/tg3.c
··· 18134 18134 18135 18135 rtnl_lock(); 18136 18136 18137 - /* We needn't recover from permanent error */ 18138 - if (state == pci_channel_io_frozen) 18139 - tp->pcierr_recovery = true; 18140 - 18141 18137 /* We probably don't have netdev yet */ 18142 18138 if (!netdev || !netif_running(netdev)) 18143 18139 goto done; 18140 + 18141 + /* We needn't recover from permanent error */ 18142 + if (state == pci_channel_io_frozen) 18143 + tp->pcierr_recovery = true; 18144 18144 18145 18145 tg3_phy_stop(tp); 18146 18146 ··· 18238 18238 18239 18239 rtnl_lock(); 18240 18240 18241 - if (!netif_running(netdev)) 18241 + if (!netdev || !netif_running(netdev)) 18242 18242 goto done; 18243 18243 18244 18244 tg3_full_lock(tp, 0);
+12 -3
drivers/net/ethernet/freescale/fec_main.c
··· 89 89 .driver_data = 0, 90 90 }, { 91 91 .name = "imx25-fec", 92 - .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC, 92 + .driver_data = FEC_QUIRK_USE_GASKET, 93 93 }, { 94 94 .name = "imx27-fec", 95 - .driver_data = FEC_QUIRK_HAS_RACC, 95 + .driver_data = 0, 96 96 }, { 97 97 .name = "imx28-fec", 98 98 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | ··· 180 180 /* FEC receive acceleration */ 181 181 #define FEC_RACC_IPDIS (1 << 1) 182 182 #define FEC_RACC_PRODIS (1 << 2) 183 + #define FEC_RACC_SHIFT16 BIT(7) 183 184 #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) 184 185 185 186 /* ··· 946 945 947 946 #if !defined(CONFIG_M5272) 948 947 if (fep->quirks & FEC_QUIRK_HAS_RACC) { 949 - /* set RX checksum */ 950 948 val = readl(fep->hwp + FEC_RACC); 949 + /* align IP header */ 950 + val |= FEC_RACC_SHIFT16; 951 951 if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) 952 + /* set RX checksum */ 952 953 val |= FEC_RACC_OPTIONS; 953 954 else 954 955 val &= ~FEC_RACC_OPTIONS; ··· 1431 1428 prefetch(skb->data - NET_IP_ALIGN); 1432 1429 skb_put(skb, pkt_len - 4); 1433 1430 data = skb->data; 1431 + 1432 + #if !defined(CONFIG_M5272) 1433 + if (fep->quirks & FEC_QUIRK_HAS_RACC) 1434 + data = skb_pull_inline(skb, 2); 1435 + #endif 1436 + 1434 1437 if (!is_copybreak && need_swap) 1435 1438 swap_buffer(data, pkt_len); 1436 1439
+7 -1
drivers/nvdimm/core.c
··· 99 99 nvdimm_map->size = size; 100 100 kref_init(&nvdimm_map->kref); 101 101 102 - if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) 102 + if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) { 103 + dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n", 104 + &offset, size, dev_name(dev)); 103 105 goto err_request_region; 106 + } 104 107 105 108 if (flags) 106 109 nvdimm_map->mem = memremap(offset, size, flags); ··· 173 170 else 174 171 kref_get(&nvdimm_map->kref); 175 172 nvdimm_bus_unlock(dev); 173 + 174 + if (!nvdimm_map) 175 + return NULL; 176 176 177 177 if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map)) 178 178 return NULL;
+20 -2
drivers/nvdimm/nd.h
··· 52 52 struct nd_region_data { 53 53 int ns_count; 54 54 int ns_active; 55 - unsigned int flush_mask; 56 - void __iomem *flush_wpq[0][0]; 55 + unsigned int hints_shift; 56 + void __iomem *flush_wpq[0]; 57 57 }; 58 + 59 + static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd, 60 + int dimm, int hint) 61 + { 62 + unsigned int num = 1 << ndrd->hints_shift; 63 + unsigned int mask = num - 1; 64 + 65 + return ndrd->flush_wpq[dimm * num + (hint & mask)]; 66 + } 67 + 68 + static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm, 69 + int hint, void __iomem *flush) 70 + { 71 + unsigned int num = 1 << ndrd->hints_shift; 72 + unsigned int mask = num - 1; 73 + 74 + ndrd->flush_wpq[dimm * num + (hint & mask)] = flush; 75 + } 58 76 59 77 static inline struct nd_namespace_index *to_namespace_index( 60 78 struct nvdimm_drvdata *ndd, int i)
+13 -9
drivers/nvdimm/region_devs.c
··· 38 38 39 39 dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm), 40 40 nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es"); 41 - for (i = 0; i < nvdimm->num_flush; i++) { 41 + for (i = 0; i < (1 << ndrd->hints_shift); i++) { 42 42 struct resource *res = &nvdimm->flush_wpq[i]; 43 43 unsigned long pfn = PHYS_PFN(res->start); 44 44 void __iomem *flush_page; ··· 54 54 55 55 if (j < i) 56 56 flush_page = (void __iomem *) ((unsigned long) 57 - ndrd->flush_wpq[dimm][j] & PAGE_MASK); 57 + ndrd_get_flush_wpq(ndrd, dimm, j) 58 + & PAGE_MASK); 58 59 else 59 60 flush_page = devm_nvdimm_ioremap(dev, 60 - PHYS_PFN(pfn), PAGE_SIZE); 61 + PFN_PHYS(pfn), PAGE_SIZE); 61 62 if (!flush_page) 62 63 return -ENXIO; 63 - ndrd->flush_wpq[dimm][i] = flush_page 64 - + (res->start & ~PAGE_MASK); 64 + ndrd_set_flush_wpq(ndrd, dimm, i, flush_page 65 + + (res->start & ~PAGE_MASK)); 65 66 } 66 67 67 68 return 0; ··· 94 93 return -ENOMEM; 95 94 dev_set_drvdata(dev, ndrd); 96 95 97 - ndrd->flush_mask = (1 << ilog2(num_flush)) - 1; 96 + if (!num_flush) 97 + return 0; 98 + 99 + ndrd->hints_shift = ilog2(num_flush); 98 100 for (i = 0; i < nd_region->ndr_mappings; i++) { 99 101 struct nd_mapping *nd_mapping = &nd_region->mapping[i]; 100 102 struct nvdimm *nvdimm = nd_mapping->nvdimm; ··· 904 900 */ 905 901 wmb(); 906 902 for (i = 0; i < nd_region->ndr_mappings; i++) 907 - if (ndrd->flush_wpq[i][0]) 908 - writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]); 903 + if (ndrd_get_flush_wpq(ndrd, i, 0)) 904 + writeq(1, ndrd_get_flush_wpq(ndrd, i, idx)); 909 905 wmb(); 910 906 } 911 907 EXPORT_SYMBOL_GPL(nvdimm_flush); ··· 929 925 930 926 for (i = 0; i < nd_region->ndr_mappings; i++) 931 927 /* flush hints present, flushing required */ 932 - if (ndrd->flush_wpq[i][0]) 928 + if (ndrd_get_flush_wpq(ndrd, i, 0)) 933 929 return 1; 934 930 935 931 /*
+1 -1
drivers/nvme/host/rdma.c
··· 561 561 562 562 queue = &ctrl->queues[idx]; 563 563 queue->ctrl = ctrl; 564 - queue->flags = 0; 565 564 init_completion(&queue->cm_done); 566 565 567 566 if (idx > 0) ··· 594 595 goto out_destroy_cm_id; 595 596 } 596 597 598 + clear_bit(NVME_RDMA_Q_DELETING, &queue->flags); 597 599 set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags); 598 600 599 601 return 0;
+2
drivers/scsi/hosts.c
··· 486 486 else 487 487 shost->dma_boundary = 0xffffffff; 488 488 489 + shost->use_blk_mq = scsi_use_blk_mq; 490 + 489 491 device_initialize(&shost->shost_gendev); 490 492 dev_set_name(&shost->shost_gendev, "host%d", shost->host_no); 491 493 shost->shost_gendev.bus = &scsi_bus_type;
-1
drivers/scsi/scsi.c
··· 1160 1160 bool scsi_use_blk_mq = false; 1161 1161 #endif 1162 1162 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); 1163 - EXPORT_SYMBOL_GPL(scsi_use_blk_mq); 1164 1163 1165 1164 static int __init init_scsi(void) 1166 1165 {
+1
drivers/scsi/scsi_priv.h
··· 29 29 extern void scsi_exit_hosts(void); 30 30 31 31 /* scsi.c */ 32 + extern bool scsi_use_blk_mq; 32 33 extern int scsi_setup_command_freelist(struct Scsi_Host *shost); 33 34 extern void scsi_destroy_command_freelist(struct Scsi_Host *shost); 34 35 #ifdef CONFIG_SCSI_LOGGING
+3 -6
fs/btrfs/extent-tree.c
··· 4271 4271 if (ret < 0) 4272 4272 return ret; 4273 4273 4274 - /* 4275 - * Use new btrfs_qgroup_reserve_data to reserve precious data space 4276 - * 4277 - * TODO: Find a good method to avoid reserve data space for NOCOW 4278 - * range, but don't impact performance on quota disable case. 4279 - */ 4274 + /* Use new btrfs_qgroup_reserve_data to reserve precious data space. */ 4280 4275 ret = btrfs_qgroup_reserve_data(inode, start, len); 4276 + if (ret) 4277 + btrfs_free_reserved_data_space_noquota(inode, start, len); 4281 4278 return ret; 4282 4279 } 4283 4280
+12
fs/btrfs/ioctl.c
··· 1634 1634 int namelen; 1635 1635 int ret = 0; 1636 1636 1637 + if (!S_ISDIR(file_inode(file)->i_mode)) 1638 + return -ENOTDIR; 1639 + 1637 1640 ret = mnt_want_write_file(file); 1638 1641 if (ret) 1639 1642 goto out; ··· 1694 1691 struct btrfs_ioctl_vol_args *vol_args; 1695 1692 int ret; 1696 1693 1694 + if (!S_ISDIR(file_inode(file)->i_mode)) 1695 + return -ENOTDIR; 1696 + 1697 1697 vol_args = memdup_user(arg, sizeof(*vol_args)); 1698 1698 if (IS_ERR(vol_args)) 1699 1699 return PTR_ERR(vol_args); ··· 1719 1713 u64 *ptr = NULL; 1720 1714 bool readonly = false; 1721 1715 struct btrfs_qgroup_inherit *inherit = NULL; 1716 + 1717 + if (!S_ISDIR(file_inode(file)->i_mode)) 1718 + return -ENOTDIR; 1722 1719 1723 1720 vol_args = memdup_user(arg, sizeof(*vol_args)); 1724 1721 if (IS_ERR(vol_args)) ··· 2365 2356 int namelen; 2366 2357 int ret; 2367 2358 int err = 0; 2359 + 2360 + if (!S_ISDIR(dir->i_mode)) 2361 + return -ENOTDIR; 2368 2362 2369 2363 vol_args = memdup_user(arg, sizeof(*vol_args)); 2370 2364 if (IS_ERR(vol_args))
+1
fs/configfs/file.c
··· 333 333 if (bin_attr->cb_max_size && 334 334 *ppos + count > bin_attr->cb_max_size) { 335 335 len = -EFBIG; 336 + goto out; 336 337 } 337 338 338 339 tbuf = vmalloc(*ppos + count);
+10
fs/ocfs2/aops.c
··· 1842 1842 ocfs2_commit_trans(osb, handle); 1843 1843 1844 1844 out: 1845 + /* 1846 + * The mmapped page won't be unlocked in ocfs2_free_write_ctxt(), 1847 + * even in case of error here like ENOSPC and ENOMEM. So, we need 1848 + * to unlock the target page manually to prevent deadlocks when 1849 + * retrying again on ENOSPC, or when returning non-VM_FAULT_LOCKED 1850 + * to VM code. 1851 + */ 1852 + if (wc->w_target_locked) 1853 + unlock_page(mmap_page); 1854 + 1845 1855 ocfs2_free_write_ctxt(inode, wc); 1846 1856 1847 1857 if (data_ac) {
+2 -1
include/linux/can/dev.h
··· 32 32 * CAN common private data 33 33 */ 34 34 struct can_priv { 35 + struct net_device *dev; 35 36 struct can_device_stats can_stats; 36 37 37 38 struct can_bittiming bittiming, data_bittiming; ··· 48 47 u32 ctrlmode_static; /* static enabled options for driver/hardware */ 49 48 50 49 int restart_ms; 51 - struct timer_list restart_timer; 50 + struct delayed_work restart_work; 52 51 53 52 int (*do_set_bittiming)(struct net_device *dev); 54 53 int (*do_set_data_bittiming)(struct net_device *dev);
+1 -1
include/linux/dma-mapping.h
··· 718 718 #define dma_mmap_writecombine dma_mmap_wc 719 719 #endif 720 720 721 - #ifdef CONFIG_NEED_DMA_MAP_STATE 721 + #if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG) 722 722 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME 723 723 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME 724 724 #define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME)
+1 -1
include/linux/mroute.h
··· 120 120 struct rtmsg; 121 121 int ipmr_get_route(struct net *net, struct sk_buff *skb, 122 122 __be32 saddr, __be32 daddr, 123 - struct rtmsg *rtm, int nowait); 123 + struct rtmsg *rtm, int nowait, u32 portid); 124 124 #endif
+1 -1
include/linux/mroute6.h
··· 116 116 117 117 struct rtmsg; 118 118 extern int ip6mr_get_route(struct net *net, struct sk_buff *skb, 119 - struct rtmsg *rtm, int nowait); 119 + struct rtmsg *rtm, int nowait, u32 portid); 120 120 121 121 #ifdef CONFIG_IPV6_MROUTE 122 122 extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
+1
include/linux/pagemap.h
··· 620 620 return __get_user(c, end); 621 621 } 622 622 623 + (void)c; 623 624 return 0; 624 625 } 625 626
+1 -1
include/linux/property.h
··· 190 190 .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ 191 191 .is_array = true, \ 192 192 .is_string = false, \ 193 - { .pointer = { _type_##_data = _val_ } }, \ 193 + { .pointer = { ._type_##_data = _val_ } }, \ 194 194 } 195 195 196 196 #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
+2
include/linux/swap.h
··· 257 257 258 258 static inline void workingset_node_pages_dec(struct radix_tree_node *node) 259 259 { 260 + VM_BUG_ON(!workingset_node_pages(node)); 260 261 node->count--; 261 262 } 262 263 ··· 273 272 274 273 static inline void workingset_node_shadows_dec(struct radix_tree_node *node) 275 274 { 275 + VM_BUG_ON(!workingset_node_shadows(node)); 276 276 node->count -= 1U << RADIX_TREE_COUNT_SHIFT; 277 277 } 278 278
+3 -10
include/net/sctp/structs.h
··· 555 555 556 556 atomic_t refcnt; 557 557 558 + /* How many times this chunk have been sent, for prsctp RTX policy */ 559 + int sent_count; 560 + 558 561 /* This is our link to the per-transport transmitted list. */ 559 562 struct list_head transmitted_list; 560 563 ··· 606 603 607 604 /* This needs to be recoverable for SCTP_SEND_FAILED events. */ 608 605 struct sctp_sndrcvinfo sinfo; 609 - 610 - /* We use this field to record param for prsctp policies, 611 - * for TTL policy, it is the time_to_drop of this chunk, 612 - * for RTX policy, it is the max_sent_count of this chunk, 613 - * for PRIO policy, it is the priority of this chunk. 614 - */ 615 - unsigned long prsctp_param; 616 - 617 - /* How many times this chunk have been sent, for prsctp RTX policy */ 618 - int sent_count; 619 606 620 607 /* Which association does this belong to? */ 621 608 struct sctp_association *asoc;
+1 -4
include/scsi/scsi_host.h
··· 771 771 shost->tmf_in_progress; 772 772 } 773 773 774 - extern bool scsi_use_blk_mq; 775 - 776 774 static inline bool shost_use_blk_mq(struct Scsi_Host *shost) 777 775 { 778 - return scsi_use_blk_mq; 779 - 776 + return shost->use_blk_mq; 780 777 } 781 778 782 779 extern int scsi_queue_work(struct Scsi_Host *, struct work_struct *);
+25 -4
kernel/cgroup.c
··· 3446 3446 * Except for the root, subtree_control must be zero for a cgroup 3447 3447 * with tasks so that child cgroups don't compete against tasks. 3448 3448 */ 3449 - if (enable && cgroup_parent(cgrp) && !list_empty(&cgrp->cset_links)) { 3450 - ret = -EBUSY; 3451 - goto out_unlock; 3449 + if (enable && cgroup_parent(cgrp)) { 3450 + struct cgrp_cset_link *link; 3451 + 3452 + /* 3453 + * Because namespaces pin csets too, @cgrp->cset_links 3454 + * might not be empty even when @cgrp is empty. Walk and 3455 + * verify each cset. 3456 + */ 3457 + spin_lock_irq(&css_set_lock); 3458 + 3459 + ret = 0; 3460 + list_for_each_entry(link, &cgrp->cset_links, cset_link) { 3461 + if (css_set_populated(link->cset)) { 3462 + ret = -EBUSY; 3463 + break; 3464 + } 3465 + } 3466 + 3467 + spin_unlock_irq(&css_set_lock); 3468 + 3469 + if (ret) 3470 + goto out_unlock; 3452 3471 } 3453 3472 3454 3473 /* save and update control masks and prepare csses */ ··· 3918 3899 * cgroup_task_count - count the number of tasks in a cgroup. 3919 3900 * @cgrp: the cgroup in question 3920 3901 * 3921 - * Return the number of tasks in the cgroup. 3902 + * Return the number of tasks in the cgroup. The returned number can be 3903 + * higher than the actual number of tasks due to css_set references from 3904 + * namespace roots and temporary usages. 3922 3905 */ 3923 3906 static int cgroup_task_count(const struct cgroup *cgrp) 3924 3907 {
+15 -4
kernel/cpuset.c
··· 325 325 /* 326 326 * Return in pmask the portion of a cpusets's cpus_allowed that 327 327 * are online. If none are online, walk up the cpuset hierarchy 328 - * until we find one that does have some online cpus. The top 329 - * cpuset always has some cpus online. 328 + * until we find one that does have some online cpus. 330 329 * 331 330 * One way or another, we guarantee to return some non-empty subset 332 331 * of cpu_online_mask. ··· 334 335 */ 335 336 static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) 336 337 { 337 - while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) 338 + while (!cpumask_intersects(cs->effective_cpus, cpu_online_mask)) { 338 339 cs = parent_cs(cs); 340 + if (unlikely(!cs)) { 341 + /* 342 + * The top cpuset doesn't have any online cpu as a 343 + * consequence of a race between cpuset_hotplug_work 344 + * and cpu hotplug notifier. But we know the top 345 + * cpuset's effective_cpus is on its way to to be 346 + * identical to cpu_online_mask. 347 + */ 348 + cpumask_copy(pmask, cpu_online_mask); 349 + return; 350 + } 351 + } 339 352 cpumask_and(pmask, cs->effective_cpus, cpu_online_mask); 340 353 } 341 354 ··· 2085 2074 * which could have been changed by cpuset just after it inherits the 2086 2075 * state from the parent and before it sits on the cgroup's task list. 2087 2076 */ 2088 - void cpuset_fork(struct task_struct *task) 2077 + static void cpuset_fork(struct task_struct *task) 2089 2078 { 2090 2079 if (task_css_is_root(task, cpuset_cgrp_id)) 2091 2080 return;
+1 -1
kernel/events/core.c
··· 3929 3929 3930 3930 static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2) 3931 3931 { 3932 - if ((e1->pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && 3932 + if ((e1->pmu == e2->pmu) && 3933 3933 (e1->cpu == e2->cpu || 3934 3934 e1->cpu == -1 || 3935 3935 e2->cpu == -1))
+6 -2
kernel/irq/chip.c
··· 820 820 desc->name = name; 821 821 822 822 if (handle != handle_bad_irq && is_chained) { 823 + unsigned int type = irqd_get_trigger_type(&desc->irq_data); 824 + 823 825 /* 824 826 * We're about to start this interrupt immediately, 825 827 * hence the need to set the trigger configuration. ··· 830 828 * chained interrupt. Reset it immediately because we 831 829 * do know better. 832 830 */ 833 - __irq_set_trigger(desc, irqd_get_trigger_type(&desc->irq_data)); 834 - desc->handle_irq = handle; 831 + if (type != IRQ_TYPE_NONE) { 832 + __irq_set_trigger(desc, type); 833 + desc->handle_irq = handle; 834 + } 835 835 836 836 irq_settings_set_noprobe(desc); 837 837 irq_settings_set_norequest(desc);
+16 -13
kernel/trace/trace.c
··· 5124 5124 struct trace_iterator *iter = filp->private_data; 5125 5125 ssize_t sret; 5126 5126 5127 - /* return any leftover data */ 5128 - sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 5129 - if (sret != -EBUSY) 5130 - return sret; 5131 - 5132 - trace_seq_init(&iter->seq); 5133 - 5134 5127 /* 5135 5128 * Avoid more than one consumer on a single file descriptor 5136 5129 * This is just a matter of traces coherency, the ring buffer itself 5137 5130 * is protected. 5138 5131 */ 5139 5132 mutex_lock(&iter->mutex); 5133 + 5134 + /* return any leftover data */ 5135 + sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 5136 + if (sret != -EBUSY) 5137 + goto out; 5138 + 5139 + trace_seq_init(&iter->seq); 5140 + 5140 5141 if (iter->trace->read) { 5141 5142 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); 5142 5143 if (sret) ··· 6164 6163 return -EBUSY; 6165 6164 #endif 6166 6165 6167 - if (splice_grow_spd(pipe, &spd)) 6168 - return -ENOMEM; 6169 - 6170 6166 if (*ppos & (PAGE_SIZE - 1)) 6171 6167 return -EINVAL; 6172 6168 ··· 6172 6174 return -EINVAL; 6173 6175 len &= PAGE_MASK; 6174 6176 } 6177 + 6178 + if (splice_grow_spd(pipe, &spd)) 6179 + return -ENOMEM; 6175 6180 6176 6181 again: 6177 6182 trace_access_lock(iter->cpu_file); ··· 6233 6232 /* did we read anything? */ 6234 6233 if (!spd.nr_pages) { 6235 6234 if (ret) 6236 - return ret; 6235 + goto out; 6237 6236 6237 + ret = -EAGAIN; 6238 6238 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) 6239 - return -EAGAIN; 6239 + goto out; 6240 6240 6241 6241 ret = wait_on_pipe(iter, true); 6242 6242 if (ret) 6243 - return ret; 6243 + goto out; 6244 6244 6245 6245 goto again; 6246 6246 } 6247 6247 6248 6248 ret = splice_to_pipe(pipe, &spd); 6249 + out: 6249 6250 splice_shrink_spd(&spd); 6250 6251 6251 6252 return ret;
+1 -1
lib/Kconfig.debug
··· 821 821 help 822 822 Say Y here to enable the kernel to detect "hung tasks", 823 823 which are bugs that cause the task to be stuck in 824 - uninterruptible "D" state indefinitiley. 824 + uninterruptible "D" state indefinitely. 825 825 826 826 When a hung task is detected, the kernel will print the 827 827 current stack trace (which you should report), but the
+4 -4
lib/radix-tree.c
··· 105 105 106 106 #ifdef CONFIG_RADIX_TREE_MULTIORDER 107 107 if (radix_tree_is_internal_node(entry)) { 108 - unsigned long siboff = get_slot_offset(parent, entry); 109 - if (siboff < RADIX_TREE_MAP_SIZE) { 110 - offset = siboff; 111 - entry = rcu_dereference_raw(parent->slots[offset]); 108 + if (is_sibling_entry(parent, entry)) { 109 + void **sibentry = (void **) entry_to_node(entry); 110 + offset = get_slot_offset(parent, sibentry); 111 + entry = rcu_dereference_raw(*sibentry); 112 112 } 113 113 } 114 114 #endif
+57 -57
mm/filemap.c
··· 110 110 * ->tasklist_lock (memory_failure, collect_procs_ao) 111 111 */ 112 112 113 + static int page_cache_tree_insert(struct address_space *mapping, 114 + struct page *page, void **shadowp) 115 + { 116 + struct radix_tree_node *node; 117 + void **slot; 118 + int error; 119 + 120 + error = __radix_tree_create(&mapping->page_tree, page->index, 0, 121 + &node, &slot); 122 + if (error) 123 + return error; 124 + if (*slot) { 125 + void *p; 126 + 127 + p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); 128 + if (!radix_tree_exceptional_entry(p)) 129 + return -EEXIST; 130 + 131 + mapping->nrexceptional--; 132 + if (!dax_mapping(mapping)) { 133 + if (shadowp) 134 + *shadowp = p; 135 + if (node) 136 + workingset_node_shadows_dec(node); 137 + } else { 138 + /* DAX can replace empty locked entry with a hole */ 139 + WARN_ON_ONCE(p != 140 + (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | 141 + RADIX_DAX_ENTRY_LOCK)); 142 + /* DAX accounts exceptional entries as normal pages */ 143 + if (node) 144 + workingset_node_pages_dec(node); 145 + /* Wakeup waiters for exceptional entry lock */ 146 + dax_wake_mapping_entry_waiter(mapping, page->index, 147 + false); 148 + } 149 + } 150 + radix_tree_replace_slot(slot, page); 151 + mapping->nrpages++; 152 + if (node) { 153 + workingset_node_pages_inc(node); 154 + /* 155 + * Don't track node that contains actual pages. 156 + * 157 + * Avoid acquiring the list_lru lock if already 158 + * untracked. The list_empty() test is safe as 159 + * node->private_list is protected by 160 + * mapping->tree_lock. 
161 + */ 162 + if (!list_empty(&node->private_list)) 163 + list_lru_del(&workingset_shadow_nodes, 164 + &node->private_list); 165 + } 166 + return 0; 167 + } 168 + 113 169 static void page_cache_tree_delete(struct address_space *mapping, 114 170 struct page *page, void *shadow) 115 171 { ··· 617 561 618 562 spin_lock_irqsave(&mapping->tree_lock, flags); 619 563 __delete_from_page_cache(old, NULL); 620 - error = radix_tree_insert(&mapping->page_tree, offset, new); 564 + error = page_cache_tree_insert(mapping, new, NULL); 621 565 BUG_ON(error); 622 566 mapping->nrpages++; 623 567 ··· 639 583 return error; 640 584 } 641 585 EXPORT_SYMBOL_GPL(replace_page_cache_page); 642 - 643 - static int page_cache_tree_insert(struct address_space *mapping, 644 - struct page *page, void **shadowp) 645 - { 646 - struct radix_tree_node *node; 647 - void **slot; 648 - int error; 649 - 650 - error = __radix_tree_create(&mapping->page_tree, page->index, 0, 651 - &node, &slot); 652 - if (error) 653 - return error; 654 - if (*slot) { 655 - void *p; 656 - 657 - p = radix_tree_deref_slot_protected(slot, &mapping->tree_lock); 658 - if (!radix_tree_exceptional_entry(p)) 659 - return -EEXIST; 660 - 661 - mapping->nrexceptional--; 662 - if (!dax_mapping(mapping)) { 663 - if (shadowp) 664 - *shadowp = p; 665 - if (node) 666 - workingset_node_shadows_dec(node); 667 - } else { 668 - /* DAX can replace empty locked entry with a hole */ 669 - WARN_ON_ONCE(p != 670 - (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | 671 - RADIX_DAX_ENTRY_LOCK)); 672 - /* DAX accounts exceptional entries as normal pages */ 673 - if (node) 674 - workingset_node_pages_dec(node); 675 - /* Wakeup waiters for exceptional entry lock */ 676 - dax_wake_mapping_entry_waiter(mapping, page->index, 677 - false); 678 - } 679 - } 680 - radix_tree_replace_slot(slot, page); 681 - mapping->nrpages++; 682 - if (node) { 683 - workingset_node_pages_inc(node); 684 - /* 685 - * Don't track node that contains actual pages. 
686 - * 687 - * Avoid acquiring the list_lru lock if already 688 - * untracked. The list_empty() test is safe as 689 - * node->private_list is protected by 690 - * mapping->tree_lock. 691 - */ 692 - if (!list_empty(&node->private_list)) 693 - list_lru_del(&workingset_shadow_nodes, 694 - &node->private_list); 695 - } 696 - return 0; 697 - } 698 586 699 587 static int __add_to_page_cache_locked(struct page *page, 700 588 struct address_space *mapping,
-3
mm/huge_memory.c
··· 1138 1138 bool was_writable; 1139 1139 int flags = 0; 1140 1140 1141 - /* A PROT_NONE fault should not end up here */ 1142 - BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); 1143 - 1144 1141 fe->ptl = pmd_lock(vma->vm_mm, fe->pmd); 1145 1142 if (unlikely(!pmd_same(pmd, *fe->pmd))) 1146 1143 goto out_unlock;
+2 -1
mm/ksm.c
··· 283 283 { 284 284 struct rmap_item *rmap_item; 285 285 286 - rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); 286 + rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | 287 + __GFP_NORETRY | __GFP_NOWARN); 287 288 if (rmap_item) 288 289 ksm_rmap_items++; 289 290 return rmap_item;
+7 -5
mm/memory.c
··· 3351 3351 bool was_writable = pte_write(pte); 3352 3352 int flags = 0; 3353 3353 3354 - /* A PROT_NONE fault should not end up here */ 3355 - BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))); 3356 - 3357 3354 /* 3358 3355 * The "pte" at this point cannot be used safely without 3359 3356 * validation through pte_unmap_same(). It's of NUMA type but ··· 3455 3458 return VM_FAULT_FALLBACK; 3456 3459 } 3457 3460 3461 + static inline bool vma_is_accessible(struct vm_area_struct *vma) 3462 + { 3463 + return vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE); 3464 + } 3465 + 3458 3466 /* 3459 3467 * These routines also need to handle stuff like marking pages dirty 3460 3468 * and/or accessed for architectures that don't do it in hardware (most ··· 3526 3524 if (!pte_present(entry)) 3527 3525 return do_swap_page(fe, entry); 3528 3526 3529 - if (pte_protnone(entry)) 3527 + if (pte_protnone(entry) && vma_is_accessible(fe->vma)) 3530 3528 return do_numa_page(fe, entry); 3531 3529 3532 3530 fe->ptl = pte_lockptr(fe->vma->vm_mm, fe->pmd); ··· 3592 3590 3593 3591 barrier(); 3594 3592 if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) { 3595 - if (pmd_protnone(orig_pmd)) 3593 + if (pmd_protnone(orig_pmd) && vma_is_accessible(vma)) 3596 3594 return do_huge_pmd_numa_page(&fe, orig_pmd); 3597 3595 3598 3596 if ((fe.flags & FAULT_FLAG_WRITE) &&
+5 -5
mm/memory_hotplug.c
··· 1555 1555 { 1556 1556 gfp_t gfp_mask = GFP_USER | __GFP_MOVABLE; 1557 1557 int nid = page_to_nid(page); 1558 - nodemask_t nmask = node_online_map; 1559 - struct page *new_page; 1558 + nodemask_t nmask = node_states[N_MEMORY]; 1559 + struct page *new_page = NULL; 1560 1560 1561 1561 /* 1562 1562 * TODO: allocate a destination hugepage from a nearest neighbor node, ··· 1567 1567 return alloc_huge_page_node(page_hstate(compound_head(page)), 1568 1568 next_node_in(nid, nmask)); 1569 1569 1570 - if (nid != next_node_in(nid, nmask)) 1571 - node_clear(nid, nmask); 1570 + node_clear(nid, nmask); 1572 1571 1573 1572 if (PageHighMem(page) 1574 1573 || (zone_idx(page_zone(page)) == ZONE_MOVABLE)) 1575 1574 gfp_mask |= __GFP_HIGHMEM; 1576 1575 1577 - new_page = __alloc_pages_nodemask(gfp_mask, 0, 1576 + if (!nodes_empty(nmask)) 1577 + new_page = __alloc_pages_nodemask(gfp_mask, 0, 1578 1578 node_zonelist(nid, gfp_mask), &nmask); 1579 1579 if (!new_page) 1580 1580 new_page = __alloc_pages(gfp_mask, 0,
+3 -2
mm/shmem.c
··· 270 270 info->alloced -= pages; 271 271 shmem_recalc_inode(inode); 272 272 spin_unlock_irqrestore(&info->lock, flags); 273 - 273 + shmem_unacct_blocks(info->flags, pages); 274 274 return false; 275 275 } 276 276 percpu_counter_add(&sbinfo->used_blocks, pages); ··· 291 291 292 292 if (sbinfo->max_blocks) 293 293 percpu_counter_sub(&sbinfo->used_blocks, pages); 294 + shmem_unacct_blocks(info->flags, pages); 294 295 } 295 296 296 297 /* ··· 1981 1980 return addr; 1982 1981 sb = shm_mnt->mnt_sb; 1983 1982 } 1984 - if (SHMEM_SB(sb)->huge != SHMEM_HUGE_NEVER) 1983 + if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER) 1985 1984 return addr; 1986 1985 } 1987 1986
-19
mm/vmscan.c
··· 2303 2303 } 2304 2304 } 2305 2305 2306 - #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH 2307 - static void init_tlb_ubc(void) 2308 - { 2309 - /* 2310 - * This deliberately does not clear the cpumask as it's expensive 2311 - * and unnecessary. If there happens to be data in there then the 2312 - * first SWAP_CLUSTER_MAX pages will send an unnecessary IPI and 2313 - * then will be cleared. 2314 - */ 2315 - current->tlb_ubc.flush_required = false; 2316 - } 2317 - #else 2318 - static inline void init_tlb_ubc(void) 2319 - { 2320 - } 2321 - #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */ 2322 - 2323 2306 /* 2324 2307 * This is a basic per-node page freer. Used by both kswapd and direct reclaim. 2325 2308 */ ··· 2337 2354 */ 2338 2355 scan_adjusted = (global_reclaim(sc) && !current_is_kswapd() && 2339 2356 sc->priority == DEF_PRIORITY); 2340 - 2341 - init_tlb_ubc(); 2342 2357 2343 2358 blk_start_plug(&plug); 2344 2359 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
+4 -6
mm/workingset.c
··· 418 418 * no pages, so we expect to be able to remove them all and 419 419 * delete and free the empty node afterwards. 420 420 */ 421 - 422 - BUG_ON(!node->count); 423 - BUG_ON(node->count & RADIX_TREE_COUNT_MASK); 421 + BUG_ON(!workingset_node_shadows(node)); 422 + BUG_ON(workingset_node_pages(node)); 424 423 425 424 for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) { 426 425 if (node->slots[i]) { 427 426 BUG_ON(!radix_tree_exceptional_entry(node->slots[i])); 428 427 node->slots[i] = NULL; 429 - BUG_ON(node->count < (1U << RADIX_TREE_COUNT_SHIFT)); 430 - node->count -= 1U << RADIX_TREE_COUNT_SHIFT; 428 + workingset_node_shadows_dec(node); 431 429 BUG_ON(!mapping->nrexceptional); 432 430 mapping->nrexceptional--; 433 431 } 434 432 } 435 - BUG_ON(node->count); 433 + BUG_ON(workingset_node_shadows(node)); 436 434 inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM); 437 435 if (!__radix_tree_delete_node(&mapping->page_tree, node)) 438 436 BUG();
+2 -1
net/ipv4/ipmr.c
··· 2123 2123 2124 2124 int ipmr_get_route(struct net *net, struct sk_buff *skb, 2125 2125 __be32 saddr, __be32 daddr, 2126 - struct rtmsg *rtm, int nowait) 2126 + struct rtmsg *rtm, int nowait, u32 portid) 2127 2127 { 2128 2128 struct mfc_cache *cache; 2129 2129 struct mr_table *mrt; ··· 2168 2168 return -ENOMEM; 2169 2169 } 2170 2170 2171 + NETLINK_CB(skb2).portid = portid; 2171 2172 skb_push(skb2, sizeof(struct iphdr)); 2172 2173 skb_reset_network_header(skb2); 2173 2174 iph = ip_hdr(skb2);
+2 -1
net/ipv4/route.c
··· 2500 2500 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) { 2501 2501 int err = ipmr_get_route(net, skb, 2502 2502 fl4->saddr, fl4->daddr, 2503 - r, nowait); 2503 + r, nowait, portid); 2504 + 2504 2505 if (err <= 0) { 2505 2506 if (!nowait) { 2506 2507 if (err == 0)
+1 -2
net/ipv4/tcp_input.c
··· 2362 2362 } 2363 2363 #if IS_ENABLED(CONFIG_IPV6) 2364 2364 else if (sk->sk_family == AF_INET6) { 2365 - struct ipv6_pinfo *np = inet6_sk(sk); 2366 2365 pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", 2367 2366 msg, 2368 - &np->daddr, ntohs(inet->inet_dport), 2367 + &sk->sk_v6_daddr, ntohs(inet->inet_dport), 2369 2368 tp->snd_cwnd, tcp_left_out(tp), 2370 2369 tp->snd_ssthresh, tp->prior_ssthresh, 2371 2370 tp->packets_out);
+7 -5
net/ipv4/tcp_output.c
··· 1992 1992 len = 0; 1993 1993 tcp_for_write_queue_from_safe(skb, next, sk) { 1994 1994 copy = min_t(int, skb->len, probe_size - len); 1995 - if (nskb->ip_summed) 1995 + if (nskb->ip_summed) { 1996 1996 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); 1997 - else 1998 - nskb->csum = skb_copy_and_csum_bits(skb, 0, 1999 - skb_put(nskb, copy), 2000 - copy, nskb->csum); 1997 + } else { 1998 + __wsum csum = skb_copy_and_csum_bits(skb, 0, 1999 + skb_put(nskb, copy), 2000 + copy, 0); 2001 + nskb->csum = csum_block_add(nskb->csum, csum, len); 2002 + } 2001 2003 2002 2004 if (skb->len <= copy) { 2003 2005 /* We've eaten all the data from this skb.
-1
net/ipv6/ip6_gre.c
··· 648 648 encap_limit = t->parms.encap_limit; 649 649 650 650 memcpy(&fl6, &t->fl.u.ip6, sizeof(fl6)); 651 - fl6.flowi6_proto = skb->protocol; 652 651 653 652 err = gre_handle_offloads(skb, !!(t->parms.o_flags & TUNNEL_CSUM)); 654 653 if (err)
+3 -2
net/ipv6/ip6mr.c
··· 2285 2285 return 1; 2286 2286 } 2287 2287 2288 - int ip6mr_get_route(struct net *net, 2289 - struct sk_buff *skb, struct rtmsg *rtm, int nowait) 2288 + int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm, 2289 + int nowait, u32 portid) 2290 2290 { 2291 2291 int err; 2292 2292 struct mr6_table *mrt; ··· 2331 2331 return -ENOMEM; 2332 2332 } 2333 2333 2334 + NETLINK_CB(skb2).portid = portid; 2334 2335 skb_reset_transport_header(skb2); 2335 2336 2336 2337 skb_put(skb2, sizeof(struct ipv6hdr));
+3 -1
net/ipv6/route.c
··· 3216 3216 if (iif) { 3217 3217 #ifdef CONFIG_IPV6_MROUTE 3218 3218 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { 3219 - int err = ip6mr_get_route(net, skb, rtm, nowait); 3219 + int err = ip6mr_get_route(net, skb, rtm, nowait, 3220 + portid); 3221 + 3220 3222 if (err <= 0) { 3221 3223 if (!nowait) { 3222 3224 if (err == 0)
+3 -4
net/sched/act_ife.c
··· 53 53 u32 *tlv = (u32 *)(skbdata); 54 54 u16 totlen = nla_total_size(dlen); /*alignment + hdr */ 55 55 char *dptr = (char *)tlv + NLA_HDRLEN; 56 - u32 htlv = attrtype << 16 | dlen; 56 + u32 htlv = attrtype << 16 | (dlen + NLA_HDRLEN); 57 57 58 58 *tlv = htonl(htlv); 59 59 memset(dptr, 0, totlen - NLA_HDRLEN); ··· 653 653 struct tcf_ife_info *ife = to_ife(a); 654 654 int action = ife->tcf_action; 655 655 struct ifeheadr *ifehdr = (struct ifeheadr *)skb->data; 656 - u16 ifehdrln = ifehdr->metalen; 656 + int ifehdrln = (int)ifehdr->metalen; 657 657 struct meta_tlvhdr *tlv = (struct meta_tlvhdr *)(ifehdr->tlv_data); 658 658 659 659 spin_lock(&ife->tcf_lock); ··· 766 766 return TC_ACT_SHOT; 767 767 } 768 768 769 - iethh = eth_hdr(skb); 770 - 771 769 err = skb_cow_head(skb, hdrm); 772 770 if (unlikely(err)) { 773 771 ife->tcf_qstats.drops++; ··· 776 778 if (!(at & AT_EGRESS)) 777 779 skb_push(skb, skb->dev->hard_header_len); 778 780 781 + iethh = (struct ethhdr *)skb->data; 779 782 __skb_push(skb, hdrm); 780 783 memcpy(skb->data, iethh, skb->mac_len); 781 784 skb_reset_mac_header(skb);
+3
net/sched/sch_qfq.c
··· 1153 1153 if (!skb) 1154 1154 return NULL; 1155 1155 1156 + qdisc_qstats_backlog_dec(sch, skb); 1156 1157 sch->q.qlen--; 1157 1158 qdisc_bstats_update(sch, skb); 1158 1159 ··· 1257 1256 } 1258 1257 1259 1258 bstats_update(&cl->bstats, skb); 1259 + qdisc_qstats_backlog_inc(sch, skb); 1260 1260 ++sch->q.qlen; 1261 1261 1262 1262 agg = cl->agg; ··· 1478 1476 qdisc_reset(cl->qdisc); 1479 1477 } 1480 1478 } 1479 + sch->qstats.backlog = 0; 1481 1480 sch->q.qlen = 0; 1482 1481 } 1483 1482
+3
net/sched/sch_sfb.c
··· 400 400 enqueue: 401 401 ret = qdisc_enqueue(skb, child, to_free); 402 402 if (likely(ret == NET_XMIT_SUCCESS)) { 403 + qdisc_qstats_backlog_inc(sch, skb); 403 404 sch->q.qlen++; 404 405 increment_qlen(skb, q); 405 406 } else if (net_xmit_drop_count(ret)) { ··· 429 428 430 429 if (skb) { 431 430 qdisc_bstats_update(sch, skb); 431 + qdisc_qstats_backlog_dec(sch, skb); 432 432 sch->q.qlen--; 433 433 decrement_qlen(skb, q); 434 434 } ··· 452 450 struct sfb_sched_data *q = qdisc_priv(sch); 453 451 454 452 qdisc_reset(q->qdisc); 453 + sch->qstats.backlog = 0; 455 454 sch->q.qlen = 0; 456 455 q->slot = 0; 457 456 q->double_buffering = false;
+8 -3
net/sctp/chunk.c
··· 192 192 msg, msg->expires_at, jiffies); 193 193 } 194 194 195 + if (asoc->peer.prsctp_capable && 196 + SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags)) 197 + msg->expires_at = 198 + jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive); 199 + 195 200 /* This is the biggest possible DATA chunk that can fit into 196 201 * the packet 197 202 */ ··· 354 349 /* Check whether this message has expired. */ 355 350 int sctp_chunk_abandoned(struct sctp_chunk *chunk) 356 351 { 357 - if (!chunk->asoc->prsctp_enable || 352 + if (!chunk->asoc->peer.prsctp_capable || 358 353 !SCTP_PR_POLICY(chunk->sinfo.sinfo_flags)) { 359 354 struct sctp_datamsg *msg = chunk->msg; 360 355 ··· 368 363 } 369 364 370 365 if (SCTP_PR_TTL_ENABLED(chunk->sinfo.sinfo_flags) && 371 - time_after(jiffies, chunk->prsctp_param)) { 366 + time_after(jiffies, chunk->msg->expires_at)) { 372 367 if (chunk->sent_count) 373 368 chunk->asoc->abandoned_sent[SCTP_PR_INDEX(TTL)]++; 374 369 else 375 370 chunk->asoc->abandoned_unsent[SCTP_PR_INDEX(TTL)]++; 376 371 return 1; 377 372 } else if (SCTP_PR_RTX_ENABLED(chunk->sinfo.sinfo_flags) && 378 - chunk->sent_count > chunk->prsctp_param) { 373 + chunk->sent_count > chunk->sinfo.sinfo_timetolive) { 379 374 chunk->asoc->abandoned_sent[SCTP_PR_INDEX(RTX)]++; 380 375 return 1; 381 376 }
+6 -6
net/sctp/outqueue.c
··· 304 304 "illegal chunk"); 305 305 306 306 sctp_outq_tail_data(q, chunk); 307 - if (chunk->asoc->prsctp_enable && 307 + if (chunk->asoc->peer.prsctp_capable && 308 308 SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags)) 309 309 chunk->asoc->sent_cnt_removable++; 310 310 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) ··· 354 354 355 355 list_for_each_entry_safe(chk, temp, queue, transmitted_list) { 356 356 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || 357 - chk->prsctp_param <= sinfo->sinfo_timetolive) 357 + chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) 358 358 continue; 359 359 360 360 list_del_init(&chk->transmitted_list); ··· 389 389 390 390 list_for_each_entry_safe(chk, temp, queue, list) { 391 391 if (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || 392 - chk->prsctp_param <= sinfo->sinfo_timetolive) 392 + chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive) 393 393 continue; 394 394 395 395 list_del_init(&chk->list); ··· 413 413 { 414 414 struct sctp_transport *transport; 415 415 416 - if (!asoc->prsctp_enable || !asoc->sent_cnt_removable) 416 + if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable) 417 417 return; 418 418 419 419 msg_len = sctp_prsctp_prune_sent(asoc, sinfo, ··· 1026 1026 1027 1027 /* Mark as failed send. */ 1028 1028 sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); 1029 - if (asoc->prsctp_enable && 1029 + if (asoc->peer.prsctp_capable && 1030 1030 SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags)) 1031 1031 asoc->sent_cnt_removable--; 1032 1032 sctp_chunk_free(chunk); ··· 1319 1319 tsn = ntohl(tchunk->subh.data_hdr->tsn); 1320 1320 if (TSN_lte(tsn, ctsn)) { 1321 1321 list_del_init(&tchunk->transmitted_list); 1322 - if (asoc->prsctp_enable && 1322 + if (asoc->peer.prsctp_capable && 1323 1323 SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags)) 1324 1324 asoc->sent_cnt_removable--; 1325 1325 sctp_chunk_free(tchunk);
+40 -18
net/sctp/sctp_diag.c
··· 276 276 return err; 277 277 } 278 278 279 - static int sctp_tsp_dump(struct sctp_transport *tsp, void *p) 279 + static int sctp_sock_dump(struct sock *sk, void *p) 280 280 { 281 - struct sctp_endpoint *ep = tsp->asoc->ep; 281 + struct sctp_endpoint *ep = sctp_sk(sk)->ep; 282 282 struct sctp_comm_param *commp = p; 283 - struct sock *sk = ep->base.sk; 284 283 struct sk_buff *skb = commp->skb; 285 284 struct netlink_callback *cb = commp->cb; 286 285 const struct inet_diag_req_v2 *r = commp->r; 287 - struct sctp_association *assoc = 288 - list_entry(ep->asocs.next, struct sctp_association, asocs); 286 + struct sctp_association *assoc; 289 287 int err = 0; 290 288 291 - /* find the ep only once through the transports by this condition */ 292 - if (tsp->asoc != assoc) 293 - goto out; 294 - 295 - if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family) 296 - goto out; 297 - 298 289 lock_sock(sk); 299 - if (sk != assoc->base.sk) 300 - goto release; 301 290 list_for_each_entry(assoc, &ep->asocs, asocs) { 302 291 if (cb->args[4] < cb->args[1]) 303 292 goto next; ··· 306 317 NLM_F_MULTI, cb->nlh, 307 318 commp->net_admin) < 0) { 308 319 cb->args[3] = 1; 309 - err = 2; 320 + err = 1; 310 321 goto release; 311 322 } 312 323 cb->args[3] = 1; ··· 316 327 NETLINK_CB(cb->skb).portid, 317 328 cb->nlh->nlmsg_seq, 0, cb->nlh, 318 329 commp->net_admin) < 0) { 319 - err = 2; 330 + err = 1; 320 331 goto release; 321 332 } 322 333 next: ··· 328 339 cb->args[4] = 0; 329 340 release: 330 341 release_sock(sk); 342 + sock_put(sk); 331 343 return err; 344 + } 345 + 346 + static int sctp_get_sock(struct sctp_transport *tsp, void *p) 347 + { 348 + struct sctp_endpoint *ep = tsp->asoc->ep; 349 + struct sctp_comm_param *commp = p; 350 + struct sock *sk = ep->base.sk; 351 + struct netlink_callback *cb = commp->cb; 352 + const struct inet_diag_req_v2 *r = commp->r; 353 + struct sctp_association *assoc = 354 + list_entry(ep->asocs.next, struct sctp_association, asocs); 355 + 356 + /* find the ep only once through the transports by this condition */ 357 + if (tsp->asoc != assoc) 358 + goto out; 359 + 360 + if (r->sdiag_family != AF_UNSPEC && sk->sk_family != r->sdiag_family) 361 + goto out; 362 + 363 + sock_hold(sk); 364 + cb->args[5] = (long)sk; 365 + 366 + return 1; 367 + 332 368 out: 333 369 cb->args[2]++; 334 - return err; 370 + return 0; 335 371 } 336 372 337 373 static int sctp_ep_dump(struct sctp_endpoint *ep, void *p) ··· 494 480 * 2 : to record the transport pos of this time's traversal 495 481 * 3 : to mark if we have dumped the ep info of the current asoc 496 482 * 4 : to work as a temporary variable to traversal list 483 + * 5 : to save the sk we get from travelsing the tsp list. 497 484 */ 498 485 if (!(idiag_states & ~(TCPF_LISTEN | TCPF_CLOSE))) 499 486 goto done; 500 - sctp_for_each_transport(sctp_tsp_dump, net, cb->args[2], &commp); 487 + 488 + next: 489 + cb->args[5] = 0; 490 + sctp_for_each_transport(sctp_get_sock, net, cb->args[2], &commp); 491 + 492 + if (cb->args[5] && !sctp_sock_dump((struct sock *)cb->args[5], &commp)) 493 + goto next; 494 + 501 495 done: 502 496 cb->args[1] = cb->args[4]; 503 497 cb->args[4] = 0;
-15
net/sctp/sm_make_chunk.c
··· 706 706 return retval; 707 707 } 708 708 709 - static void sctp_set_prsctp_policy(struct sctp_chunk *chunk, 710 - const struct sctp_sndrcvinfo *sinfo) 711 - { 712 - if (!chunk->asoc->prsctp_enable) 713 - return; 714 - 715 - if (SCTP_PR_TTL_ENABLED(sinfo->sinfo_flags)) 716 - chunk->prsctp_param = 717 - jiffies + msecs_to_jiffies(sinfo->sinfo_timetolive); 718 - else if (SCTP_PR_RTX_ENABLED(sinfo->sinfo_flags) || 719 - SCTP_PR_PRIO_ENABLED(sinfo->sinfo_flags)) 720 - chunk->prsctp_param = sinfo->sinfo_timetolive; 721 - } 722 - 723 709 /* Make a DATA chunk for the given association from the provided 724 710 * parameters. However, do not populate the data payload. 725 711 */ ··· 739 753 740 754 retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp); 741 755 memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo)); 742 - sctp_set_prsctp_policy(retval, sinfo); 743 756 744 757 nodata: 745 758 return retval;
+7 -3
net/sctp/socket.c
··· 4473 4473 const union sctp_addr *paddr, void *p) 4474 4474 { 4475 4475 struct sctp_transport *transport; 4476 - int err = 0; 4476 + int err = -ENOENT; 4477 4477 4478 4478 rcu_read_lock(); 4479 4479 transport = sctp_addrs_lookup_transport(net, laddr, paddr); 4480 4480 if (!transport || !sctp_transport_hold(transport)) 4481 4481 goto out; 4482 - err = cb(transport, p); 4482 + 4483 + sctp_association_hold(transport->asoc); 4483 4484 sctp_transport_put(transport); 4484 4485 4485 - out: 4486 4486 rcu_read_unlock(); 4487 + err = cb(transport, p); 4488 + sctp_association_put(transport->asoc); 4489 + 4490 + out: 4487 4491 return err; 4488 4492 } 4489 4493 EXPORT_SYMBOL_GPL(sctp_transport_lookup_process);
+3 -3
net/vmw_vsock/af_vsock.c
··· 465 465 466 466 if (vsock_is_pending(sk)) { 467 467 vsock_remove_pending(listener, sk); 468 + 469 + listener->sk_ack_backlog--; 468 470 } else if (!vsk->rejected) { 469 471 /* We are not on the pending list and accept() did not reject 470 472 * us, so we must have been accepted by our user process. We ··· 476 474 cleanup = false; 477 475 goto out; 478 476 } 479 - 480 - listener->sk_ack_backlog--; 481 477 482 478 /* We need to remove ourself from the global connected sockets list so 483 479 * incoming packets can't find this socket, and to reduce the reference ··· 2010 2010 2011 2011 MODULE_AUTHOR("VMware, Inc."); 2012 2012 MODULE_DESCRIPTION("VMware Virtual Socket Family"); 2013 - MODULE_VERSION("1.0.1.0-k"); 2013 + MODULE_VERSION("1.0.2.0-k"); 2014 2014 MODULE_LICENSE("GPL v2");
+1
scripts/recordmcount.c
··· 363 363 strcmp(".sched.text", txtname) == 0 || 364 364 strcmp(".spinlock.text", txtname) == 0 || 365 365 strcmp(".irqentry.text", txtname) == 0 || 366 + strcmp(".softirqentry.text", txtname) == 0 || 366 367 strcmp(".kprobes.text", txtname) == 0 || 367 368 strcmp(".text.unlikely", txtname) == 0; 368 369 }
+1
scripts/recordmcount.pl
··· 134 134 ".sched.text" => 1, 135 135 ".spinlock.text" => 1, 136 136 ".irqentry.text" => 1, 137 + ".softirqentry.text" => 1, 137 138 ".kprobes.text" => 1, 138 139 ".text.unlikely" => 1, 139 140 );
+7 -4
security/keys/encrypted-keys/encrypted.c
··· 29 29 #include <linux/rcupdate.h> 30 30 #include <linux/scatterlist.h> 31 31 #include <linux/ctype.h> 32 + #include <crypto/aes.h> 32 33 #include <crypto/hash.h> 33 34 #include <crypto/sha.h> 34 35 #include <crypto/skcipher.h> ··· 479 478 struct crypto_skcipher *tfm; 480 479 struct skcipher_request *req; 481 480 unsigned int encrypted_datalen; 481 + u8 iv[AES_BLOCK_SIZE]; 482 482 unsigned int padlen; 483 483 char pad[16]; 484 484 int ret; ··· 502 500 sg_init_table(sg_out, 1); 503 501 sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen); 504 502 505 - skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, 506 - epayload->iv); 503 + memcpy(iv, epayload->iv, sizeof(iv)); 504 + skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv); 507 505 ret = crypto_skcipher_encrypt(req); 508 506 tfm = crypto_skcipher_reqtfm(req); 509 507 skcipher_request_free(req); ··· 583 581 struct crypto_skcipher *tfm; 584 582 struct skcipher_request *req; 585 583 unsigned int encrypted_datalen; 584 + u8 iv[AES_BLOCK_SIZE]; 586 585 char pad[16]; 587 586 int ret; 588 587 ··· 602 599 epayload->decrypted_datalen); 603 600 sg_set_buf(&sg_out[1], pad, sizeof pad); 604 601 605 - skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, 606 - epayload->iv); 602 + memcpy(iv, epayload->iv, sizeof(iv)); 603 + skcipher_request_set_crypt(req, sg_in, sg_out, encrypted_datalen, iv); 607 604 ret = crypto_skcipher_decrypt(req); 608 605 tfm = crypto_skcipher_reqtfm(req); 609 606 skcipher_request_free(req);
+2 -1
tools/testing/nvdimm/test/nfit.c
··· 603 603 return -ENOMEM; 604 604 sprintf(t->label[i], "label%d", i); 605 605 606 - t->flush[i] = test_alloc(t, sizeof(u64) * NUM_HINTS, 606 + t->flush[i] = test_alloc(t, max(PAGE_SIZE, 607 + sizeof(u64) * NUM_HINTS), 607 608 &t->flush_dma[i]); 608 609 if (!t->flush[i]) 609 610 return -ENOMEM;
+1 -1
tools/testing/radix-tree/Makefile
··· 1 1 2 - CFLAGS += -I. -g -Wall -D_LGPL_SOURCE 2 + CFLAGS += -I. -g -O2 -Wall -D_LGPL_SOURCE 3 3 LDFLAGS += -lpthread -lurcu 4 4 TARGETS = main 5 5 OFILES = main.o radix-tree.o linux.o test.o tag_check.o find_next_bit.o \
+12 -4
tools/testing/radix-tree/multiorder.c
··· 124 124 unsigned long i; 125 125 unsigned long min = index & ~((1UL << order) - 1); 126 126 unsigned long max = min + (1UL << order); 127 + void **slot; 128 + struct item *item2 = item_create(min); 127 129 RADIX_TREE(tree, GFP_KERNEL); 128 130 129 131 printf("Multiorder index %ld, order %d\n", index, order); ··· 141 139 item_check_absent(&tree, i); 142 140 for (i = max; i < 2*max; i++) 143 141 item_check_absent(&tree, i); 142 + for (i = min; i < max; i++) 143 + assert(radix_tree_insert(&tree, i, item2) == -EEXIST); 144 + 145 + slot = radix_tree_lookup_slot(&tree, index); 146 + free(*slot); 147 + radix_tree_replace_slot(slot, item2); 144 148 for (i = min; i < max; i++) { 145 - static void *entry = (void *) 146 - (0xA0 | RADIX_TREE_EXCEPTIONAL_ENTRY); 147 - assert(radix_tree_insert(&tree, i, entry) == -EEXIST); 149 + struct item *item = item_lookup(&tree, i); 150 + assert(item != 0); 151 + assert(item->index == min); 148 152 } 149 153 150 - assert(item_delete(&tree, index) != 0); 154 + assert(item_delete(&tree, min) != 0); 151 155 152 156 for (i = 0; i < 2*max; i++) 153 157 item_check_absent(&tree, i);