Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'sched/urgent' into sched/core, to pick up fixes and resolve conflicts

Conflicts:
kernel/sched/fair.c

Signed-off-by: Ingo Molnar <mingo@kernel.org>

+716 -395
+7 -3
Documentation/device-mapper/snapshot.txt
··· 41 41 the amount of free space and expand the <COW device> before it fills up. 42 42 43 43 <persistent?> is P (Persistent) or N (Not persistent - will not survive 44 - after reboot). 45 - The difference is that for transient snapshots less metadata must be 46 - saved on disk - they can be kept in memory by the kernel. 44 + after reboot). O (Overflow) can be added as a persistent store option 45 + to allow userspace to advertise its support for seeing "Overflow" in the 46 + snapshot status. So supported store types are "P", "PO" and "N". 47 + 48 + The difference between persistent and transient is with transient 49 + snapshots less metadata must be saved on disk - they can be kept in 50 + memory by the kernel. 47 51 48 52 49 53 * snapshot-merge <origin> <COW device> <persistent> <chunksize>
+1 -1
Documentation/devicetree/bindings/spi/sh-msiof.txt
··· 51 51 - renesas,tx-fifo-size : Overrides the default tx fifo size given in words 52 52 (default is 64) 53 53 - renesas,rx-fifo-size : Overrides the default rx fifo size given in words 54 - (default is 64, or 256 on R-Car Gen2) 54 + (default is 64) 55 55 56 56 Pinctrl properties might be needed, too. See 57 57 Documentation/devicetree/bindings/pinctrl/renesas,*.
+1
Documentation/devicetree/bindings/usb/renesas_usbhs.txt
··· 5 5 - "renesas,usbhs-r8a7790" 6 6 - "renesas,usbhs-r8a7791" 7 7 - "renesas,usbhs-r8a7794" 8 + - "renesas,usbhs-r8a7795" 8 9 - reg: Base address and length of the register for the USBHS 9 10 - interrupts: Interrupt specifier for the USBHS 10 11 - clocks: A list of phandle + clock specifier pairs
+2 -12
MAINTAINERS
··· 4003 4003 F: sound/usb/misc/ua101.c 4004 4004 4005 4005 EXTENSIBLE FIRMWARE INTERFACE (EFI) 4006 - M: Matt Fleming <matt.fleming@intel.com> 4006 + M: Matt Fleming <matt@codeblueprint.co.uk> 4007 4007 L: linux-efi@vger.kernel.org 4008 4008 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git 4009 4009 S: Maintained ··· 4018 4018 EFI VARIABLE FILESYSTEM 4019 4019 M: Matthew Garrett <matthew.garrett@nebula.com> 4020 4020 M: Jeremy Kerr <jk@ozlabs.org> 4021 - M: Matt Fleming <matt.fleming@intel.com> 4021 + M: Matt Fleming <matt@codeblueprint.co.uk> 4022 4022 T: git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git 4023 4023 L: linux-efi@vger.kernel.org 4024 4024 S: Maintained ··· 9914 9914 F: drivers/staging/lustre 9915 9915 9916 9916 STAGING - NVIDIA COMPLIANT EMBEDDED CONTROLLER INTERFACE (nvec) 9917 - M: Julian Andres Klode <jak@jak-linux.org> 9918 9917 M: Marc Dietrich <marvin24@gmx.de> 9919 9918 L: ac100@lists.launchpad.net (moderated for non-subscribers) 9920 9919 L: linux-tegra@vger.kernel.org ··· 11376 11377 W: http://oops.ghostprotocols.net:81/blog 11377 11378 S: Maintained 11378 11379 F: drivers/net/wireless/wl3501* 11379 - 11380 - WM97XX TOUCHSCREEN DRIVERS 11381 - M: Mark Brown <broonie@kernel.org> 11382 - M: Liam Girdwood <lrg@slimlogic.co.uk> 11383 - L: linux-input@vger.kernel.org 11384 - W: https://github.com/CirrusLogic/linux-drivers/wiki 11385 - S: Supported 11386 - F: drivers/input/touchscreen/*wm97* 11387 - F: include/linux/wm97xx.h 11388 11380 11389 11381 WOLFSON MICROELECTRONICS DRIVERS 11390 11382 L: patches@opensource.wolfsonmicro.com
+2 -2
Makefile
··· 1 1 VERSION = 4 2 2 PATCHLEVEL = 3 3 3 SUBLEVEL = 0 4 - EXTRAVERSION = -rc4 5 - NAME = Hurr durr I'ma sheep 4 + EXTRAVERSION = -rc5 5 + NAME = Blurry Fish Butt 6 6 7 7 # *DOCUMENTATION* 8 8 # To see a list of typical targets execute "make help"
+2
arch/alpha/include/asm/word-at-a-time.h
··· 52 52 #endif 53 53 } 54 54 55 + #define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1) 56 + 55 57 #endif /* _ASM_WORD_AT_A_TIME_H */
+1 -1
arch/arm/boot/dts/Makefile
··· 578 578 sun4i-a10-hackberry.dtb \ 579 579 sun4i-a10-hyundai-a7hd.dtb \ 580 580 sun4i-a10-inet97fv2.dtb \ 581 - sun4i-a10-itead-iteaduino-plus.dts \ 581 + sun4i-a10-itead-iteaduino-plus.dtb \ 582 582 sun4i-a10-jesurun-q5.dtb \ 583 583 sun4i-a10-marsboard.dtb \ 584 584 sun4i-a10-mini-xplus.dtb \
+1
arch/arm/boot/dts/exynos4412.dtsi
··· 98 98 opp-hz = /bits/ 64 <800000000>; 99 99 opp-microvolt = <1000000>; 100 100 clock-latency-ns = <200000>; 101 + opp-suspend; 101 102 }; 102 103 opp07 { 103 104 opp-hz = /bits/ 64 <900000000>;
+1
arch/arm/boot/dts/exynos5250-smdk5250.dts
··· 197 197 regulator-name = "P1.8V_LDO_OUT10"; 198 198 regulator-min-microvolt = <1800000>; 199 199 regulator-max-microvolt = <1800000>; 200 + regulator-always-on; 200 201 }; 201 202 202 203 ldo11_reg: LDO11 {
+1 -1
arch/arm/boot/dts/exynos5420.dtsi
··· 1117 1117 interrupt-parent = <&combiner>; 1118 1118 interrupts = <3 0>; 1119 1119 clock-names = "sysmmu", "master"; 1120 - clocks = <&clock CLK_SMMU_FIMD1M0>, <&clock CLK_FIMD1>; 1120 + clocks = <&clock CLK_SMMU_FIMD1M1>, <&clock CLK_FIMD1>; 1121 1121 power-domains = <&disp_pd>; 1122 1122 #iommu-cells = <0>; 1123 1123 };
-1
arch/arm/boot/dts/exynos5422-odroidxu3-common.dtsi
··· 472 472 */ 473 473 pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>; 474 474 pinctrl-names = "default"; 475 - samsung,pwm-outputs = <0>; 476 475 status = "okay"; 477 476 }; 478 477
+1 -1
arch/arm/boot/dts/imx53-qsrb.dts
··· 36 36 pinctrl-0 = <&pinctrl_pmic>; 37 37 reg = <0x08>; 38 38 interrupt-parent = <&gpio5>; 39 - interrupts = <23 0x8>; 39 + interrupts = <23 IRQ_TYPE_LEVEL_HIGH>; 40 40 regulators { 41 41 sw1_reg: sw1a { 42 42 regulator-name = "SW1";
+1
arch/arm/boot/dts/imx53.dtsi
··· 15 15 #include <dt-bindings/clock/imx5-clock.h> 16 16 #include <dt-bindings/gpio/gpio.h> 17 17 #include <dt-bindings/input/input.h> 18 + #include <dt-bindings/interrupt-controller/irq.h> 18 19 19 20 / { 20 21 aliases {
-2
arch/arm/boot/dts/imx6qdl-rex.dtsi
··· 35 35 compatible = "regulator-fixed"; 36 36 reg = <1>; 37 37 pinctrl-names = "default"; 38 - pinctrl-0 = <&pinctrl_usbh1>; 39 38 regulator-name = "usbh1_vbus"; 40 39 regulator-min-microvolt = <5000000>; 41 40 regulator-max-microvolt = <5000000>; ··· 46 47 compatible = "regulator-fixed"; 47 48 reg = <2>; 48 49 pinctrl-names = "default"; 49 - pinctrl-0 = <&pinctrl_usbotg>; 50 50 regulator-name = "usb_otg_vbus"; 51 51 regulator-min-microvolt = <5000000>; 52 52 regulator-max-microvolt = <5000000>;
+1
arch/arm/boot/dts/r8a7790.dtsi
··· 1627 1627 "mix.0", "mix.1", 1628 1628 "dvc.0", "dvc.1", 1629 1629 "clk_a", "clk_b", "clk_c", "clk_i"; 1630 + power-domains = <&cpg_clocks>; 1630 1631 1631 1632 status = "disabled"; 1632 1633
+1
arch/arm/boot/dts/r8a7791.dtsi
··· 1677 1677 "mix.0", "mix.1", 1678 1678 "dvc.0", "dvc.1", 1679 1679 "clk_a", "clk_b", "clk_c", "clk_i"; 1680 + power-domains = <&cpg_clocks>; 1680 1681 1681 1682 status = "disabled"; 1682 1683
+1 -1
arch/arm/boot/dts/sun7i-a20.dtsi
··· 107 107 720000 1200000 108 108 528000 1100000 109 109 312000 1000000 110 - 144000 900000 110 + 144000 1000000 111 111 >; 112 112 #cooling-cells = <2>; 113 113 cooling-min-level = <0>;
+26 -1
arch/arm/mach-exynos/mcpm-exynos.c
··· 20 20 #include <asm/cputype.h> 21 21 #include <asm/cp15.h> 22 22 #include <asm/mcpm.h> 23 + #include <asm/smp_plat.h> 23 24 24 25 #include "regs-pmu.h" 25 26 #include "common.h" ··· 71 70 cluster >= EXYNOS5420_NR_CLUSTERS) 72 71 return -EINVAL; 73 72 74 - exynos_cpu_power_up(cpunr); 73 + if (!exynos_cpu_power_state(cpunr)) { 74 + exynos_cpu_power_up(cpunr); 75 + 76 + /* 77 + * This assumes the cluster number of the big cores(Cortex A15) 78 + * is 0 and the Little cores(Cortex A7) is 1. 79 + * When the system was booted from the Little core, 80 + * they should be reset during power up cpu. 81 + */ 82 + if (cluster && 83 + cluster == MPIDR_AFFINITY_LEVEL(cpu_logical_map(0), 1)) { 84 + /* 85 + * Before we reset the Little cores, we should wait 86 + * the SPARE2 register is set to 1 because the init 87 + * codes of the iROM will set the register after 88 + * initialization. 89 + */ 90 + while (!pmu_raw_readl(S5P_PMU_SPARE2)) 91 + udelay(10); 92 + 93 + pmu_raw_writel(EXYNOS5420_KFC_CORE_RESET(cpu), 94 + EXYNOS_SWRESET); 95 + } 96 + } 97 + 75 98 return 0; 76 99 } 77 100
+6
arch/arm/mach-exynos/regs-pmu.h
··· 513 513 #define SPREAD_ENABLE 0xF 514 514 #define SPREAD_USE_STANDWFI 0xF 515 515 516 + #define EXYNOS5420_KFC_CORE_RESET0 BIT(8) 517 + #define EXYNOS5420_KFC_ETM_RESET0 BIT(20) 518 + 519 + #define EXYNOS5420_KFC_CORE_RESET(_nr) \ 520 + ((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr)) 521 + 516 522 #define EXYNOS5420_BB_CON1 0x0784 517 523 #define EXYNOS5420_BB_SEL_EN BIT(31) 518 524 #define EXYNOS5420_BB_PMOS_EN BIT(7)
+12 -11
arch/arm64/kernel/debug-monitors.c
··· 201 201 } 202 202 203 203 /* 204 - * Call registered single step handers 204 + * Call registered single step handlers 205 205 * There is no Syndrome info to check for determining the handler. 206 206 * So we call all the registered handlers, until the right handler is 207 207 * found which returns zero. ··· 271 271 * Use reader/writer locks instead of plain spinlock. 272 272 */ 273 273 static LIST_HEAD(break_hook); 274 - static DEFINE_RWLOCK(break_hook_lock); 274 + static DEFINE_SPINLOCK(break_hook_lock); 275 275 276 276 void register_break_hook(struct break_hook *hook) 277 277 { 278 - write_lock(&break_hook_lock); 279 - list_add(&hook->node, &break_hook); 280 - write_unlock(&break_hook_lock); 278 + spin_lock(&break_hook_lock); 279 + list_add_rcu(&hook->node, &break_hook); 280 + spin_unlock(&break_hook_lock); 281 281 } 282 282 283 283 void unregister_break_hook(struct break_hook *hook) 284 284 { 285 - write_lock(&break_hook_lock); 286 - list_del(&hook->node); 287 - write_unlock(&break_hook_lock); 285 + spin_lock(&break_hook_lock); 286 + list_del_rcu(&hook->node); 287 + spin_unlock(&break_hook_lock); 288 + synchronize_rcu(); 288 289 } 289 290 290 291 static int call_break_hook(struct pt_regs *regs, unsigned int esr) ··· 293 292 struct break_hook *hook; 294 293 int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; 295 294 296 - read_lock(&break_hook_lock); 297 - list_for_each_entry(hook, &break_hook, node) 295 + rcu_read_lock(); 296 + list_for_each_entry_rcu(hook, &break_hook, node) 298 297 if ((esr & hook->esr_mask) == hook->esr_val) 299 298 fn = hook->fn; 300 - read_unlock(&break_hook_lock); 299 + rcu_read_unlock(); 301 300 302 301 return fn ? fn(regs, esr) : DBG_HOOK_ERROR; 303 302 }
+3 -3
arch/arm64/kernel/insn.c
··· 85 85 aarch64_insn_is_bcond(insn)); 86 86 } 87 87 88 - static DEFINE_SPINLOCK(patch_lock); 88 + static DEFINE_RAW_SPINLOCK(patch_lock); 89 89 90 90 static void __kprobes *patch_map(void *addr, int fixmap) 91 91 { ··· 131 131 unsigned long flags = 0; 132 132 int ret; 133 133 134 - spin_lock_irqsave(&patch_lock, flags); 134 + raw_spin_lock_irqsave(&patch_lock, flags); 135 135 waddr = patch_map(addr, FIX_TEXT_POKE0); 136 136 137 137 ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE); 138 138 139 139 patch_unmap(FIX_TEXT_POKE0); 140 - spin_unlock_irqrestore(&patch_lock, flags); 140 + raw_spin_unlock_irqrestore(&patch_lock, flags); 141 141 142 142 return ret; 143 143 }
+2
arch/arm64/kernel/setup.c
··· 364 364 to_free = ram_end - orig_start; 365 365 366 366 size = orig_end - orig_start; 367 + if (!size) 368 + return; 367 369 368 370 /* initrd needs to be relocated completely inside linear mapping */ 369 371 new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
+1
arch/arm64/mm/fault.c
··· 287 287 * starvation. 288 288 */ 289 289 mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; 290 + mm_flags |= FAULT_FLAG_TRIED; 290 291 goto retry; 291 292 } 292 293 }
+1
arch/h8300/include/asm/Kbuild
··· 73 73 generic-y += ucontext.h 74 74 generic-y += unaligned.h 75 75 generic-y += vga.h 76 + generic-y += word-at-a-time.h 76 77 generic-y += xor.h
+1
arch/mips/include/asm/io.h
··· 256 256 */ 257 257 #define ioremap_nocache(offset, size) \ 258 258 __ioremap_mode((offset), (size), _CACHE_UNCACHED) 259 + #define ioremap_uc ioremap_nocache 259 260 260 261 /* 261 262 * ioremap_cachable - map bus memory into CPU space
+7 -12
arch/mips/include/uapi/asm/swab.h
··· 13 13 14 14 #define __SWAB_64_THRU_32__ 15 15 16 - #if (defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \ 17 - defined(_MIPS_ARCH_LOONGSON3A) 16 + #if !defined(__mips16) && \ 17 + ((defined(__mips_isa_rev) && (__mips_isa_rev >= 2)) || \ 18 + defined(_MIPS_ARCH_LOONGSON3A)) 18 19 19 - static inline __attribute__((nomips16)) __attribute_const__ 20 - __u16 __arch_swab16(__u16 x) 20 + static inline __attribute_const__ __u16 __arch_swab16(__u16 x) 21 21 { 22 22 __asm__( 23 23 " .set push \n" 24 24 " .set arch=mips32r2 \n" 25 - " .set nomips16 \n" 26 25 " wsbh %0, %1 \n" 27 26 " .set pop \n" 28 27 : "=r" (x) ··· 31 32 } 32 33 #define __arch_swab16 __arch_swab16 33 34 34 - static inline __attribute__((nomips16)) __attribute_const__ 35 - __u32 __arch_swab32(__u32 x) 35 + static inline __attribute_const__ __u32 __arch_swab32(__u32 x) 36 36 { 37 37 __asm__( 38 38 " .set push \n" 39 39 " .set arch=mips32r2 \n" 40 - " .set nomips16 \n" 41 40 " wsbh %0, %1 \n" 42 41 " rotr %0, %0, 16 \n" 43 42 " .set pop \n" ··· 51 54 * 64-bit kernel on r2 CPUs. 52 55 */ 53 56 #ifdef __mips64 54 - static inline __attribute__((nomips16)) __attribute_const__ 55 - __u64 __arch_swab64(__u64 x) 57 + static inline __attribute_const__ __u64 __arch_swab64(__u64 x) 56 58 { 57 59 __asm__( 58 60 " .set push \n" 59 61 " .set arch=mips64r2 \n" 60 - " .set nomips16 \n" 61 62 " dsbh %0, %1 \n" 62 63 " dshd %0, %0 \n" 63 64 " .set pop \n" ··· 66 71 } 67 72 #define __arch_swab64 __arch_swab64 68 73 #endif /* __mips64 */ 69 - #endif /* MIPS R2 or newer or Loongson 3A */ 74 + #endif /* (not __mips16) and (MIPS R2 or newer or Loongson 3A) */ 70 75 #endif /* _ASM_SWAB_H */
-1
arch/powerpc/include/asm/Kbuild
··· 7 7 generic-y += preempt.h 8 8 generic-y += rwsem.h 9 9 generic-y += vtime.h 10 - generic-y += word-at-a-time.h
+5
arch/powerpc/include/asm/word-at-a-time.h
··· 40 40 return (val + c->high_bits) & ~rhs; 41 41 } 42 42 43 + static inline unsigned long zero_bytemask(unsigned long mask) 44 + { 45 + return ~1ul << __fls(mask); 46 + } 47 + 43 48 #else 44 49 45 50 #ifdef CONFIG_64BIT
+1 -1
arch/s390/boot/compressed/Makefile
··· 10 10 11 11 KBUILD_CFLAGS := -m64 -D__KERNEL__ $(LINUX_INCLUDE) -O2 12 12 KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING 13 - KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks 13 + KBUILD_CFLAGS += $(cflags-y) -fno-delete-null-pointer-checks -msoft-float 14 14 KBUILD_CFLAGS += $(call cc-option,-mpacked-stack) 15 15 KBUILD_CFLAGS += $(call cc-option,-ffreestanding) 16 16
+1 -1
arch/s390/configs/default_defconfig
··· 381 381 CONFIG_SCSI_DEBUG=m 382 382 CONFIG_ZFCP=y 383 383 CONFIG_SCSI_VIRTIO=m 384 - CONFIG_SCSI_DH=m 384 + CONFIG_SCSI_DH=y 385 385 CONFIG_SCSI_DH_RDAC=m 386 386 CONFIG_SCSI_DH_HP_SW=m 387 387 CONFIG_SCSI_DH_EMC=m
+1 -1
arch/s390/configs/gcov_defconfig
··· 377 377 CONFIG_SCSI_DEBUG=m 378 378 CONFIG_ZFCP=y 379 379 CONFIG_SCSI_VIRTIO=m 380 - CONFIG_SCSI_DH=m 380 + CONFIG_SCSI_DH=y 381 381 CONFIG_SCSI_DH_RDAC=m 382 382 CONFIG_SCSI_DH_HP_SW=m 383 383 CONFIG_SCSI_DH_EMC=m
+1 -1
arch/s390/configs/performance_defconfig
··· 377 377 CONFIG_SCSI_DEBUG=m 378 378 CONFIG_ZFCP=y 379 379 CONFIG_SCSI_VIRTIO=m 380 - CONFIG_SCSI_DH=m 380 + CONFIG_SCSI_DH=y 381 381 CONFIG_SCSI_DH_RDAC=m 382 382 CONFIG_SCSI_DH_HP_SW=m 383 383 CONFIG_SCSI_DH_EMC=m
+1 -1
arch/s390/include/asm/numa.h
··· 19 19 int __node_distance(int a, int b); 20 20 void numa_update_cpu_topology(void); 21 21 22 - extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; 22 + extern cpumask_t node_to_cpumask_map[MAX_NUMNODES]; 23 23 extern int numa_debug_enabled; 24 24 25 25 #else
+1 -1
arch/s390/include/asm/topology.h
··· 68 68 #define cpumask_of_node cpumask_of_node 69 69 static inline const struct cpumask *cpumask_of_node(int node) 70 70 { 71 - return node_to_cpumask_map[node]; 71 + return &node_to_cpumask_map[node]; 72 72 } 73 73 74 74 /*
+1
arch/s390/kernel/asm-offsets.c
··· 176 176 DEFINE(__LC_PASTE, offsetof(struct _lowcore, paste)); 177 177 DEFINE(__LC_FP_CREG_SAVE_AREA, offsetof(struct _lowcore, fpt_creg_save_area)); 178 178 DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr)); 179 + DEFINE(__LC_PERCPU_OFFSET, offsetof(struct _lowcore, percpu_offset)); 179 180 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 180 181 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 181 182 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
+29 -1
arch/s390/kernel/entry.S
··· 733 733 stg %r3,__SF_EMPTY(%r15) 734 734 larl %r1,.Lpsw_idle_lpsw+4 735 735 stg %r1,__SF_EMPTY+8(%r15) 736 + #ifdef CONFIG_SMP 737 + larl %r1,smp_cpu_mtid 738 + llgf %r1,0(%r1) 739 + ltgr %r1,%r1 740 + jz .Lpsw_idle_stcctm 741 + .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15) 742 + .Lpsw_idle_stcctm: 743 + #endif 736 744 STCK __CLOCK_IDLE_ENTER(%r2) 737 745 stpt __TIMER_IDLE_ENTER(%r2) 738 746 .Lpsw_idle_lpsw: ··· 1167 1159 jhe 1f 1168 1160 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) 1169 1161 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) 1170 - 1: # account system time going idle 1162 + 1: # calculate idle cycles 1163 + #ifdef CONFIG_SMP 1164 + clg %r9,BASED(.Lcleanup_idle_insn) 1165 + jl 3f 1166 + larl %r1,smp_cpu_mtid 1167 + llgf %r1,0(%r1) 1168 + ltgr %r1,%r1 1169 + jz 3f 1170 + .insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+80(%r15) 1171 + larl %r3,mt_cycles 1172 + ag %r3,__LC_PERCPU_OFFSET 1173 + la %r4,__SF_EMPTY+16(%r15) 1174 + 2: lg %r0,0(%r3) 1175 + slg %r0,0(%r4) 1176 + alg %r0,64(%r4) 1177 + stg %r0,0(%r3) 1178 + la %r3,8(%r3) 1179 + la %r4,8(%r4) 1180 + brct %r1,2b 1181 + #endif 1182 + 3: # account system time going idle 1171 1183 lg %r9,__LC_STEAL_TIMER 1172 1184 alg %r9,__CLOCK_IDLE_ENTER(%r2) 1173 1185 slg %r9,__LC_LAST_UPDATE_CLOCK
+37 -29
arch/s390/kernel/vtime.c
··· 25 25 static atomic64_t virt_timer_current; 26 26 static atomic64_t virt_timer_elapsed; 27 27 28 - static DEFINE_PER_CPU(u64, mt_cycles[32]); 28 + DEFINE_PER_CPU(u64, mt_cycles[8]); 29 29 static DEFINE_PER_CPU(u64, mt_scaling_mult) = { 1 }; 30 30 static DEFINE_PER_CPU(u64, mt_scaling_div) = { 1 }; 31 31 static DEFINE_PER_CPU(u64, mt_scaling_jiffies); ··· 60 60 return elapsed >= atomic64_read(&virt_timer_current); 61 61 } 62 62 63 + static void update_mt_scaling(void) 64 + { 65 + u64 cycles_new[8], *cycles_old; 66 + u64 delta, fac, mult, div; 67 + int i; 68 + 69 + stcctm5(smp_cpu_mtid + 1, cycles_new); 70 + cycles_old = this_cpu_ptr(mt_cycles); 71 + fac = 1; 72 + mult = div = 0; 73 + for (i = 0; i <= smp_cpu_mtid; i++) { 74 + delta = cycles_new[i] - cycles_old[i]; 75 + div += delta; 76 + mult *= i + 1; 77 + mult += delta * fac; 78 + fac *= i + 1; 79 + } 80 + div *= fac; 81 + if (div > 0) { 82 + /* Update scaling factor */ 83 + __this_cpu_write(mt_scaling_mult, mult); 84 + __this_cpu_write(mt_scaling_div, div); 85 + memcpy(cycles_old, cycles_new, 86 + sizeof(u64) * (smp_cpu_mtid + 1)); 87 + } 88 + __this_cpu_write(mt_scaling_jiffies, jiffies_64); 89 + } 90 + 63 91 /* 64 92 * Update process times based on virtual cpu times stored by entry.S 65 93 * to the lowcore fields user_timer, system_timer & steal_clock. 
··· 97 69 struct thread_info *ti = task_thread_info(tsk); 98 70 u64 timer, clock, user, system, steal; 99 71 u64 user_scaled, system_scaled; 100 - int i; 101 72 102 73 timer = S390_lowcore.last_update_timer; 103 74 clock = S390_lowcore.last_update_clock; ··· 112 85 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 113 86 S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock; 114 87 115 - /* Do MT utilization calculation */ 88 + /* Update MT utilization calculation */ 116 89 if (smp_cpu_mtid && 117 - time_after64(jiffies_64, __this_cpu_read(mt_scaling_jiffies))) { 118 - u64 cycles_new[32], *cycles_old; 119 - u64 delta, fac, mult, div; 120 - 121 - cycles_old = this_cpu_ptr(mt_cycles); 122 - if (stcctm5(smp_cpu_mtid + 1, cycles_new) < 2) { 123 - fac = 1; 124 - mult = div = 0; 125 - for (i = 0; i <= smp_cpu_mtid; i++) { 126 - delta = cycles_new[i] - cycles_old[i]; 127 - div += delta; 128 - mult *= i + 1; 129 - mult += delta * fac; 130 - fac *= i + 1; 131 - } 132 - div *= fac; 133 - if (div > 0) { 134 - /* Update scaling factor */ 135 - __this_cpu_write(mt_scaling_mult, mult); 136 - __this_cpu_write(mt_scaling_div, div); 137 - memcpy(cycles_old, cycles_new, 138 - sizeof(u64) * (smp_cpu_mtid + 1)); 139 - } 140 - } 141 - __this_cpu_write(mt_scaling_jiffies, jiffies_64); 142 - } 90 + time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) 91 + update_mt_scaling(); 143 92 144 93 user = S390_lowcore.user_timer - ti->user_timer; 145 94 S390_lowcore.steal_timer -= user; ··· 183 180 timer = S390_lowcore.last_update_timer; 184 181 S390_lowcore.last_update_timer = get_vtimer(); 185 182 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 183 + 184 + /* Update MT utilization calculation */ 185 + if (smp_cpu_mtid && 186 + time_after64(jiffies_64, this_cpu_read(mt_scaling_jiffies))) 187 + update_mt_scaling(); 186 188 187 189 system = S390_lowcore.system_timer - ti->system_timer; 188 190 S390_lowcore.steal_timer -= system;
+2 -2
arch/s390/numa/mode_emu.c
··· 368 368 cpumask_copy(&top->thread_mask, &core->mask); 369 369 cpumask_copy(&top->core_mask, &core_mc(core)->mask); 370 370 cpumask_copy(&top->book_mask, &core_book(core)->mask); 371 - cpumask_set_cpu(cpu, node_to_cpumask_map[core_node(core)->id]); 371 + cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]); 372 372 top->node_id = core_node(core)->id; 373 373 } 374 374 } ··· 383 383 384 384 /* Clear all node masks */ 385 385 for (i = 0; i < MAX_NUMNODES; i++) 386 - cpumask_clear(node_to_cpumask_map[i]); 386 + cpumask_clear(&node_to_cpumask_map[i]); 387 387 388 388 /* Rebuild all masks */ 389 389 toptree_for_each(core, numa, CORE)
+2 -2
arch/s390/numa/numa.c
··· 23 23 pg_data_t *node_data[MAX_NUMNODES]; 24 24 EXPORT_SYMBOL(node_data); 25 25 26 - cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; 26 + cpumask_t node_to_cpumask_map[MAX_NUMNODES]; 27 27 EXPORT_SYMBOL(node_to_cpumask_map); 28 28 29 29 const struct numa_mode numa_mode_plain = { ··· 144 144 static int __init numa_init_early(void) 145 145 { 146 146 /* Attach all possible CPUs to node 0 for now. */ 147 - cpumask_copy(node_to_cpumask_map[0], cpu_possible_mask); 147 + cpumask_copy(&node_to_cpumask_map[0], cpu_possible_mask); 148 148 return 0; 149 149 } 150 150 early_initcall(numa_init_early);
-1
arch/tile/include/asm/Kbuild
··· 40 40 generic-y += termios.h 41 41 generic-y += trace_clock.h 42 42 generic-y += types.h 43 - generic-y += word-at-a-time.h 44 43 generic-y += xor.h
+7 -1
arch/tile/include/asm/word-at-a-time.h
··· 6 6 struct word_at_a_time { /* unused */ }; 7 7 #define WORD_AT_A_TIME_CONSTANTS {} 8 8 9 - /* Generate 0x01 byte values for non-zero bytes using a SIMD instruction. */ 9 + /* Generate 0x01 byte values for zero bytes using a SIMD instruction. */ 10 10 static inline unsigned long has_zero(unsigned long val, unsigned long *data, 11 11 const struct word_at_a_time *c) 12 12 { ··· 32 32 return __builtin_ctzl(mask) >> 3; 33 33 #endif 34 34 } 35 + 36 + #ifdef __BIG_ENDIAN 37 + #define zero_bytemask(mask) (~1ul << (63 - __builtin_clzl(mask))) 38 + #else 39 + #define zero_bytemask(mask) ((2ul << __builtin_ctzl(mask)) - 1) 40 + #endif 35 41 36 42 #endif /* _ASM_WORD_AT_A_TIME_H */
+1
arch/x86/Kconfig
··· 1308 1308 config X86_PAE 1309 1309 bool "PAE (Physical Address Extension) Support" 1310 1310 depends on X86_32 && !HIGHMEM4G 1311 + select SWIOTLB 1311 1312 ---help--- 1312 1313 PAE is required for NX support, and furthermore enables 1313 1314 larger swapspace support for non-overcommit purposes. It
+2 -2
arch/x86/include/asm/xen/hypercall.h
··· 336 336 return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32); 337 337 } 338 338 339 - static inline int 339 + static inline long 340 340 HYPERVISOR_memory_op(unsigned int cmd, void *arg) 341 341 { 342 - return _hypercall2(int, memory_op, cmd, arg); 342 + return _hypercall2(long, memory_op, cmd, arg); 343 343 } 344 344 345 345 static inline int
+24
arch/x86/xen/enlighten.c
··· 33 33 #include <linux/memblock.h> 34 34 #include <linux/edd.h> 35 35 36 + #ifdef CONFIG_KEXEC_CORE 37 + #include <linux/kexec.h> 38 + #endif 39 + 36 40 #include <xen/xen.h> 37 41 #include <xen/events.h> 38 42 #include <xen/interface/xen.h> ··· 1081 1077 /* Fast syscall setup is all done in hypercalls, so 1082 1078 these are all ignored. Stub them out here to stop 1083 1079 Xen console noise. */ 1080 + break; 1084 1081 1085 1082 default: 1086 1083 if (!pmu_msr_write(msr, low, high, &ret)) ··· 1812 1807 .notifier_call = xen_hvm_cpu_notify, 1813 1808 }; 1814 1809 1810 + #ifdef CONFIG_KEXEC_CORE 1811 + static void xen_hvm_shutdown(void) 1812 + { 1813 + native_machine_shutdown(); 1814 + if (kexec_in_progress) 1815 + xen_reboot(SHUTDOWN_soft_reset); 1816 + } 1817 + 1818 + static void xen_hvm_crash_shutdown(struct pt_regs *regs) 1819 + { 1820 + native_machine_crash_shutdown(regs); 1821 + xen_reboot(SHUTDOWN_soft_reset); 1822 + } 1823 + #endif 1824 + 1815 1825 static void __init xen_hvm_guest_init(void) 1816 1826 { 1817 1827 if (xen_pv_domain()) ··· 1846 1826 x86_init.irqs.intr_init = xen_init_IRQ; 1847 1827 xen_hvm_init_time_ops(); 1848 1828 xen_hvm_init_mmu_ops(); 1829 + #ifdef CONFIG_KEXEC_CORE 1830 + machine_ops.shutdown = xen_hvm_shutdown; 1831 + machine_ops.crash_shutdown = xen_hvm_crash_shutdown; 1832 + #endif 1849 1833 } 1850 1834 #endif 1851 1835
+18 -1
arch/x86/xen/p2m.c
··· 112 112 static pte_t *p2m_missing_pte; 113 113 static pte_t *p2m_identity_pte; 114 114 115 + /* 116 + * Hint at last populated PFN. 117 + * 118 + * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack 119 + * can avoid scanning the whole P2M (which may be sized to account for 120 + * hotplugged memory). 121 + */ 122 + static unsigned long xen_p2m_last_pfn; 123 + 115 124 static inline unsigned p2m_top_index(unsigned long pfn) 116 125 { 117 126 BUG_ON(pfn >= MAX_P2M_PFN); ··· 279 270 else 280 271 HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = 281 272 virt_to_mfn(p2m_top_mfn); 282 - HYPERVISOR_shared_info->arch.max_pfn = xen_max_p2m_pfn; 273 + HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; 283 274 HYPERVISOR_shared_info->arch.p2m_generation = 0; 284 275 HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr; 285 276 HYPERVISOR_shared_info->arch.p2m_cr3 = ··· 414 405 { 415 406 static struct vm_struct vm; 416 407 unsigned long p2m_limit; 408 + 409 + xen_p2m_last_pfn = xen_max_p2m_pfn; 417 410 418 411 p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; 419 412 vm.flags = VM_ALLOC; ··· 617 606 618 607 if (p2m) 619 608 free_p2m_page(p2m); 609 + } 610 + 611 + /* Expanded the p2m? */ 612 + if (pfn > xen_p2m_last_pfn) { 613 + xen_p2m_last_pfn = pfn; 614 + HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; 620 615 } 621 616 622 617 return true;
+2 -2
arch/x86/xen/setup.c
··· 548 548 { 549 549 unsigned long max_pages, limit; 550 550 domid_t domid = DOMID_SELF; 551 - int ret; 551 + long ret; 552 552 553 553 limit = xen_get_pages_limit(); 554 554 max_pages = limit; ··· 798 798 xen_ignore_unusable(); 799 799 800 800 /* Make sure the Xen-supplied memory map is well-ordered. */ 801 - sanitize_e820_map(xen_e820_map, xen_e820_map_entries, 801 + sanitize_e820_map(xen_e820_map, ARRAY_SIZE(xen_e820_map), 802 802 &xen_e820_map_entries); 803 803 804 804 max_pages = xen_get_max_pages();
+2 -3
drivers/base/regmap/regmap-debugfs.c
··· 32 32 /* Calculate the length of a fixed format */ 33 33 static size_t regmap_calc_reg_len(int max_val, char *buf, size_t buf_size) 34 34 { 35 - snprintf(buf, buf_size, "%x", max_val); 36 - return strlen(buf); 35 + return snprintf(NULL, 0, "%x", max_val); 37 36 } 38 37 39 38 static ssize_t regmap_name_read_file(struct file *file, ··· 431 432 /* If we're in the region the user is trying to read */ 432 433 if (p >= *ppos) { 433 434 /* ...but not beyond it */ 434 - if (buf_pos >= count - 1 - tot_len) 435 + if (buf_pos + tot_len + 1 >= count) 435 436 break; 436 437 437 438 /* Format the register */
-1
drivers/bus/Kconfig
··· 36 36 37 37 config ARM_CCI500_PMU 38 38 bool "ARM CCI500 PMU support" 39 - default y 40 39 depends on (ARM && CPU_V7) || ARM64 41 40 depends on PERF_EVENTS 42 41 select ARM_CCI_PMU
+5 -5
drivers/clk/samsung/clk-cpu.c
··· 164 164 * the values for DIV_COPY and DIV_HPM dividers need not be set. 165 165 */ 166 166 div0 = cfg_data->div0; 167 - if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { 167 + if (cpuclk->flags & CLK_CPU_HAS_DIV1) { 168 168 div1 = cfg_data->div1; 169 169 if (readl(base + E4210_SRC_CPU) & E4210_MUX_HPM_MASK) 170 170 div1 = readl(base + E4210_DIV_CPU1) & ··· 185 185 alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1; 186 186 WARN_ON(alt_div >= MAX_DIV); 187 187 188 - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { 188 + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { 189 189 /* 190 190 * In Exynos4210, ATB clock parent is also mout_core. So 191 191 * ATB clock also needs to be mantained at safe speed. ··· 206 206 writel(div0, base + E4210_DIV_CPU0); 207 207 wait_until_divider_stable(base + E4210_DIV_STAT_CPU0, DIV_MASK_ALL); 208 208 209 - if (test_bit(CLK_CPU_HAS_DIV1, &cpuclk->flags)) { 209 + if (cpuclk->flags & CLK_CPU_HAS_DIV1) { 210 210 writel(div1, base + E4210_DIV_CPU1); 211 211 wait_until_divider_stable(base + E4210_DIV_STAT_CPU1, 212 212 DIV_MASK_ALL); ··· 225 225 unsigned long mux_reg; 226 226 227 227 /* find out the divider values to use for clock data */ 228 - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { 228 + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { 229 229 while ((cfg_data->prate * 1000) != ndata->new_rate) { 230 230 if (cfg_data->prate == 0) 231 231 return -EINVAL; ··· 240 240 writel(mux_reg & ~(1 << 16), base + E4210_SRC_CPU); 241 241 wait_until_mux_stable(base + E4210_STAT_CPU, 16, 1); 242 242 243 - if (test_bit(CLK_CPU_NEEDS_DEBUG_ALT_DIV, &cpuclk->flags)) { 243 + if (cpuclk->flags & CLK_CPU_NEEDS_DEBUG_ALT_DIV) { 244 244 div |= (cfg_data->div0 & E4210_DIV0_ATB_MASK); 245 245 div_mask |= E4210_DIV0_ATB_MASK; 246 246 }
+1 -1
drivers/clk/ti/clk-3xxx.c
··· 374 374 DT_CLK(NULL, "gpio2_ick", "gpio2_ick"), 375 375 DT_CLK(NULL, "wdt3_ick", "wdt3_ick"), 376 376 DT_CLK(NULL, "uart3_ick", "uart3_ick"), 377 - DT_CLK(NULL, "uart4_ick", "uart4_ick"), 378 377 DT_CLK(NULL, "gpt9_ick", "gpt9_ick"), 379 378 DT_CLK(NULL, "gpt8_ick", "gpt8_ick"), 380 379 DT_CLK(NULL, "gpt7_ick", "gpt7_ick"), ··· 518 519 static struct ti_dt_clk omap36xx_clks[] = { 519 520 DT_CLK(NULL, "omap_192m_alwon_fck", "omap_192m_alwon_fck"), 520 521 DT_CLK(NULL, "uart4_fck", "uart4_fck"), 522 + DT_CLK(NULL, "uart4_ick", "uart4_ick"), 521 523 { .node_name = NULL }, 522 524 }; 523 525
+1 -17
drivers/clk/ti/clk-7xx.c
··· 18 18 19 19 #include "clock.h" 20 20 21 - #define DRA7_DPLL_ABE_DEFFREQ 180633600 22 21 #define DRA7_DPLL_GMAC_DEFFREQ 1000000000 23 22 #define DRA7_DPLL_USB_DEFFREQ 960000000 24 23 ··· 312 313 int __init dra7xx_dt_clk_init(void) 313 314 { 314 315 int rc; 315 - struct clk *abe_dpll_mux, *sys_clkin2, *dpll_ck, *hdcp_ck; 316 + struct clk *dpll_ck, *hdcp_ck; 316 317 317 318 ti_dt_clocks_register(dra7xx_clks); 318 319 319 320 omap2_clk_disable_autoidle_all(); 320 - 321 - abe_dpll_mux = clk_get_sys(NULL, "abe_dpll_sys_clk_mux"); 322 - sys_clkin2 = clk_get_sys(NULL, "sys_clkin2"); 323 - dpll_ck = clk_get_sys(NULL, "dpll_abe_ck"); 324 - 325 - rc = clk_set_parent(abe_dpll_mux, sys_clkin2); 326 - if (!rc) 327 - rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ); 328 - if (rc) 329 - pr_err("%s: failed to configure ABE DPLL!\n", __func__); 330 - 331 - dpll_ck = clk_get_sys(NULL, "dpll_abe_m2x2_ck"); 332 - rc = clk_set_rate(dpll_ck, DRA7_DPLL_ABE_DEFFREQ * 2); 333 - if (rc) 334 - pr_err("%s: failed to configure ABE DPLL m2x2!\n", __func__); 335 321 336 322 dpll_ck = clk_get_sys(NULL, "dpll_gmac_ck"); 337 323 rc = clk_set_rate(dpll_ck, DRA7_DPLL_GMAC_DEFFREQ);
+2 -2
drivers/clk/ti/clkt_dflt.c
··· 222 222 } 223 223 } 224 224 225 - if (unlikely(!clk->enable_reg)) { 225 + if (unlikely(IS_ERR(clk->enable_reg))) { 226 226 pr_err("%s: %s missing enable_reg\n", __func__, 227 227 clk_hw_get_name(hw)); 228 228 ret = -EINVAL; ··· 264 264 u32 v; 265 265 266 266 clk = to_clk_hw_omap(hw); 267 - if (!clk->enable_reg) { 267 + if (IS_ERR(clk->enable_reg)) { 268 268 /* 269 269 * 'independent' here refers to a clock which is not 270 270 * controlled by its parent.
+3
drivers/cpufreq/acpi-cpufreq.c
··· 149 149 { 150 150 struct acpi_cpufreq_data *data = policy->driver_data; 151 151 152 + if (unlikely(!data)) 153 + return -ENODEV; 154 + 152 155 return cpufreq_show_cpus(data->freqdomain_cpus, buf); 153 156 } 154 157
+3 -1
drivers/cpufreq/cpufreq.c
··· 1436 1436 * since this is a core component, and is essential for the 1437 1437 * subsequent light-weight ->init() to succeed. 1438 1438 */ 1439 - if (cpufreq_driver->exit) 1439 + if (cpufreq_driver->exit) { 1440 1440 cpufreq_driver->exit(policy); 1441 + policy->freq_table = NULL; 1442 + } 1441 1443 } 1442 1444 1443 1445 /**
+4 -3
drivers/devfreq/devfreq.c
··· 492 492 if (err) { 493 493 put_device(&devfreq->dev); 494 494 mutex_unlock(&devfreq->lock); 495 - goto err_dev; 495 + goto err_out; 496 496 } 497 497 498 498 mutex_unlock(&devfreq->lock); ··· 518 518 err_init: 519 519 list_del(&devfreq->node); 520 520 device_unregister(&devfreq->dev); 521 - err_dev: 522 521 kfree(devfreq); 523 522 err_out: 524 523 return ERR_PTR(err); ··· 794 795 ret = PTR_ERR(governor); 795 796 goto out; 796 797 } 797 - if (df->governor == governor) 798 + if (df->governor == governor) { 799 + ret = 0; 798 800 goto out; 801 + } 799 802 800 803 if (df->governor) { 801 804 ret = df->governor->event_handler(df, DEVFREQ_GOV_STOP, NULL);
+4 -2
drivers/mcb/mcb-pci.c
··· 74 74 ret = -ENOTSUPP; 75 75 dev_err(&pdev->dev, 76 76 "IO mapped PCI devices are not supported\n"); 77 - goto out_release; 77 + goto out_iounmap; 78 78 } 79 79 80 80 pci_set_drvdata(pdev, priv); ··· 89 89 90 90 ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base); 91 91 if (ret < 0) 92 - goto out_iounmap; 92 + goto out_mcb_bus; 93 93 num_cells = ret; 94 94 95 95 dev_dbg(&pdev->dev, "Found %d cells\n", num_cells); ··· 98 98 99 99 return 0; 100 100 101 + out_mcb_bus: 102 + mcb_release_bus(priv->bus); 101 103 out_iounmap: 102 104 iounmap(priv->base); 103 105 out_release:
+1 -1
drivers/md/dm-cache-policy-cleaner.c
··· 436 436 static struct dm_cache_policy_type wb_policy_type = { 437 437 .name = "cleaner", 438 438 .version = {1, 0, 0}, 439 - .hint_size = 0, 439 + .hint_size = 4, 440 440 .owner = THIS_MODULE, 441 441 .create = wb_create 442 442 };
+3 -3
drivers/md/dm-exception-store.c
··· 203 203 return -EINVAL; 204 204 } 205 205 206 - tmp_store = kmalloc(sizeof(*tmp_store), GFP_KERNEL); 206 + tmp_store = kzalloc(sizeof(*tmp_store), GFP_KERNEL); 207 207 if (!tmp_store) { 208 208 ti->error = "Exception store allocation failed"; 209 209 return -ENOMEM; ··· 215 215 else if (persistent == 'N') 216 216 type = get_type("N"); 217 217 else { 218 - ti->error = "Persistent flag is not P or N"; 218 + ti->error = "Exception store type is not P or N"; 219 219 r = -EINVAL; 220 220 goto bad_type; 221 221 } ··· 233 233 if (r) 234 234 goto bad; 235 235 236 - r = type->ctr(tmp_store, 0, NULL); 236 + r = type->ctr(tmp_store, (strlen(argv[0]) > 1 ? &argv[0][1] : NULL)); 237 237 if (r) { 238 238 ti->error = "Exception store type constructor failed"; 239 239 goto bad;
+3 -2
drivers/md/dm-exception-store.h
··· 42 42 const char *name; 43 43 struct module *module; 44 44 45 - int (*ctr) (struct dm_exception_store *store, 46 - unsigned argc, char **argv); 45 + int (*ctr) (struct dm_exception_store *store, char *options); 47 46 48 47 /* 49 48 * Destroys this object when you've finished with it. ··· 122 123 unsigned chunk_shift; 123 124 124 125 void *context; 126 + 127 + bool userspace_supports_overflow; 125 128 }; 126 129 127 130 /*
+1 -2
drivers/md/dm-raid.c
··· 329 329 */ 330 330 if (min_region_size > (1 << 13)) { 331 331 /* If not a power of 2, make it the next power of 2 */ 332 - if (min_region_size & (min_region_size - 1)) 333 - region_size = 1 << fls(region_size); 332 + region_size = roundup_pow_of_two(min_region_size); 334 333 DMINFO("Choosing default region size of %lu sectors", 335 334 region_size); 336 335 } else {
+14 -3
drivers/md/dm-snap-persistent.c
··· 7 7 8 8 #include "dm-exception-store.h" 9 9 10 + #include <linux/ctype.h> 10 11 #include <linux/mm.h> 11 12 #include <linux/pagemap.h> 12 13 #include <linux/vmalloc.h> ··· 844 843 DMWARN("write header failed"); 845 844 } 846 845 847 - static int persistent_ctr(struct dm_exception_store *store, 848 - unsigned argc, char **argv) 846 + static int persistent_ctr(struct dm_exception_store *store, char *options) 849 847 { 850 848 struct pstore *ps; 851 849 ··· 873 873 return -ENOMEM; 874 874 } 875 875 876 + if (options) { 877 + char overflow = toupper(options[0]); 878 + if (overflow == 'O') 879 + store->userspace_supports_overflow = true; 880 + else { 881 + DMERR("Unsupported persistent store option: %s", options); 882 + return -EINVAL; 883 + } 884 + } 885 + 876 886 store->context = ps; 877 887 878 888 return 0; ··· 898 888 case STATUSTYPE_INFO: 899 889 break; 900 890 case STATUSTYPE_TABLE: 901 - DMEMIT(" P %llu", (unsigned long long)store->chunk_size); 891 + DMEMIT(" %s %llu", store->userspace_supports_overflow ? "PO" : "P", 892 + (unsigned long long)store->chunk_size); 902 893 } 903 894 904 895 return sz;
+1 -2
drivers/md/dm-snap-transient.c
··· 70 70 *metadata_sectors = 0; 71 71 } 72 72 73 - static int transient_ctr(struct dm_exception_store *store, 74 - unsigned argc, char **argv) 73 + static int transient_ctr(struct dm_exception_store *store, char *options) 75 74 { 76 75 struct transient_c *tc; 77 76
+9 -5
drivers/md/dm-snap.c
··· 1098 1098 } 1099 1099 1100 1100 /* 1101 - * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> 1101 + * Construct a snapshot mapping: <origin_dev> <COW-dev> <p|po|n> <chunk-size> 1102 1102 */ 1103 1103 static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) 1104 1104 { ··· 1302 1302 1303 1303 u.store_swap = snap_dest->store; 1304 1304 snap_dest->store = snap_src->store; 1305 + snap_dest->store->userspace_supports_overflow = u.store_swap->userspace_supports_overflow; 1305 1306 snap_src->store = u.store_swap; 1306 1307 1307 1308 snap_dest->store->snap = snap_dest; ··· 1740 1739 1741 1740 pe = __find_pending_exception(s, pe, chunk); 1742 1741 if (!pe) { 1743 - s->snapshot_overflowed = 1; 1744 - DMERR("Snapshot overflowed: Unable to allocate exception."); 1742 + if (s->store->userspace_supports_overflow) { 1743 + s->snapshot_overflowed = 1; 1744 + DMERR("Snapshot overflowed: Unable to allocate exception."); 1745 + } else 1746 + __invalidate_snapshot(s, -ENOMEM); 1745 1747 r = -EIO; 1746 1748 goto out_unlock; 1747 1749 } ··· 2369 2365 2370 2366 static struct target_type snapshot_target = { 2371 2367 .name = "snapshot", 2372 - .version = {1, 14, 0}, 2368 + .version = {1, 15, 0}, 2373 2369 .module = THIS_MODULE, 2374 2370 .ctr = snapshot_ctr, 2375 2371 .dtr = snapshot_dtr, ··· 2383 2379 2384 2380 static struct target_type merge_target = { 2385 2381 .name = dm_snapshot_merge_target_name, 2386 - .version = {1, 3, 0}, 2382 + .version = {1, 4, 0}, 2387 2383 .module = THIS_MODULE, 2388 2384 .ctr = snapshot_ctr, 2389 2385 .dtr = snapshot_dtr,
+5 -6
drivers/md/dm.c
··· 1001 1001 struct dm_rq_target_io *tio = info->tio; 1002 1002 struct bio *bio = info->orig; 1003 1003 unsigned int nr_bytes = info->orig->bi_iter.bi_size; 1004 + int error = clone->bi_error; 1004 1005 1005 1006 bio_put(clone); 1006 1007 ··· 1012 1011 * the remainder. 1013 1012 */ 1014 1013 return; 1015 - else if (bio->bi_error) { 1014 + else if (error) { 1016 1015 /* 1017 1016 * Don't notice the error to the upper layer yet. 1018 1017 * The error handling decision is made by the target driver, 1019 1018 * when the request is completed. 1020 1019 */ 1021 - tio->error = bio->bi_error; 1020 + tio->error = error; 1022 1021 return; 1023 1022 } 1024 1023 ··· 2838 2837 2839 2838 might_sleep(); 2840 2839 2841 - map = dm_get_live_table(md, &srcu_idx); 2842 - 2843 2840 spin_lock(&_minor_lock); 2844 2841 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); 2845 2842 set_bit(DMF_FREEING, &md->flags); ··· 2851 2852 * do not race with internal suspend. 2852 2853 */ 2853 2854 mutex_lock(&md->suspend_lock); 2855 + map = dm_get_live_table(md, &srcu_idx); 2854 2856 if (!dm_suspended_md(md)) { 2855 2857 dm_table_presuspend_targets(map); 2856 2858 dm_table_postsuspend_targets(map); 2857 2859 } 2858 - mutex_unlock(&md->suspend_lock); 2859 - 2860 2860 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */ 2861 2861 dm_put_live_table(md, srcu_idx); 2862 + mutex_unlock(&md->suspend_lock); 2862 2863 2863 2864 /* 2864 2865 * Rare, but there may be I/O requests still going to complete,
+2 -2
drivers/md/raid1.c
··· 2382 2382 } 2383 2383 spin_unlock_irqrestore(&conf->device_lock, flags); 2384 2384 while (!list_empty(&tmp)) { 2385 - r1_bio = list_first_entry(&conf->bio_end_io_list, 2386 - struct r1bio, retry_list); 2385 + r1_bio = list_first_entry(&tmp, struct r1bio, 2386 + retry_list); 2387 2387 list_del(&r1_bio->retry_list); 2388 2388 raid_end_bio_io(r1_bio); 2389 2389 }
+2 -2
drivers/md/raid10.c
··· 2688 2688 } 2689 2689 spin_unlock_irqrestore(&conf->device_lock, flags); 2690 2690 while (!list_empty(&tmp)) { 2691 - r10_bio = list_first_entry(&conf->bio_end_io_list, 2692 - struct r10bio, retry_list); 2691 + r10_bio = list_first_entry(&tmp, struct r10bio, 2692 + retry_list); 2693 2693 list_del(&r10_bio->retry_list); 2694 2694 raid_end_bio_io(r10_bio); 2695 2695 }
+1 -1
drivers/misc/mei/hbm.c
··· 1209 1209 * after the host receives the enum_resp 1210 1210 * message clients may be added or removed 1211 1211 */ 1212 - if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS && 1212 + if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS || 1213 1213 dev->hbm_state >= MEI_HBM_STOPPED) { 1214 1214 dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n", 1215 1215 dev->dev_state, dev->hbm_state);
+9 -5
drivers/mmc/host/omap_hsmmc.c
··· 182 182 struct clk *fclk; 183 183 struct clk *dbclk; 184 184 struct regulator *pbias; 185 + bool pbias_enabled; 185 186 void __iomem *base; 186 187 int vqmmc_enabled; 187 188 resource_size_t mapbase; ··· 329 328 return ret; 330 329 } 331 330 332 - if (!regulator_is_enabled(host->pbias)) { 331 + if (host->pbias_enabled == 0) { 333 332 ret = regulator_enable(host->pbias); 334 333 if (ret) { 335 334 dev_err(host->dev, "pbias reg enable fail\n"); 336 335 return ret; 337 336 } 337 + host->pbias_enabled = 1; 338 338 } 339 339 } else { 340 - if (regulator_is_enabled(host->pbias)) { 340 + if (host->pbias_enabled == 1) { 341 341 ret = regulator_disable(host->pbias); 342 342 if (ret) { 343 343 dev_err(host->dev, "pbias reg disable fail\n"); 344 344 return ret; 345 345 } 346 + host->pbias_enabled = 0; 346 347 } 347 348 } 348 349 ··· 478 475 mmc->supply.vmmc = devm_regulator_get_optional(host->dev, "vmmc"); 479 476 if (IS_ERR(mmc->supply.vmmc)) { 480 477 ret = PTR_ERR(mmc->supply.vmmc); 481 - if (ret != -ENODEV) 478 + if ((ret != -ENODEV) && host->dev->of_node) 482 479 return ret; 483 480 dev_dbg(host->dev, "unable to get vmmc regulator %ld\n", 484 481 PTR_ERR(mmc->supply.vmmc)); ··· 493 490 mmc->supply.vqmmc = devm_regulator_get_optional(host->dev, "vmmc_aux"); 494 491 if (IS_ERR(mmc->supply.vqmmc)) { 495 492 ret = PTR_ERR(mmc->supply.vqmmc); 496 - if (ret != -ENODEV) 493 + if ((ret != -ENODEV) && host->dev->of_node) 497 494 return ret; 498 495 dev_dbg(host->dev, "unable to get vmmc_aux regulator %ld\n", 499 496 PTR_ERR(mmc->supply.vqmmc)); ··· 503 500 host->pbias = devm_regulator_get_optional(host->dev, "pbias"); 504 501 if (IS_ERR(host->pbias)) { 505 502 ret = PTR_ERR(host->pbias); 506 - if (ret != -ENODEV) 503 + if ((ret != -ENODEV) && host->dev->of_node) 507 504 return ret; 508 505 dev_dbg(host->dev, "unable to get pbias regulator %ld\n", 509 506 PTR_ERR(host->pbias)); ··· 2056 2053 host->base = base + pdata->reg_offset; 2057 2054 host->power_mode = MMC_POWER_OFF; 
2058 2055 host->next_data.cookie = 1; 2056 + host->pbias_enabled = 0; 2059 2057 host->vqmmc_enabled = 0; 2060 2058 2061 2059 ret = omap_hsmmc_gpio_init(mmc, host, pdata);
+1
drivers/mmc/host/sdhci-of-at91.c
··· 43 43 44 44 static const struct sdhci_pltfm_data soc_data_sama5d2 = { 45 45 .ops = &sdhci_at91_sama5d2_ops, 46 + .quirks2 = SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST, 46 47 }; 47 48 48 49 static const struct of_device_id sdhci_at91_dt_match[] = {
+5 -1
drivers/mmc/host/sdhci-pxav3.c
··· 135 135 struct sdhci_pxa *pxa = pltfm_host->priv; 136 136 struct resource *res; 137 137 138 + host->quirks &= ~SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; 138 139 host->quirks |= SDHCI_QUIRK_MISSING_CAPS; 139 140 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 140 141 "conf-sdio3"); ··· 291 290 uhs == MMC_TIMING_UHS_DDR50) { 292 291 reg_val &= ~SDIO3_CONF_CLK_INV; 293 292 reg_val |= SDIO3_CONF_SD_FB_CLK; 293 + } else if (uhs == MMC_TIMING_MMC_HS) { 294 + reg_val &= ~SDIO3_CONF_CLK_INV; 295 + reg_val &= ~SDIO3_CONF_SD_FB_CLK; 294 296 } else { 295 297 reg_val |= SDIO3_CONF_CLK_INV; 296 298 reg_val &= ~SDIO3_CONF_SD_FB_CLK; ··· 402 398 if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { 403 399 ret = armada_38x_quirks(pdev, host); 404 400 if (ret < 0) 405 - goto err_clk_get; 401 + goto err_mbus_win; 406 402 ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); 407 403 if (ret < 0) 408 404 goto err_mbus_win;
+2
drivers/mmc/host/sdhci.c
··· 1160 1160 host->mmc->actual_clock = 0; 1161 1161 1162 1162 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); 1163 + if (host->quirks2 & SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST) 1164 + mdelay(1); 1163 1165 1164 1166 if (clock == 0) 1165 1167 return;
+5
drivers/mmc/host/sdhci.h
··· 412 412 #define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) 413 413 /* Broken Clock divider zero in controller */ 414 414 #define SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN (1<<15) 415 + /* 416 + * When internal clock is disabled, a delay is needed before modifying the 417 + * SD clock frequency or enabling back the internal clock. 418 + */ 419 + #define SDHCI_QUIRK2_NEED_DELAY_AFTER_INT_CLK_RST (1<<16) 415 420 416 421 int irq; /* Device IRQ */ 417 422 void __iomem *ioaddr; /* Mapped address */
+1 -1
drivers/mtd/nand/mxc_nand.c
··· 879 879 oob_chunk_size); 880 880 881 881 /* the last chunk */ 882 - memcpy16_toio(&s[oob_chunk_size * sparebuf_size], 882 + memcpy16_toio(&s[i * sparebuf_size], 883 883 &d[i * oob_chunk_size], 884 884 host->used_oobsize - i * oob_chunk_size); 885 885 }
+10 -17
drivers/mtd/nand/sunxi_nand.c
··· 147 147 #define NFC_ECC_MODE GENMASK(15, 12) 148 148 #define NFC_RANDOM_SEED GENMASK(30, 16) 149 149 150 + /* NFC_USER_DATA helper macros */ 151 + #define NFC_BUF_TO_USER_DATA(buf) ((buf)[0] | ((buf)[1] << 8) | \ 152 + ((buf)[2] << 16) | ((buf)[3] << 24)) 153 + 150 154 #define NFC_DEFAULT_TIMEOUT_MS 1000 151 155 152 156 #define NFC_SRAM_SIZE 1024 ··· 650 646 offset = layout->eccpos[i * ecc->bytes] - 4 + mtd->writesize; 651 647 652 648 /* Fill OOB data in */ 653 - if (oob_required) { 654 - tmp = 0xffffffff; 655 - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, 656 - 4); 657 - } else { 658 - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, 659 - chip->oob_poi + offset - mtd->writesize, 660 - 4); 661 - } 649 + writel(NFC_BUF_TO_USER_DATA(chip->oob_poi + 650 + layout->oobfree[i].offset), 651 + nfc->regs + NFC_REG_USER_DATA_BASE); 662 652 663 653 chip->cmdfunc(mtd, NAND_CMD_RNDIN, offset, -1); 664 654 ··· 782 784 offset += ecc->size; 783 785 784 786 /* Fill OOB data in */ 785 - if (oob_required) { 786 - tmp = 0xffffffff; 787 - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, &tmp, 788 - 4); 789 - } else { 790 - memcpy_toio(nfc->regs + NFC_REG_USER_DATA_BASE, oob, 791 - 4); 792 - } 787 + writel(NFC_BUF_TO_USER_DATA(oob), 788 + nfc->regs + NFC_REG_USER_DATA_BASE); 793 789 794 790 tmp = NFC_DATA_TRANS | NFC_DATA_SWAP_METHOD | NFC_ACCESS_DIR | 795 791 (1 << 30); ··· 1381 1389 node); 1382 1390 nand_release(&chip->mtd); 1383 1391 sunxi_nand_ecc_cleanup(&chip->nand.ecc); 1392 + list_del(&chip->node); 1384 1393 } 1385 1394 } 1386 1395
+4 -4
drivers/nvmem/core.c
··· 67 67 int rc; 68 68 69 69 /* Stop the user from reading */ 70 - if (pos > nvmem->size) 70 + if (pos >= nvmem->size) 71 71 return 0; 72 72 73 73 if (pos + count > nvmem->size) ··· 92 92 int rc; 93 93 94 94 /* Stop the user from writing */ 95 - if (pos > nvmem->size) 95 + if (pos >= nvmem->size) 96 96 return 0; 97 97 98 98 if (pos + count > nvmem->size) ··· 825 825 return rc; 826 826 827 827 /* shift bits in-place */ 828 - if (cell->bit_offset || cell->bit_offset) 828 + if (cell->bit_offset || cell->nbits) 829 829 nvmem_shift_read_buffer_in_place(cell, buf); 830 830 831 831 *len = cell->bytes; ··· 938 938 rc = regmap_raw_write(nvmem->regmap, cell->offset, buf, cell->bytes); 939 939 940 940 /* free the tmp buffer */ 941 - if (cell->bit_offset) 941 + if (cell->bit_offset || cell->nbits) 942 942 kfree(buf); 943 943 944 944 if (IS_ERR_VALUE(rc))
+10 -1
drivers/nvmem/sunxi_sid.c
··· 103 103 struct nvmem_device *nvmem; 104 104 struct regmap *regmap; 105 105 struct sunxi_sid *sid; 106 - int i, size; 106 + int ret, i, size; 107 107 char *randomness; 108 108 109 109 sid = devm_kzalloc(dev, sizeof(*sid), GFP_KERNEL); ··· 131 131 return PTR_ERR(nvmem); 132 132 133 133 randomness = kzalloc(sizeof(u8) * size, GFP_KERNEL); 134 + if (!randomness) { 135 + ret = -EINVAL; 136 + goto err_unreg_nvmem; 137 + } 138 + 134 139 for (i = 0; i < size; i++) 135 140 randomness[i] = sunxi_sid_read_byte(sid, i); 136 141 ··· 145 140 platform_set_drvdata(pdev, nvmem); 146 141 147 142 return 0; 143 + 144 + err_unreg_nvmem: 145 + nvmem_unregister(nvmem); 146 + return ret; 148 147 } 149 148 150 149 static int sunxi_sid_remove(struct platform_device *pdev)
+1
drivers/phy/phy-berlin-sata.c
··· 276 276 { .compatible = "marvell,berlin2q-sata-phy" }, 277 277 { }, 278 278 }; 279 + MODULE_DEVICE_TABLE(of, phy_berlin_sata_of_match); 279 280 280 281 static struct platform_driver phy_berlin_sata_driver = { 281 282 .probe = phy_berlin_sata_probe,
+11
drivers/phy/phy-qcom-ufs.c
··· 432 432 out: 433 433 return ret; 434 434 } 435 + EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_ref_clk); 435 436 436 437 static 437 438 int ufs_qcom_phy_disable_vreg(struct phy *phy, ··· 475 474 phy->is_ref_clk_enabled = false; 476 475 } 477 476 } 477 + EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_ref_clk); 478 478 479 479 #define UFS_REF_CLK_EN (1 << 5) 480 480 ··· 519 517 { 520 518 ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, true); 521 519 } 520 + EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_dev_ref_clk); 522 521 523 522 void ufs_qcom_phy_disable_dev_ref_clk(struct phy *generic_phy) 524 523 { 525 524 ufs_qcom_phy_dev_ref_clk_ctrl(generic_phy, false); 526 525 } 526 + EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_dev_ref_clk); 527 527 528 528 /* Turn ON M-PHY RMMI interface clocks */ 529 529 int ufs_qcom_phy_enable_iface_clk(struct phy *generic_phy) ··· 554 550 out: 555 551 return ret; 556 552 } 553 + EXPORT_SYMBOL_GPL(ufs_qcom_phy_enable_iface_clk); 557 554 558 555 /* Turn OFF M-PHY RMMI interface clocks */ 559 556 void ufs_qcom_phy_disable_iface_clk(struct phy *generic_phy) ··· 567 562 phy->is_iface_clk_enabled = false; 568 563 } 569 564 } 565 + EXPORT_SYMBOL_GPL(ufs_qcom_phy_disable_iface_clk); 570 566 571 567 int ufs_qcom_phy_start_serdes(struct phy *generic_phy) 572 568 { ··· 584 578 585 579 return ret; 586 580 } 581 + EXPORT_SYMBOL_GPL(ufs_qcom_phy_start_serdes); 587 582 588 583 int ufs_qcom_phy_set_tx_lane_enable(struct phy *generic_phy, u32 tx_lanes) 589 584 { ··· 602 595 603 596 return ret; 604 597 } 598 + EXPORT_SYMBOL_GPL(ufs_qcom_phy_set_tx_lane_enable); 605 599 606 600 void ufs_qcom_phy_save_controller_version(struct phy *generic_phy, 607 601 u8 major, u16 minor, u16 step) ··· 613 605 ufs_qcom_phy->host_ctrl_rev_minor = minor; 614 606 ufs_qcom_phy->host_ctrl_rev_step = step; 615 607 } 608 + EXPORT_SYMBOL_GPL(ufs_qcom_phy_save_controller_version); 616 609 617 610 int ufs_qcom_phy_calibrate_phy(struct phy *generic_phy, bool is_rate_B) 618 611 { ··· 634 625 635 626 return ret; 
636 627 } 628 + EXPORT_SYMBOL_GPL(ufs_qcom_phy_calibrate_phy); 637 629 638 630 int ufs_qcom_phy_remove(struct phy *generic_phy, 639 631 struct ufs_qcom_phy *ufs_qcom_phy) ··· 672 662 return ufs_qcom_phy->phy_spec_ops-> 673 663 is_physical_coding_sublayer_ready(ufs_qcom_phy); 674 664 } 665 + EXPORT_SYMBOL_GPL(ufs_qcom_phy_is_pcs_ready); 675 666 676 667 int ufs_qcom_phy_power_on(struct phy *generic_phy) 677 668 {
+6
drivers/phy/phy-rockchip-usb.c
··· 98 98 struct device_node *child; 99 99 struct regmap *grf; 100 100 unsigned int reg_offset; 101 + int err; 101 102 102 103 grf = syscon_regmap_lookup_by_phandle(dev->of_node, "rockchip,grf"); 103 104 if (IS_ERR(grf)) { ··· 130 129 return PTR_ERR(rk_phy->phy); 131 130 } 132 131 phy_set_drvdata(rk_phy->phy, rk_phy); 132 + 133 + /* only power up usb phy when it use, so disable it when init*/ 134 + err = rockchip_usb_phy_power(rk_phy, 1); 135 + if (err) 136 + return err; 133 137 } 134 138 135 139 phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+2 -2
drivers/regulator/axp20x-regulator.c
··· 192 192 AXP_DESC(AXP22X, DCDC3, "dcdc3", "vin3", 600, 1860, 20, 193 193 AXP22X_DCDC3_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), 194 194 AXP_DESC(AXP22X, DCDC4, "dcdc4", "vin4", 600, 1540, 20, 195 - AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(3)), 195 + AXP22X_DCDC4_V_OUT, 0x3f, AXP22X_PWR_OUT_CTRL1, BIT(4)), 196 196 AXP_DESC(AXP22X, DCDC5, "dcdc5", "vin5", 1000, 2550, 50, 197 - AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(4)), 197 + AXP22X_DCDC5_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(5)), 198 198 /* secondary switchable output of DCDC1 */ 199 199 AXP_DESC_SW(AXP22X, DC1SW, "dc1sw", "dcdc1", 1600, 3400, 100, 200 200 AXP22X_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(7)),
+4
drivers/regulator/core.c
··· 1403 1403 return 0; 1404 1404 } 1405 1405 1406 + /* Did the lookup explicitly defer for us? */ 1407 + if (ret == -EPROBE_DEFER) 1408 + return ret; 1409 + 1406 1410 if (have_full_constraints()) { 1407 1411 r = dummy_regulator_rdev; 1408 1412 } else {
+21 -7
drivers/scsi/3w-9xxx.c
··· 212 212 .llseek = noop_llseek, 213 213 }; 214 214 215 + /* 216 + * The controllers use an inline buffer instead of a mapped SGL for small, 217 + * single entry buffers. Note that we treat a zero-length transfer like 218 + * a mapped SGL. 219 + */ 220 + static bool twa_command_mapped(struct scsi_cmnd *cmd) 221 + { 222 + return scsi_sg_count(cmd) != 1 || 223 + scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH; 224 + } 225 + 215 226 /* This function will complete an aen request from the isr */ 216 227 static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id) 217 228 { ··· 1350 1339 } 1351 1340 1352 1341 /* Now complete the io */ 1353 - scsi_dma_unmap(cmd); 1342 + if (twa_command_mapped(cmd)) 1343 + scsi_dma_unmap(cmd); 1354 1344 cmd->scsi_done(cmd); 1355 1345 tw_dev->state[request_id] = TW_S_COMPLETED; 1356 1346 twa_free_request_id(tw_dev, request_id); ··· 1594 1582 struct scsi_cmnd *cmd = tw_dev->srb[i]; 1595 1583 1596 1584 cmd->result = (DID_RESET << 16); 1597 - scsi_dma_unmap(cmd); 1585 + if (twa_command_mapped(cmd)) 1586 + scsi_dma_unmap(cmd); 1598 1587 cmd->scsi_done(cmd); 1599 1588 } 1600 1589 } ··· 1778 1765 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL); 1779 1766 switch (retval) { 1780 1767 case SCSI_MLQUEUE_HOST_BUSY: 1781 - scsi_dma_unmap(SCpnt); 1768 + if (twa_command_mapped(SCpnt)) 1769 + scsi_dma_unmap(SCpnt); 1782 1770 twa_free_request_id(tw_dev, request_id); 1783 1771 break; 1784 1772 case 1: 1785 1773 SCpnt->result = (DID_ERROR << 16); 1786 - scsi_dma_unmap(SCpnt); 1774 + if (twa_command_mapped(SCpnt)) 1775 + scsi_dma_unmap(SCpnt); 1787 1776 done(SCpnt); 1788 1777 tw_dev->state[request_id] = TW_S_COMPLETED; 1789 1778 twa_free_request_id(tw_dev, request_id); ··· 1846 1831 /* Map sglist from scsi layer to cmd packet */ 1847 1832 1848 1833 if (scsi_sg_count(srb)) { 1849 - if ((scsi_sg_count(srb) == 1) && 1850 - (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) { 1834 + if (!twa_command_mapped(srb)) { 1851 1835 if 
(srb->sc_data_direction == DMA_TO_DEVICE || 1852 1836 srb->sc_data_direction == DMA_BIDIRECTIONAL) 1853 1837 scsi_sg_copy_to_buffer(srb, ··· 1919 1905 { 1920 1906 struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1921 1907 1922 - if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH && 1908 + if (!twa_command_mapped(cmd) && 1923 1909 (cmd->sc_data_direction == DMA_FROM_DEVICE || 1924 1910 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) { 1925 1911 if (scsi_sg_count(cmd) == 1) {
+11 -6
drivers/scsi/libiscsi.c
··· 976 976 wake_up(&conn->ehwait); 977 977 } 978 978 979 - static void iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) 979 + static int iscsi_send_nopout(struct iscsi_conn *conn, struct iscsi_nopin *rhdr) 980 980 { 981 981 struct iscsi_nopout hdr; 982 982 struct iscsi_task *task; 983 983 984 984 if (!rhdr && conn->ping_task) 985 - return; 985 + return -EINVAL; 986 986 987 987 memset(&hdr, 0, sizeof(struct iscsi_nopout)); 988 988 hdr.opcode = ISCSI_OP_NOOP_OUT | ISCSI_OP_IMMEDIATE; ··· 996 996 hdr.ttt = RESERVED_ITT; 997 997 998 998 task = __iscsi_conn_send_pdu(conn, (struct iscsi_hdr *)&hdr, NULL, 0); 999 - if (!task) 999 + if (!task) { 1000 1000 iscsi_conn_printk(KERN_ERR, conn, "Could not send nopout\n"); 1001 - else if (!rhdr) { 1001 + return -EIO; 1002 + } else if (!rhdr) { 1002 1003 /* only track our nops */ 1003 1004 conn->ping_task = task; 1004 1005 conn->last_ping = jiffies; 1005 1006 } 1007 + 1008 + return 0; 1006 1009 } 1007 1010 1008 1011 static int iscsi_nop_out_rsp(struct iscsi_task *task, ··· 2095 2092 if (time_before_eq(last_recv + recv_timeout, jiffies)) { 2096 2093 /* send a ping to try to provoke some traffic */ 2097 2094 ISCSI_DBG_CONN(conn, "Sending nopout as ping\n"); 2098 - iscsi_send_nopout(conn, NULL); 2099 - next_timeout = conn->last_ping + (conn->ping_timeout * HZ); 2095 + if (iscsi_send_nopout(conn, NULL)) 2096 + next_timeout = jiffies + (1 * HZ); 2097 + else 2098 + next_timeout = conn->last_ping + (conn->ping_timeout * HZ); 2100 2099 } else 2101 2100 next_timeout = last_recv + recv_timeout; 2102 2101
+1 -1
drivers/scsi/scsi_dh.c
··· 111 111 112 112 dh = __scsi_dh_lookup(name); 113 113 if (!dh) { 114 - request_module(name); 114 + request_module("scsi_dh_%s", name); 115 115 dh = __scsi_dh_lookup(name); 116 116 } 117 117
+4 -3
drivers/spi/spi-davinci.c
··· 992 992 goto free_master; 993 993 } 994 994 995 - dspi->irq = platform_get_irq(pdev, 0); 996 - if (dspi->irq <= 0) { 995 + ret = platform_get_irq(pdev, 0); 996 + if (ret == 0) 997 997 ret = -EINVAL; 998 + if (ret < 0) 998 999 goto free_master; 999 - } 1000 + dspi->irq = ret; 1000 1001 1001 1002 ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq, 1002 1003 dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
+1
drivers/staging/speakup/fakekey.c
··· 81 81 __this_cpu_write(reporting_keystroke, true); 82 82 input_report_key(virt_keyboard, KEY_DOWN, PRESSED); 83 83 input_report_key(virt_keyboard, KEY_DOWN, RELEASED); 84 + input_sync(virt_keyboard); 84 85 __this_cpu_write(reporting_keystroke, false); 85 86 86 87 /* reenable preemption */
+5 -10
drivers/tty/n_tty.c
··· 343 343 spin_lock_irqsave(&tty->ctrl_lock, flags); 344 344 tty->ctrl_status |= TIOCPKT_FLUSHREAD; 345 345 spin_unlock_irqrestore(&tty->ctrl_lock, flags); 346 - if (waitqueue_active(&tty->link->read_wait)) 347 - wake_up_interruptible(&tty->link->read_wait); 346 + wake_up_interruptible(&tty->link->read_wait); 348 347 } 349 348 } 350 349 ··· 1381 1382 put_tty_queue(c, ldata); 1382 1383 smp_store_release(&ldata->canon_head, ldata->read_head); 1383 1384 kill_fasync(&tty->fasync, SIGIO, POLL_IN); 1384 - if (waitqueue_active(&tty->read_wait)) 1385 - wake_up_interruptible_poll(&tty->read_wait, POLLIN); 1385 + wake_up_interruptible_poll(&tty->read_wait, POLLIN); 1386 1386 return 0; 1387 1387 } 1388 1388 } ··· 1665 1667 1666 1668 if ((read_cnt(ldata) >= ldata->minimum_to_wake) || L_EXTPROC(tty)) { 1667 1669 kill_fasync(&tty->fasync, SIGIO, POLL_IN); 1668 - if (waitqueue_active(&tty->read_wait)) 1669 - wake_up_interruptible_poll(&tty->read_wait, POLLIN); 1670 + wake_up_interruptible_poll(&tty->read_wait, POLLIN); 1670 1671 } 1671 1672 } 1672 1673 ··· 1884 1887 } 1885 1888 1886 1889 /* The termios change make the tty ready for I/O */ 1887 - if (waitqueue_active(&tty->write_wait)) 1888 - wake_up_interruptible(&tty->write_wait); 1889 - if (waitqueue_active(&tty->read_wait)) 1890 - wake_up_interruptible(&tty->read_wait); 1890 + wake_up_interruptible(&tty->write_wait); 1891 + wake_up_interruptible(&tty->read_wait); 1891 1892 } 1892 1893 1893 1894 /**
+8
drivers/tty/serial/8250/8250_port.c
··· 261 261 UART_FCR7_64BYTE, 262 262 .flags = UART_CAP_FIFO, 263 263 }, 264 + [PORT_RT2880] = { 265 + .name = "Palmchip BK-3103", 266 + .fifo_size = 16, 267 + .tx_loadsz = 16, 268 + .fcr = UART_FCR_ENABLE_FIFO | UART_FCR_R_TRIG_10, 269 + .rxtrig_bytes = {1, 4, 8, 14}, 270 + .flags = UART_CAP_FIFO, 271 + }, 264 272 }; 265 273 266 274 /* Uart divisor latch read */
+1 -1
drivers/tty/serial/atmel_serial.c
··· 2786 2786 ret = atmel_init_gpios(port, &pdev->dev); 2787 2787 if (ret < 0) { 2788 2788 dev_err(&pdev->dev, "Failed to initialize GPIOs."); 2789 - goto err; 2789 + goto err_clear_bit; 2790 2790 } 2791 2791 2792 2792 ret = atmel_init_port(port, pdev);
+14 -6
drivers/tty/serial/imx.c
··· 1631 1631 int locked = 1; 1632 1632 int retval; 1633 1633 1634 - retval = clk_prepare_enable(sport->clk_per); 1634 + retval = clk_enable(sport->clk_per); 1635 1635 if (retval) 1636 1636 return; 1637 - retval = clk_prepare_enable(sport->clk_ipg); 1637 + retval = clk_enable(sport->clk_ipg); 1638 1638 if (retval) { 1639 - clk_disable_unprepare(sport->clk_per); 1639 + clk_disable(sport->clk_per); 1640 1640 return; 1641 1641 } 1642 1642 ··· 1675 1675 if (locked) 1676 1676 spin_unlock_irqrestore(&sport->port.lock, flags); 1677 1677 1678 - clk_disable_unprepare(sport->clk_ipg); 1679 - clk_disable_unprepare(sport->clk_per); 1678 + clk_disable(sport->clk_ipg); 1679 + clk_disable(sport->clk_per); 1680 1680 } 1681 1681 1682 1682 /* ··· 1777 1777 1778 1778 retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); 1779 1779 1780 - clk_disable_unprepare(sport->clk_ipg); 1780 + clk_disable(sport->clk_ipg); 1781 + if (retval) { 1782 + clk_unprepare(sport->clk_ipg); 1783 + goto error_console; 1784 + } 1785 + 1786 + retval = clk_prepare(sport->clk_per); 1787 + if (retval) 1788 + clk_disable_unprepare(sport->clk_ipg); 1781 1789 1782 1790 error_console: 1783 1791 return retval;
+17 -5
drivers/tty/tty_buffer.c
··· 242 242 atomic_inc(&buf->priority); 243 243 244 244 mutex_lock(&buf->lock); 245 - while ((next = buf->head->next) != NULL) { 245 + /* paired w/ release in __tty_buffer_request_room; ensures there are 246 + * no pending memory accesses to the freed buffer 247 + */ 248 + while ((next = smp_load_acquire(&buf->head->next)) != NULL) { 246 249 tty_buffer_free(port, buf->head); 247 250 buf->head = next; 248 251 } ··· 293 290 if (n != NULL) { 294 291 n->flags = flags; 295 292 buf->tail = n; 296 - b->commit = b->used; 293 + /* paired w/ acquire in flush_to_ldisc(); ensures 294 + * flush_to_ldisc() sees buffer data. 295 + */ 296 + smp_store_release(&b->commit, b->used); 297 297 /* paired w/ acquire in flush_to_ldisc(); ensures the 298 298 * latest commit value can be read before the head is 299 299 * advanced to the next buffer ··· 399 393 { 400 394 struct tty_bufhead *buf = &port->buf; 401 395 402 - buf->tail->commit = buf->tail->used; 396 + /* paired w/ acquire in flush_to_ldisc(); ensures 397 + * flush_to_ldisc() sees buffer data. 398 + */ 399 + smp_store_release(&buf->tail->commit, buf->tail->used); 403 400 schedule_work(&buf->work); 404 401 } 405 402 EXPORT_SYMBOL(tty_schedule_flip); ··· 476 467 struct tty_struct *tty; 477 468 struct tty_ldisc *disc; 478 469 479 - tty = port->itty; 470 + tty = READ_ONCE(port->itty); 480 471 if (tty == NULL) 481 472 return; 482 473 ··· 500 491 * is advancing to the next buffer 501 492 */ 502 493 next = smp_load_acquire(&head->next); 503 - count = head->commit - head->read; 494 + /* paired w/ release in __tty_buffer_request_room() or in 495 + * tty_buffer_flush(); ensures we see the committed buffer data 496 + */ 497 + count = smp_load_acquire(&head->commit) - head->read; 504 498 if (!count) { 505 499 if (next == NULL) { 506 500 check_other_closed(tty);
+34 -6
drivers/tty/tty_io.c
··· 2128 2128 if (!noctty && 2129 2129 current->signal->leader && 2130 2130 !current->signal->tty && 2131 - tty->session == NULL) 2132 - __proc_set_tty(tty); 2131 + tty->session == NULL) { 2132 + /* 2133 + * Don't let a process that only has write access to the tty 2134 + * obtain the privileges associated with having a tty as 2135 + * controlling terminal (being able to reopen it with full 2136 + * access through /dev/tty, being able to perform pushback). 2137 + * Many distributions set the group of all ttys to "tty" and 2138 + * grant write-only access to all terminals for setgid tty 2139 + * binaries, which should not imply full privileges on all ttys. 2140 + * 2141 + * This could theoretically break old code that performs open() 2142 + * on a write-only file descriptor. In that case, it might be 2143 + * necessary to also permit this if 2144 + * inode_permission(inode, MAY_READ) == 0. 2145 + */ 2146 + if (filp->f_mode & FMODE_READ) 2147 + __proc_set_tty(tty); 2148 + } 2133 2149 spin_unlock_irq(&current->sighand->siglock); 2134 2150 read_unlock(&tasklist_lock); 2135 2151 tty_unlock(tty); ··· 2434 2418 * Takes ->siglock() when updating signal->tty 2435 2419 */ 2436 2420 2437 - static int tiocsctty(struct tty_struct *tty, int arg) 2421 + static int tiocsctty(struct tty_struct *tty, struct file *file, int arg) 2438 2422 { 2439 2423 int ret = 0; 2440 2424 ··· 2468 2452 goto unlock; 2469 2453 } 2470 2454 } 2455 + 2456 + /* See the comment in tty_open(). */ 2457 + if ((file->f_mode & FMODE_READ) == 0 && !capable(CAP_SYS_ADMIN)) { 2458 + ret = -EPERM; 2459 + goto unlock; 2460 + } 2461 + 2471 2462 proc_set_tty(tty); 2472 2463 unlock: 2473 2464 read_unlock(&tasklist_lock); ··· 2867 2844 no_tty(); 2868 2845 return 0; 2869 2846 case TIOCSCTTY: 2870 - return tiocsctty(tty, arg); 2847 + return tiocsctty(tty, file, arg); 2871 2848 case TIOCGPGRP: 2872 2849 return tiocgpgrp(tty, real_tty, p); 2873 2850 case TIOCSPGRP: ··· 3174 3151 static int tty_cdev_add(struct tty_driver *driver, dev_t dev, 3175 3152 unsigned int index, unsigned int count) 3176 3153 { 3154 + int err; 3155 + 3177 3156 /* init here, since reused cdevs cause crashes */ 3178 3157 driver->cdevs[index] = cdev_alloc(); 3179 3158 if (!driver->cdevs[index]) 3180 3159 return -ENOMEM; 3181 - cdev_init(driver->cdevs[index], &tty_fops); 3160 + driver->cdevs[index]->ops = &tty_fops; 3182 3161 driver->cdevs[index]->owner = driver->owner; 3183 - return cdev_add(driver->cdevs[index], dev, count); 3162 + err = cdev_add(driver->cdevs[index], dev, count); 3163 + if (err) 3164 + kobject_put(&driver->cdevs[index]->kobj); 3165 + return err; 3184 3166 } 3185 3167 3186 3168 /**
+13
drivers/usb/core/quirks.c
··· 54 54 { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, 55 55 { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, 56 56 57 + /* Logitech ConferenceCam CC3000e */ 58 + { USB_DEVICE(0x046d, 0x0847), .driver_info = USB_QUIRK_DELAY_INIT }, 59 + { USB_DEVICE(0x046d, 0x0848), .driver_info = USB_QUIRK_DELAY_INIT }, 60 + 61 + /* Logitech PTZ Pro Camera */ 62 + { USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT }, 63 + 57 64 /* Logitech Quickcam Fusion */ 58 65 { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, 59 66 ··· 84 77 85 78 /* Philips PSC805 audio device */ 86 79 { USB_DEVICE(0x0471, 0x0155), .driver_info = USB_QUIRK_RESET_RESUME }, 80 + 81 + /* Plantronic Audio 655 DSP */ 82 + { USB_DEVICE(0x047f, 0xc008), .driver_info = USB_QUIRK_RESET_RESUME }, 83 + 84 + /* Plantronic Audio 648 USB */ 85 + { USB_DEVICE(0x047f, 0xc013), .driver_info = USB_QUIRK_RESET_RESUME }, 87 86 88 87 /* Artisman Watchdog Dongle */ 89 88 { USB_DEVICE(0x04b4, 0x0526), .driver_info =
+3 -1
drivers/usb/gadget/udc/bdc/bdc_ep.c
··· 159 159 bd_table->start_bd = dma_pool_alloc(bdc->bd_table_pool, 160 160 GFP_ATOMIC, 161 161 &dma); 162 - if (!bd_table->start_bd) 162 + if (!bd_table->start_bd) { 163 + kfree(bd_table); 163 164 goto fail; 165 + } 164 166 165 167 bd_table->dma = dma; 166 168
+1 -1
drivers/usb/misc/chaoskey.c
··· 472 472 if (this_time > max) 473 473 this_time = max; 474 474 475 - memcpy(data, dev->buf, this_time); 475 + memcpy(data, dev->buf + dev->used, this_time); 476 476 477 477 dev->used += this_time; 478 478
+6 -1
drivers/usb/renesas_usbhs/common.c
··· 476 476 .compatible = "renesas,usbhs-r8a7794", 477 477 .data = (void *)USBHS_TYPE_RCAR_GEN2, 478 478 }, 479 + { 480 + /* Gen3 is compatible with Gen2 */ 481 + .compatible = "renesas,usbhs-r8a7795", 482 + .data = (void *)USBHS_TYPE_RCAR_GEN2, 483 + }, 479 484 { }, 480 485 }; 481 486 MODULE_DEVICE_TABLE(of, usbhs_of_match); ··· 498 493 return NULL; 499 494 500 495 dparam = &info->driver_param; 501 - dparam->type = of_id ? (u32)of_id->data : 0; 496 + dparam->type = of_id ? (uintptr_t)of_id->data : 0; 502 497 if (!of_property_read_u32(dev->of_node, "renesas,buswait", &tmp)) 503 498 dparam->buswait_bwait = tmp; 504 499 gpio = of_get_named_gpio_flags(dev->of_node, "renesas,enable-gpio", 0,
+5 -3
drivers/video/fbdev/broadsheetfb.c
··· 752 752 if ((fw_entry->size < 8*1024) || (fw_entry->size > 64*1024)) { 753 753 dev_err(dev, "Invalid waveform\n"); 754 754 err = -EINVAL; 755 - goto err_failed; 755 + goto err_fw; 756 756 } 757 757 758 758 mutex_lock(&(par->io_lock)); ··· 762 762 mutex_unlock(&(par->io_lock)); 763 763 if (err < 0) { 764 764 dev_err(dev, "Failed to store broadsheet waveform\n"); 765 - goto err_failed; 765 + goto err_fw; 766 766 } 767 767 768 768 dev_info(dev, "Stored broadsheet waveform, size %zd\n", fw_entry->size); 769 769 770 - return len; 770 + err = len; 771 771 772 + err_fw: 773 + release_firmware(fw_entry); 772 774 err_failed: 773 775 return err; 774 776 }
+8 -1
drivers/video/fbdev/fsl-diu-fb.c
··· 1628 1628 static int fsl_diu_resume(struct platform_device *ofdev) 1629 1629 { 1630 1630 struct fsl_diu_data *data; 1631 + unsigned int i; 1631 1632 1632 1633 data = dev_get_drvdata(&ofdev->dev); 1633 - enable_lcdc(data->fsl_diu_info); 1634 + 1635 + fsl_diu_enable_interrupts(data); 1636 + update_lcdc(data->fsl_diu_info); 1637 + for (i = 0; i < NUM_AOIS; i++) { 1638 + if (data->mfb[i].count) 1639 + fsl_diu_enable_panel(&data->fsl_diu_info[i]); 1640 + } 1634 1641 1635 1642 return 0; 1636 1643 }
+1
drivers/video/fbdev/mb862xx/mb862xxfbdrv.c
··· 831 831 { .compatible = "fujitsu,coral", }, 832 832 { /* end */ } 833 833 }; 834 + MODULE_DEVICE_TABLE(of, of_platform_mb862xx_tbl); 834 835 835 836 static struct platform_driver of_platform_mb862xxfb_driver = { 836 837 .driver = {
+1 -1
drivers/video/fbdev/omap2/displays-new/connector-dvi.c
··· 294 294 295 295 adapter_node = of_parse_phandle(node, "ddc-i2c-bus", 0); 296 296 if (adapter_node) { 297 - adapter = of_find_i2c_adapter_by_node(adapter_node); 297 + adapter = of_get_i2c_adapter_by_node(adapter_node); 298 298 if (adapter == NULL) { 299 299 dev_err(&pdev->dev, "failed to parse ddc-i2c-bus\n"); 300 300 omap_dss_put_device(ddata->in);
+1
drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
··· 898 898 { .compatible = "omapdss,sony,acx565akm", }, 899 899 {}, 900 900 }; 901 + MODULE_DEVICE_TABLE(of, acx565akm_of_match); 901 902 902 903 static struct spi_driver acx565akm_driver = { 903 904 .driver = {
+9 -3
drivers/video/fbdev/tridentfb.c
··· 226 226 writemmr(par, DST1, point(x, y)); 227 227 writemmr(par, DST2, point(x + w - 1, y + h - 1)); 228 228 229 - memcpy(par->io_virt + 0x10000, data, 4 * size); 229 + iowrite32_rep(par->io_virt + 0x10000, data, size); 230 230 } 231 231 232 232 static void blade_copy_rect(struct tridentfb_par *par, ··· 673 673 static inline void set_lwidth(struct tridentfb_par *par, int width) 674 674 { 675 675 write3X4(par, VGA_CRTC_OFFSET, width & 0xFF); 676 - write3X4(par, AddColReg, 677 - (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); 676 + /* chips older than TGUI9660 have only 1 width bit in AddColReg */ 677 + /* touching the other one breaks I2C/DDC */ 678 + if (par->chip_id == TGUI9440 || par->chip_id == CYBER9320) 679 + write3X4(par, AddColReg, 680 + (read3X4(par, AddColReg) & 0xEF) | ((width & 0x100) >> 4)); 681 + else 682 + write3X4(par, AddColReg, 683 + (read3X4(par, AddColReg) & 0xCF) | ((width & 0x300) >> 4)); 678 684 } 679 685 680 686 /* For resolutions smaller than FP resolution stretch */
+1
drivers/video/of_display_timing.c
··· 210 210 */ 211 211 pr_err("%s: error in timing %d\n", 212 212 of_node_full_name(np), disp->num_timings + 1); 213 + kfree(dt); 213 214 goto timingfail; 214 215 } 215 216
+4
fs/btrfs/disk-io.c
··· 2847 2847 !extent_buffer_uptodate(chunk_root->node)) { 2848 2848 printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", 2849 2849 sb->s_id); 2850 + if (!IS_ERR(chunk_root->node)) 2851 + free_extent_buffer(chunk_root->node); 2850 2852 chunk_root->node = NULL; 2851 2853 goto fail_tree_roots; 2852 2854 } ··· 2887 2885 !extent_buffer_uptodate(tree_root->node)) { 2888 2886 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", 2889 2887 sb->s_id); 2888 + if (!IS_ERR(tree_root->node)) 2889 + free_extent_buffer(tree_root->node); 2890 2890 tree_root->node = NULL; 2891 2891 goto recovery_tree_root; 2892 2892 }
+5 -5
fs/btrfs/export.c
··· 112 112 u32 generation; 113 113 114 114 if (fh_type == FILEID_BTRFS_WITH_PARENT) { 115 - if (fh_len != BTRFS_FID_SIZE_CONNECTABLE) 115 + if (fh_len < BTRFS_FID_SIZE_CONNECTABLE) 116 116 return NULL; 117 117 root_objectid = fid->root_objectid; 118 118 } else if (fh_type == FILEID_BTRFS_WITH_PARENT_ROOT) { 119 - if (fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) 119 + if (fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) 120 120 return NULL; 121 121 root_objectid = fid->parent_root_objectid; 122 122 } else ··· 136 136 u32 generation; 137 137 138 138 if ((fh_type != FILEID_BTRFS_WITH_PARENT || 139 - fh_len != BTRFS_FID_SIZE_CONNECTABLE) && 139 + fh_len < BTRFS_FID_SIZE_CONNECTABLE) && 140 140 (fh_type != FILEID_BTRFS_WITH_PARENT_ROOT || 141 - fh_len != BTRFS_FID_SIZE_CONNECTABLE_ROOT) && 141 + fh_len < BTRFS_FID_SIZE_CONNECTABLE_ROOT) && 142 142 (fh_type != FILEID_BTRFS_WITHOUT_PARENT || 143 - fh_len != BTRFS_FID_SIZE_NON_CONNECTABLE)) 143 + fh_len < BTRFS_FID_SIZE_NON_CONNECTABLE)) 144 144 return NULL; 145 145 146 146 objectid = fid->objectid;
+8 -1
fs/btrfs/extent-tree.c
··· 2828 2828 struct btrfs_delayed_ref_head *head; 2829 2829 int ret; 2830 2830 int run_all = count == (unsigned long)-1; 2831 + bool can_flush_pending_bgs = trans->can_flush_pending_bgs; 2831 2832 2832 2833 /* We'll clean this up in btrfs_cleanup_transaction */ 2833 2834 if (trans->aborted) ··· 2845 2844 #ifdef SCRAMBLE_DELAYED_REFS 2846 2845 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root); 2847 2846 #endif 2847 + trans->can_flush_pending_bgs = false; 2848 2848 ret = __btrfs_run_delayed_refs(trans, root, count); 2849 2849 if (ret < 0) { 2850 2850 btrfs_abort_transaction(trans, root, ret); ··· 2895 2893 } 2896 2894 out: 2897 2895 assert_qgroups_uptodate(trans); 2896 + trans->can_flush_pending_bgs = can_flush_pending_bgs; 2898 2897 return 0; 2899 2898 } 2900 2899 ··· 4309 4306 * the block groups that were made dirty during the lifetime of the 4310 4307 * transaction. 4311 4308 */ 4312 - if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { 4309 + if (trans->can_flush_pending_bgs && 4310 + trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) { 4313 4311 btrfs_create_pending_block_groups(trans, trans->root); 4314 4312 btrfs_trans_release_chunk_metadata(trans); 4315 4313 } ··· 9564 9560 struct btrfs_block_group_item item; 9565 9561 struct btrfs_key key; 9566 9562 int ret = 0; 9563 + bool can_flush_pending_bgs = trans->can_flush_pending_bgs; 9567 9564 9565 + trans->can_flush_pending_bgs = false; 9568 9566 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { 9569 9567 if (ret) 9570 9568 goto next; ··· 9587 9581 next: 9588 9582 list_del_init(&block_group->bg_list); 9589 9583 } 9584 + trans->can_flush_pending_bgs = can_flush_pending_bgs; 9590 9585 } 9591 9586 9592 9587 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
+11 -8
fs/btrfs/extent_io.c
··· 3132 3132 get_extent_t *get_extent, 3133 3133 struct extent_map **em_cached, 3134 3134 struct bio **bio, int mirror_num, 3135 - unsigned long *bio_flags, int rw) 3135 + unsigned long *bio_flags, int rw, 3136 + u64 *prev_em_start) 3136 3137 { 3137 3138 struct inode *inode; 3138 3139 struct btrfs_ordered_extent *ordered; 3139 3140 int index; 3140 - u64 prev_em_start = (u64)-1; 3141 3141 3142 3142 inode = pages[0]->mapping->host; 3143 3143 while (1) { ··· 3153 3153 3154 3154 for (index = 0; index < nr_pages; index++) { 3155 3155 __do_readpage(tree, pages[index], get_extent, em_cached, bio, 3156 - mirror_num, bio_flags, rw, &prev_em_start); 3156 + mirror_num, bio_flags, rw, prev_em_start); 3157 3157 page_cache_release(pages[index]); 3158 3158 } 3159 3159 } ··· 3163 3163 int nr_pages, get_extent_t *get_extent, 3164 3164 struct extent_map **em_cached, 3165 3165 struct bio **bio, int mirror_num, 3166 - unsigned long *bio_flags, int rw) 3166 + unsigned long *bio_flags, int rw, 3167 + u64 *prev_em_start) 3167 3168 { 3168 3169 u64 start = 0; 3169 3170 u64 end = 0; ··· 3185 3184 index - first_index, start, 3186 3185 end, get_extent, em_cached, 3187 3186 bio, mirror_num, bio_flags, 3188 - rw); 3187 + rw, prev_em_start); 3189 3188 start = page_start; 3190 3189 end = start + PAGE_CACHE_SIZE - 1; 3191 3190 first_index = index; ··· 3196 3195 __do_contiguous_readpages(tree, &pages[first_index], 3197 3196 index - first_index, start, 3198 3197 end, get_extent, em_cached, bio, 3199 - mirror_num, bio_flags, rw); 3198 + mirror_num, bio_flags, rw, 3199 + prev_em_start); 3200 3200 } 3201 3201 3202 3202 static int __extent_read_full_page(struct extent_io_tree *tree, ··· 4209 4207 struct page *page; 4210 4208 struct extent_map *em_cached = NULL; 4211 4209 int nr = 0; 4210 + u64 prev_em_start = (u64)-1; 4212 4211 4213 4212 for (page_idx = 0; page_idx < nr_pages; page_idx++) { 4214 4213 page = list_entry(pages->prev, struct page, lru); ··· 4226 4223 if (nr < ARRAY_SIZE(pagepool)) 4227 4224 continue; 4228 4225 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, 4229 - &bio, 0, &bio_flags, READ); 4226 + &bio, 0, &bio_flags, READ, &prev_em_start); 4230 4227 nr = 0; 4231 4228 } 4232 4229 if (nr) 4233 4230 __extent_readpages(tree, pagepool, nr, get_extent, &em_cached, 4234 - &bio, 0, &bio_flags, READ); 4231 + &bio, 0, &bio_flags, READ, &prev_em_start); 4235 4232 4236 4233 if (em_cached) 4237 4234 free_extent_map(em_cached);
+5 -3
fs/btrfs/send.c
··· 1920 1920 /* 1921 1921 * We know that it is or will be overwritten. Check this now. 1922 1922 * The current inode being processed might have been the one that caused 1923 - * inode 'ino' to be orphanized, therefore ow_inode can actually be the 1924 - * same as sctx->send_progress. 1923 + * inode 'ino' to be orphanized, therefore check if ow_inode matches 1924 + * the current inode being processed. 1925 1925 */ 1926 - if (ow_inode <= sctx->send_progress) 1926 + if ((ow_inode < sctx->send_progress) || 1927 + (ino != sctx->cur_ino && ow_inode == sctx->cur_ino && 1928 + gen == sctx->cur_inode_gen)) 1927 1929 ret = 1; 1928 1930 else 1929 1931 ret = 0;
+1
fs/btrfs/transaction.c
··· 557 557 h->delayed_ref_elem.seq = 0; 558 558 h->type = type; 559 559 h->allocating_chunk = false; 560 + h->can_flush_pending_bgs = true; 560 561 h->reloc_reserved = false; 561 562 h->sync = false; 562 563 INIT_LIST_HEAD(&h->qgroup_ref_list);
+1
fs/btrfs/transaction.h
··· 118 118 short aborted; 119 119 short adding_csums; 120 120 bool allocating_chunk; 121 + bool can_flush_pending_bgs; 121 122 bool reloc_reserved; 122 123 bool sync; 123 124 unsigned int type;
+1 -1
fs/cifs/cifsfs.h
··· 136 136 extern const struct export_operations cifs_export_ops; 137 137 #endif /* CONFIG_CIFS_NFSD_EXPORT */ 138 138 139 - #define CIFS_VERSION "2.07" 139 + #define CIFS_VERSION "2.08" 140 140 #endif /* _CIFSFS_H */
-34
fs/cifs/inode.c
··· 2034 2034 struct tcon_link *tlink = NULL; 2035 2035 struct cifs_tcon *tcon = NULL; 2036 2036 struct TCP_Server_Info *server; 2037 - struct cifs_io_parms io_parms; 2038 2037 2039 2038 /* 2040 2039 * To avoid spurious oplock breaks from server, in the case of ··· 2055 2056 rc = -ENOSYS; 2056 2057 cifsFileInfo_put(open_file); 2057 2058 cifs_dbg(FYI, "SetFSize for attrs rc = %d\n", rc); 2058 - if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { 2059 - unsigned int bytes_written; 2060 - 2061 - io_parms.netfid = open_file->fid.netfid; 2062 - io_parms.pid = open_file->pid; 2063 - io_parms.tcon = tcon; 2064 - io_parms.offset = 0; 2065 - io_parms.length = attrs->ia_size; 2066 - rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, 2067 - NULL, NULL, 1); 2068 - cifs_dbg(FYI, "Wrt seteof rc %d\n", rc); 2069 - } 2070 2059 } else 2071 2060 rc = -EINVAL; 2072 2061 ··· 2080 2093 else 2081 2094 rc = -ENOSYS; 2082 2095 cifs_dbg(FYI, "SetEOF by path (setattrs) rc = %d\n", rc); 2083 - if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { 2084 - __u16 netfid; 2085 - int oplock = 0; 2086 2096 2087 - rc = SMBLegacyOpen(xid, tcon, full_path, FILE_OPEN, 2088 - GENERIC_WRITE, CREATE_NOT_DIR, &netfid, 2089 - &oplock, NULL, cifs_sb->local_nls, 2090 - cifs_remap(cifs_sb)); 2091 - if (rc == 0) { 2092 - unsigned int bytes_written; 2093 - 2094 - io_parms.netfid = netfid; 2095 - io_parms.pid = current->tgid; 2096 - io_parms.tcon = tcon; 2097 - io_parms.offset = 0; 2098 - io_parms.length = attrs->ia_size; 2099 - rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, NULL, 2100 - NULL, 1); 2101 - cifs_dbg(FYI, "wrt seteof rc %d\n", rc); 2102 - CIFSSMBClose(xid, tcon, netfid); 2103 - } 2104 - } 2105 2097 if (tlink) 2106 2098 cifs_put_tlink(tlink); 2107 2099
+1 -1
fs/cifs/smb2pdu.c
··· 922 922 if (tcon && tcon->bad_network_name) 923 923 return -ENOENT; 924 924 925 - if ((tcon->seal) && 925 + if ((tcon && tcon->seal) && 926 926 ((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) { 927 927 cifs_dbg(VFS, "encryption requested but no server support"); 928 928 return -EOPNOTSUPP;
+6 -2
fs/namei.c
··· 1558 1558 negative = d_is_negative(dentry); 1559 1559 if (read_seqcount_retry(&dentry->d_seq, seq)) 1560 1560 return -ECHILD; 1561 - if (negative) 1562 - return -ENOENT; 1563 1561 1564 1562 /* 1565 1563 * This sequence count validates that the parent had no ··· 1578 1580 goto unlazy; 1579 1581 } 1580 1582 } 1583 + /* 1584 + * Note: do negative dentry check after revalidation in 1585 + * case that drops it. 1586 + */ 1587 + if (negative) 1588 + return -ENOENT; 1581 1589 path->mnt = mnt; 1582 1590 path->dentry = dentry; 1583 1591 if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
+13 -2
fs/nfs/nfs4proc.c
··· 1458 1458 if (delegation) 1459 1459 delegation_flags = delegation->flags; 1460 1460 rcu_read_unlock(); 1461 - if (data->o_arg.claim == NFS4_OPEN_CLAIM_DELEGATE_CUR) { 1461 + switch (data->o_arg.claim) { 1462 + default: 1463 + break; 1464 + case NFS4_OPEN_CLAIM_DELEGATE_CUR: 1465 + case NFS4_OPEN_CLAIM_DELEG_CUR_FH: 1462 1466 pr_err_ratelimited("NFS: Broken NFSv4 server %s is " 1463 1467 "returning a delegation for " 1464 1468 "OPEN(CLAIM_DELEGATE_CUR)\n", 1465 1469 clp->cl_hostname); 1466 - } else if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) 1470 + return; 1471 + } 1472 + if ((delegation_flags & 1UL<<NFS_DELEGATION_NEED_RECLAIM) == 0) 1467 1473 nfs_inode_set_delegation(state->inode, 1468 1474 data->owner->so_cred, 1469 1475 &data->o_res); ··· 1777 1771 if (IS_ERR(opendata)) 1778 1772 return PTR_ERR(opendata); 1779 1773 nfs4_stateid_copy(&opendata->o_arg.u.delegation, stateid); 1774 + write_seqlock(&state->seqlock); 1775 + nfs4_stateid_copy(&state->stateid, &state->open_stateid); 1776 + write_sequnlock(&state->seqlock); 1780 1777 clear_bit(NFS_DELEGATED_STATE, &state->flags); 1781 1778 switch (type & (FMODE_READ|FMODE_WRITE)) { 1782 1779 case FMODE_READ|FMODE_WRITE: ··· 1872 1863 data->rpc_done = 0; 1873 1864 data->rpc_status = 0; 1874 1865 data->timestamp = jiffies; 1866 + if (data->is_recover) 1867 + nfs4_set_sequence_privileged(&data->c_arg.seq_args); 1875 1868 task = rpc_run_task(&task_setup_data); 1876 1869 if (IS_ERR(task)) 1877 1870 return PTR_ERR(task);
+2 -1
fs/nfs/nfs4state.c
··· 1725 1725 if (!test_and_clear_bit(ops->owner_flag_bit, 1726 1726 &sp->so_flags)) 1727 1727 continue; 1728 - atomic_inc(&sp->so_count); 1728 + if (!atomic_inc_not_zero(&sp->so_count)) 1729 + continue; 1729 1730 spin_unlock(&clp->cl_lock); 1730 1731 rcu_read_unlock(); 1731 1732
+1 -1
fs/nfs/nfs4trace.h
··· 409 409 __entry->flags = flags; 410 410 __entry->fmode = (__force unsigned int)ctx->mode; 411 411 __entry->dev = ctx->dentry->d_sb->s_dev; 412 - if (!IS_ERR(state)) 412 + if (!IS_ERR_OR_NULL(state)) 413 413 inode = state->inode; 414 414 if (inode != NULL) { 415 415 __entry->fileid = NFS_FILEID(inode);
+7 -7
fs/nfs/write.c
··· 569 569 if (!nfs_pageio_add_request(pgio, req)) { 570 570 nfs_redirty_request(req); 571 571 ret = pgio->pg_error; 572 - } 572 + } else 573 + nfs_add_stats(page_file_mapping(page)->host, 574 + NFSIOS_WRITEPAGES, 1); 573 575 out: 574 576 return ret; 575 577 } 576 578 577 579 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) 578 580 { 579 - struct inode *inode = page_file_mapping(page)->host; 580 581 int ret; 581 - 582 - nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); 583 - nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); 584 582 585 583 nfs_pageio_cond_complete(pgio, page_file_index(page)); 586 584 ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); ··· 595 597 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) 596 598 { 597 599 struct nfs_pageio_descriptor pgio; 600 + struct inode *inode = page_file_mapping(page)->host; 598 601 int err; 599 602 600 - nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc), 603 + nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); 604 + nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), 601 605 false, &nfs_async_write_completion_ops); 602 606 err = nfs_do_writepage(page, wbc, &pgio); 603 607 nfs_pageio_complete(&pgio); ··· 1223 1223 return 1; 1224 1224 if (!flctx || (list_empty_careful(&flctx->flc_flock) && 1225 1225 list_empty_careful(&flctx->flc_posix))) 1226 - return 0; 1226 + return 1; 1227 1227 1228 1228 /* Check to see if there are whole file write locks */ 1229 1229 ret = 0;
+5
include/linux/irqdomain.h
··· 161 161 IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), 162 162 }; 163 163 164 + static inline struct device_node *irq_domain_get_of_node(struct irq_domain *d) 165 + { 166 + return d->of_node; 167 + } 168 + 164 169 #ifdef CONFIG_IRQ_DOMAIN 165 170 struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, 166 171 irq_hw_number_t hwirq_max, int direct_max,
+1 -1
include/linux/usb/renesas_usbhs.h
··· 157 157 */ 158 158 int pio_dma_border; /* default is 64byte */ 159 159 160 - u32 type; 160 + uintptr_t type; 161 161 u32 enable_gpio; 162 162 163 163 /*
+8
include/xen/interface/sched.h
··· 107 107 #define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */ 108 108 #define SHUTDOWN_crash 3 /* Tell controller we've crashed. */ 109 109 #define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */ 110 + /* 111 + * Domain asked to perform 'soft reset' for it. The expected behavior is to 112 + * reset internal Xen state for the domain returning it to the point where it 113 + * was created but leaving the domain's memory contents and vCPU contexts 114 + * intact. This will allow the domain to start over and set up all Xen specific 115 + * interfaces again. 116 + */ 117 + #define SHUTDOWN_soft_reset 5 110 118 111 119 #endif /* __XEN_PUBLIC_SCHED_H__ */
+1 -1
kernel/irq/handle.c
··· 22 22 23 23 /** 24 24 * handle_bad_irq - handle spurious and unhandled irqs 25 - * @irq: the interrupt number 26 25 * @desc: description of the interrupt 27 26 * 28 27 * Handles spurious and unhandled IRQ's. It also prints a debugmessage. ··· 34 35 kstat_incr_irqs_this_cpu(desc); 35 36 ack_bad_irq(irq); 36 37 } 38 + EXPORT_SYMBOL_GPL(handle_bad_irq); 37 39 38 40 /* 39 41 * Special, empty irq handler:
-3
kernel/sched/core.c
··· 7261 7261 alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); 7262 7262 alloc_cpumask_var(&fallback_doms, GFP_KERNEL); 7263 7263 7264 - /* nohz_full won't take effect without isolating the cpus. */ 7265 - tick_nohz_full_add_cpus_to(cpu_isolated_map); 7266 - 7267 7264 sched_init_numa(); 7268 7265 7269 7266 /*
+5 -3
kernel/sched/deadline.c
··· 1066 1066 int target = find_later_rq(p); 1067 1067 1068 1068 if (target != -1 && 1069 - dl_time_before(p->dl.deadline, 1070 - cpu_rq(target)->dl.earliest_dl.curr)) 1069 + (dl_time_before(p->dl.deadline, 1070 + cpu_rq(target)->dl.earliest_dl.curr) || 1071 + (cpu_rq(target)->dl.dl_nr_running == 0))) 1071 1072 cpu = target; 1072 1073 } 1073 1074 rcu_read_unlock(); ··· 1418 1417 1419 1418 later_rq = cpu_rq(cpu); 1420 1419 1421 - if (!dl_time_before(task->dl.deadline, 1420 + if (later_rq->dl.dl_nr_running && 1421 + !dl_time_before(task->dl.deadline, 1422 1422 later_rq->dl.earliest_dl.curr)) { 1423 1423 /* 1424 1424 * Target rq has tasks of equal or earlier deadline,
+5 -4
kernel/sched/fair.c
··· 2370 2370 */ 2371 2371 tg_weight = atomic_long_read(&tg->load_avg); 2372 2372 tg_weight -= cfs_rq->tg_load_avg_contrib; 2373 - tg_weight += cfs_rq_load_avg(cfs_rq); 2373 + tg_weight += cfs_rq->load.weight; 2374 2374 2375 2375 return tg_weight; 2376 2376 } ··· 2380 2380 long tg_weight, load, shares; 2381 2381 2382 2382 tg_weight = calc_tg_weight(tg, cfs_rq); 2383 - load = cfs_rq_load_avg(cfs_rq); 2383 + load = cfs_rq->load.weight; 2384 2384 2385 2385 shares = (tg->shares * load); 2386 2386 if (tg_weight) ··· 2686 2686 static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq) 2687 2687 { 2688 2688 struct sched_avg *sa = &cfs_rq->avg; 2689 - int decayed; 2689 + int decayed, removed = 0; 2690 2690 2691 2691 if (atomic_long_read(&cfs_rq->removed_load_avg)) { 2692 2692 long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0); 2693 2693 sa->load_avg = max_t(long, sa->load_avg - r, 0); 2694 2694 sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0); 2695 + removed = 1; 2695 2696 } 2696 2697 2697 2698 if (atomic_long_read(&cfs_rq->removed_util_avg)) { ··· 2709 2708 cfs_rq->load_last_update_time_copy = sa->last_update_time; 2710 2709 #endif 2711 2710 2712 - return decayed; 2711 + return decayed || removed; 2713 2712 } 2714 2713 2715 2714 /* Update task and its cfs_rq load average */
+2
kernel/sched/idle.c
··· 57 57 rcu_idle_enter(); 58 58 trace_cpu_idle_rcuidle(0, smp_processor_id()); 59 59 local_irq_enable(); 60 + stop_critical_timings(); 60 61 while (!tif_need_resched() && 61 62 (cpu_idle_force_poll || tick_check_broadcast_expired())) 62 63 cpu_relax(); 64 + start_critical_timings(); 63 65 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 64 66 rcu_idle_exit(); 65 67 return 1;
+2 -1
lib/string.c
··· 203 203 unsigned long c, data; 204 204 205 205 c = *(unsigned long *)(src+res); 206 - *(unsigned long *)(dest+res) = c; 207 206 if (has_zero(c, &data, &constants)) { 208 207 data = prep_zero_mask(c, data, &constants); 209 208 data = create_zero_mask(data); 209 + *(unsigned long *)(dest+res) = c & zero_bytemask(data); 210 210 return res + find_zero(data); 211 211 } 212 + *(unsigned long *)(dest+res) = c; 212 213 res += sizeof(unsigned long); 213 214 count -= sizeof(unsigned long); 214 215 max -= sizeof(unsigned long);
+16 -18
mm/filemap.c
··· 2473 2473 iov_iter_count(i)); 2474 2474 2475 2475 again: 2476 + /* 2477 + * Bring in the user page that we will copy from _first_. 2478 + * Otherwise there's a nasty deadlock on copying from the 2479 + * same page as we're writing to, without it being marked 2480 + * up-to-date. 2481 + * 2482 + * Not only is this an optimisation, but it is also required 2483 + * to check that the address is actually valid, when atomic 2484 + * usercopies are used, below. 2485 + */ 2486 + if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 2487 + status = -EFAULT; 2488 + break; 2489 + } 2490 + 2476 2491 status = a_ops->write_begin(file, mapping, pos, bytes, flags, 2477 2492 &page, &fsdata); 2478 2493 if (unlikely(status < 0)) ··· 2495 2480 2496 2481 if (mapping_writably_mapped(mapping)) 2497 2482 flush_dcache_page(page); 2498 - /* 2499 - * 'page' is now locked. If we are trying to copy from a 2500 - * mapping of 'page' in userspace, the copy might fault and 2501 - * would need PageUptodate() to complete. But, page can not be 2502 - * made Uptodate without acquiring the page lock, which we hold. 2503 - * Deadlock. Avoid with pagefault_disable(). Fix up below with 2504 - * iov_iter_fault_in_readable(). 2505 - */ 2506 - pagefault_disable(); 2483 + 2507 2484 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); 2508 - pagefault_enable(); 2509 2485 flush_dcache_page(page); 2510 2486 2511 2487 status = a_ops->write_end(file, mapping, pos, bytes, copied, ··· 2519 2513 */ 2520 2514 bytes = min_t(unsigned long, PAGE_CACHE_SIZE - offset, 2521 2515 iov_iter_single_seg_count(i)); 2522 - /* 2523 - * This is the fallback to recover if the copy from 2524 - * userspace above faults. 2525 - */ 2526 - if (unlikely(iov_iter_fault_in_readable(i, bytes))) { 2527 - status = -EFAULT; 2528 - break; 2529 - } 2530 2516 goto again; 2531 2517 } 2532 2518 pos += copied;
+4 -2
net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
··· 136 136 ctxt->direction = DMA_FROM_DEVICE; 137 137 ctxt->read_hdr = head; 138 138 pages_needed = min_t(int, pages_needed, xprt->sc_max_sge_rd); 139 - read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); 139 + read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, 140 + rs_length); 140 141 141 142 for (pno = 0; pno < pages_needed; pno++) { 142 143 int len = min_t(int, rs_length, PAGE_SIZE - pg_off); ··· 236 235 ctxt->direction = DMA_FROM_DEVICE; 237 236 ctxt->frmr = frmr; 238 237 pages_needed = min_t(int, pages_needed, xprt->sc_frmr_pg_list_len); 239 - read = min_t(int, pages_needed << PAGE_SHIFT, rs_length); 238 + read = min_t(int, (pages_needed << PAGE_SHIFT) - *page_offset, 239 + rs_length); 240 240 241 241 frmr->kva = page_address(rqstp->rq_arg.pages[pg_no]); 242 242 frmr->direction = DMA_FROM_DEVICE;
+1 -1
net/sunrpc/xprtrdma/transport.c
··· 270 270 271 271 xprt_clear_connected(xprt); 272 272 273 - rpcrdma_buffer_destroy(&r_xprt->rx_buf); 274 273 rpcrdma_ep_destroy(&r_xprt->rx_ep, &r_xprt->rx_ia); 274 + rpcrdma_buffer_destroy(&r_xprt->rx_buf); 275 275 rpcrdma_ia_close(&r_xprt->rx_ia); 276 276 277 277 xprt_rdma_free_addresses(xprt);
+6 -3
net/sunrpc/xprtrdma/verbs.c
··· 755 755 756 756 cancel_delayed_work_sync(&ep->rep_connect_worker); 757 757 758 - if (ia->ri_id->qp) { 758 + if (ia->ri_id->qp) 759 759 rpcrdma_ep_disconnect(ep, ia); 760 + 761 + rpcrdma_clean_cq(ep->rep_attr.recv_cq); 762 + rpcrdma_clean_cq(ep->rep_attr.send_cq); 763 + 764 + if (ia->ri_id->qp) { 760 765 rdma_destroy_qp(ia->ri_id); 761 766 ia->ri_id->qp = NULL; 762 767 } 763 768 764 - rpcrdma_clean_cq(ep->rep_attr.recv_cq); 765 769 rc = ib_destroy_cq(ep->rep_attr.recv_cq); 766 770 if (rc) 767 771 dprintk("RPC: %s: ib_destroy_cq returned %i\n", 768 772 __func__, rc); 769 773 770 - rpcrdma_clean_cq(ep->rep_attr.send_cq); 771 774 rc = ib_destroy_cq(ep->rep_attr.send_cq); 772 775 if (rc) 773 776 dprintk("RPC: %s: ib_destroy_cq returned %i\n",
+1
sound/pci/hda/patch_cirrus.c
··· 633 633 SND_PCI_QUIRK(0x106b, 0x5e00, "MacBookPro 11,2", CS4208_MBP11), 634 634 SND_PCI_QUIRK(0x106b, 0x7100, "MacBookAir 6,1", CS4208_MBA6), 635 635 SND_PCI_QUIRK(0x106b, 0x7200, "MacBookAir 6,2", CS4208_MBA6), 636 + SND_PCI_QUIRK(0x106b, 0x7b00, "MacBookPro 12,1", CS4208_MBP11), 636 637 {} /* terminator */ 637 638 }; 638 639
+1
sound/pci/hda/patch_realtek.c
··· 5306 5306 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), 5307 5307 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK), 5308 5308 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 5309 + SND_PCI_QUIRK(0x17aa, 0x2223, "ThinkPad T550", ALC292_FIXUP_TPT440_DOCK), 5309 5310 SND_PCI_QUIRK(0x17aa, 0x2226, "ThinkPad X250", ALC292_FIXUP_TPT440_DOCK), 5310 5311 SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC), 5311 5312 SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
+5 -1
sound/pci/hda/patch_sigmatel.c
··· 4520 4520 return err; 4521 4521 4522 4522 spec = codec->spec; 4523 - codec->power_save_node = 1; 4523 + /* enable power_save_node only for new 92HD89xx chips, as it causes 4524 + * click noises on old 92HD73xx chips. 4525 + */ 4526 + if ((codec->core.vendor_id & 0xfffffff0) != 0x111d7670) 4527 + codec->power_save_node = 1; 4524 4528 spec->linear_tone_beep = 0; 4525 4529 spec->gen.mixer_nid = 0x1d; 4526 4530 spec->have_spdif_mux = 1;
+4
sound/soc/au1x/db1200.c
··· 129 129 .cpu_dai_name = "au1xpsc_i2s.2", 130 130 .platform_name = "au1xpsc-pcm.2", 131 131 .codec_name = "wm8731.0-001b", 132 + .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF | 133 + SND_SOC_DAIFMT_CBM_CFM, 132 134 .ops = &db1200_i2s_wm8731_ops, 133 135 }; 134 136 ··· 148 146 .cpu_dai_name = "au1xpsc_i2s.3", 149 147 .platform_name = "au1xpsc-pcm.3", 150 148 .codec_name = "wm8731.0-001b", 149 + .dai_fmt = SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_NB_NF | 150 + SND_SOC_DAIFMT_CBM_CFM, 151 151 .ops = &db1200_i2s_wm8731_ops, 152 152 }; 153 153
+3 -3
sound/soc/codecs/rt5645.c
··· 519 519 RT5645_L_VOL_SFT + 1, RT5645_R_VOL_SFT + 1, 63, 0, adc_vol_tlv), 520 520 521 521 /* ADC Boost Volume Control */ 522 - SOC_DOUBLE_TLV("STO1 ADC Boost Gain", RT5645_ADC_BST_VOL1, 522 + SOC_DOUBLE_TLV("ADC Boost Capture Volume", RT5645_ADC_BST_VOL1, 523 523 RT5645_STO1_ADC_L_BST_SFT, RT5645_STO1_ADC_R_BST_SFT, 3, 0, 524 524 adc_bst_tlv), 525 - SOC_DOUBLE_TLV("STO2 ADC Boost Gain", RT5645_ADC_BST_VOL1, 526 - RT5645_STO2_ADC_L_BST_SFT, RT5645_STO2_ADC_R_BST_SFT, 3, 0, 525 + SOC_DOUBLE_TLV("Mono ADC Boost Capture Volume", RT5645_ADC_BST_VOL2, 526 + RT5645_MONO_ADC_L_BST_SFT, RT5645_MONO_ADC_R_BST_SFT, 3, 0, 527 527 adc_bst_tlv), 528 528 529 529 /* I2S2 function select */
+9 -7
sound/soc/codecs/rt5645.h
··· 39 39 #define RT5645_STO1_ADC_DIG_VOL 0x1c 40 40 #define RT5645_MONO_ADC_DIG_VOL 0x1d 41 41 #define RT5645_ADC_BST_VOL1 0x1e 42 - /* Mixer - D-D */ 43 42 #define RT5645_ADC_BST_VOL2 0x20 43 + /* Mixer - D-D */ 44 44 #define RT5645_STO1_ADC_MIXER 0x27 45 45 #define RT5645_MONO_ADC_MIXER 0x28 46 46 #define RT5645_AD_DA_MIXER 0x29 ··· 315 315 #define RT5645_STO1_ADC_R_BST_SFT 12 316 316 #define RT5645_STO1_ADC_COMP_MASK (0x3 << 10) 317 317 #define RT5645_STO1_ADC_COMP_SFT 10 318 - #define RT5645_STO2_ADC_L_BST_MASK (0x3 << 8) 319 - #define RT5645_STO2_ADC_L_BST_SFT 8 320 - #define RT5645_STO2_ADC_R_BST_MASK (0x3 << 6) 321 - #define RT5645_STO2_ADC_R_BST_SFT 6 322 - #define RT5645_STO2_ADC_COMP_MASK (0x3 << 4) 323 - #define RT5645_STO2_ADC_COMP_SFT 4 318 + 319 + /* ADC Boost Volume Control (0x20) */ 320 + #define RT5645_MONO_ADC_L_BST_MASK (0x3 << 14) 321 + #define RT5645_MONO_ADC_L_BST_SFT 14 322 + #define RT5645_MONO_ADC_R_BST_MASK (0x3 << 12) 323 + #define RT5645_MONO_ADC_R_BST_SFT 12 324 + #define RT5645_MONO_ADC_COMP_MASK (0x3 << 10) 325 + #define RT5645_MONO_ADC_COMP_SFT 10 324 326 325 327 /* Stereo2 ADC Mixer Control (0x26) */ 326 328 #define RT5645_STO2_ADC_SRC_MASK (0x1 << 15)
+3 -3
sound/soc/codecs/sgtl5000.c
··· 1376 1376 sgtl5000->micbias_resistor << SGTL5000_BIAS_R_SHIFT); 1377 1377 1378 1378 snd_soc_update_bits(codec, SGTL5000_CHIP_MIC_CTRL, 1379 - SGTL5000_BIAS_R_MASK, 1380 - sgtl5000->micbias_voltage << SGTL5000_BIAS_R_SHIFT); 1379 + SGTL5000_BIAS_VOLT_MASK, 1380 + sgtl5000->micbias_voltage << SGTL5000_BIAS_VOLT_SHIFT); 1381 1381 /* 1382 1382 * disable DAP 1383 1383 * TODO: ··· 1549 1549 else { 1550 1550 sgtl5000->micbias_voltage = 0; 1551 1551 dev_err(&client->dev, 1552 - "Unsuitable MicBias resistor\n"); 1552 + "Unsuitable MicBias voltage\n"); 1553 1553 } 1554 1554 } else { 1555 1555 sgtl5000->micbias_voltage = 0;
+1 -1
sound/soc/codecs/tas2552.c
··· 549 549 /* 550 550 * DAC digital volumes. From -7 to 24 dB in 1 dB steps 551 551 */ 552 - static DECLARE_TLV_DB_SCALE(dac_tlv, -7, 100, 0); 552 + static DECLARE_TLV_DB_SCALE(dac_tlv, -700, 100, 0); 553 553 554 554 static const char * const tas2552_din_source_select[] = { 555 555 "Muted",
+11 -8
sound/soc/codecs/tlv320aic3x.c
··· 1509 1509 snd_soc_write(codec, PGAL_2_LLOPM_VOL, DEFAULT_VOL); 1510 1510 snd_soc_write(codec, PGAR_2_RLOPM_VOL, DEFAULT_VOL); 1511 1511 1512 - /* Line2 to HP Bypass default volume, disconnect from Output Mixer */ 1513 - snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL); 1514 - snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL); 1515 - snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL); 1516 - snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL); 1517 - /* Line2 Line Out default volume, disconnect from Output Mixer */ 1518 - snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL); 1519 - snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL); 1512 + /* On tlv320aic3104, these registers are reserved and must not be written */ 1513 + if (aic3x->model != AIC3X_MODEL_3104) { 1514 + /* Line2 to HP Bypass default volume, disconnect from Output Mixer */ 1515 + snd_soc_write(codec, LINE2L_2_HPLOUT_VOL, DEFAULT_VOL); 1516 + snd_soc_write(codec, LINE2R_2_HPROUT_VOL, DEFAULT_VOL); 1517 + snd_soc_write(codec, LINE2L_2_HPLCOM_VOL, DEFAULT_VOL); 1518 + snd_soc_write(codec, LINE2R_2_HPRCOM_VOL, DEFAULT_VOL); 1519 + /* Line2 Line Out default volume, disconnect from Output Mixer */ 1520 + snd_soc_write(codec, LINE2L_2_LLOPM_VOL, DEFAULT_VOL); 1521 + snd_soc_write(codec, LINE2R_2_RLOPM_VOL, DEFAULT_VOL); 1522 + } 1520 1523 1521 1524 switch (aic3x->model) { 1522 1525 case AIC3X_MODEL_3X:
+4 -1
sound/soc/codecs/wm8962.c
··· 3760 3760 ret = snd_soc_register_codec(&i2c->dev, 3761 3761 &soc_codec_dev_wm8962, &wm8962_dai, 1); 3762 3762 if (ret < 0) 3763 - goto err_enable; 3763 + goto err_pm_runtime; 3764 3764 3765 3765 regcache_cache_only(wm8962->regmap, true); 3766 3766 ··· 3769 3769 3770 3770 return 0; 3771 3771 3772 + err_pm_runtime: 3773 + pm_runtime_disable(&i2c->dev); 3772 3774 err_enable: 3773 3775 regulator_bulk_disable(ARRAY_SIZE(wm8962->supplies), wm8962->supplies); 3774 3776 err: ··· 3780 3778 static int wm8962_i2c_remove(struct i2c_client *client) 3781 3779 { 3782 3780 snd_soc_unregister_codec(&client->dev); 3781 + pm_runtime_disable(&client->dev); 3783 3782 return 0; 3784 3783 } 3785 3784
+14 -5
sound/soc/dwc/designware_i2s.c
··· 131 131 132 132 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 133 133 for (i = 0; i < 4; i++) 134 - i2s_write_reg(dev->i2s_base, TOR(i), 0); 134 + i2s_read_reg(dev->i2s_base, TOR(i)); 135 135 } else { 136 136 for (i = 0; i < 4; i++) 137 - i2s_write_reg(dev->i2s_base, ROR(i), 0); 137 + i2s_read_reg(dev->i2s_base, ROR(i)); 138 138 } 139 139 } 140 140 141 141 static void i2s_start(struct dw_i2s_dev *dev, 142 142 struct snd_pcm_substream *substream) 143 143 { 144 - 144 + u32 i, irq; 145 145 i2s_write_reg(dev->i2s_base, IER, 1); 146 146 147 - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) 147 + if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 148 + for (i = 0; i < 4; i++) { 149 + irq = i2s_read_reg(dev->i2s_base, IMR(i)); 150 + i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x30); 151 + } 148 152 i2s_write_reg(dev->i2s_base, ITER, 1); 149 - else 153 + } else { 154 + for (i = 0; i < 4; i++) { 155 + irq = i2s_read_reg(dev->i2s_base, IMR(i)); 156 + i2s_write_reg(dev->i2s_base, IMR(i), irq & ~0x03); 157 + } 150 158 i2s_write_reg(dev->i2s_base, IRER, 1); 159 + } 151 160 152 161 i2s_write_reg(dev->i2s_base, CER, 1); 153 162 }
+9 -10
sound/soc/fsl/imx-ssi.c
··· 95 95 switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { 96 96 case SND_SOC_DAIFMT_I2S: 97 97 /* data on rising edge of bclk, frame low 1clk before data */ 98 - strcr |= SSI_STCR_TFSI | SSI_STCR_TEFS | SSI_STCR_TXBIT0; 98 + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSI | 99 + SSI_STCR_TEFS; 99 100 scr |= SSI_SCR_NET; 100 101 if (ssi->flags & IMX_SSI_USE_I2S_SLAVE) { 101 102 scr &= ~SSI_I2S_MODE_MASK; ··· 105 104 break; 106 105 case SND_SOC_DAIFMT_LEFT_J: 107 106 /* data on rising edge of bclk, frame high with data */ 108 - strcr |= SSI_STCR_TXBIT0; 107 + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP; 109 108 break; 110 109 case SND_SOC_DAIFMT_DSP_B: 111 110 /* data on rising edge of bclk, frame high with data */ 112 - strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0; 111 + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL; 113 112 break; 114 113 case SND_SOC_DAIFMT_DSP_A: 115 114 /* data on rising edge of bclk, frame high 1clk before data */ 116 - strcr |= SSI_STCR_TFSL | SSI_STCR_TXBIT0 | SSI_STCR_TEFS; 115 + strcr |= SSI_STCR_TXBIT0 | SSI_STCR_TSCKP | SSI_STCR_TFSL | 116 + SSI_STCR_TEFS; 117 117 break; 118 118 } 119 119 120 120 /* DAI clock inversion */ 121 121 switch (fmt & SND_SOC_DAIFMT_INV_MASK) { 122 122 case SND_SOC_DAIFMT_IB_IF: 123 - strcr |= SSI_STCR_TFSI; 124 - strcr &= ~SSI_STCR_TSCKP; 123 + strcr ^= SSI_STCR_TSCKP | SSI_STCR_TFSI; 125 124 break; 126 125 case SND_SOC_DAIFMT_IB_NF: 127 - strcr &= ~(SSI_STCR_TSCKP | SSI_STCR_TFSI); 126 + strcr ^= SSI_STCR_TSCKP; 128 127 break; 129 128 case SND_SOC_DAIFMT_NB_IF: 130 - strcr |= SSI_STCR_TFSI | SSI_STCR_TSCKP; 129 + strcr ^= SSI_STCR_TFSI; 131 130 break; 132 131 case SND_SOC_DAIFMT_NB_NF: 133 - strcr &= ~SSI_STCR_TFSI; 134 - strcr |= SSI_STCR_TSCKP; 135 132 break; 136 133 } 137 134
+2 -1
sound/synth/emux/emux_oss.c
··· 69 69 struct snd_seq_oss_reg *arg; 70 70 struct snd_seq_device *dev; 71 71 72 - if (snd_seq_device_new(emu->card, 0, SNDRV_SEQ_DEV_ID_OSS, 72 + /* using device#1 here for avoiding conflicts with OPL3 */ 73 + if (snd_seq_device_new(emu->card, 1, SNDRV_SEQ_DEV_ID_OSS, 73 74 sizeof(struct snd_seq_oss_reg), &dev) < 0) 74 75 return; 75 76
+1 -1
tools/perf/util/Build
··· 17 17 libperf-y += llvm-utils.o 18 18 libperf-y += parse-options.o 19 19 libperf-y += parse-events.o 20 + libperf-y += perf_regs.o 20 21 libperf-y += path.o 21 22 libperf-y += rbtree.o 22 23 libperf-y += bitmap.o ··· 104 103 105 104 libperf-y += scripting-engines/ 106 105 107 - libperf-$(CONFIG_PERF_REGS) += perf_regs.o 108 106 libperf-$(CONFIG_ZLIB) += zlib.o 109 107 libperf-$(CONFIG_LZMA) += lzma.o 110 108
+2
tools/perf/util/perf_regs.c
··· 6 6 SMPL_REG_END 7 7 }; 8 8 9 + #ifdef HAVE_PERF_REGS_SUPPORT 9 10 int perf_reg_value(u64 *valp, struct regs_dump *regs, int id) 10 11 { 11 12 int i, idx = 0; ··· 30 29 *valp = regs->cache_regs[id]; 31 30 return 0; 32 31 } 32 + #endif
+1
tools/perf/util/perf_regs.h
··· 2 2 #define __PERF_REGS_H 3 3 4 4 #include <linux/types.h> 5 + #include <linux/compiler.h> 5 6 6 7 struct regs_dump; 7 8